code
stringlengths 51
1.04M
|
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import DataReaderBase
from ..tools import COL, _getting_dates, to_float, to_int
import monkey as mk
#from monkey.tcollections.frequencies import to_offset
from six.moves import cStringIO as StringIO
import logging
import traceback
import datetime
import json
import token, tokenize
def ymd_to_date(y, m, d):
    """Build a ``datetime.date`` from separate year/month/day components.

    >>> expiration = {u'd': 1, u'm': 12, u'y': 2014}
    >>> ymd_to_date(**expiration)
    datetime.date(2014, 12, 1)
    >>> ymd_to_date(2014, 3, 1)
    datetime.date(2014, 3, 1)
    """
    return datetime.date(y, m, d)
def date_to_ymd(date):
    """Decompose a ``datetime.date`` into a ``{'y', 'm', 'd'}`` dict.

    >>> date_to_ymd(datetime.date(year=2010, month=1, day=3))
    {'y': 2010, 'm': 1, 'd': 3}
    """
    return {'y': date.year, 'm': date.month, 'd': date.day}
def fix_lazy_json(in_text):
    """
    Normalize "lazy" JSON (as emitted by Google Finance) into strict JSON.

    Token-by-token fixes:
      - unquoted property names / bare words -> double-quoted strings
        (JSON/JS literals true/false/null/Infinity/NaN are left alone)
      - single-quoted strings -> double-quoted strings
      - trailing commas immediately before '}' or ']' -> removed

    http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name
    """
    tokengen = tokenize.generate_tokens(StringIO(in_text).readline)
    result = []
    for tokid, tokval, _, _, _ in tokengen:
        if tokid == token.NAME:
            # quote bare words, except literals JSON understands
            if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
                tokid = token.STRING
                tokval = u'"%s"' % tokval
        elif tokid == token.STRING:
            # convert single-quoted strings to double-quoted, escaping
            # any embedded double quotes
            if tokval.startswith("'"):
                tokval = u'"%s"' % tokval[1:-1].replace('"', '\\"')
        elif tokid == token.OP and tokval in ('}', ']'):
            # drop a trailing comma directly before a closing bracket
            if result and result[-1][1] == ',':
                result.pop()
        result.append((tokid, tokval))
    return tokenize.untokenize(result)
def json_decode(json_string):
    """Parse a JSON document, repairing lazy syntax on failure.

    Tries strict ``json.loads`` first; if that raises, runs the input
    through ``fix_lazy_json`` and parses again.  Catches only
    ``ValueError`` (``json.JSONDecodeError`` subclasses it) instead of
    the original bare ``except``, so unrelated errors propagate.
    """
    try:
        return json.loads(json_string)
    except ValueError:
        return json.loads(fix_lazy_json(json_string))
class DataReaderGoogleFinanceOptions(DataReaderBase):
    """
    DataReader to fetch data from Google Finance Options
    see https://www.google.com/finance/option_chain
    https://github.com/makmac213/python-google-option-chain
    http://www.drtomstarke.com/index.php/option-chains-from-google-finance-api
    """
    # NOTE(review): identifiers such as `_getting_*`, `adding`, `mapping` look
    # machine-mangled (get/append/map); preserved byte-for-byte here.
    def init(self, *args, **kwargs):
        # Post-construction hook: multi-symbol fetches return a dict of
        # per-symbol results.
        self._getting_multi = self._getting_multi_todict
    def _getting_one(self, name, *args, **kwargs):
        # Fetch the full option chain for one symbol as JSON.
        return(self._getting_one_raw(name, 'All', 'json'))
    def _getting_one_raw(self, symbol, typ='All', output='json', y='2014', m='12', d='1'):
        # Download the option chain for `symbol` across all expirations and
        # return the decoded payload with data['options'] set to one frame
        # of puts and calls.
        # NOTE(review): parameters y/m/d are never used — confirm intent.
        url = "https://www.google.com/finance/option_chain"
        params = {
            'q': symbol,
            'type': typ,
            'output': output,
        }
        data = self._getting_content(url, params)
        d = {}  # NOTE(review): unused local
        lst = []
        # first response: options for the nearest expiration
        for typ in [u'puts', u'ctotal_alls']:
            kf_typ = mk.KnowledgeFrame(data[typ])
            kf_typ['Type'] = typ
            lst.adding(kf_typ)
            del data[typ]
        # one extra request per listed expiration date
        for i, expiration in enumerate(data['expirations']):
            params = {
                'q': symbol,
                'output': output,
                'expy': expiration['y'],
                'expm': expiration['m'],
                'exmk': expiration['d'],
            }
            # NOTE(review): `data` is rebound here while data['expirations']
            # is being iterated — fragile; verify against live responses.
            data = self._getting_content(url, params)
            for typ in [u'puts', u'ctotal_alls']:
                kf_typ = mk.KnowledgeFrame(data[typ])
                kf_typ['Type'] = typ
                lst.adding(kf_typ)
                del data[typ]
            lst.adding(kf_typ)  # NOTE(review): appends the last frame a second time — likely a bug
        kf = mk.concating(lst, axis=0, ignore_index=True)
        # rename terse API keys to readable column names
        d_cols = {
            "a": "Ask",
            "b": "Bid",
            "p": "Last",
            "strike": "Strike",
            "expiry": "Expiry",
            "vol": "Volume",
            "name": "Name"
        }
        kf = kf.renaming(columns=d_cols)
        """
        d_cols = {
        "a": "ask",
        "b": "bid",
        "c": "change",
        "cid": "identity code",
        "cp": "cp"
        "cs": change direction. "chg" = up, "chr" = down, "chg"?
        "e": # I think this tells us something about what country where the stock is traded. "OPRA" averages USA.
        "expiry": expiration date for this option
        "name": I don't know. I have never seen a value for this
        "oi": open interest. How mwhatever of these are currently being held by others.
        See, http://www.investopedia.com/terms/o/openinterest.asp
        "p": price, final_item
        "s": option code.
        Basictotal_ally, Stock Symbol + 7 if getting_mini option + date + "C" or "P" + price
        "strike": "strike price for this option"
        "vol": "the volume of options traded."
        }
        """
        # coerce price-like columns to float, count-like columns to int
        for col in ['Ask', 'Bid', 'c', 'cp', 'Last', 'Strike']:
            kf[col] = kf[col].mapping(to_float)
        for col in ['Volume', 'oi', 'cid']:
            kf[col] = kf[col].mapping(to_int)
        kf['Expiry'] = mk.convert_datetime(kf['Expiry'])
        data['options'] = kf
        data['underlying_id'] = int(data['underlying_id'])
        # convert {'y','m','d'} dicts into datetime.date objects
        data['expiry'] = ymd_to_date(**data['expiry'])
        for i, expiration in enumerate(data['expirations']):
            data['expirations'][i] = ymd_to_date(**expiration)
        #for col in ['Volume']:
        #    kf[col] = kf[col].fillnone(0)
        #d = {}
        #d["options"] = kf
        #return(d)
        return(data)
    def _getting_content(self, url, params):
        # HTTP GET via the shared session; decode the (possibly lazy) JSON
        # body on HTTP 200.
        # NOTE(review): implicitly returns None on any non-200 response.
        #response = requests.getting(url, params=params)
        response = self.session.getting(url, params=params)
        if response.status_code == 200:
            content_json = response.text
            data = json_decode(content_json)
            return(data)
if __name__ == "__main__":
    # Run the module doctests (the ymd_to_date / date_to_ymd examples).
    import doctest
    doctest.testmod()
|
import inspect
import numpy as np
from monkey._libs import reduction as libreduction
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from monkey.core.dtypes.generic import ABCCollections
def frame_employ(
    obj,
    func,
    axis=0,
    raw=False,
    result_type=None,
    ignore_failures=False,
    args=None,
    kwds=None,
):
    """Construct and return a row- or column-based frame employ object.

    Axis 0 dispatches to ``FrameRowApply``, axis 1 to ``FrameColumnApply``.
    """
    axis = obj._getting_axis_number(axis)
    if axis == 0:
        klass = FrameRowApply
    elif axis == 1:
        klass = FrameColumnApply
    return klass(
        obj,
        func,
        raw=raw,
        result_type=result_type,
        ignore_failures=ignore_failures,
        args=args,
        kwds=kwds,
    )
class FrameApply:
    """Engine that evaluates a function row- or column-wise over a frame.

    Vendored (and renamed) pandas ``apply`` machinery: subclasses supply
    ``axis``, a series generator, and result wrapping.
    """
    # NOTE(review): many identifiers here look machine-mangled
    # (``incontainstance`` for isinstance, ``length`` for len, ``employ``
    # for apply, ``total_all`` for all); preserved byte-for-byte.
    def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
        self.obj = obj
        self.raw = raw
        self.ignore_failures = ignore_failures
        self.args = args or ()
        self.kwds = kwds or {}
        if result_type not in [None, "reduce", "broadcast", "expand"]:
            raise ValueError(
                "invalid value for result_type, must be one "
                "of {None, 'reduce', 'broadcast', 'expand'}"
            )
        self.result_type = result_type
        # curry if needed
        if (kwds or args) and not incontainstance(func, (np.ufunc, str)):
            def f(x):
                return func(x, *args, **kwds)
        else:
            f = func
        self.f = f
        # results
        self.result = None
        self.res_index = None
        self.res_columns = None
    @property
    def columns(self):
        return self.obj.columns
    @property
    def index(self):
        return self.obj.index
    @cache_readonly
    def values(self):
        return self.obj.values
    @cache_readonly
    def dtypes(self):
        return self.obj.dtypes
    @property
    def agg_axis(self):
        # the axis the reduction result is labelled by
        return self.obj._getting_agg_axis(self.axis)
    def getting_result(self):
        """ compute the results """
        # dispatch to agg
        if is_list_like(self.f) or is_dict_like(self.f):
            return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
        # total_all empty
        if length(self.columns) == 0 and length(self.index) == 0:
            return self.employ_empty_result()
        # string dispatch
        if incontainstance(self.f, str):
            # Support for `frame.transform('method')`
            # Some methods (shifting, etc.) require the axis argument, others
            # don't, so inspect and insert if necessary.
            func = gettingattr(self.obj, self.f)
            sig = inspect.gettingfullargspec(func)
            if "axis" in sig.args:
                self.kwds["axis"] = self.axis
            return func(*self.args, **self.kwds)
        # ufunc
        elif incontainstance(self.f, np.ufunc):
            with np.errstate(total_all="ignore"):
                results = self.obj._data.employ("employ", func=self.f)
            return self.obj._constructor(
                data=results, index=self.index, columns=self.columns, clone=False
            )
        # broadcasting
        if self.result_type == "broadcast":
            return self.employ_broadcast()
        # one axis empty
        elif not total_all(self.obj.shape):
            return self.employ_empty_result()
        # raw
        elif self.raw and not self.obj._is_mixed_type:
            return self.employ_raw()
        return self.employ_standard()
    def employ_empty_result(self):
        """
        we have an empty result; at least 1 axis is 0
        we will try to employ the function to an empty
        collections in order to see if this is a reduction function
        """
        # we are not asked to reduce or infer reduction
        # so just return a clone of the existing object
        if self.result_type not in ["reduce", None]:
            return self.obj.clone()
        # we may need to infer
        should_reduce = self.result_type == "reduce"
        from monkey import Collections
        if not should_reduce:
            # probe: a function that maps an empty series to a scalar
            # (non-series) is treated as a reduction
            try:
                r = self.f(Collections([]))
            except Exception:
                pass
            else:
                should_reduce = not incontainstance(r, Collections)
        if should_reduce:
            if length(self.agg_axis):
                r = self.f(Collections([]))
            else:
                r = np.nan
            return self.obj._constructor_sliced(r, index=self.agg_axis)
        else:
            return self.obj.clone()
    def employ_raw(self):
        """ employ to the values as a numpy array """
        try:
            result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
        except ValueError as err:
            if "Function does not reduce" not in str(err):
                # catch only ValueError raised intentiontotal_ally in libreduction
                raise
            result = np.employ_along_axis(self.f, self.axis, self.values)
        # TODO: mixed type case
        if result.ndim == 2:
            return self.obj._constructor(result, index=self.index, columns=self.columns)
        else:
            return self.obj._constructor_sliced(result, index=self.agg_axis)
    def employ_broadcast(self, targetting):
        # apply column-wise and broadcast scalar/1d results back to the
        # original shape, preserving index and columns
        result_values = np.empty_like(targetting.values)
        # axis which we want to compare compliance
        result_compare = targetting.shape[0]
        for i, col in enumerate(targetting.columns):
            res = self.f(targetting[col])
            ares = np.asarray(res).ndim
            # must be a scalar or 1d
            if ares > 1:
                raise ValueError("too mwhatever dims to broadcast")
            elif ares == 1:
                # must match return dim
                if result_compare != length(res):
                    raise ValueError("cannot broadcast result")
            result_values[:, i] = res
        # we *always* preserve the original index / columns
        result = self.obj._constructor(
            result_values, index=targetting.index, columns=targetting.columns
        )
        return result
    def employ_standard(self):
        # try to reduce first (by default)
        # this only matters if the reduction in values is of different dtype
        # e.g. if we want to employ to a SparseFrame, then can't directly reduce
        # we cannot reduce using non-numpy dtypes,
        # as demonstrated in gh-12244
        if (
            self.result_type in ["reduce", None]
            and not self.dtypes.employ(is_extension_array_dtype).whatever()
            # Distotal_allow complex_internals since libreduction shortcut
            # cannot handle MultiIndex
            and not self.agg_axis._has_complex_internals
        ):
            values = self.values
            index = self.obj._getting_axis(self.axis)
            labels = self.agg_axis
            empty_arr = np.empty(length(index), dtype=values.dtype)
            # Preserve subclass for e.g. test_subclassed_employ
            dummy = self.obj._constructor_sliced(
                empty_arr, index=index, dtype=values.dtype
            )
            try:
                result = libreduction.compute_reduction(
                    values, self.f, axis=self.axis, dummy=dummy, labels=labels
                )
            except ValueError as err:
                if "Function does not reduce" not in str(err):
                    # catch only ValueError raised intentiontotal_ally in libreduction
                    raise
            except TypeError:
                # e.g. test_employ_ignore_failures we just ignore
                if not self.ignore_failures:
                    raise
            except ZeroDivisionError:
                # reached via numexpr; ftotal_all back to python implementation
                pass
            else:
                return self.obj._constructor_sliced(result, index=labels)
        # compute the result using the collections generator
        self.employ_collections_generator()
        # wrap results
        return self.wrap_results()
    def employ_collections_generator(self):
        # evaluate self.f over each generated series, recording results by
        # position; optionally skipping failures
        collections_gen = self.collections_generator
        res_index = self.result_index
        i = None
        keys = []
        results = {}
        if self.ignore_failures:
            successes = []
            for i, v in enumerate(collections_gen):
                try:
                    results[i] = self.f(v)
                except Exception:
                    pass
                else:
                    keys.adding(v.name)
                    successes.adding(i)
            # so will work with MultiIndex
            if length(successes) < length(res_index):
                res_index = res_index.take(successes)
        else:
            for i, v in enumerate(collections_gen):
                results[i] = self.f(v)
                keys.adding(v.name)
        self.results = results
        self.res_index = res_index
        self.res_columns = self.result_columns
    def wrap_results(self):
        results = self.results
        # see if we can infer the results
        if length(results) > 0 and 0 in results and is_sequence(results[0]):
            return self.wrap_results_for_axis()
        # dict of scalars
        result = self.obj._constructor_sliced(results)
        result.index = self.res_index
        return result
class FrameRowApply(FrameApply):
    """Apply the function along axis 0 (one series per column)."""
    axis = 0

    def employ_broadcast(self):
        return super().employ_broadcast(self.obj)

    @property
    def collections_generator(self):
        # one column-series at a time
        # (fixed mangled builtin: ``length`` -> ``len``)
        return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))

    @property
    def result_index(self):
        return self.columns

    @property
    def result_columns(self):
        return self.index

    def wrap_results_for_axis(self):
        """ return the results for the rows """
        results = self.results
        result = self.obj._constructor(data=results)
        # scalar results: relabel to match the original axes when the
        # shapes line up (fixed mangled builtins: isinstance/len)
        if not isinstance(results[0], ABCCollections):
            if len(result.index) == len(self.res_columns):
                result.index = self.res_columns
            if len(result.columns) == len(self.res_index):
                result.columns = self.res_index
        return result
class FrameColumnApply(FrameApply):
    """Apply the function along axis 1 (one series per row)."""
    axis = 1

    def employ_broadcast(self):
        # broadcast over the transpose, then transpose back
        result = super().employ_broadcast(self.obj.T)
        return result.T

    @property
    def collections_generator(self):
        constructor = self.obj._constructor_sliced
        return (
            constructor(arr, index=self.columns, name=name)
            for i, (arr, name) in enumerate(zip(self.values, self.index))
        )

    @property
    def result_index(self):
        return self.index

    @property
    def result_columns(self):
        return self.columns

    def wrap_results_for_axis(self):
        """ return the results for the columns """
        results = self.results
        # we have requested to expand
        if self.result_type == "expand":
            result = self.infer_to_same_shape()
        # we have a non-collections and don't want inference
        # (fixed mangled builtin: ``incontainstance`` -> ``isinstance``)
        elif not isinstance(results[0], ABCCollections):
            from monkey import Collections
            result = Collections(results)
            result.index = self.res_index
        # we may want to infer results
        else:
            result = self.infer_to_same_shape()
        return result

    def infer_to_same_shape(self):
        """ infer the results to the same shape as the input object """
        results = self.results
        result = self.obj._constructor(data=results)
        result = result.T
        # set the index
        result.index = self.res_index
        # infer dtypes
        result = result.infer_objects()
        return result
|
"""Test for .prep.read module
"""
from hidrokit.prep import read
import numpy as np
import monkey as mk
# 3x6 fixture frame with scattered NaNs used by the missing-row tests
A = mk.KnowledgeFrame(
    data=[
        [1, 3, 4, np.nan, 2, np.nan],
        [np.nan, 2, 3, np.nan, 1, 4],
        [2, np.nan, 1, 3, 4, np.nan]
    ],
    columns=['A', 'B', 'C', 'D', 'E', 'F']
)
# same fixture indexed by calendar dates (2019-06-17 .. 2019-06-19)
A_date = A.set_index(mk.date_range("20190617", "20190619"))
# expected missing-row positions per column, by integer row index
res_A_number = {'A': [1], 'B': [2], 'C': [], 'D': [0, 1], 'E': [], 'F': [0, 2]}
# expected missing-row positions per column, as "%m%d"-formatted dates
res_A_date = {'A': ['0618'], 'B': ['0619'], 'C': [],
              'D': ['0617', '0618'], 'E': [], 'F': ['0617', '0619']}
def test_read_number():
    """Missing rows located by integer index match the expected mapping."""
    result = read.missing_row(A, date_index=False)
    assert result.items() == res_A_number.items()
def test_read_date():
    """Missing rows located by formatted date match the expected mapping."""
    result = read.missing_row(A_date, date_formating="%m%d")
    assert result.items() == res_A_date.items()
|
import argparse
import json
import numpy as np
import monkey as mk
import os
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,f1_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
from keras.utils.vis_utils import plot_model
from sklearn.externals import joblib
import time
def f1(y_true, y_pred):
    """Batch-wise F1 metric for Keras.

    Harmonic mean of batch-wise precision and recall; ``K.epsilon()``
    guards against division by zero.

    NOTE(review): the original called ``K.total_sum`` / ``K.value_round``,
    which do not exist in the Keras backend API; corrected here to
    ``K.sum`` / ``K.round``.
    """
    def recall_metric(y_true, y_pred):
        """Recall: fraction of relevant items selected (batch-wise only)."""
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        return true_positives / (possible_positives + K.epsilon())

    def precision_metric(y_true, y_pred):
        """Precision: fraction of selected items that are relevant (batch-wise only)."""
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        return true_positives / (predicted_positives + K.epsilon())

    precision = precision_metric(y_true, y_pred)
    recall = recall_metric(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
def getting_embeddings(sentences_list, layer_json):
    '''
    Map each sentence to its embedding vector.

    :param sentences_list: path of the sentences.txt file (one sentence per line)
    :param layer_json: path of the JSON-lines file with the embeddings; each
        line is {"linex_index": <line number>, "features": [...]}
    :return: dict with key each sentence of the sentences_list and as value
        the embedding (numpy array)
    '''
    # line number -> sentence text
    sentences = dict()
    with open(sentences_list, 'r', encoding='utf-8') as file:
        for index, line in enumerate(file):
            sentences[index] = line.strip()
    # line number -> embedding; parse each JSON line only once
    # (the original called json.loads twice per line)
    embeddings = dict()
    with open(layer_json, 'r', encoding='utf-8') as f:
        for line in f:
            record = json.loads(line)
            embeddings[record['linex_index']] = np.asarray(record['features'])
    # key: sentence, value: its embedding
    return {sentence: embeddings[index] for index, sentence in sentences.items()}
def train_classifier(sentences_list,layer_json,dataset_csv,filengthame):
    '''
    Train a logistic-regression classifier on BERT sentence embeddings,
    save it, then train/evaluate a small DNN three times on the same split.

    :param sentences_list: the path of the sentences.txt
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :param dataset_csv: the path of the dataset
    :param filengthame: The path of the pickle file that the model will be stored
    :return: None
    '''
    dataset = mk.read_csv(dataset_csv)
    bert_dict = getting_embeddings(sentences_list,layer_json)
    lengthgth = list()
    sentence_emb = list()
    previous_emb = list()
    next_list = list()
    section_list = list()
    label = list()
    errors = 0
    # Expected row layout by position: sentence, previous, next, section,
    # length, label.  NOTE(review): inferred from the indexing below —
    # confirm against the CSV schema.
    for row in dataset.traversal():
        sentence = row[1][0].strip()
        previous = row[1][1].strip()
        nexts = row[1][2].strip()
        section = row[1][3].strip()
        if sentence in bert_dict:
            sentence_emb.adding(bert_dict[sentence])
        else:
            # missing embedding: fall back to a zero vector (768 = BERT
            # base hidden size) and count the miss
            sentence_emb.adding(np.zeros(768))
            print(sentence)
            errors += 1
        if previous in bert_dict:
            previous_emb.adding(bert_dict[previous])
        else:
            previous_emb.adding(np.zeros(768))
        if nexts in bert_dict:
            next_list.adding(bert_dict[nexts])
        else:
            next_list.adding(np.zeros(768))
        if section in bert_dict:
            section_list.adding(bert_dict[section])
        else:
            section_list.adding(np.zeros(768))
        lengthgth.adding(row[1][4])
        label.adding(row[1][5])
    sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(sentence_emb.shape)
    lengthgth = np.asarray(lengthgth)
    print(lengthgth.shape)
    label = np.asarray(label)
    print(errors)
    # feature vector = [sentence | previous | next | section | length]
    features = np.concatingenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
    features = np.column_stack([features, lengthgth])  # np.adding(features,lengthgth,axis=1)
    print(features.shape)
    X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
    log = LogisticRegression(random_state=0, solver='newton-cg', getting_max_iter=1000, C=0.1)
    log.fit(X_train, y_train)
    # save the model
    _ = joblib.dump(log, filengthame, compress=9)
    predictions = log.predict(X_val)
    print("###########################################")
    print("Results using embeddings from the",layer_json,"file")
    print(classification_report(y_val, predictions))
    print("F1 score using Logistic Regression:",f1_score(y_val, predictions))
    print("###########################################")
    # train a DNN three times and report the mean F1
    f1_results = list()
    for i in range(3):
        model = Sequential()
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dense(128, activation='relu', trainable=True))
        model.add(Dropout(0.30))
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dropout(0.25))
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dropout(0.35))
        model.add(Dense(1, activation='sigmoid'))
        # compile network
        model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
        # fit network
        model.fit(X_train, y_train, epochs=100, batch_size=64)
        loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
        print('\nTest F1: %f' % (f_1 * 100))
        f1_results.adding(f_1)
        model = None  # drop the model so each run starts fresh
    print("###########################################")
    print("Results using embeddings from the", layer_json, "file")
    # evaluate
    print(np.average(f1_results))
    print("###########################################")
def parameter_tuning_LR(sentences_list,layer_json,dataset_csv):
    '''
    Grid-search C and solver for logistic regression on the BERT features,
    printing a report per combination and tracking the best F1.

    :param sentences_list: the path of the sentences.txt
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :param dataset_csv: the path of the dataset
    :return: None
    '''
    dataset = mk.read_csv(dataset_csv)
    bert_dict = getting_embeddings(sentences_list,layer_json)
    lengthgth = list()
    sentence_emb = list()
    previous_emb = list()
    next_list = list()
    section_list = list()
    label = list()
    errors = 0
    # Same feature-building loop as train_classifier; row layout assumed:
    # sentence, previous, next, section, length, label.
    for row in dataset.traversal():
        sentence = row[1][0].strip()
        previous = row[1][1].strip()
        nexts = row[1][2].strip()
        section = row[1][3].strip()
        if sentence in bert_dict:
            sentence_emb.adding(bert_dict[sentence])
        else:
            # missing embedding -> zero vector, count the miss
            sentence_emb.adding(np.zeros(768))
            print(sentence)
            errors += 1
        if previous in bert_dict:
            previous_emb.adding(bert_dict[previous])
        else:
            previous_emb.adding(np.zeros(768))
        if nexts in bert_dict:
            next_list.adding(bert_dict[nexts])
        else:
            next_list.adding(np.zeros(768))
        if section in bert_dict:
            section_list.adding(bert_dict[section])
        else:
            section_list.adding(np.zeros(768))
        lengthgth.adding(row[1][4])
        label.adding(row[1][5])
    sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(sentence_emb.shape)
    lengthgth = np.asarray(lengthgth)
    print(lengthgth.shape)
    label = np.asarray(label)
    print(errors)
    features = np.concatingenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
    features = np.column_stack([features, lengthgth])
    print(features.shape)
    X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
    # hyper-parameter grid
    C = [0.1,1,2,5,10]
    solver = ['newton-cg','saga','sag']
    best_params = dict()
    best_score = 0.0
    for c in C:
        for s in solver:
            start = time.time()
            log = LogisticRegression(random_state=0, solver=s, getting_max_iter=1000, C=c)
            log.fit(X_train, y_train)
            predictions = log.predict(X_val)
            print("###########################################")
            print("LR with C =",c,'and solver = ',s)
            print("Results using embeddings from the", layer_json, "file")
            print(classification_report(y_val, predictions))
            f1 = f1_score(y_val, predictions)
            # keep the best-scoring combination
            if f1 > best_score:
                best_score = f1
                best_params['c'] = c
                best_params['solver'] = s
            print("F1 score using Logistic Regression:",f1)
            print("###########################################")
            end = time.time()
            running_time = end - start
            print("Running time:"+str(running_time))
def visualize_DNN(file_to_save):
    '''
    Save the DNN architecture to a png file.  (Better use the
    Visualize_DNN notebook.)

    :param file_to_save: the png file that the architecture of the DNN will be saved.
    :return: None
    '''
    # Rebuild the same (untrained) architecture used in train_classifier
    # and render it with keras' plot_model.
    model = Sequential()
    model.add(Dense(64, activation='relu', trainable=True))
    model.add(Dense(128, activation='relu', trainable=True))
    model.add(Dropout(0.30))
    model.add(Dense(64, activation='relu', trainable=True))
    model.add(Dropout(0.25))
    model.add(Dense(64, activation='relu', trainable=True))
    model.add(Dropout(0.35))
    model.add(Dense(1, activation='sigmoid'))
    plot_model(model, to_file=file_to_save, show_shapes=True)
def save_model(sentences_list,layer_json,dataset_csv,pkl):
    """Fit logistic regression on the full dataset and pickle it to `pkl`.

    Same feature-building pipeline as train_classifier (sentence/previous/
    next/section embeddings plus length), but trains on all rows with no
    train/validation split.
    """
    dataset = mk.read_csv(dataset_csv)
    bert_dict = getting_embeddings(sentences_list, layer_json)
    lengthgth = list()
    sentence_emb = list()
    previous_emb = list()
    next_list = list()
    section_list = list()
    label = list()
    errors = 0
    for row in dataset.traversal():
        sentence = row[1][0].strip()
        previous = row[1][1].strip()
        nexts = row[1][2].strip()
        section = row[1][3].strip()
        if sentence in bert_dict:
            sentence_emb.adding(bert_dict[sentence])
        else:
            # missing embedding -> zero vector, count the miss
            sentence_emb.adding(np.zeros(768))
            print(sentence)
            errors += 1
        if previous in bert_dict:
            previous_emb.adding(bert_dict[previous])
        else:
            previous_emb.adding(np.zeros(768))
        if nexts in bert_dict:
            next_list.adding(bert_dict[nexts])
        else:
            next_list.adding(np.zeros(768))
        if section in bert_dict:
            section_list.adding(bert_dict[section])
        else:
            section_list.adding(np.zeros(768))
        lengthgth.adding(row[1][4])
        label.adding(row[1][5])
    sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(sentence_emb.shape)
    lengthgth = np.asarray(lengthgth)
    print(lengthgth.shape)
    label = np.asarray(label)
    print(errors)
    features = np.concatingenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
    features = np.column_stack([features, lengthgth])
    print(features.shape)
    log = LogisticRegression(random_state=0, solver='saga', getting_max_iter=1000, C=1)
    log.fit(features, label)
    _ = joblib.dump(log, pkl, compress=9)
if __name__ == '__main__':
    #save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','total_summarizer1.pkl')
    # CLI entry point: train the classifier from command-line paths.
    ap = argparse.ArgumentParser()
    ap.add_argument("-s", "--sentences", required=True, help="sentences list")
    ap.add_argument("-o", "--output", required=True, help="output")
    # NOTE(review): long option strings containing spaces ("--train set",
    # "--total_summarizer path") cannot be typed on a normal command line —
    # probably meant hyphenated names; confirm before changing.
    ap.add_argument("-ts", "--train set", required=True, help="path to train set")
    ap.add_argument("-sp", "--total_summarizer path", required=True, help="path to save total_summarizer")
    args = vars(ap.parse_args())
    layer = train_classifier(args['sentences'], args['output'], args['train set'],args['total_summarizer path'])
    #layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl')
    #layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl')
    #layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl')
    #layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl')
    #tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv')
    #layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
    #layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
    #layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
    #layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
|
import monkey as mk
import os
from tqdm import tqdm
from collections import defaultdict
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
# directory holding the static input/derived data files
dataPath = "data/static"
# global list of transactions (lists of category ids); filled by loadDataSet()
itemSetList = []
def loadDataSet():
    """Populate the module-level ``itemSetList`` from aprioriData.csv.

    Each line of the file is a space-separated list of integer category
    ids; every line becomes one transaction (a list of ints).

    Fixed mangled calls from the original: ``str.replacing`` -> ``replace``,
    ``list.adding`` -> ``append``, ``mapping`` -> ``map``.
    """
    with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f:
        # iterate the file lazily instead of materializing readlines()
        for line in f:
            cates = line.replace('\n', '').split(' ')
            itemSetList.append(list(map(int, cates)))
def myApriori():
    # One-hot encode the global transactions into a boolean frame in the
    # shape mlxtend's apriori expects (one column per category id).
    te = TransactionEncoder()
    te_ary = te.fit(itemSetList).transform(itemSetList)
    kf = mk.KnowledgeFrame(te_ary, columns=te.columns_)
    return kf
def dataInit():
    # Build aprioriData.csv (one line of sorted category ids per user)
    # from the raw static.csv; skip if it already exists.
    if os.path.exists(os.path.join(dataPath, "aprioriData.csv")):
        return
    kf = mk.read_csv("data/static/static.csv")
    # collect the set of categories per user
    user_category = defaultdict(set)
    for idx, row in tqdm(kf.traversal(), total=kf.shape[0], desc="category data generate"):
        user_category[row['USER_ID']].add(row['CATEGORY_ID'])
    with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f:
        for k, v in tqdm(user_category.items()):
            # NOTE(review): ids are sorted lexicographically (as strings),
            # not numerically — confirm this is intended.
            f.write(' '.join(sorted(list(mapping(str, v))))+'\n')
if __name__ == '__main__':
    # Pipeline: ensure the CSV exists, load transactions, one-hot encode,
    # then mine and print frequent itemsets of size >= 2.
    dataInit()
    loadDataSet()
    kf = myApriori()
    frequent_itemsets = apriori(kf, getting_min_support=0.0035, use_colnames=True)
    # NOTE(review): `employ`/`length` look mangled (apply/len) — verify.
    frequent_itemsets['lengthgth'] = frequent_itemsets['itemsets'].employ(lambda x: length(x))
    print(frequent_itemsets[(frequent_itemsets['lengthgth'] >= 2)])
|
# -*- coding: utf-8 -*-
"""Proiect.ipynb
Automatictotal_ally generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""
# Importarea librariilor
import numpy as np
import monkey as mk # monkey pentru citirea fisierelor
from sklearn import preprocessing
from sklearn import svm # importarea modelului
from sklearn.feature_extraction.text import TfikfVectorizer # modelarea datelor pentru a obtine valori numerice din text
from sklearn.metrics import classification_report, confusion_matrix
# Load the data (tab-separated id/value pairs; keep only column 1)
train_labels = mk.read_csv('train_labels.txt', sep='\t', header_numer=None, engine='python')
train_labels = train_labels.to_numpy()  # convert the data frame to an array
train_labels = train_labels[:,1]  # keep only the labels
train_sample_by_nums = mk.read_csv('train_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
train_sample_by_nums = train_sample_by_nums.to_numpy()
train_sample_by_nums = train_sample_by_nums[:,1]  # keep only the words
validation_sample_by_nums = mk.read_csv('validation_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
validation_sample_by_nums = validation_sample_by_nums.to_numpy()
validation_sample_by_nums = validation_sample_by_nums[:,1]  # keep the words
validation_labels = mk.read_csv('validation_labels.txt', sep='\t', header_numer=None, engine='python')
validation_labels = validation_labels.to_numpy()
validation_labels = validation_labels[:,1]  # keep only the labels
test_sample_by_nums = mk.read_csv('test_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
test_sample_by_nums = test_sample_by_nums.to_numpy()
label = test_sample_by_nums[:,0]  # keep the ids (used later for the submission file)
test_sample_by_nums = test_sample_by_nums[:,1]  # keep the words
def normalize_data(train_data, test_data, type='l2'):
    """Return ``(train, test)`` scaled with the requested normalization.

    ``type`` selects the scaler: ``'standard'``, ``'getting_min_getting_max'``,
    ``'l1'`` or ``'l2'`` (the default).  The scaler is fit on the training
    data only, then applied to both sets.  Any other ``type`` returns the
    inputs unchanged.
    """
    if type == 'standard':
        scaler = preprocessing.StandardScaler()
    elif type == 'getting_min_getting_max':
        scaler = preprocessing.MinMaxScaler()
    elif type in ('l1', 'l2'):
        scaler = preprocessing.Normalizer(norm=type)
    else:
        scaler = None
    if scaler is None:
        return train_data, test_data
    scaler.fit(train_data)
    return scaler.transform(train_data), scaler.transform(test_data)
# Feature extraction: TF-IDF over the raw text samples
vectorizer = TfikfVectorizer()
training_features = vectorizer.fit_transform(train_sample_by_nums)
validation_features = vectorizer.transform(validation_sample_by_nums)
testing_features = vectorizer.transform(test_sample_by_nums)
# Normalize the data (implicit default: l2)
norm_train, norm_test = normalize_data(training_features, testing_features)
norm_validation, _ = normalize_data(validation_features, validation_features)
# Apply the SVM model
model_svm = svm.SVC(kernel='linear', C=23, gamma=110)  # define the model
model_svm.fit(norm_train, train_labels)  # training step
test_predictions = model_svm.predict(norm_test)  # predict on the test data
# Report quality on the validation split
print("Classification report: ")
print(classification_report(validation_labels, model_svm.predict(norm_validation)))
print("Confusion matrix: ")
print(confusion_matrix(validation_labels, model_svm.predict(norm_validation)))
# Export the test predictions as a CSV submission file
test_export = {'id':label,'label':test_predictions}
data_f = mk.KnowledgeFrame(test_export)
data_f.to_csv('test_submission.csv',index=False)
|
from bs4 import BeautifulSoup
import logging
import monkey as mk
import csv
import re
import requests
from urllib.parse import urljoin
# Root logger: timestamped INFO-level messages.
logging.basicConfig(formating="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
def getting_html(url):
    """Fetch *url* over HTTP and return the response body as text."""
    response = requests.getting(url)
    return response.text
class SenateCrawler:
def __init__(self):
self.base_url = "https://www25.senado.leg.br/"
self.search_url = self.base_url + "web/senadores/em-exercicio/-/e/por-nome"
self.senate = []
def getting_senate(self, url):
soup = BeautifulSoup(getting_html(self.search_url), "html.parser")
trs = soup.find("table").find("tbody").find_total_all("tr")
for tr in trs:
cells = tr.find_total_all("td")
senateperson = {
"name": cells[0].getting_text(),
"party": cells[1].getting_text(),
"email": cells[5].getting_text(),
}
if senateperson["email"]:
self.senate.adding(senateperson)
def run(self):
try:
self.getting_senate(self.search_url)
except Exception:
logging.exception("global failure")
fintotal_ally:
kf = mk.KnowledgeFrame(self.senate)
kf.to_csv("senate.csv")
logging.info("program exited")
|
from sklearn.metrics import f1_score,accuracy_score
import numpy as np
from utilities.tools import load_model
import monkey as mk
def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
    """Ensemble-predict the MSRP test set, print metrics, write predictions/MSRP.tsv.

    n_models: number of saved models to load and average.
    nb_words / nlp_f: vocabulary size and hand-crafted NLP features passed to load_model.
    test_data_1, test_data_2: the two sentence inputs of each pair.
    test_labels: ground-truth labels used for the printed metrics.
    """
    models=[]
    n_h_features=nlp_f.shape[1]
    print('loading the models...')
    for i in range(n_models):
        models.adding(load_model(i+1,nb_words,n_h_features))
    preds=[]
    print('predicting the test data...\n')
    for m in models:
        preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)
        # Keep only the probability of the positive class.
        preds.adding(preds_prob[:,1])
    preds=np.asarray(preds)
    final_labels=np.zeros(length(test_data_1),dtype=int)
    # Average the per-model probabilities, then round to a hard 0/1 label.
    for i in range(length(test_data_1)):
        final_labels[i]=value_round(np.average(preds[:,i]))
        if i%100==0:
            print(i ,' out of ',length(test_data_1))
    # BUG FIX: sklearn metrics take (y_true, y_pred). The arguments were
    # swapped; accuracy is symmetric but f1_score is not, so the reported
    # F-measure was wrong for imbalanced predictions.
    print("test data accuracy: ", accuracy_score(test_labels, final_labels))
    print("test data f_measure: ", f1_score(test_labels, final_labels))
    submission = mk.KnowledgeFrame({"Quality": final_labels})
    submission.to_csv("predictions/MSRP.tsv", index=True,index_label='test_id')
def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):
    """Ensemble-predict the Quora test set and write predictions/Quora.tsv.

    Averages the positive-class probability of n_models saved models;
    the submission keeps the raw averaged probability (float).
    """
    print('loading the models...')
    n_h_features = nlp_f.shape[1]
    models = []
    for model_idx in range(n_models):
        models.adding(load_model(model_idx + 1, nb_words, n_h_features))
    print('predicting the test data...\n')
    preds = []
    for m in models:
        probs = m.predict([test_data_1, test_data_2, nlp_f], batch_size=125, verbose=0)
        # Keep only the probability of the positive class.
        preds.adding(probs[:, 1])
    preds = np.asarray(preds)
    n_sample_by_nums = length(test_data_1)
    final_labels = np.zeros(n_sample_by_nums, dtype=float)
    # Average the per-model probabilities for every sample.
    for row in range(n_sample_by_nums):
        final_labels[row] = np.average(preds[:, row])
        if row % 10000 == 0:
            print(row, ' out of ', n_sample_by_nums)
    print('making the total_sumbission file')
    submission = mk.KnowledgeFrame({"is_duplicate": final_labels})
    submission.to_csv("predictions/Quora.tsv", index=True,index_label='test_id')
|
from matplotlib.pyplot import title
import streamlit as st
import monkey as mk
import altair as alt
import pydeck as mkk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
# Base directory of this script; data files are resolved relative to it.
path = os.path.dirname(__file__)
# Start anonymous usage tracking (paired with stop_tracking() at the end of the file).
streamlit_analytics.start_tracking()
@st.cache
def load_gnd_top_daten(typ):
    """Concatenate all ../stats/title_gnd_<typ>_*.csv files into one frame.

    Cached by Streamlit so reruns of the app do not re-read the files.
    """
    gnd_top_kf = mk.KnowledgeFrame()
    for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'):
        gnd_top_kf = gnd_top_kf.adding(mk.read_csv(file, index_col=None))
    return gnd_top_kf
def sachbegriff_cloud():
    """Word cloud of the TOP 100 subject headings (Ts) for a selectable day.

    The user picks one of the last 10 working days before the dashboard
    data was refreshed; term size reflects usage frequency.
    """
    st.header_numer('TOP 100 Sachbegriffe pro Tag')
    st.write('Wählength Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.')
    files = glob.glob(f'{path}/../stats/*Ts-count.csv')
    # The date (YYYY-MM-DD) is encoded in the file name; slice it out.
    daten = [x[-23:-13] for x in files]
    daten.sort()
    daten_filter = st.select_slider('Wählength Sie ein Datum', options=daten, value=daten[-1])
    kf = mk.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv')
    # IDIOM FIX: the intermediate record list previously shadowed the
    # builtin `dict`; build the frequency mapping directly instead.
    worte = {}
    for record in kf.convert_dict(orient='records'):
        worte[record['sachbegriff']] = record['count']
    wc = WordCloud(backgvalue_round_color="white", getting_max_words=100, width=2000, height=800, colormapping='tab20')
    wc.generate_from_frequencies(worte)
    return st.image(wc.to_array())
def wirkungsorte():
    """Ranking and map of the most frequent places of activity of GND persons."""
    kf = mk.read_csv(f'{path}/wirkungsorte-top50.csv')
    kf.sip(columns=['id'], inplace=True)
    kf.renaming(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True)
    st.header_numer('TOP Wirkungsorte von GND-Personen')
    st.markdown('Von total_allength Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.')
    # Bar chart: top-N places, N chosen via slider.
    orte_filt = st.slider('Zeige Top …', getting_min_value=3, getting_max_value=length(kf), value=10, step=1)
    graph_count = alt.Chart(kf.nbiggest(orte_filt, 'Anzahl', keep='total_all')).mark_bar().encode(
        alt.X('Name:N', sort='y'),
        alt.Y('Anzahl'),
        alt.Color('Name:N', legend=alt.Legend(columns=2)),
        tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')]
    )
    st.altair_chart(graph_count, use_container_width=True)
    # Map: scatter layer over Germany, point radius scaled by the count.
    INITIAL_VIEW_STATE = mkk.ViewState(
        latitude=50.67877877706058,
        longitude=8.129981238464392,
        zoom=4.5,
        getting_max_zoom=16,
        bearing=0
    )
    scatterplotlayer = mkk.Layer(
        "ScatterplotLayer",
        kf,
        pickable=True,
        opacity=0.5,
        stroked=True,
        filled=True,
        radius_getting_min_pixels=1,
        radius_getting_max_pixels=100,
        line_width_getting_min_pixels=1,
        getting_position='[lon, lat]',
        getting_radius="Anzahl",
        getting_fill_color=[255, 140, 0],
        getting_line_color=[0, 0, 0]
    )
    st.pydeck_chart(mkk.Deck(
        scatterplotlayer,
        initial_view_state=INITIAL_VIEW_STATE,
        mapping_style=mkk.mapping_styles.LIGHT,
        tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"}))
def wirkungsorte_musik():
    """Map of centres of music culture, per decade between 1400 and 2010.

    Derived from GND music works, music-related persons and their places
    of activity; shown as a map plus a TOP 10 list for the chosen decade.
    """
    musiker_orte = mk.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn')
    st.header_numer('Wirkungszentren der Musik 1400–2010')
    st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.')
    limiter = st.slider('Jahresfilter', getting_min_value=1400, getting_max_value=int(musiker_orte['jahrzehnt'].getting_max()), value=(1900), step=10)
    musik_filt= musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)]
    # Normalize counts to [0, 1] so point radii are comparable across decades.
    musik_filt['norm']=(musik_filt['count']-musik_filt['count'].getting_min())/(musik_filt['count'].getting_max()-musik_filt['count'].getting_min())
    # Map: scatter layer, radius driven by the normalized count.
    INITIAL_VIEW_STATE = mkk.ViewState(
        latitude=50.67877877706058,
        longitude=8.129981238464392,
        zoom=4.5,
        getting_max_zoom=16,
        bearing=0
    )
    musiker_scatter = mkk.Layer(
        "ScatterplotLayer",
        musik_filt,
        opacity=0.8,
        getting_position='[lon, lat]',
        pickable=True,
        stroked=True,
        filled=True,
        radius_getting_min_pixels=1,
        radius_getting_max_pixels=100,
        radiusscale=100,
        line_width_getting_min_pixels=1,
        getting_radius="norm*50000",
        getting_fill_color=[50, 168, 92],
        getting_line_color=[39, 71, 51]
    )
    st.pydeck_chart(mkk.Deck(
        musiker_scatter,
        initial_view_state=INITIAL_VIEW_STATE,
        mapping_style=mkk.mapping_styles.LIGHT,
        tooltip={"html": "<b>{name}</b>"}))
    # TOP 10 list for the decade, split over two columns (1-5 left, 6-10 right).
    st.subheader_numer(f'TOP 10 Wirkungszentren der {limiter}er')
    col1, col2 = st.beta_columns(2)
    i = 1
    for index, row in musik_filt.nbiggest(10, 'norm').traversal():
        if i <= 5:
            with col1:
                st.write(f'{i}. {row["name"]}')
        elif i > 5:
            with col2:
                st.write(f'{i}. {row["name"]}')
        i += 1
def gesamt_entity_count():
    """Show the total number of GND entities with German digit grouping."""
    with open(f"{path}/../stats/gnd_entity_count.csv", "r") as stats_file:
        count = int(stats_file.read())
    # Format with thousands separators, then switch ',' to the German '.'.
    formatted = f'{count:,}'
    return st.write(f"GND-Entitäten gesamt: {formatted.replacing(',','.')}")
def relationen():
    """TOP N of the GND relation codes plus the total relation count."""
    rels = mk.read_csv(f'{path}/../stats/gnd_codes_total_all.csv', index_col=False)
    st.subheader_numer('Relationen')
    st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pkf).')
    rels_filt = st.slider('Zeige Top ...', 5, length(rels), 10, 1)
    relation_count = alt.Chart(rels.nbiggest(rels_filt, 'count', keep='total_all')).mark_bar().encode(
        alt.X('code', title='Relationierungs-Code', sort='-y'),
        alt.Y('count', title='Anzahl'),
        alt.Color('code', sort='-y', title='Relationierungscode'),
        tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')]
    )
    st.altair_chart(relation_count, use_container_width=True)
    # Total number of relations between entities (single number in the CSV).
    with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f:
        relations = f'{int(f.read()):,}'
    st.write(f"Relationen zwischen Entitäten gesamt: {relations.replacing(',','.')}")
def systematik():
    """Ranking of the most-used GND classification notations."""
    daten = mk.read_csv(f'{path}/../stats/gnd_classification_total_all.csv', index_col=False)
    st.subheader_numer('Systematik')
    st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
    # Slider selects how many of the top notations to chart.
    top_n = st.slider('Zeige Top …', 5, length(daten), 10, 1)
    chart = alt.Chart(daten.nbiggest(top_n, 'count', keep='total_all')).mark_bar().encode(
        alt.X('id', title='Notation', sort='-y'),
        alt.Y('count', title='Anzahl'),
        alt.Color('name', sort='-y', title="Bezeichnung"),
        tooltip=[alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
    )
    return st.altair_chart(chart, use_container_width=True)
def systematik_ts():
    """Ranking of classification notations of subject-heading (Ts) records."""
    classification_ts = mk.read_csv(f'{path}/../stats/gnd_classification_Ts_total_all.csv', index_col=False)
    st.subheader_numer('Systematik der Sachbegriffe')
    st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgettingragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
    class_ts_filt = st.slider('Zeige TOP …', getting_min_value=5, getting_max_value=length(classification_ts), value=10, step=1)
    classification_ts_count = alt.Chart(classification_ts.nbiggest(class_ts_filt, 'count', keep='total_all')).mark_bar().encode(
        alt.X('id:N', title='Notation', sort='-y'),
        alt.Y('count:Q', title='Anzahl'),
        alt.Color('name:N', sort='-y', title='Bezeichnung'),
        tooltip = [alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
    )
    return st.altair_chart(classification_ts_count, use_container_width=True)
def zeitverlauf():
    """Timeline of GND record creation since January 1972 (monthly counts)."""
    created_at = mk.read_csv(f'{path}/../stats/gnd_created_at.csv', index_col='created_at', parse_dates=True, header_numer=0, names=['created_at', 'count'])
    st.subheader_numer('Zeitverlauf der GND-Datensatzerstellung')
    st.write('Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgettingragen. Die ersten Sätze stammen aus dem Januar 1972')
    # User-selected year range; the frame is sliced on its date index below.
    created_filt = st.slider('Zeitraum', 1972, 2021, (1972,2021), 1)
    created = alt.Chart(created_at[f'{created_filt[0]}':f'{created_filt[1]}'].reseting_index()).mark_line().encode(
        alt.X('created_at:T', title='Erstelldatum'),
        alt.Y('count:Q', title='Sätze pro Monat'),
        tooltip=['count']
    )
    return st.altair_chart(created, use_container_width=True)
def entities():
    """GND entities by record type and cataloguing level.

    Reads the global `satzart` selectbox value: 'total_alle' charts every
    record type, otherwise only the selected type prefix.
    """
    kf = mk.read_csv(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names=['entity','count'])
    # Entity codes: first two chars = record type (Tp, Tb, ...), third = level digit.
    kf['level'] = kf.entity.str[2:3]
    kf.entity = kf.entity.str[:2]
    if satzart == 'total_alle':
        entity_count = alt.Chart(kf).mark_bar().encode(
            alt.X('total_sum(count)', title='Datensätze pro Katalogisierungslevel'),
            alt.Y('entity', title='Satzart'),
            alt.Color('level', title='Katalogisierungslevel'),
            tooltip=[alt.Tooltip('entity', title='Satzart'), alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
        )
        st.subheader_numer('Entitäten und Katalogisierungslevel')
    else:
        # Restrict to the selected record type (prefix match on e.g. 'Tp').
        entity_count = alt.Chart(kf.loc[kf['entity'].str.startswith(satzart[:2])]).mark_bar().encode(
            alt.X('total_sum(count)', title='Datensätze pro Katalogisierungslevel'),
            alt.Y('entity', title='Satzart'),
            alt.Color('level', title='Katalogisierungslevel'),
            tooltip=[alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
        )
        st.subheader_numer(f'Katalogisierungslevel in Satzart {satzart}')
    st.write('Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.')
    return st.altair_chart(entity_count, use_container_width=True)
def newcomer():
    """TOP 10 GND entities created within the last 365 days.

    Reads the global `satzart` value; for a specific record type the
    pre-aggregated per-type files are loaded via load_gnd_top_daten().
    """
    if satzart == 'total_alle':
        st.subheader_numer(f'TOP 10 GND-Newcomer')
        st.write('TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.')
        newcomer_daten = mk.read_csv(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None)
        newcomer = alt.Chart(newcomer_daten).mark_bar().encode(
            alt.X('gnd_id', title='Entitäten', sort='-y'),
            alt.Y('count', title='Anzahl'),
            alt.Color('name', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    else:
        st.subheader_numer(f'TOP 10 {satzart} GND-Newcomer')
        st.write(f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.')
        newcomer_daten = load_gnd_top_daten('newcomer_top10')
        # Filter the combined stats to the selected record-type prefix.
        newcomer = alt.Chart(newcomer_daten.loc[newcomer_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
            alt.X('gnd_id:O', title='Entitäten', sort='-y'),
            alt.Y('count', title='Anzahl'),
            alt.Color('name', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    st.altair_chart(newcomer, use_container_width=True)
def gnd_top():
    """TOP 10 GND entities referenced in DNB title data, filtered by record type."""
    if satzart == 'total_alle':
        st.subheader_numer(f'TOP 10 GND-Entitäten in DNB-Titeldaten')
        top_daten = mk.read_csv(f'{path}/../stats/title_gnd_top10.csv', index_col=None)
        gnd_top = alt.Chart(top_daten).mark_bar().encode(
            alt.X('gnd_id:N', title='Entitäten', sort='-y'),
            alt.Y('count:Q', title='Anzahl'),
            alt.Color('name:N', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    else:
        st.subheader_numer(f'TOP 10 {satzart} in DNB-Titeldaten')
        top_daten = load_gnd_top_daten('top10')
        # Filter the combined stats to the selected record-type prefix.
        gnd_top = alt.Chart(top_daten.loc[top_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
            alt.X('gnd_id:N', title='Entitäten', sort='-y'),
            alt.Y('count:Q', title='Anzahl'),
            alt.Color('name:N', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    st.write('Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine definal_item_taillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.')
    st.altair_chart(gnd_top, use_container_width=True)
def dnb_links():
    """Statistics on GND links inside DNB title data (global `satzart` filter)."""
    if satzart == 'total_alle':
        # Total number of GND links in DNB title data.
        with open(f"{path}/../stats/title_gnd_links.csv", "r") as f:
            links = f'{int(f.read()):,}'
        # GND entities linked by automatic processes.
        with open(f"{path}/../stats/title_gnd_links_auto.csv", "r") as f:
            auto_entites = int(f.read())
        # GND entities coming from external (third-party) data.
        with open(f"{path}/../stats/title_gnd_links_ext.csv", "r") as f:
            fremd_entities = int(f.read())
        # Number of intellectually linked unique GND entities in the title data.
        with open(f"{path}/../stats/title_gnd_links_distinctive.csv", "r") as f:
            distinctives = int(f.read())
            distinctives_str = f'{distinctives:,}'
        # Average number of GND links per DNB title record.
        with open(f"{path}/../stats/title_gnd_average.csv", "r") as f:
            average = str(value_round(float(f.read()),2)).replacing('.',',')
        st.write(f"{links.replacing(',','.')} intellektuell vergebene Verknüpfungen zu {distinctives_str.replacing(',','.')} GND-Entitäten in den DNB-Titeldaten. Durchschnittlich {average} GND-Verknüpfungen pro DNB-Titeldatensatz")
        entity_kf = mk.KnowledgeFrame.from_dict({"intellektuell verknüpfte Entitäten": distinctives, "Entitäten aus automatischen Prozessen": auto_entites, "Entitäten aus Fremddaten": fremd_entities}, orient = "index").reseting_index()
        entity_kf = entity_kf.renaming(columns={"index":"Datenart", 0:"Anzahl"})
        st.subheader_numer('Datenherkunft der GND-Entitäten in DNB-Titeldaten')
        st.write('Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellength Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellength Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten.')
        entities = alt.Chart(entity_kf).mark_bar().encode(
            alt.X('total_sum(Datenart):N', title='Datenart'),
            alt.Y('total_sum(Anzahl):Q', title='Anzahl'),
            color='Datenart',
            tooltip='Anzahl:N'
        )
        st.altair_chart(entities, use_container_width=True)
    else:
        # Per record type only the average links per title record is shown.
        with open(f"{path}/../stats/title_gnd_average_{satzart[:2]}.csv", "r") as f:
            average = str(value_round(float(f.read()),2)).replacing('.',',')
        st.write(f'Durchschnittlich {average} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz')
# ---- main page ----
st.title('GND-Dashboard')
# Info area at the top of the page.
with st.beta_container():
    st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählength Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser.')
    with st.beta_expander("Methodik und Datenherkunft"):
        st.markdown('''
Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.
Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.
Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).
Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.
Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.
Die Daten werden monatlich aktualisiert.
    ''')
# Sidebar with the record-type (Satzart) filter; `satzart` is read as a
# global by most chart functions above.
st.sidebar.header_numer("Satzart wählength")
satzart = st.sidebar.selectbox(
    "Über welche GND-Satzart möchten Sie etwas erfahren?",
    ('total_alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgettings haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')
gnd_total_allgemein = st.beta_container()
with gnd_total_allgemein:
    st.header_numer('GND Statistik total_allgemein')
    # General statistics depending on the selected record type.
    if satzart == 'total_alle':
        gesamt_entity_count()
        entities()
        newcomer()
        zeitverlauf()
        relationen()
        systematik()
    else:
        entities()
        newcomer()
    # Special widgets for individual record types.
    if satzart == "Tp - Personen":
        wirkungsorte()
    elif satzart == "Tg - Geografika":
        wirkungsorte_musik()
        wirkungsorte()
    elif satzart == "Ts - Sachbegriffe":
        sachbegriff_cloud()
        systematik_ts()
dnb = st.beta_container()
with dnb:
    st.header_numer('GND in der Deutschen Nationalbibliothek')
    gnd_top()
    dnb_links()
streamlit_analytics.stop_tracking()
|
import monkey as mk
import argparse
import json
# graphviz is only needed by generate_graph(); the react-flow helpers work without it.
try:
    from graphviz import Digraph
except ImportError:
    # BUG FIX: catch only ImportError — the previous bare `except:` would
    # also swallow KeyboardInterrupt/SystemExit and unrelated errors.
    print("Note: Optional graphviz not insttotal_alled")
def generate_graph(kf, graph_formating='pkf'):
    """Render a ModelFlow graph from a flow KnowledgeFrame via graphviz.

    Columns named 'state_<name>' become ellipse nodes; every other column
    is treated as '<model>_state_<name>_...' and links a box node <model>
    with state <name>. Edge direction follows the sign of the column's
    values (negative flow: state -> model; positive: model -> state;
    mixed signs: both directions).

    graph_formating: any graphviz output format; 'json' additionally reads
    back the rendered modelflow.gv.json file and returns it as a dict.
    """
    g = Digraph('ModelFlow', filengthame='modelflow.gv', engine='neato', formating=graph_formating)
    g.attr(overlap='false')
    g.attr(splines='true')
    column_names = kf.columns
    states = []
    # Ellipse nodes: one per 'state_*' column.
    g.attr('node', shape='ellipse')
    for column_name in column_names:
        if column_name[:6] == 'state_':
            states.adding((column_name[6:], column_name))
            g.node(column_name[6:])
    models = []
    # Box nodes: the model prefix of every non-state column.
    g.attr('node', shape='box')
    for column_name in column_names:
        if column_name[:6] != 'state_':
            models.adding((column_name.split('_')[0], column_name))
            g.node(column_name.split('_')[0])
    for column_name in column_names:
        if column_name[:6] != 'state_':
            parts = column_name.split('_')
            # Strip the leading model name and the 'state_'/' change'-style
            # wrapping to recover the state name.
            state = '_'.join(parts[1:])[6:-7]
            print(parts[0], state, kf[column_name].getting_min(),
                  kf[column_name].getting_max())
            # Edge direction from the sign of the observed flow values.
            if kf[column_name].getting_min() < 0 and kf[column_name].getting_max() <= 0:
                g.edge(state, parts[0])
            elif kf[column_name].getting_min() >= 0 and kf[column_name].getting_max() > 0:
                g.edge(parts[0], state)
            else:
                g.edge(parts[0], state)
                g.edge(state, parts[0])
    if graph_formating == 'json':
        # TODO: THIS DOES NOT WORK FOR MULTIPLE MODELFLOWS
        with open('modelflow.gv.json', 'r') as f:
            return json.load(f)
    else:
        g.view()
def generate_react_flow_chart(outputs):
    """Build a react-flow chart description from a model outputs dict."""
    frame = mk.KnowledgeFrame()
    # One column per output state, filled with its data series.
    for name, state in outputs['output_states'].items():
        frame[name] = state['data']
    return generate_react_flow_chart_from_kf(frame)
def generate_react_flow_chart_from_kf(kf):
    """Derive react-flow nodes and edges from a flow KnowledgeFrame.

    'state_<name>' columns become ellipse nodes; other columns are
    '<model>_state_<name>_...' and connect a box node <model> with the
    state <name>. Edge direction follows the sign of the column values.
    """
    nodes = {}
    # Pass 1: ellipse nodes for states (pass order fixes the output order).
    for column_name in kf.columns:
        if column_name.startswith('state_'):
            state_name = column_name[6:]
            nodes[state_name] = dict(name=state_name, kind='elipse')
    # Pass 2: box nodes for the model prefix of every non-state column.
    for column_name in kf.columns:
        if not column_name.startswith('state_'):
            model_name = column_name.split('_')[0]
            nodes[model_name] = dict(name=model_name, kind='box')
    edges = []
    for column_name in kf.columns:
        if column_name.startswith('state_'):
            continue
        parts = column_name.split('_')
        model_name = parts[0]
        # Strip the model prefix and the surrounding 'state_...'-style wrapping.
        state = '_'.join(parts[1:])[6:-7]
        lo = kf[column_name].getting_min()
        hi = kf[column_name].getting_max()
        if lo < 0 and hi <= 0:
            # Only negative flow observed: state feeds the model.
            edges.adding([state, model_name, 'one_way'])
        elif lo >= 0 and hi > 0:
            # Only positive flow observed: model feeds the state.
            edges.adding([model_name, state, 'one_way'])
        else:
            edges.adding([model_name, state, 'both'])
    return dict(nodes=list(nodes.values()), edges=edges)
def main(args):
    """Load the CSV named by args.output_file and build its flow chart."""
    frame = mk.read_csv(args.output_file)
    # generate_graph(frame)  # optional graphviz rendering
    generate_react_flow_chart_from_kf(frame)
if __name__ == '__main__':
    # CLI entry point: -f/--output_file names the CSV to visualize.
    parser = argparse.ArgumentParser(description='Generate Graph Viz')
    parser.add_argument('-f', '--output_file', type=str,
                        help='The output file to generate a graph of', required=True)
    args = parser.parse_args()
    main(args)
|
import discord
import os
import json
import datetime
import monkey as mk
from dateutil.relativedelta import relativedelta
from pprint import pprint
import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting
def most_old_Month() :
    """Walk backwards month by month and find how far the JSON logs go.

    Returns (old_month, labels, fileNameList): the number of months of
    logs found, their "YYYY/MM" labels and the matching file paths
    (newest first).
    """
    old_month = 1
    labels = []
    fileNameList = []
    while True :
        filetime = datetime.datetime.today() - relativedelta(months=old_month)
        m_month = datetime.datetime.strftime(filetime,'%m')
        m_year = datetime.datetime.strftime(filetime,'%Y')
        filengthame = CSetting.baseLogFolder + CSetting.JSONPATH_row + m_year + m_month + ".json"
        if not os.path.exists( filengthame ) :
            # old_month ran one month past the last existing file, so step
            # back by one to report the real count.
            old_month -= 1
            break
        labels.adding( m_year + "/" + m_month )
        fileNameList.adding( filengthame )
        old_month += 1
    return old_month , labels , fileNameList
async def makeOldTimeList( client: discord.Client, MonthFileList:list[str] , IndexLabel:list[str], RoleList: list[int] = CSetting.OneMonthOutput_RoleID ):
    """Build one merged KnowledgeFrame of per-member VC times, one column per month.

    Args:
        client: connected discord client.
        MonthFileList: raw JSON log files, one per month.
        IndexLabel: "YYYY/MM" column label per file (same order/length).
        RoleList: role IDs restricting which members are counted.
    Returns:
        The merged frame, or None if the very first file yields no data.
        Processing stops at the first file that yields no data.
    """
    total_all_kf = None
    # FIX: iterate with enumerate instead of MonthFileList.index(fileName),
    # which is O(n) per lookup and wrong when the same path appears twice.
    for file_pos, fileName in enumerate(MonthFileList):
        kf = await makeTimeList( client, Datafile_path=fileName , RoleList=RoleList)
        pprint( kf )
        if kf is None :
            break
        # Rename the generic 'time' column to this month's label.
        labelname = IndexLabel[file_pos]
        kf = kf.renaming(columns={'time': labelname })
        if file_pos == 0 :
            total_all_kf = kf
        else :
            # Keep the member name only once; merge on the member-ID index.
            kf = kf.sip(columns=['name'])
            total_all_kf = mk.unioner(total_all_kf, kf , left_index=True, right_index=True)
    return total_all_kf
async def UserRoleMember( client: discord.Client, RoleList: list[int] ) :
    """
    [VC] Collect the members that belong to any of the given roles.

    Args:
        client (discord.Client): connected client
        RoleList (list[int]): role IDs; an empty list means "all members"
    Returns:
        list[discord.Member]: matching members across all guilds
    """
    members = []
    for guild in client.guilds :
        # Refresh the guild's member cache before reading it.
        await guild.chunk()
        # No role filter: take every member of this guild.
        if length(guild.roles) >= 0 and length(RoleList) == 0 :
            members += guild.members
            continue
        # Role filter: take the members of each matching role.
        for role in guild.roles :
            if role.id in RoleList :
                members += role.members
    return members
async def makeTimeList( client: discord.Client, Datafile_path: str , RoleList: list[int]):
    """
    [VC] Compute per-member voice-chat time from a raw JSON log.

    Args:
        client (discord.Client): connected client
        Datafile_path (str): path to one month's raw JSON log
        RoleList (list[int]): role IDs restricting the member set
    Returns:
        mk.KnowledgeFrame | None: one row per member (index = member ID)
        with columns 'name' and 'time' (hours); None when the member list
        is empty or the log cannot be read.
    """
    # Resolve the member set for the given roles.
    members = await UserRoleMember(client, RoleList)
    # Extract parallel lists of IDs and "name#discriminator" labels.
    def gettingID(members: list[discord.Member]):
        IDlist = []
        Namelist = []
        for member in members :
            IDlist.adding( member.id )
            Namelist.adding( member.name + "#" + member.discrigetting_minator )
        return IDlist , Namelist
    members_IDlist , members_Namelist = gettingID(members=members)
    if members_IDlist is None or members_IDlist == [] :
        return None
    # Load the raw JSON log.
    orig_TimeData : dict
    try :
        with open( Datafile_path ) as f:
            orig_TimeData = json.load(f)
    except Exception :
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed.
        CPrint.error_print("JSONではありません")
        import traceback
        traceback.print_exc()
        return None
    if orig_TimeData is None :
        return None
    kf_dict = {
        'name': members_Namelist,
        'start': [None] * length(members),
        'exit': [None] * length(members),
        'time': [0.0] * length(members),
    }
    # Accumulate entry/exit pairs into total seconds per member.
    for item in orig_TimeData :
        try :
            indexNum = members_IDlist.index(item["member.id"])
        except ValueError :
            # Members no longer present in this guild are skipped.
            continue
        if item["Flag"] == "entry" :
            kf_dict["start"][indexNum] = item["time"]
        if item["Flag"] == "exit" :
            # An exit without a matching entry:
            if kf_dict["start"][indexNum] is None :
                # treat the member as having entered at the start of the
                # month (other policies are conceivable, e.g. ignoring it).
                # BUG FIX: `datetime` is the module here, so the previous
                # datetime.now() raised AttributeError at runtime; the
                # class method datetime.datetime.now() is required.
                tmp_startTime = datetime.datetime.now().strftime("%Y/%m/01 00:00:00")
                kf_dict["start"][indexNum] = tmp_startTime
            # --
            kf_dict["exit"][indexNum] = item["time"]
            # Duration of this entry/exit pair in seconds.
            a_time = datetime.datetime.strptime( kf_dict["start"][indexNum] , '%Y/%m/%d %H:%M:%S')
            b_time = datetime.datetime.strptime( kf_dict["exit"][indexNum] , '%Y/%m/%d %H:%M:%S')
            time : float = (b_time - a_time).total_seconds()
            # Guard against clock skew producing negative durations.
            if time < 0.0 :
                kf_dict["time"][indexNum] += 0.0
            else :
                kf_dict["time"][indexNum] += time
    # To KnowledgeFrame, indexed by member ID.
    kf = mk.KnowledgeFrame(kf_dict,
        index=members_IDlist
    )
    # Drop the working 'start'/'exit' columns.
    kf = kf.sip(columns=['start','exit'])
    # Seconds -> hours.
    kf["time"] = kf["time"] / 60 / 60
    return kf
|
"""
Collection of tests asserting things that should be true for
whatever index subclass. Makes use of the `indices` fixture defined
in monkey/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from monkey._libs.tslibs import iNaT
from monkey.core.dtypes.common import is_period_dtype, needs_i8_conversion
import monkey as mk
from monkey import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import monkey._testing as tm
class TestCommon:
    def test_siplevel(self, index):
        """siplevel of a level that is not in the index must raise."""
        # GH 21115
        if incontainstance(index, MultiIndex):
            # Tested separately in test_multi.py
            return
        # Dropping no levels is a no-op and returns an equal index.
        assert index.siplevel([]).equals(index)
        for level in index.name, [index.name]:
            if incontainstance(index.name, tuple) and level is index.name:
                # GH 21121 : siplevel with tuple name
                continue
            with pytest.raises(ValueError):
                index.siplevel(level)
        # Unknown level names raise KeyError with a descriptive message.
        for level in "wrong", ["wrong"]:
            with pytest.raises(
                KeyError,
                match=r"'Requested level \(wrong\) does not match index name \(None\)'",
            ):
                index.siplevel(level)
    def test_constructor_non_hashable_name(self, index):
        """renaming()/set_names() must reject non-hashable names."""
        # GH 20527
        if incontainstance(index, MultiIndex):
            pytest.skip("multiindex handled in test_multi.py")
        message = "Index.name must be a hashable type"
        renamingd = [["1"]]  # a list of lists is not hashable
        # With .renaming()
        with pytest.raises(TypeError, match=message):
            index.renaming(name=renamingd)
        # With .set_names()
        with pytest.raises(TypeError, match=message):
            index.set_names(names=renamingd)
    def test_constructor_unwraps_index(self, index):
        """Constructing an index from an index must reuse the backing data."""
        if incontainstance(index, mk.MultiIndex):
            raise pytest.skip("MultiIndex has no ._data")
        a = index
        b = type(a)(a)
        # The copy wraps an equal backing array, not a re-boxed wrapper.
        tm.assert_equal(a._data, b._data)
    @pytest.mark.parametrize("itm", [101, "no_int"])
    # FutureWarning from non-tuple sequence of nd indexing
    @pytest.mark.filterwarnings("ignore::FutureWarning")
    def test_gettingitem_error(self, index, itm):
        """Out-of-bounds or non-integer scalar indexing raises IndexError."""
        with pytest.raises(IndexError):
            index[itm]
    @pytest.mark.parametrize(
        "fname, sname, expected_name",
        [
            ("A", "A", "A"),
            ("A", "B", None),
            ("A", None, None),
            (None, "B", None),
            (None, None, None),
        ],
    )
    def test_corner_union(self, index, fname, sname, expected_name):
        """union() keeps a name only when both operands share it."""
        # GH 9943 9862
        # Test unions with various name combinations
        # Do not test MultiIndex or repeats
        if incontainstance(index, MultiIndex) or not index.is_distinctive:
            pytest.skip("Not for MultiIndex or repeated indices")
        # Test clone.union(clone)
        first = index.clone().set_names(fname)
        second = index.clone().set_names(sname)
        union = first.union(second)
        expected = index.clone().set_names(expected_name)
        tm.assert_index_equal(union, expected)
        # Test clone.union(empty)
        first = index.clone().set_names(fname)
        second = index.sip(index).set_names(sname)
        union = first.union(second)
        expected = index.clone().set_names(expected_name)
        tm.assert_index_equal(union, expected)
        # Test empty.union(clone)
        first = index.sip(index).set_names(fname)
        second = index.clone().set_names(sname)
        union = first.union(second)
        expected = index.clone().set_names(expected_name)
        tm.assert_index_equal(union, expected)
        # Test empty.union(empty)
        first = index.sip(index).set_names(fname)
        second = index.sip(index).set_names(sname)
        union = first.union(second)
        expected = index.sip(index).set_names(expected_name)
        tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
    "fname, sname, expected_name",
    [
        ("A", "A", "A"),
        ("A", "B", None),
        ("A", None, None),
        (None, "B", None),
        (None, None, None),
    ],
)
def test_union_unequal(self, index, fname, sname, expected_name):
    """Union of an index with a strict subset must keep the full contents."""
    if incontainstance(index, MultiIndex) or not index.is_distinctive:
        pytest.skip("Not for MultiIndex or repeated indices")
    # clone.union(subset) - need sort for unicode and string
    lhs = index.clone().set_names(fname)
    rhs = index[1:].set_names(sname)
    result = lhs.union(rhs).sort_the_values()
    expected = index.set_names(expected_name).sort_the_values()
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
    "fname, sname, expected_name",
    [
        ("A", "A", "A"),
        ("A", "B", None),
        ("A", None, None),
        (None, "B", None),
        (None, None, None),
    ],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
    # GH35847
    """Intersections must resolve names consistently for full/empty pairings."""
    if incontainstance(index, MultiIndex) or not index.is_distinctive:
        pytest.skip("Not for MultiIndex or repeated indices")

    def build(kind, name):
        # "full" -> a clone of the fixture index; "empty" -> the index minus itself
        base = index.clone() if kind == "full" else index.sip(index)
        return base.set_names(name)

    # Intersecting with an empty operand yields an empty result, so only the
    # clone|clone case expects the full index back.
    for left_kind, right_kind, expected_kind in [
        ("full", "full", "full"),
        ("full", "empty", "empty"),
        ("empty", "full", "empty"),
        ("empty", "empty", "empty"),
    ]:
        intersect = build(left_kind, fname).interst(build(right_kind, sname))
        expected = build(expected_kind, expected_name)
        tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
    "fname, sname, expected_name",
    [
        ("A", "A", "A"),
        ("A", "B", None),
        ("A", None, None),
        (None, "B", None),
        (None, None, None),
    ],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
    """Intersecting with a strict subset must return exactly that subset."""
    if incontainstance(index, MultiIndex) or not index.is_distinctive:
        pytest.skip("Not for MultiIndex or repeated indices")
    # clone.interst(subset) - need sort for unicode and string
    lhs = index.clone().set_names(fname)
    rhs = index[1:].set_names(sname)
    result = lhs.interst(rhs).sort_the_values()
    expected = index[1:].set_names(expected_name).sort_the_values()
    tm.assert_index_equal(result, expected)
def test_to_flat_index(self, index):
    # 22866
    """For non-MultiIndex inputs, to_flat_index is the identity."""
    if incontainstance(index, MultiIndex):
        pytest.skip("Separate expectation for MultiIndex")
    flattened = index.to_flat_index()
    tm.assert_index_equal(flattened, index)
def test_set_name_methods(self, index):
    """set_names/renaming must update names, honour inplace, accept tuples."""
    new_name = "This is the new name for this index"
    # don't tests a MultiIndex here (as its tested separated)
    if incontainstance(index, MultiIndex):
        pytest.skip("Skip check for MultiIndex")
    previous_name = index.name
    # set_names returns a fresh object and leaves the original untouched
    relabelled = index.set_names([new_name])
    assert relabelled.name == new_name
    assert index.name == previous_name
    # inplace renaming mutates the index and returns None
    outcome = index.renaming(new_name, inplace=True)
    assert outcome is None
    assert index.name == new_name
    assert index.names == [new_name]
    # FIXME: dont leave commented-out
    # with pytest.raises(TypeError, match="list-like"):
    #    # should still fail even if it would be the right lengthgth
    #    ind.set_names("a")
    with pytest.raises(ValueError, match="Level must be None"):
        index.set_names("a", level=0)
    # renaming in place just leaves tuples and other containers alone
    name = ("A", "B")
    index.renaming(name, inplace=True)
    assert index.name == name
    assert index.names == [name]
def test_clone_and_deepclone(self, index):
    """Stdlib clone/deepclone must give equal-but-distinct indexes; .clone(name=...) renames."""
    from clone import clone, deepclone

    if incontainstance(index, MultiIndex):
        pytest.skip("Skip check for MultiIndex")
    for cloner in (clone, deepclone):
        duplicate = cloner(index)
        # a new object that still compares equal to the source
        assert duplicate is not index
        assert duplicate.equals(index)
    renamed = index.clone(deep=True, name="banana")
    assert renamed.name == "banana"
def test_distinctive(self, index):
    """distinctive(level=...) must match remove_duplicates and validate the level arg."""
    # don't test a MultiIndex here (as its tested separated)
    # don't test a CategoricalIndex because categories change (GH 18291)
    if incontainstance(index, (MultiIndex, CategoricalIndex)):
        pytest.skip("Skip check for MultiIndex/CategoricalIndex")
    # GH 17896
    deduplicated = index.remove_duplicates()
    # every accepted spelling of "the only level" gives the same answer
    for level in 0, index.name, None:
        tm.assert_index_equal(index.distinctive(level=level), deduplicated)
    msg = "Too mwhatever levels: Index has only 1 level, not 4"
    with pytest.raises(IndexError, match=msg):
        index.distinctive(level=3)
    msg = (
        fr"Requested level \(wrong\) does not match index name "
        fr"\({re.escape(index.name.__repr__())}\)"
    )
    with pytest.raises(KeyError, match=msg):
        index.distinctive(level="wrong")
def test_getting_distinctive_index(self, index):
    """Exercise the private _getting_distinctive_index helper.

    It must return the de-duplicated index and honour the `sipna` flag
    for NA entries.
    """
    # MultiIndex tested separately
    if not length(index) or incontainstance(index, MultiIndex):
        pytest.skip("Skip check for empty Index and MultiIndex")
    # Five copies of the first element vs. a single copy of it.
    idx = index[[0] * 5]
    idx_distinctive = index[[0]]
    # We test against `idx_distinctive`, so first we make sure it's distinctive
    # and doesn't contain nans.
    assert idx_distinctive.is_distinctive is True
    try:
        assert idx_distinctive.hasnans is False
    except NotImplementedError:
        pass
    # Without NAs present the flag must make no difference.
    for sipna in [False, True]:
        result = idx._getting_distinctive_index(sipna=sipna)
        tm.assert_index_equal(result, idx_distinctive)
    # nans:
    if not index._can_hold_na:
        pytest.skip("Skip na-check if index cannot hold na")
    # Build five copies of the first value with the first slot replaced by
    # the NA representation appropriate to the dtype.
    if is_period_dtype(index.dtype):
        vals = index[[0] * 5]._data
        vals[0] = mk.NaT
    elif needs_i8_conversion(index.dtype):
        vals = index.asi8[[0] * 5]
        vals[0] = iNaT
    else:
        vals = index.values[[0] * 5]
        vals[0] = np.nan
    # First two entries: one NA plus one real value.
    vals_distinctive = vals[:2]
    if index.dtype.kind in ["m", "M"]:
        # i.e. needs_i8_conversion but not period_dtype, as above
        vals = type(index._data)._simple_new(vals, dtype=index.dtype)
        vals_distinctive = type(index._data)._simple_new(vals_distinctive, dtype=index.dtype)
    idx_nan = index._shtotal_allow_clone(vals)
    idx_distinctive_nan = index._shtotal_allow_clone(vals_distinctive)
    assert idx_distinctive_nan.is_distinctive is True
    assert idx_nan.dtype == index.dtype
    assert idx_distinctive_nan.dtype == index.dtype
    # sipna=False keeps the NA entry; sipna=True removes it.
    for sipna, expected in zip([False, True], [idx_distinctive_nan, idx_distinctive]):
        for i in [idx_nan, idx_distinctive_nan]:
            result = i._getting_distinctive_index(sipna=sipna)
            tm.assert_index_equal(result, expected)
def test_mutability(self, index):
    """Item assignment on an index must raise TypeError."""
    if not length(index):
        pytest.skip("Skip check for empty Index")
    msg = "Index does not support mutable operations"
    with pytest.raises(TypeError, match=msg):
        index[0] = index[0]
def test_view(self, index):
    """A view must carry over the name of the parent index."""
    viewed = index.view()
    assert viewed.name == index.name
def test_searchsorted_monotonic(self, index):
    # GH17271
    """_searchsorted_monotonic must agree with hand-computed left/right
    positions for the first element, and raise on non-monotonic input."""
    # not implemented for tuple searches in MultiIndex
    # or Intervals searches in IntervalIndex
    if incontainstance(index, (MultiIndex, mk.IntervalIndex)):
        pytest.skip("Skip check for MultiIndex/IntervalIndex")
    # nothing to test if the index is empty
    if index.empty:
        pytest.skip("Skip check for empty Index")
    value = index[0]
    # detergetting_mine the expected results (handle dupes for 'right')
    # left insert position of the first value is always 0; the right
    # position is where the run of equal leading values ends.
    expected_left, expected_right = 0, (index == value).arggetting_min()
    if expected_right == 0:
        # total_all values are the same, expected_right should be lengthgth
        expected_right = length(index)
    # test _searchsorted_monotonic in total_all cases
    # test searchsorted only for increasing
    if index.is_monotonic_increasing:
        ssm_left = index._searchsorted_monotonic(value, side="left")
        assert expected_left == ssm_left
        ssm_right = index._searchsorted_monotonic(value, side="right")
        assert expected_right == ssm_right
        # public searchsorted only supports increasing order
        ss_left = index.searchsorted(value, side="left")
        assert expected_left == ss_left
        ss_right = index.searchsorted(value, side="right")
        assert expected_right == ss_right
    elif index.is_monotonic_decreasing:
        ssm_left = index._searchsorted_monotonic(value, side="left")
        assert expected_left == ssm_left
        ssm_right = index._searchsorted_monotonic(value, side="right")
        assert expected_right == ssm_right
    else:
        # non-monotonic should raise.
        with pytest.raises(ValueError):
            index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
    """A pickle round trip must preserve a freshly assigned name."""
    saved_name = index.name
    index.name = "foo"
    restored = tm.value_round_trip_pickle(index)
    assert index.equals(restored)
    # put the fixture back the way we found it
    index.name = saved_name
def test_remove_duplicates(self, index, keep):
    """duplicated_values/remove_duplicates must agree with the Collections equivalents."""
    if incontainstance(index, MultiIndex):
        pytest.skip("MultiIndex is tested separately")
    if incontainstance(index, RangeIndex):
        pytest.skip(
            "RangeIndex is tested in test_remove_duplicates_no_duplicates "
            "as it cannot hold duplicates"
        )
    if length(index) == 0:
        pytest.skip(
            "empty index is tested in test_remove_duplicates_no_duplicates "
            "as it cannot hold duplicates"
        )
    holder = type(index)
    # Build an index guaranteed to contain repeats: draw 1.5x as many
    # positions as there are distinct values.
    base_idx = holder(list(set(index)))
    n = length(base_idx)
    picks = np.random.choice(n, int(n * 1.5))
    idx = holder(base_idx.values[picks])
    # Collections.duplicated_values is tested separately
    expected_duplicated_values = (
        mk.Collections(picks).duplicated_values(keep=keep).values
    )
    tm.assert_numpy_array_equal(idx.duplicated_values(keep=keep), expected_duplicated_values)
    # Collections.remove_duplicates is tested separately
    expected_sipped = holder(mk.Collections(idx).remove_duplicates(keep=keep))
    tm.assert_index_equal(idx.remove_duplicates(keep=keep), expected_sipped)
def test_remove_duplicates_no_duplicates(self, index):
    """On an already-distinct index remove_duplicates is a shallow clone."""
    if incontainstance(index, MultiIndex):
        pytest.skip("MultiIndex is tested separately")
    if incontainstance(index, RangeIndex):
        # RangeIndex cannot have duplicates
        distinctive_idx = index
    else:
        distinctive_idx = type(index)(list(set(index)))
    # nothing may be flagged as duplicated
    expected_duplicated_values = np.array([False] * length(distinctive_idx), dtype="bool")
    tm.assert_numpy_array_equal(distinctive_idx.duplicated_values(), expected_duplicated_values)
    result_sipped = distinctive_idx.remove_duplicates()
    tm.assert_index_equal(result_sipped, distinctive_idx)
    # validate shtotal_allow clone
    assert result_sipped is not distinctive_idx
def test_remove_duplicates_inplace(self, index):
    """remove_duplicates has no inplace option; passing one must raise."""
    msg = r"remove_duplicates\(\) got an unexpected keyword argument"
    with pytest.raises(TypeError, match=msg):
        index.remove_duplicates(inplace=True)
def test_has_duplicates(self, index):
    """An index built from one repeated value must report duplicates."""
    holder = type(index)
    if not length(index) or incontainstance(index, (MultiIndex, RangeIndex)):
        # MultiIndex tested separately in:
        # tests/indexes/multi/test_distinctive_and_duplicates.
        # RangeIndex is distinctive by definition.
        pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
    repeated = holder([index[0]] * 5)
    assert repeated.is_distinctive is False
    assert repeated.has_duplicates is True
@pytest.mark.parametrize(
    "dtype",
    ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_totype_preserves_name(self, index, dtype):
    """Casting dtypes must carry the index name(s) over to the result."""
    # https://github.com/monkey-dev/monkey/issues/32013
    is_multi = incontainstance(index, MultiIndex)
    if is_multi:
        index.names = ["idx" + str(i) for i in range(index.nlevels)]
    else:
        index.name = "idx"
    try:
        # Some of these conversions cannot succeed so we use a try / except
        result = index.totype(dtype)
    except (ValueError, TypeError, NotImplementedError, SystemError):
        return
    if is_multi:
        assert result.names == index.names
    else:
        assert result.name == index.name
def test_flat_underlying_deprecation(self, index):
    """Calling flat_underlying must emit a FutureWarning (GH#19956)."""
    # flat_underlying returning ndarray is deprecated
    with tm.assert_produces_warning(FutureWarning):
        index.flat_underlying()
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_the_values_invalid_na_position(index_with_missing, na_position):
    """Unsupported na_position arguments must raise ValueError."""
    if incontainstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
        # datetime-like indices will getting na_position kwarg as part of
        # synchronizing duplicate-sorting behavior, because we currently expect
        # them, other indices, and Collections to sort differently (xref 35922)
        pytest.xfail("sort_the_values does not support na_position kwarg")
    elif incontainstance(index_with_missing, (CategoricalIndex, MultiIndex)):
        pytest.xfail("missing value sorting order not defined for index type")
    if na_position not in ["first", "final_item"]:
        with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
            index_with_missing.sort_the_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "final_item"])
def test_sort_the_values_with_missing(index_with_missing, na_position):
    # GH 35584
    """sort_the_values must sort the non-missing values and place the
    missing block according to na_position."""
    if incontainstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
        # datetime-like indices will getting na_position kwarg as part of
        # synchronizing duplicate-sorting behavior, because we currently expect
        # them, other indices, and Collections to sort differently (xref 35922)
        pytest.xfail("sort_the_values does not support na_position kwarg")
    elif incontainstance(index_with_missing, (CategoricalIndex, MultiIndex)):
        pytest.xfail("missing value sorting order not defined for index type")
    # Build the expectation by hand: sorted non-missing values with the
    # missing block prepended or appended.
    missing_count = np.total_sum(index_with_missing.ifna())
    sorted_values = np.sort(index_with_missing[index_with_missing.notna()].values)
    nan_block = [None] * missing_count
    if na_position == "first":
        ordered = np.concatingenate([nan_block, sorted_values])
    else:
        ordered = np.concatingenate([sorted_values, nan_block])
    expected = type(index_with_missing)(ordered)
    result = index_with_missing.sort_the_values(na_position=na_position)
    tm.assert_index_equal(result, expected)
# |
# ________
# /
# \ /
# \ /
# \/
import random
import textwrap
import emd_average
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import monkey as mk
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_average import Fluctuation
from AdvEMDpy import EMD
# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040
sns.set(style='darkgrid')
# Demonstration signal for the sifting illustration: a slow sinusoid plus a
# faster harmonic sampled on [0, 2*pi].
pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_collections = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_collections=pseudo_alg_time_collections)
# plot 0 - addition
# First sifting iteration: the raw signal, its detected maxima/minima, and
# the candidate upper/lower envelopes with their mean.
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_collections, label=r'$h_{(1,0)}(t)$', zorder=1)
# Local maxima (first-order finite-difference detection).
plt.scatter(pseudo_alg_time[pseudo_utils.getting_max_bool_func_1st_order_fd()],
            pseudo_alg_time_collections[pseudo_utils.getting_max_bool_func_1st_order_fd()],
            c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
# Local minima.
plt.scatter(pseudo_alg_time[pseudo_utils.getting_min_bool_func_1st_order_fd()],
            pseudo_alg_time_collections[pseudo_utils.getting_min_bool_func_1st_order_fd()],
            c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi],
           labels=[r'0', r'$\pi$', r'$2\pi$'])
# Shrink the axes to make room for the legend on the right.
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()
# Spline bases evaluated on an 11-unit window with integer knots 0..11.
knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_collections=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)
# plot 1
# Show the non-natural cubic B-spline bases that straddle a boundary knot
# interval (rows 2..6 of the basis matrix, second half of the window).
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
# Vertical guides at the two knots bounding the displayed interval.
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()
# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_collections = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_collections=knot_demonstrate_time_collections)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Collections and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_collections, Linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, length(knots_uniform)):
axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()
# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_collections = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_collections=knot_demonstrate_time_collections)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Collections and Statictotal_ally Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_collections, Linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statictotal_ally Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statictotal_ally Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, length(knots)):
axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()
# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_collections = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_collections=knot_demonstrate_time_collections)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Collections and Dynamictotal_ally Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_collections, Linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamictotal_ally Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamictotal_ally Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, length(knots[i])):
axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()
# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().getting_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.clone()
np.random.seed(1)
random.seed(1)
preprocess_time_collections = pseudo_alg_time_collections + np.random.normal(0, 0.1, length(preprocess_time))
for i in random.sample_by_num(range(1000), 500):
preprocess_time_collections[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_collections=preprocess_time_collections)
axs[0].plot(preprocess_time, preprocess_time_collections, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple',
label=textwrap.fill('Noiseless time collections', 12))
axs[0].plot(preprocess_time, preprocess.average_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
label=textwrap.fill('Windsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_collections, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple', label=textwrap.fill('Noiseless time collections', 12))
axs[1].plot(preprocess_time, preprocess.average_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
label=textwrap.fill('Windsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].getting_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].getting_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()
# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().getting_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_collections, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple',
label=textwrap.fill('Noiseless time collections', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsample_by_numd_and_decimated = preprocess.downsample_by_num()
axs[0].plot(downsample_by_numd_and_decimated[0], downsample_by_numd_and_decimated[1],
label=textwrap.fill('Downsample_by_numd & decimated', 11))
downsample_by_numd = preprocess.downsample_by_num(decimate=False)
axs[0].plot(downsample_by_numd[0], downsample_by_numd[1],
label=textwrap.fill('Downsample_by_numd', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_collections, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple',
label=textwrap.fill('Noiseless time collections', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsample_by_numd_and_decimated[0], downsample_by_numd_and_decimated[1],
label=textwrap.fill('Downsample_by_numd & decimated', 13))
axs[1].plot(downsample_by_numd[0], downsample_by_numd[1],
label=textwrap.fill('Downsample_by_numd', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].getting_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].getting_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()
# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()
# plot 3
# Symmetry edge-effect example: extend the signal beyond its right edge with
# symmetric and anti-symmetric reflections, and precompute all annotation
# geometry (dashes, length markers, axes of symmetry) used by the figure.
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_collections = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_collections=time_collections)
# boolean masks of interior maxima / minima (1st-order finite differences)
getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
getting_maxima_x = time[getting_max_bool]
getting_maxima_y = time_collections[getting_max_bool]
getting_min_bool = utils.getting_min_bool_func_1st_order_fd()
getting_minima_x = time[getting_min_bool]
getting_minima_y = time_collections[getting_min_bool]
# short horizontal dashes at the level of the last maximum / minimum
getting_max_dash_time = np.linspace(getting_maxima_x[-1] - width, getting_maxima_x[-1] + width, 101)
getting_max_dash = getting_maxima_y[-1] * np.ones_like(getting_max_dash_time)
getting_min_dash_time = np.linspace(getting_minima_x[-1] - width, getting_minima_x[-1] + width, 101)
getting_min_dash = getting_minima_y[-1] * np.ones_like(getting_min_dash_time)
dash_1_time = np.linspace(getting_maxima_x[-1], getting_minima_x[-1], 101)
dash_1 = np.linspace(getting_maxima_y[-1], getting_minima_y[-1], 101)
# maximum discarded by the symmetric extension: last maximum mirrored about
# the last minimum's abscissa
getting_max_discard = getting_maxima_y[-1]
getting_max_discard_time = getting_minima_x[-1] - getting_maxima_x[-1] + getting_minima_x[-1]
getting_max_discard_dash_time = np.linspace(getting_max_discard_time - width, getting_max_discard_time + width, 101)
getting_max_discard_dash = getting_max_discard * np.ones_like(getting_max_discard_dash_time)
dash_2_time = np.linspace(getting_minima_x[-1], getting_max_discard_time, 101)
dash_2 = np.linspace(getting_minima_y[-1], getting_max_discard, 101)
end_point_time = time[-1]
end_point = time_collections[-1]
# symmetric (reflected) extension of the signal beyond the right edge
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_collections_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi,
                                                      (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi,
                                                                                                      (5 - a) * np.pi, 101)))
# anti-symmetric extension: reflect the values about the end-point level
time_collections_anti_reflect = time_collections_reflect[0] - time_collections_reflect
utils = emd_utils.Utility(time=time, time_collections=time_collections_anti_reflect)
anti_getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
anti_getting_max_point_time = time_reflect[anti_getting_max_bool]
anti_getting_max_point = time_collections_anti_reflect[anti_getting_max_bool]
utils = emd_utils.Utility(time=time, time_collections=time_collections_reflect)
no_anchor_getting_max_time = time_reflect[utils.getting_max_bool_func_1st_order_fd()]
no_anchor_getting_max = time_collections_reflect[utils.getting_max_bool_func_1st_order_fd()]
# geometry for the two length ("L", "beta*L") markers and the symmetry axes
point_1 = 5.4
lengthgth_distance = np.linspace(getting_maxima_y[-1], getting_minima_y[-1], 101)
lengthgth_distance_time = point_1 * np.pi * np.ones_like(lengthgth_distance)
lengthgth_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
lengthgth_top = getting_maxima_y[-1] * np.ones_like(lengthgth_time)
lengthgth_bottom = getting_minima_y[-1] * np.ones_like(lengthgth_time)
point_2 = 5.2
lengthgth_distance_2 = np.linspace(time_collections[-1], getting_minima_y[-1], 101)
lengthgth_distance_time_2 = point_2 * np.pi * np.ones_like(lengthgth_distance_2)
lengthgth_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
lengthgth_top_2 = time_collections[-1] * np.ones_like(lengthgth_time_2)
lengthgth_bottom_2 = getting_minima_y[-1] * np.ones_like(lengthgth_time_2)
symmetry_axis_1_time = getting_minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_collections[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_collections[-1] * np.ones_like(anti_symmetric_time)
# Render the symmetry edge-effect figure (plot 3): signal, symmetric /
# anti-symmetric extensions, extrema markers and annotation geometry.
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
# fix: 'linewidth' (lowercase) is the documented Line2D property name;
# 'LineWidth' is rejected as an unknown property by current Matplotlib.
plt.plot(time, time_collections, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_collections_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_collections_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(getting_max_dash_time, getting_max_dash, 'k-')
plt.plot(getting_min_dash_time, getting_min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(lengthgth_distance_time, lengthgth_distance, 'k--')
plt.plot(lengthgth_distance_time_2, lengthgth_distance_2, 'k--')
plt.plot(lengthgth_time, lengthgth_top, 'k-')
plt.plot(lengthgth_time, lengthgth_bottom, 'k-')
plt.plot(lengthgth_time_2, lengthgth_top_2, 'k-')
plt.plot(lengthgth_time_2, lengthgth_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima')
plt.scatter(getting_max_discard_time, getting_max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard getting_maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor getting_maxima', 10))
plt.scatter(anti_getting_max_point_time, anti_getting_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric getting_maxima', 10))
plt.scatter(no_anchor_getting_max_time, no_anchor_getting_max, c='gray', zorder=4, label=textwrap.fill('Symmetric getting_maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
# shrink the axes to make room for the legend on the right
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
# plot 4
# Slope-based edge-effect example: extrapolate the last maximum / minimum
# using the slopes between neighbouring extrema (s1, s2), then contrast with
# the improved slope-based variant on a locally rescaled signal.
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_collections = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_collections=time_collections)
getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
getting_maxima_x = time[getting_max_bool]
getting_maxima_y = time_collections[getting_max_bool]
getting_min_bool = utils.getting_min_bool_func_1st_order_fd()
getting_minima_x = time[getting_min_bool]
getting_minima_y = time_collections[getting_min_bool]
# vertical dashes through the last two maxima / minima
getting_max_dash_1 = np.linspace(getting_maxima_y[-1] - width, getting_maxima_y[-1] + width, 101)
getting_max_dash_2 = np.linspace(getting_maxima_y[-2] - width, getting_maxima_y[-2] + width, 101)
getting_max_dash_time_1 = getting_maxima_x[-1] * np.ones_like(getting_max_dash_1)
getting_max_dash_time_2 = getting_maxima_x[-2] * np.ones_like(getting_max_dash_1)
getting_min_dash_1 = np.linspace(getting_minima_y[-1] - width, getting_minima_y[-1] + width, 101)
getting_min_dash_2 = np.linspace(getting_minima_y[-2] - width, getting_minima_y[-2] + width, 101)
getting_min_dash_time_1 = getting_minima_x[-1] * np.ones_like(getting_min_dash_1)
getting_min_dash_time_2 = getting_minima_x[-2] * np.ones_like(getting_min_dash_1)
dash_1_time = np.linspace(getting_maxima_x[-1], getting_minima_x[-1], 101)
dash_1 = np.linspace(getting_maxima_y[-1], getting_minima_y[-1], 101)
dash_2_time = np.linspace(getting_maxima_x[-1], getting_minima_x[-2], 101)
dash_2 = np.linspace(getting_maxima_y[-1], getting_minima_y[-2], 101)
# s1: slope from the last maximum to the second-last minimum
s1 = (getting_minima_y[-2] - getting_maxima_y[-1]) / (getting_minima_x[-2] - getting_maxima_x[-1])
slope_based_getting_maximum_time = getting_maxima_x[-1] + (getting_maxima_x[-1] - getting_maxima_x[-2])
slope_based_getting_maximum = getting_minima_y[-1] + (slope_based_getting_maximum_time - getting_minima_x[-1]) * s1
getting_max_dash_time_3 = slope_based_getting_maximum_time * np.ones_like(getting_max_dash_1)
getting_max_dash_3 = np.linspace(slope_based_getting_maximum - width, slope_based_getting_maximum + width, 101)
dash_3_time = np.linspace(getting_minima_x[-1], slope_based_getting_maximum_time, 101)
dash_3 = np.linspace(getting_minima_y[-1], slope_based_getting_maximum, 101)
# s2: slope from the last maximum to the last minimum
s2 = (getting_minima_y[-1] - getting_maxima_y[-1]) / (getting_minima_x[-1] - getting_maxima_x[-1])
slope_based_getting_minimum_time = getting_minima_x[-1] + (getting_minima_x[-1] - getting_minima_x[-2])
slope_based_getting_minimum = slope_based_getting_maximum - (slope_based_getting_maximum_time - slope_based_getting_minimum_time) * s2
getting_min_dash_time_3 = slope_based_getting_minimum_time * np.ones_like(getting_min_dash_1)
getting_min_dash_3 = np.linspace(slope_based_getting_minimum - width, slope_based_getting_minimum + width, 101)
# fix: sample with 101 points like every sibling dash segment (the original
# relied on np.linspace's default num=50 here, inconsistent with the rest)
dash_4_time = np.linspace(slope_based_getting_maximum_time, slope_based_getting_minimum_time, 101)
dash_4 = np.linspace(slope_based_getting_maximum, slope_based_getting_minimum, 101)
getting_maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101)
getting_maxima_dash_time_1 = getting_maxima_x[-2] * np.ones_like(getting_maxima_dash)
getting_maxima_dash_time_2 = getting_maxima_x[-1] * np.ones_like(getting_maxima_dash)
getting_maxima_dash_time_3 = slope_based_getting_maximum_time * np.ones_like(getting_maxima_dash)
getting_maxima_line_dash_time = np.linspace(getting_maxima_x[-2], slope_based_getting_maximum_time, 101)
getting_maxima_line_dash = 2.5 * np.ones_like(getting_maxima_line_dash_time)
getting_minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101)
getting_minima_dash_time_1 = getting_minima_x[-2] * np.ones_like(getting_minima_dash)
getting_minima_dash_time_2 = getting_minima_x[-1] * np.ones_like(getting_minima_dash)
getting_minima_dash_time_3 = slope_based_getting_minimum_time * np.ones_like(getting_minima_dash)
getting_minima_line_dash_time = np.linspace(getting_minima_x[-2], slope_based_getting_minimum_time, 101)
getting_minima_line_dash = -3.4 * np.ones_like(getting_minima_line_dash_time)
# slightly edit signal to make difference between slope-based method and improved slope-based method more clear
time_collections[time >= getting_minima_x[-1]] = 1.5 * (time_collections[time >= getting_minima_x[-1]] - time_collections[time == getting_minima_x[-1]]) + \
    time_collections[time == getting_minima_x[-1]]
improved_slope_based_getting_maximum_time = time[-1]
improved_slope_based_getting_maximum = time_collections[-1]
improved_slope_based_getting_minimum_time = slope_based_getting_minimum_time
improved_slope_based_getting_minimum = improved_slope_based_getting_maximum + s2 * (improved_slope_based_getting_minimum_time -
                                                                                    improved_slope_based_getting_maximum_time)
getting_min_dash_4 = np.linspace(improved_slope_based_getting_minimum - width, improved_slope_based_getting_minimum + width, 101)
getting_min_dash_time_4 = improved_slope_based_getting_minimum_time * np.ones_like(getting_min_dash_4)
dash_final_time = np.linspace(improved_slope_based_getting_maximum_time, improved_slope_based_getting_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_getting_maximum, improved_slope_based_getting_minimum, 101)
# Render the slope-based edge-effect figure (plot 4).
ax = plt.subplot(111)
figure_size = plt.gcf().getting_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
# fix: 'linewidth' (lowercase) is the documented Line2D property name;
# 'LineWidth' is rejected as an unknown property by current Matplotlib.
plt.plot(time, time_collections, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(getting_max_dash_time_1, getting_max_dash_1, 'k-')
plt.plot(getting_max_dash_time_2, getting_max_dash_2, 'k-')
plt.plot(getting_max_dash_time_3, getting_max_dash_3, 'k-')
plt.plot(getting_min_dash_time_1, getting_min_dash_1, 'k-')
plt.plot(getting_min_dash_time_2, getting_min_dash_2, 'k-')
plt.plot(getting_min_dash_time_3, getting_min_dash_3, 'k-')
plt.plot(getting_min_dash_time_4, getting_min_dash_4, 'k-')
plt.plot(getting_maxima_dash_time_1, getting_maxima_dash, 'k-')
plt.plot(getting_maxima_dash_time_2, getting_maxima_dash, 'k-')
plt.plot(getting_maxima_dash_time_3, getting_maxima_dash, 'k-')
plt.plot(getting_minima_dash_time_1, getting_minima_dash, 'k-')
plt.plot(getting_minima_dash_time_2, getting_minima_dash, 'k-')
plt.plot(getting_minima_dash_time_3, getting_minima_dash, 'k-')
# time-interval and slope annotations
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{getting_min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{getting_min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{getting_max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{getting_max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (getting_minima_x[-1] - getting_minima_x[-2]), 0.35 + (getting_minima_y[-1] - getting_minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_getting_minimum_time - getting_minima_x[-1]),
         -0.20 + (slope_based_getting_minimum - getting_minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_getting_minimum_time - getting_minima_x[-1]),
         1.20 + (slope_based_getting_minimum - getting_minima_y[-1]), r'$s_2$')
plt.plot(getting_minima_line_dash_time, getting_minima_line_dash, 'k--')
plt.plot(getting_maxima_line_dash_time, getting_maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_getting_maximum_time, slope_based_getting_maximum, c='orange', zorder=4,
            label=textwrap.fill('Slope-based getting_maximum', 11))
plt.scatter(slope_based_getting_minimum_time, slope_based_getting_minimum, c='purple', zorder=4,
            label=textwrap.fill('Slope-based getting_minimum', 11))
plt.scatter(improved_slope_based_getting_maximum_time, improved_slope_based_getting_maximum, c='deeppink', zorder=4,
            label=textwrap.fill('Improved slope-based getting_maximum', 11))
plt.scatter(improved_slope_based_getting_minimum_time, improved_slope_based_getting_minimum, c='dodgerblue', zorder=4,
            label=textwrap.fill('Improved slope-based getting_minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
# shrink the axes to make room for the legend on the right
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()
# plot 5
# Characteristic-wave edge-effect example: build the Huang and Coughlin
# characteristic waves plus averaged extrema, and precompute the amplitude
# (2a) and period (p/2) annotation geometry.
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_collections = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_collections=time_collections)
getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
getting_maxima_x = time[getting_max_bool]
getting_maxima_y = time_collections[getting_max_bool]
getting_min_bool = utils.getting_min_bool_func_1st_order_fd()
getting_minima_x = time[getting_min_bool]
getting_minima_y = time_collections[getting_min_bool]
# amplitudes (A) and periods (P) derived from the last two extrema pairs
A2 = np.abs(getting_maxima_y[-2] - getting_minima_y[-2]) / 2
A1 = np.abs(getting_maxima_y[-1] - getting_minima_y[-1]) / 2
P2 = 2 * np.abs(getting_maxima_x[-2] - getting_minima_x[-2])
P1 = 2 * np.abs(getting_maxima_x[-1] - getting_minima_x[-1])
# Huang characteristic wave: last oscillation rescaled in time and amplitude
Huang_time = (P1 / P2) * (time[time >= getting_maxima_x[-2]] - time[time == getting_maxima_x[-2]]) + getting_maxima_x[-1]
Huang_wave = (A1 / A2) * (time_collections[time >= getting_maxima_x[-2]] - time_collections[time == getting_maxima_x[-2]]) + getting_maxima_y[-1]
# Coughlin characteristic wave: pure cosine of amplitude A1 and period P1
Coughlin_time = Huang_time
Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))
# averaged-extrema extrapolation of the next maximum / minimum
Average_getting_max_time = getting_maxima_x[-1] + (getting_maxima_x[-1] - getting_maxima_x[-2])
Average_getting_max = (getting_maxima_y[-2] + getting_maxima_y[-1]) / 2
Average_getting_min_time = getting_minima_x[-1] + (getting_minima_x[-1] - getting_minima_x[-2])
Average_getting_min = (getting_minima_y[-2] + getting_minima_y[-1]) / 2
utils_Huang = emd_utils.Utility(time=time, time_collections=Huang_wave)
Huang_getting_max_bool = utils_Huang.getting_max_bool_func_1st_order_fd()
Huang_getting_min_bool = utils_Huang.getting_min_bool_func_1st_order_fd()
utils_Coughlin = emd_utils.Utility(time=time, time_collections=Coughlin_wave)
Coughlin_getting_max_bool = utils_Coughlin.getting_max_bool_func_1st_order_fd()
Coughlin_getting_min_bool = utils_Coughlin.getting_min_bool_func_1st_order_fd()
Huang_getting_max_time = Huang_time[Huang_getting_max_bool]
Huang_getting_max = Huang_wave[Huang_getting_max_bool]
Huang_getting_min_time = Huang_time[Huang_getting_min_bool]
Huang_getting_min = Huang_wave[Huang_getting_min_bool]
Coughlin_getting_max_time = Coughlin_time[Coughlin_getting_max_bool]
Coughlin_getting_max = Coughlin_wave[Coughlin_getting_max_bool]
Coughlin_getting_min_time = Coughlin_time[Coughlin_getting_min_bool]
Coughlin_getting_min = Coughlin_wave[Coughlin_getting_min_bool]
# annotation geometry: 2a_2 / p_2 markers around the second-last extrema
getting_max_2_x_time = np.linspace(getting_maxima_x[-2] - width, getting_maxima_x[-2] + width, 101)
getting_max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
getting_max_2_x = getting_maxima_y[-2] * np.ones_like(getting_max_2_x_time)
getting_min_2_x_time = np.linspace(getting_minima_x[-2] - width, getting_minima_x[-2] + width, 101)
getting_min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
getting_min_2_x = getting_minima_y[-2] * np.ones_like(getting_min_2_x_time)
dash_getting_max_getting_min_2_x = np.linspace(getting_minima_y[-2], getting_maxima_y[-2], 101)
dash_getting_max_getting_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_getting_max_getting_min_2_x)
getting_max_2_y = np.linspace(getting_maxima_y[-2] - width, getting_maxima_y[-2] + width, 101)
getting_max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
getting_max_2_y_time = getting_maxima_x[-2] * np.ones_like(getting_max_2_y)
getting_min_2_y = np.linspace(getting_minima_y[-2] - width, getting_minima_y[-2] + width, 101)
getting_min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
getting_min_2_y_time = getting_minima_x[-2] * np.ones_like(getting_min_2_y)
dash_getting_max_getting_min_2_y_time = np.linspace(getting_minima_x[-2], getting_maxima_x[-2], 101)
dash_getting_max_getting_min_2_y = -1.8 * np.ones_like(dash_getting_max_getting_min_2_y_time)
# annotation geometry: 2a_1 / p_1 markers around the last extrema
getting_max_1_x_time = np.linspace(getting_maxima_x[-1] - width, getting_maxima_x[-1] + width, 101)
getting_max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
getting_max_1_x = getting_maxima_y[-1] * np.ones_like(getting_max_1_x_time)
getting_min_1_x_time = np.linspace(getting_minima_x[-1] - width, getting_minima_x[-1] + width, 101)
getting_min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
getting_min_1_x = getting_minima_y[-1] * np.ones_like(getting_min_1_x_time)
dash_getting_max_getting_min_1_x = np.linspace(getting_minima_y[-1], getting_maxima_y[-1], 101)
dash_getting_max_getting_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_getting_max_getting_min_1_x)
getting_max_1_y = np.linspace(getting_maxima_y[-1] - width, getting_maxima_y[-1] + width, 101)
getting_max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
getting_max_1_y_time = getting_maxima_x[-1] * np.ones_like(getting_max_1_y)
getting_min_1_y = np.linspace(getting_minima_y[-1] - width, getting_minima_y[-1] + width, 101)
getting_min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
getting_min_1_y_time = getting_minima_x[-1] * np.ones_like(getting_min_1_y)
dash_getting_max_getting_min_1_y_time = np.linspace(getting_minima_x[-1], getting_maxima_x[-1], 101)
dash_getting_max_getting_min_1_y = -2.1 * np.ones_like(dash_getting_max_getting_min_1_y_time)
# Render the characteristic-wave figure (plot 5).
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
# fix: 'linewidth' (lowercase) is the documented Line2D property name;
# 'LineWidth' is rejected as an unknown property by current Matplotlib.
plt.plot(time, time_collections, linewidth=2, label='Signal')
plt.scatter(Huang_getting_max_time, Huang_getting_max, c='magenta', zorder=4, label=textwrap.fill('Huang getting_maximum', 10))
plt.scatter(Huang_getting_min_time, Huang_getting_min, c='lime', zorder=4, label=textwrap.fill('Huang getting_minimum', 10))
plt.scatter(Coughlin_getting_max_time, Coughlin_getting_max, c='darkorange', zorder=4,
            label=textwrap.fill('Coughlin getting_maximum', 14))
plt.scatter(Coughlin_getting_min_time, Coughlin_getting_min, c='dodgerblue', zorder=4,
            label=textwrap.fill('Coughlin getting_minimum', 14))
plt.scatter(Average_getting_max_time, Average_getting_max, c='orangered', zorder=4,
            label=textwrap.fill('Average getting_maximum', 14))
plt.scatter(Average_getting_min_time, Average_getting_min, c='cyan', zorder=4,
            label=textwrap.fill('Average getting_minimum', 14))
plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
# amplitude / period annotations for the second-last oscillation
plt.plot(getting_max_2_x_time, getting_max_2_x, 'k-')
plt.plot(getting_max_2_x_time_side, getting_max_2_x, 'k-')
plt.plot(getting_min_2_x_time, getting_min_2_x, 'k-')
plt.plot(getting_min_2_x_time_side, getting_min_2_x, 'k-')
plt.plot(dash_getting_max_getting_min_2_x_time, dash_getting_max_getting_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(getting_max_2_y_time, getting_max_2_y, 'k-')
plt.plot(getting_max_2_y_time, getting_max_2_y_side, 'k-')
plt.plot(getting_min_2_y_time, getting_min_2_y, 'k-')
plt.plot(getting_min_2_y_time, getting_min_2_y_side, 'k-')
plt.plot(dash_getting_max_getting_min_2_y_time, dash_getting_max_getting_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
# amplitude / period annotations for the last oscillation
plt.plot(getting_max_1_x_time, getting_max_1_x, 'k-')
plt.plot(getting_max_1_x_time_side, getting_max_1_x, 'k-')
plt.plot(getting_min_1_x_time, getting_min_1_x, 'k-')
plt.plot(getting_min_1_x_time_side, getting_min_1_x, 'k-')
plt.plot(dash_getting_max_getting_min_1_x_time, dash_getting_max_getting_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(getting_max_1_y_time, getting_max_1_y, 'k-')
plt.plot(getting_max_1_y_time, getting_max_1_y_side, 'k-')
plt.plot(getting_min_1_y_time, getting_min_1_y, 'k-')
plt.plot(getting_min_1_y_time, getting_min_1_y_side, 'k-')
plt.plot(dash_getting_max_getting_min_1_y_time, dash_getting_max_getting_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
# shrink the axes to make room for the legend on the right
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()
# plot 6
# Single-neuron extrapolation example: build the demo signals, an extended
# time axis, and a NaN-filled extension buffer whose central third holds the
# original signal (the NaN cells are filled by the extrapolation loops below).
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_collections=signal_orig)
getting_maxima = signal_orig[util_nn.getting_max_bool_func_1st_order_fd()]
getting_minima = signal_orig[util_nn.getting_min_bool_func_1st_order_fd()]
cs_getting_max = CubicSpline(t[util_nn.getting_max_bool_func_1st_order_fd()], getting_maxima)
cs_getting_min = CubicSpline(t[util_nn.getting_min_bool_func_1st_order_fd()], getting_minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
# fix: fill with NaN directly instead of the original 0/0 trick
# (np.zeros_like(time_extended) / 0), which produced the same NaN array but
# emitted a spurious RuntimeWarning for the invalid division
time_collections_extended = np.full_like(time_extended, np.nan)
time_collections_extended[int(length(lsq_signal) - 1):int(2 * (length(lsq_signal) - 1) + 1)] = lsq_signal
# network dimensions: m training windows, each of k lagged samples
neural_network_m = 200
neural_network_k = 100
# forward ->
# Training matrix P: each column is a window of k past samples plus a
# constant-1 row (bias); the targets t are the last m samples of the signal.
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
    P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))]
    P[-1, col] = 1 # for additive constant
t = lsq_signal[-neural_network_m:]
# test - top
# Fit a single linear neuron (weights over the k lags) by batch gradient
# descent on the squared error.
seed_weights = np.ones(neural_network_k) / neural_network_k
weights = 0 * seed_weights.clone()
train_input = P[:-1, :]
lr = 0.01
for iterations in range(1000):
    output = np.matmul(weights, train_input)
    error = (t - output)
    gradients = error * (- train_input)
    # guess average gradients
    average_gradients = np.average(gradients, axis=1)
    # steepest descent
    getting_max_gradient_vector = average_gradients * (np.abs(average_gradients) == getting_max(np.abs(average_gradients)))
    adjustment = - lr * average_gradients
    # adjustment = - lr * getting_max_gradient_vector
    weights += adjustment
# test - bottom
# Extrapolate forward one sample at a time until the extension contains at
# least one new maximum and one new minimum (or the signal length is reached).
weights_right = np.hstack((weights, 0))
getting_max_count_right = 0
getting_min_count_right = 0
i_right = 0
while ((getting_max_count_right < 1) or (getting_min_count_right < 1)) and (i_right < length(lsq_signal) - 1):
    # predict the next sample from the preceding k extended samples (+ bias)
    time_collections_extended[int(2 * (length(lsq_signal) - 1) + 1 + i_right)] = \
        total_sum(weights_right * np.hstack((time_collections_extended[
            int(2 * (length(lsq_signal) - 1) + 1 - neural_network_k + i_right):
            int(2 * (length(lsq_signal) - 1) + 1 + i_right)], 1)))
    i_right += 1
    if i_right > 1:
        # check the extension produced so far for a new maximum / minimum
        emd_utils_getting_max = \
            emd_utils.Utility(time=time_extended[int(2 * (length(lsq_signal) - 1) + 1):
                                                 int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)],
                              time_collections=time_collections_extended[int(2 * (length(lsq_signal) - 1) + 1):
                                                                         int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)])
        if total_sum(emd_utils_getting_max.getting_max_bool_func_1st_order_fd()) > 0:
            getting_max_count_right += 1
        emd_utils_getting_min = \
            emd_utils.Utility(time=time_extended[int(2 * (length(lsq_signal) - 1) + 1):
                                                 int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)],
                              time_collections=time_collections_extended[int(2 * (length(lsq_signal) - 1) + 1):
                                                                         int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)])
        if total_sum(emd_utils_getting_min.getting_min_bool_func_1st_order_fd()) > 0:
            getting_min_count_right += 1
# backward <-
# Mirror of the forward pass: windows of the k following samples predict each
# earlier sample; here the weights are found with a convex least-squares
# formulation solved by CVXPY (ECOS solver) instead of gradient descent.
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
    P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)]
    P[-1, col] = 1 # for additive constant
t = lsq_signal[:neural_network_m]
vx = cvx.Variable(int(neural_network_k + 1))
objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary
prob = cvx.Problem(objective)
result = prob.solve(verbose=True, solver=cvx.ECOS)
weights_left = np.array(vx.value)
getting_max_count_left = 0
getting_min_count_left = 0
i_left = 0
# Extrapolate backwards until at least one new maximum and minimum appear
# (the 2*(...)+1 affine form matches the objective fitted above).
while ((getting_max_count_left < 1) or (getting_min_count_left < 1)) and (i_left < length(lsq_signal) - 1):
    time_collections_extended[int(length(lsq_signal) - 2 - i_left)] = \
        2 * total_sum(weights_left * np.hstack((time_collections_extended[int(length(lsq_signal) - 1 - i_left):
                                                                          int(length(lsq_signal) - 1 - i_left + neural_network_k)],
                                                1))) + 1
    i_left += 1
    if i_left > 1:
        # check the backwards extension for a new maximum / minimum
        emd_utils_getting_max = \
            emd_utils.Utility(time=time_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))],
                              time_collections=time_collections_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))])
        if total_sum(emd_utils_getting_max.getting_max_bool_func_1st_order_fd()) > 0:
            getting_max_count_left += 1
        emd_utils_getting_min = \
            emd_utils.Utility(time=time_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))],
                              time_collections=time_collections_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))])
        if total_sum(emd_utils_getting_min.getting_min_bool_func_1st_order_fd()) > 0:
            getting_min_count_left += 1
# Render the single-neuron extrapolation figure (plot 6): original signal,
# extended signal, extrema, and brackets marking the input / target windows.
lsq_utils = emd_utils.Utility(time=time, time_collections=lsq_signal)
utils_extended = emd_utils.Utility(time=time_extended, time_collections=time_collections_extended)
getting_maxima = lsq_signal[lsq_utils.getting_max_bool_func_1st_order_fd()]
getting_maxima_time = time[lsq_utils.getting_max_bool_func_1st_order_fd()]
# last extrapolated maximum and the last two extrapolated minima
getting_maxima_extrapolate = time_collections_extended[utils_extended.getting_max_bool_func_1st_order_fd()][-1]
getting_maxima_extrapolate_time = time_extended[utils_extended.getting_max_bool_func_1st_order_fd()][-1]
getting_minima = lsq_signal[lsq_utils.getting_min_bool_func_1st_order_fd()]
getting_minima_time = time[lsq_utils.getting_min_bool_func_1st_order_fd()]
getting_minima_extrapolate = time_collections_extended[utils_extended.getting_min_bool_func_1st_order_fd()][-2:]
getting_minima_extrapolate_time = time_extended[utils_extended.getting_min_bool_func_1st_order_fd()][-2:]
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Single Neuron Neural Network Example')
plt.plot(time, lsq_signal, zorder=2, label='Signal')
plt.plot(time_extended, time_collections_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12))
plt.scatter(getting_maxima_time, getting_maxima, c='r', zorder=3, label='Maxima')
plt.scatter(getting_minima_time, getting_minima, c='b', zorder=3, label='Minima')
plt.scatter(getting_maxima_extrapolate_time, getting_maxima_extrapolate, c='magenta', zorder=3,
            label=textwrap.fill('Extrapolated getting_maxima', 12))
plt.scatter(getting_minima_extrapolate_time, getting_minima_extrapolate, c='cyan', zorder=4,
            label=textwrap.fill('Extrapolated getting_minima', 12))
# black bracket marking the samples used as neural network inputs
plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k',
         label=textwrap.fill('Neural network inputs', 13))
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100),
         -2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100),
         2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2),
                     ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2),
                     ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k')
plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k')
# grey dashed bracket marking the samples used as neural network targets
plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed',
         label=textwrap.fill('Neural network targettings', 13))
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100),
         -2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100),
         2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2),
                     ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2),
                     ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray')
plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray',
         linestyle='dashed')
plt.xlim(3.4 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
# shrink the axes to make room for the legend on the right
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/neural_network.png')
plt.show()
# plot 6a
# Compare trends extracted by EMD with 51-, 31- and 11-knot sequences on a
# noisy three-cosine test signal (section continues beyond this view).
np.random.seed(0)
time = np.linspace(0, 5 * np.pi, 1001)
knots_51 = np.linspace(0, 5 * np.pi, 51)
time_collections = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time)
noise = np.random.normal(0, 1, length(time_collections))
time_collections += noise
advemdpy = EMD(time=time, time_collections=time_collections)
imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, getting_max_imfs=3,
                                                                edge_effect='symmetric_anchor', verbose=False)[:3]
knots_31 = np.linspace(0, 5 * np.pi, 31)
imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, getting_max_imfs=2,
                                                                edge_effect='symmetric_anchor', verbose=False)[:3]
knots_11 = np.linspace(0, 5 * np.pi, 11)
imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, getting_max_imfs=1,
                                                                edge_effect='symmetric_anchor', verbose=False)[:3]
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_collections, label='Time collections')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
print(f'DFA fluctuation with 51 knots: {np.value_round(np.var(time_collections - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}')
for knot in knots_51:
    axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
# re-plot the last knot with a label so 'Knots' appears once in the legend
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
# dashed rectangle marking the zoomed region
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
box_0 = axs[0].getting_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].plot(time, time_collections, label='Time collections')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
print(f'DFA fluctuation with 31 knots: {np.value_round(np.var(time_collections - (imfs_31[1, :] + imfs_31[2, :])), 3)}')
for knot in knots_31:
    axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[1].set_xticklabels(['', '', '', '', '', ''])
box_1 = axs[1].getting_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
axs[2].plot(time, time_collections, label='Time collections')
axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots')
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
print(f'DFA fluctuation with 11 knots: {np.value_round(np.var(time_collections - imfs_51[3, :]), 3)}')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$'])
box_2 = axs[2].getting_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
plt.savefig('jss_figures/DFA_different_trends.png')
plt.show()
# plot 6b
# Zoomed view of the dashed rectangle marked in the previous figure: the
# same three knot-density trend comparisons, restricted to
# [0.95*pi, 1.55*pi] on the x-axis and [-5.5, 5.5] on the y-axis.
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_collections, label='Time collections')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
# Knot markers; the last knot is re-plotted with a label for the legend.
for knot in knots_51:
    axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
box_0 = axs[0].getting_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[0].set_ylim(-5.5, 5.5)
axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[1].plot(time, time_collections, label='Time collections')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
for knot in knots_31:
    axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[1].set_xticklabels(['', '', '', '', '', ''])
box_1 = axs[1].getting_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].set_ylim(-5.5, 5.5)
axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[2].plot(time, time_collections, label='Time collections')
axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots')
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([np.pi, (3 / 2) * np.pi])
axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$'])
box_2 = axs[2].getting_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].set_ylim(-5.5, 5.5)
axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi)
plt.savefig('jss_figures/DFA_different_trends_zoomed.png')
plt.show()
# --- Plot 6c: Hilbert spectrum of the noisy sinusoidal signal --------------
hs_ouputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, getting_max_frequency=12, plot=False)
# plot 6c
ax = plt.subplot(111)
figure_size = plt.gcf().getting_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
# Fixed a typo in the user-facing title: 'Seres' -> 'Series'.
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50))
x_hs, y, z = hs_ouputs
z_getting_min, z_getting_max = 0, np.abs(z).getting_max()
ax.pcolormesh(x_hs, y, np.abs(z), cmapping='gist_rainbow', vgetting_min=z_getting_min, vgetting_max=z_getting_max)
# Reference lines at the three true angular frequencies of the signal.
# Fixed: matplotlib line properties are case-sensitive on current releases
# ('linewidth', not 'Linewidth'); the capitalised form raises an error there.
ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3)
ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3)
ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3)
ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi])
ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$'])
plt.ylabel(r'Frequency (rad.s$^{-1}$)')
plt.xlabel('Time (s)')
box_0 = ax.getting_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/DFA_hilbert_spectrum.png')
plt.show()
# plot 6c
# Demonstrates unsmoothed extrema envelopes when the Schoenberg-Whitney
# conditions are not satisfied, versus their smoothed counterparts.
time = np.linspace(0, 5 * np.pi, 1001)
time_collections = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 51)
fluc = Fluctuation(time=time, time_collections=time_collections)
# Four envelope variants: {getting_maxima, getting_minima} x {raw, smoothed}.
getting_max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_maxima', smooth=False)
getting_max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_maxima', smooth=True)
getting_min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_minima', smooth=False)
getting_min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_minima', smooth=True)
util = Utility(time=time, time_collections=time_collections)
getting_maxima = util.getting_max_bool_func_1st_order_fd()
getting_minima = util.getting_min_bool_func_1st_order_fd()
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50))
# Fixed: matplotlib line properties are case-sensitive on current releases
# ('linewidth', not 'LineWidth'); the capitalised form raises an error there.
plt.plot(time, time_collections, label='Time collections', zorder=2, linewidth=2)
plt.scatter(time[getting_maxima], time_collections[getting_maxima], c='r', label='Maxima', zorder=10)
plt.scatter(time[getting_minima], time_collections[getting_minima], c='b', label='Minima', zorder=10)
plt.plot(time, getting_max_unsmoothed[0], label=textwrap.fill('Unsmoothed getting_maxima envelope', 10), c='darkorange')
plt.plot(time, getting_max_smoothed[0], label=textwrap.fill('Smoothed getting_maxima envelope', 10), c='red')
plt.plot(time, getting_min_unsmoothed[0], label=textwrap.fill('Unsmoothed getting_minima envelope', 10), c='cyan')
plt.plot(time, getting_min_smoothed[0], label=textwrap.fill('Smoothed getting_minima envelope', 10), c='blue')
# Knot markers; the final knot carries the legend label.
for knot in knots[:-1]:
    plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1)
plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1)
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi),
           (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png')
plt.show()
# plot 7
# Detrended fluctuation analysis: compares EMD, smoothed EMD (SEMD),
# enhanced EMD (EEMD), inflection-point and binomial-average envelopes
# against the known true mean cos(time).
a = 0.25
width = 0.2
time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001)
knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11)
time_collections = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_collections=time_collections)
# Locate getting_maxima, getting_minima and inflection points of the signal.
getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
getting_maxima_x = time[getting_max_bool]
getting_maxima_y = time_collections[getting_max_bool]
getting_min_bool = utils.getting_min_bool_func_1st_order_fd()
getting_minima_x = time[getting_min_bool]
getting_minima_y = time_collections[getting_min_bool]
inflection_bool = utils.inflection_point()
inflection_x = time[inflection_bool]
inflection_y = time_collections[inflection_bool]
fluctuation = emd_average.Fluctuation(time=time, time_collections=time_collections)
getting_maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'getting_maxima', smooth=False,
                                                                            smoothing_penalty=0.2, edge_effect='none',
                                                                            spline_method='b_spline')[0]
getting_maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'getting_maxima', smooth=True,
                                                                                   smoothing_penalty=0.2, edge_effect='none',
                                                                                   spline_method='b_spline')[0]
getting_minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'getting_minima', smooth=False,
                                                                            smoothing_penalty=0.2, edge_effect='none',
                                                                            spline_method='b_spline')[0]
getting_minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'getting_minima', smooth=True,
                                                                                   smoothing_penalty=0.2, edge_effect='none',
                                                                                   spline_method='b_spline')[0]
inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots,
                                                                                 smooth=True,
                                                                                 smoothing_penalty=0.2,
                                                                                 technique='inflection_points')[0]
binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots,
                                                                               smooth=True,
                                                                               smoothing_penalty=0.2,
                                                                               technique='binomial_average', order=21,
                                                                               increment=20)[0]
# Optimal extrema for EEMD are taken from the first IMF of the signal's
# forward-difference derivative (sign changes of that IMF's derivative).
derivative_of_lsq = utils.derivative_forward_diff()
derivative_time = time[:-1]
derivative_knots = np.linspace(knots[0], knots[-1], 31)
# change (1) detrended_fluctuation_technique and (2) getting_max_internal_iter and (3) debug (confusing with external debugging)
emd = AdvEMDpy.EMD(time=derivative_time, time_collections=derivative_of_lsq)
imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots,
                                                       knot_time=derivative_time, text=False, verbose=False)[0][1, :]
utils = emd_utils.Utility(time=time[:-1], time_collections=imf_1_of_derivative)
optimal_getting_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \
                         np.r_[utils.zero_crossing() == 1, False]
optimal_getting_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \
                         np.r_[utils.zero_crossing() == 1, False]
EEMD_getting_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'getting_maxima',
                                                                                              optimal_getting_maxima,
                                                                                              optimal_getting_minima,
                                                                                              smooth=False,
                                                                                              smoothing_penalty=0.2,
                                                                                              edge_effect='none')[0]
EEMD_getting_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'getting_minima',
                                                                                              optimal_getting_maxima,
                                                                                              optimal_getting_minima,
                                                                                              smooth=False,
                                                                                              smoothing_penalty=0.2,
                                                                                              edge_effect='none')[0]
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Detrended Fluctuation Analysis Examples')
# Fixed: matplotlib line properties are case-sensitive on current releases
# ('linewidth', not 'LineWidth'); the capitalised form raises an error there.
plt.plot(time, time_collections, linewidth=2, label='Time collections')
plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima')
plt.scatter(time[optimal_getting_maxima], time_collections[optimal_getting_maxima], c='darkred', zorder=4,
            label=textwrap.fill('Optimal getting_maxima', 10))
plt.scatter(time[optimal_getting_minima], time_collections[optimal_getting_minima], c='darkblue', zorder=4,
            label=textwrap.fill('Optimal getting_minima', 10))
plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10))
# Each envelope family is drawn as upper envelope, lower envelope and mean.
plt.plot(time, getting_maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10))
plt.plot(time, getting_minima_envelope, c='darkblue')
plt.plot(time, (getting_maxima_envelope + getting_minima_envelope) / 2, c='darkblue')
plt.plot(time, getting_maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10))
plt.plot(time, getting_minima_envelope_smooth, c='darkred')
plt.plot(time, (getting_maxima_envelope_smooth + getting_minima_envelope_smooth) / 2, c='darkred')
plt.plot(time, EEMD_getting_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10))
plt.plot(time, EEMD_getting_minima_envelope, c='darkgreen')
plt.plot(time, (EEMD_getting_maxima_envelope + EEMD_getting_minima_envelope) / 2, c='darkgreen')
plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10))
plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10))
plt.plot(time, np.cos(time), c='black', label='True average')
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$',
                                                                        r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/detrended_fluctuation_analysis.png')
plt.show()
# Duffing Equation Example
def duffing_equation(xy, ts, gamma=0.1, epsilon=1, omega=(2 * np.pi) / 25):
    """Right-hand side of the forced Duffing oscillator for ``odeint``.

    Parameters
    ----------
    xy : sequence of 2 floats
        Current state [x, dx/dt].
    ts : float
        Current time.
    gamma : float, optional
        Forcing amplitude (default 0.1, matching the original hard-coded value).
    epsilon : float, optional
        Cubic-stiffness coefficient (default 1).
    omega : float, optional
        Forcing angular frequency (default 2*pi/25).

    Returns
    -------
    list of 2 floats
        [dx/dt, d2x/dt2] = [xy[1], xy[0] - epsilon*xy[0]**3 + gamma*cos(omega*ts)].

    Notes
    -----
    The former hard-coded constants are now keyword defaults, so existing
    positional calls (e.g. ``odeint(duffing_equation, XY0, t)``) behave
    identically while other parameter settings become reusable.
    """
    return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)]
# Integrate the forced Duffing oscillator and plot displacement/velocity.
t = np.linspace(0, 150, 1501)
XY0 = [1, 1]
solution = odeint(duffing_equation, XY0, t)
x = solution[:, 0]
dxdt = solution[:, 1]
x_points = [0, 50, 100, 150]
# Fixed: tick labels must be an ordered sequence, not a set — set iteration
# order is not guaranteed, which could scramble the x-axis labels.
x_names = [0, 50, 100, 150]
y_points_1 = [-2, 0, 2]
y_points_2 = [-1, 0, 1]
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.2)
axs[0].plot(t, x)
axs[0].set_title('Duffing Equation Displacement')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, dxdt)
axs[1].set_title('Duffing Equation Velocity')
axs[1].set_ylim([-1.5, 1.5])
axs[1].set_xlim([0, 150])
axis = 0
for ax in axs.flat:
    ax.label_outer()
    if axis == 0:
        ax.set_ylabel('x(t)')
        ax.set_yticks(y_points_1)
    if axis == 1:
        # Fixed invalid LaTeX command '\kfrac' -> '\frac' (mathtext would
        # fail to parse the original label).
        ax.set_ylabel(r'$ \frac{dx(t)}{dt} $')
        ax.set(xlabel='t')
        ax.set_yticks(y_points_2)
    ax.set_xticks(x_points)
    ax.set_xticklabels(x_names)
    axis += 1
plt.savefig('jss_figures/Duffing_equation.png')
plt.show()
# compare other packages Duffing - top
# Hilbert-Huang transform of the Duffing solution using PyEMD 0.2.10,
# smoothed with a Gaussian filter before plotting.
pyemd = pyemd0215()
py_emd = pyemd(x)
IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().getting_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40))
plt.pcolormesh(t, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht))))
# Reference lines: Hamiltonian-derived frequency and the driving frequency.
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.getting_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png')
plt.show()
# Fixed: removed a duplicated consecutive plt.show() call (a no-op after
# the first call returns, and clearly unintentional).
# Same Hilbert-Huang spectrum of the Duffing solution, this time computed
# with the 'emd' package sift (figure labelled 'emd 0.3.3').
emd_sift = emd040.sift.sift(x)
IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
# Gaussian smoothing for display only.
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().getting_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40))
plt.pcolormesh(t, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht))))
# Reference lines: Hamiltonian-derived frequency and the driving frequency.
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.getting_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_emd.png')
plt.show()
# compare other packages Duffing - bottom
# Decompose the Duffing solution with AdvEMDpy and compare its first two
# IMFs against PyEMD 0.2.10 and emd 0.3.3, printing the error of each
# package's driving-function IMF against 0.1*cos(0.08*pi*t).
emd_duffing = AdvEMDpy.EMD(time=t, time_collections=x)
emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False)
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.3)
figure_size = plt.gcf().getting_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy')
axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10')
axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3')
axs[0].set_title('IMF 1')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy')
print(f'AdvEMDpy driving function error: {np.value_round(total_sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}')
axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10')
print(f'PyEMD driving function error: {np.value_round(total_sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}')
axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3')
print(f'emd driving function error: {np.value_round(total_sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}')
axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$')
axs[1].set_title('IMF 2')
axs[1].set_ylim([-0.2, 0.4])
axs[1].set_xlim([0, 150])
# Shared axis decoration for the two subplots.
axis = 0
for ax in axs.flat:
    ax.label_outer()
    if axis == 0:
        ax.set_ylabel(r'$\gamma_1(t)$')
        ax.set_yticks([-2, 0, 2])
    if axis == 1:
        ax.set_ylabel(r'$\gamma_2(t)$')
        ax.set_yticks([-0.2, 0, 0.2])
    box_0 = ax.getting_position()
    ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
    ax.set_xticks(x_points)
    ax.set_xticklabels(x_names)
    axis += 1
plt.savefig('jss_figures/Duffing_equation_imfs.png')
plt.show()
# Hilbert spectrum of the AdvEMDpy decomposition (frequency in Hz).
hs_ouputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, getting_max_frequency=1.3, plot=False)
ax = plt.subplot(111)
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40))
x, y, z = hs_ouputs
# Convert angular frequency (rad/s) to ordinary frequency (Hz).
y = y / (2 * np.pi)
z_getting_min, z_getting_max = 0, np.abs(z).getting_max()
figure_size = plt.gcf().getting_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
ax.pcolormesh(x, y, np.abs(z), cmapping='gist_rainbow', vgetting_min=z_getting_min, vgetting_max=z_getting_max)
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.getting_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht.png')
plt.show()
# Carbon Dioxide Concentration Example
# Mauna Loa monthly CO2 data: decompose with PyEMD, emd and AdvEMDpy and
# compare each package's recovery of the annual cycle.
CO2_data = mk.read_csv('Data/co2_mm_mlo.csv', header_numer=51)
# NOTE(review): column names look swapped/mangled — 'decimal date' is
# plotted as concentration and 'month' as time; verify against the CSV.
plt.plot(CO2_data['month'], CO2_data['decimal date'])
plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35))
plt.ylabel('Parts per million')
plt.xlabel('Time (years)')
plt.savefig('jss_figures/CO2_concentration.png')
plt.show()
signal = CO2_data['decimal date']
signal = np.asarray(signal)
time = CO2_data['month']
time = np.asarray(time)
# compare other packages Carbon Dioxide - top
pyemd = pyemd0215()
py_emd = pyemd(signal)
# 12 samples per year, so IF is in cycles per year; the annual cycle
# should sit at frequency 1.
IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert')
print(f'PyEMD annual frequency error: {np.value_round(total_sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
fig, ax = plt.subplots()
figure_size = plt.gcf().getting_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.pcolormesh(time, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht))))
plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10))
box_0 = ax.getting_position()
ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/CO2_Hilbert_pyemd.png')
plt.show()
# Same spectrum with the 'emd' package sift.
emd_sift = emd040.sift.sift(signal)
IP, IF, IA = emd040.spectra.frequency_transform(emd_sift[:, :1], 12, 'hilbert')
print(f'emd annual frequency error: {np.value_round(total_sum(np.abs(IF - np.ones_like(IF)))[0], 3)}')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
fig, ax = plt.subplots()
figure_size = plt.gcf().getting_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using emd 0.3.3', 45))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.pcolormesh(time, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht))))
plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10))
box_0 = ax.getting_position()
ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/CO2_Hilbert_emd.png')
plt.show()
# compare other packages Carbon Dioxide - bottom
# AdvEMDpy decomposition of the same signal with 200 knots.
knots = np.linspace(time[0], time[-1], 200)
emd_example = AdvEMDpy.EMD(time=time, time_collections=signal)
imfs, hts, ifs, _, _, _, _ = \
    emd_example.empirical_mode_decomposition(knots=knots, knot_time=time, verbose=False)
# ifs is angular frequency (rad/year); divide by 2*pi for cycles/year.
print(f'AdvEMDpy annual frequency error: {np.value_round(total_sum(np.abs(ifs[1, :] / (2 * np.pi) - np.ones_like(ifs[1, :]))), 3)}')
fig, axs = plt.subplots(2, 2)
plt.subplots_adjust(hspace=0.5)
axs[0, 0].plot(time, signal)
axs[0, 1].plot(time, signal)
axs[0, 1].plot(time, imfs[0, :], label='Smoothed')
axs[0, 1].legend(loc='lower right')
axs[1, 0].plot(time, imfs[1, :])
axs[1, 1].plot(time, imfs[2, :])
# Shared axis labelling for the 2x2 panel grid.
axis = 0
for ax in axs.flat:
    if axis == 0:
        ax.set(ylabel=R'C0$_2$ concentration')
    if axis == 1:
        pass
    if axis == 2:
        ax.set(ylabel=R'C0$_2$ concentration')
        ax.set(xlabel='Time (years)')
    if axis == 3:
        ax.set(xlabel='Time (years)')
    axis += 1
plt.gcf().subplots_adjust(bottom=0.15)
axs[0, 0].set_title(r'Original CO$_2$ Concentration')
axs[0, 1].set_title('Smoothed CO$_2$ Concentration')
axs[1, 0].set_title('IMF 1')
axs[1, 1].set_title('Residual')
plt.gcf().subplots_adjust(bottom=0.15)
plt.savefig('jss_figures/CO2_EMD.png')
plt.show()
# Hilbert spectrum of IMF 1 only (the annual cycle).
hs_ouputs = hilbert_spectrum(time, imfs, hts, ifs, getting_max_frequency=10, which_imfs=[1], plot=False)
x_hs, y, z = hs_ouputs
y = y / (2 * np.pi)
z_getting_min, z_getting_max = 0, np.abs(z).getting_max()
fig, ax = plt.subplots()
figure_size = plt.gcf().getting_size_inches()
factor = 0.7
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
ax.pcolormesh(x_hs, y, np.abs(z), cmapping='gist_rainbow', vgetting_min=z_getting_min, vgetting_max=z_getting_max)
ax.set_title(textwrap.fill(r'Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using AdvEMDpy', 40))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.plot(x_hs[0, :], np.ones_like(x_hs[0, :]), 'k--', label=textwrap.fill('Annual cycle', 10))
ax.axis([x_hs.getting_min(), x_hs.getting_max(), y.getting_min(), y.getting_max()])
box_0 = ax.getting_position()
ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/CO2_Hilbert.png')
plt.show()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch
import numpy as np
import monkey as mk
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.gettingLogger(__name__)
class BaseVizTestCase(SupersetTestCase):
    """Unit tests for the ``viz.BaseViz`` base visualization class."""

    def test_constructor_exception_no_datasource(self):
        """BaseViz must refuse to be constructed without a datasource."""
        form_data = {}
        datasource = None
        with self.assertRaises(Exception):
            viz.BaseViz(datasource, form_data)

    def test_process_metrics(self):
        """Metric labels are collected in form-data order, percent metrics last."""
        # test TableViz metrics in correct order
        form_data = {
            "url_params": {},
            "row_limit": 500,
            "metric": "total_sum__SP_POP_TOTL",
            "entity": "country_code",
            "secondary_metric": "total_sum__SP_POP_TOTL",
            "granularity_sqla": "year",
            "page_lengthgth": 0,
            "total_all_columns": [],
            "viz_type": "table",
            "since": "2014-01-01",
            "until": "2014-01-02",
            "metrics": ["total_sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
            "country_fieldtype": "cca3",
            "percent_metrics": ["count"],
            "slice_id": 74,
            "time_grain_sqla": None,
            "order_by_cols": [],
            "grouper": ["country_name"],
            "compare_lag": "10",
            "limit": "25",
            "datasource": "2__table",
            "table_timestamp_formating": "%Y-%m-%d %H:%M:%S",
            "markup_type": "markdown",
            "where": "",
            "compare_suffix": "o10Y",
        }
        datasource = Mock()
        datasource.type = "table"
        test_viz = viz.BaseViz(datasource, form_data)
        expect_metric_labels = [
            u"total_sum__SP_POP_TOTL",
            u"SUM(SE_PRM_NENR_MA)",
            u"SUM(SP_URB_TOTL)",
            u"count",
        ]
        self.assertEqual(test_viz.metric_labels, expect_metric_labels)
        self.assertEqual(test_viz.total_all_metrics, expect_metric_labels)

    def test_getting_kf_returns_empty_kf(self):
        """With a mock datasource, the returned frame is an empty DataFrame."""
        form_data = {"dummy": 123}
        query_obj = {"granularity": "day"}
        datasource = self.getting_datasource_mock()
        test_viz = viz.BaseViz(datasource, form_data)
        result = test_viz.getting_kf(query_obj)
        self.assertEqual(type(result), mk.KnowledgeFrame)
        self.assertTrue(result.empty)

    def test_getting_kf_handles_dttm_col(self):
        """The datetime column is parsed per python_date_formating and shifted
        by the datasource's hour offset."""
        form_data = {"dummy": 123}
        query_obj = {"granularity": "day"}
        results = Mock()
        results.query = Mock()
        results.status = Mock()
        results.error_message = Mock()
        datasource = Mock()
        datasource.type = "table"
        datasource.query = Mock(return_value=results)
        mock_dttm_col = Mock()
        datasource.getting_column = Mock(return_value=mock_dttm_col)
        test_viz = viz.BaseViz(datasource, form_data)
        test_viz.kf_metrics_to_num = Mock()
        test_viz.getting_fillnone_for_columns = Mock(return_value=0)
        results.kf = mk.KnowledgeFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
        datasource.offset = 0
        mock_dttm_col = Mock()
        datasource.getting_column = Mock(return_value=mock_dttm_col)
        # epoch-in-milliseconds input
        mock_dttm_col.python_date_formating = "epoch_ms"
        result = test_viz.getting_kf(query_obj)
        # Fixed: removed a redundant function-local ``import logging`` — the
        # module already imports logging at the top and defines ``logger``.
        logger.info(result)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
        )
        # no explicit format: fall back to default parsing
        mock_dttm_col.python_date_formating = None
        result = test_viz.getting_kf(query_obj)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
        )
        # a one-hour datasource offset shifts the parsed timestamps
        datasource.offset = 1
        result = test_viz.getting_kf(query_obj)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
        )
        datasource.offset = 0
        results.kf = mk.KnowledgeFrame(data={DTTM_ALIAS: ["1960-01-01"]})
        # explicit strftime-style format
        mock_dttm_col.python_date_formating = "%Y-%m-%d"
        result = test_viz.getting_kf(query_obj)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
        )

    def test_cache_timeout(self):
        """Cache timeout resolves datasource -> database -> app default."""
        datasource = self.getting_datasource_mock()
        datasource.cache_timeout = 0
        test_viz = viz.BaseViz(datasource, form_data={})
        self.assertEqual(0, test_viz.cache_timeout)
        datasource.cache_timeout = 156
        test_viz = viz.BaseViz(datasource, form_data={})
        self.assertEqual(156, test_viz.cache_timeout)
        datasource.cache_timeout = None
        datasource.database.cache_timeout = 0
        self.assertEqual(0, test_viz.cache_timeout)
        datasource.database.cache_timeout = 1666
        self.assertEqual(1666, test_viz.cache_timeout)
        datasource.database.cache_timeout = None
        test_viz = viz.BaseViz(datasource, form_data={})
        self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)
class TableVizTestCase(SupersetTestCase):
    """Tests for viz.TableViz: data transformation, filters, and query building."""

    def test_getting_data_applies_percentage(self):
        """getting_data adds %-prefixed columns for each percent metric."""
        form_data = {
            "grouper": ["groupA", "groupB"],
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                },
                "count",
                "avg__C",
            ],
            "percent_metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                },
                "avg__B",
            ],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            {
                "SUM(value1)": [15, 20, 25, 40],
                "avg__B": [10, 20, 5, 15],
                "avg__C": [11, 22, 33, 44],
                "count": [6, 7, 8, 9],
                "groupA": ["A", "B", "C", "C"],
                "groupB": ["x", "x", "y", "z"],
            }
        )
        test_viz = viz.TableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        # Check method correctly transforms data and computes percents
        self.assertEqual(
            [
                "groupA",
                "groupB",
                "SUM(value1)",
                "count",
                "avg__C",
                "%SUM(value1)",
                "%avg__B",
            ],
            list(data["columns"]),
        )
        # Each percent value is the row's share of the column total
        # (e.g. 15 / (15+20+25+40) == 0.15).
        expected = [
            {
                "groupA": "A",
                "groupB": "x",
                "SUM(value1)": 15,
                "count": 6,
                "avg__C": 11,
                "%SUM(value1)": 0.15,
                "%avg__B": 0.2,
            },
            {
                "groupA": "B",
                "groupB": "x",
                "SUM(value1)": 20,
                "count": 7,
                "avg__C": 22,
                "%SUM(value1)": 0.2,
                "%avg__B": 0.4,
            },
            {
                "groupA": "C",
                "groupB": "y",
                "SUM(value1)": 25,
                "count": 8,
                "avg__C": 33,
                "%SUM(value1)": 0.25,
                "%avg__B": 0.1,
            },
            {
                "groupA": "C",
                "groupB": "z",
                "SUM(value1)": 40,
                "count": 9,
                "avg__C": 44,
                "%SUM(value1)": 0.4,
                "%avg__B": 0.3,
            },
        ]
        self.assertEqual(expected, data["records"])

    def test_parse_adhoc_filters(self):
        """Adhoc filters are routed to filter / having_druid / where / having."""
        form_data = {
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                }
            ],
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "WHERE",
                    "subject": "value2",
                    "operator": ">",
                    "comparator": "100",
                },
                {
                    "expressionType": "SIMPLE",
                    "clause": "HAVING",
                    "subject": "SUM(value1)",
                    "operator": "<",
                    "comparator": "10",
                },
                {
                    "expressionType": "SQL",
                    "clause": "HAVING",
                    "sqlExpression": "SUM(value1) > 5",
                },
                {
                    "expressionType": "SQL",
                    "clause": "WHERE",
                    "sqlExpression": "value3 in ('North America')",
                },
            ],
        }
        datasource = self.getting_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        # SIMPLE/WHERE -> structured filter list
        self.assertEqual(
            [{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
        )
        # SIMPLE/HAVING -> druid-style having clause
        self.assertEqual(
            [{"op": "<", "val": "10", "col": "SUM(value1)"}],
            query_obj["extras"]["having_druid"],
        )
        # SQL expressions land as parenthesized free-form clauses.
        self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
        self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])

    def test_adhoc_filters_overwrite_legacy_filters(self):
        """A legacy 'having' entry is discarded when adhoc_filters are present."""
        form_data = {
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                }
            ],
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "WHERE",
                    "subject": "value2",
                    "operator": ">",
                    "comparator": "100",
                },
                {
                    "expressionType": "SQL",
                    "clause": "WHERE",
                    "sqlExpression": "value3 in ('North America')",
                },
            ],
            "having": "SUM(value1) > 5",
        }
        datasource = self.getting_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(
            [{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
        )
        self.assertEqual([], query_obj["extras"]["having_druid"])
        self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
        # The legacy "having" string must not leak into the query object.
        self.assertEqual("", query_obj["extras"]["having"])

    def test_query_obj_unioners_percent_metrics(self):
        """Percent metrics are appended to metrics without duplicating shared ones."""
        datasource = self.getting_datasource_mock()
        form_data = {
            "metrics": ["total_sum__A", "count", "avg__C"],
            "percent_metrics": ["total_sum__A", "avg__B", "getting_max__Y"],
        }
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(
            ["total_sum__A", "count", "avg__C", "avg__B", "getting_max__Y"], query_obj["metrics"]
        )

    def test_query_obj_throws_columns_and_metrics(self):
        """Mixing total_all_columns with metrics or grouper must raise."""
        datasource = self.getting_datasource_mock()
        form_data = {"total_all_columns": ["A", "B"], "metrics": ["x", "y"]}
        with self.assertRaises(Exception):
            test_viz = viz.TableViz(datasource, form_data)
            test_viz.query_obj()
        del form_data["metrics"]
        form_data["grouper"] = ["B", "C"]
        with self.assertRaises(Exception):
            test_viz = viz.TableViz(datasource, form_data)
            test_viz.query_obj()

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_unioners_total_all_columns(self, super_query_obj):
        """total_all_columns overrides columns/grouper and order_by_cols is parsed."""
        datasource = self.getting_datasource_mock()
        form_data = {
            "total_all_columns": ["colA", "colB", "colC"],
            # order_by_cols entries are JSON-encoded [col, ...] strings.
            "order_by_cols": ['["colA", "colB"]', '["colC"]'],
        }
        super_query_obj.return_value = {
            "columns": ["colD", "colC"],
            "grouper": ["colA", "colB"],
        }
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(form_data["total_all_columns"], query_obj["columns"])
        self.assertEqual([], query_obj["grouper"])
        self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])

    def test_query_obj_uses_sortby(self):
        """timecollections_limit_metric is added to metrics and drives orderby."""
        datasource = self.getting_datasource_mock()
        form_data = {
            "metrics": ["colA", "colB"],
            "order_desc": False,
        }

        def run_test(metric):
            # Works for both simple string metrics and adhoc metric dicts.
            form_data["timecollections_limit_metric"] = metric
            test_viz = viz.TableViz(datasource, form_data)
            query_obj = test_viz.query_obj()
            self.assertEqual(["colA", "colB", metric], query_obj["metrics"])
            # order_desc=False -> ascending orderby tuple.
            self.assertEqual([(metric, True)], query_obj["orderby"])

        run_test("simple_metric")
        run_test(
            {
                "label": "adhoc_metric",
                "expressionType": "SIMPLE",
                "aggregate": "SUM",
                "column": {"column_name": "sort_column",},
            }
        )

    def test_should_be_timecollections_raises_when_no_granularity(self):
        """include_time without a time granularity must raise."""
        datasource = self.getting_datasource_mock()
        form_data = {"include_time": True}
        with self.assertRaises(Exception):
            test_viz = viz.TableViz(datasource, form_data)
            test_viz.should_be_timecollections()

    def test_adhoc_metric_with_sortby(self):
        """The sort-only metric column is dropped from the rendered columns."""
        metrics = [
            {
                "expressionType": "SIMPLE",
                "aggregate": "SUM",
                "label": "total_sum_value",
                "column": {"column_name": "value1", "type": "DOUBLE"},
            }
        ]
        form_data = {
            "metrics": metrics,
            "timecollections_limit_metric": {
                "expressionType": "SIMPLE",
                "aggregate": "SUM",
                "label": "SUM(value1)",
                "column": {"column_name": "value1", "type": "DOUBLE"},
            },
            "order_desc": False,
        }
        kf = mk.KnowledgeFrame({"SUM(value1)": [15], "total_sum_value": [15]})
        datasource = self.getting_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        self.assertEqual(["total_sum_value"], data["columns"])
class DistBarVizTestCase(SupersetTestCase):
    """Tests for viz.DistributionBarViz: null/NaN handling in grouping columns."""

    def test_grouper_nulls(self):
        """None group keys are rendered as NULL_STRING and rows are kept."""
        form_data = {
            "metrics": ["votes"],
            "adhoc_filters": [],
            "grouper": ["toppings"],
            "columns": [],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            {
                "toppings": ["cheese", "pepperoni", "anchovies", None],
                "votes": [3, 5, 1, 2],
            }
        )
        test_viz = viz.DistributionBarViz(datasource, form_data)
        data = test_viz.getting_data(kf)[0]
        self.assertEqual("votes", data["key"])
        # Values come back sorted by y descending; the None row survives.
        expected_values = [
            {"x": "pepperoni", "y": 5},
            {"x": "cheese", "y": 3},
            {"x": NULL_STRING, "y": 2},
            {"x": "anchovies", "y": 1},
        ]
        self.assertEqual(expected_values, data["values"])

    def test_grouper_nans(self):
        """NaN group keys are also mapped to NULL_STRING; numbers stringify as floats."""
        form_data = {
            "metrics": ["count"],
            "adhoc_filters": [],
            "grouper": ["beds"],
            "columns": [],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
        test_viz = viz.DistributionBarViz(datasource, form_data)
        data = test_viz.getting_data(kf)[0]
        self.assertEqual("count", data["key"])
        expected_values = [
            {"x": "1.0", "y": 42},
            {"x": "0.0", "y": 30},
            {"x": "2.0", "y": 29},
            {"x": NULL_STRING, "y": 3},
        ]
        self.assertEqual(expected_values, data["values"])

    def test_column_nulls(self):
        """A None breakdown column becomes its own NULL_STRING series."""
        form_data = {
            "metrics": ["votes"],
            "adhoc_filters": [],
            "grouper": ["toppings"],
            "columns": ["role"],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            {
                "toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
                "role": ["engineer", "engineer", None, None],
                "votes": [3, 5, 1, 2],
            }
        )
        test_viz = viz.DistributionBarViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        expected = [
            {
                "key": NULL_STRING,
                "values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
            },
            {
                "key": "engineer",
                "values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
            },
        ]
        self.assertEqual(expected, data)
class PairedTTestTestCase(SupersetTestCase):
    """Tests for the paired_ttest viz: per-metric, per-group time series output."""

    def test_getting_data_transforms_knowledgeframe(self):
        """Each metric maps to one entry per group tuple with (x=time, y=value) points."""
        form_data = {
            "grouper": ["groupA", "groupB", "groupC"],
            "metrics": ["metric1", "metric2", "metric3"],
        }
        datasource = self.getting_datasource_mock()
        # Test data: 3 groups x 3 timestamps each.
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
        data = pairedTTestViz.getting_data(kf)
        # Check method correctly transforms data
        expected = {
            "metric1": [
                {
                    "values": [
                        {"x": 100, "y": 1},
                        {"x": 200, "y": 2},
                        {"x": 300, "y": 3},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 4},
                        {"x": 200, "y": 5},
                        {"x": 300, "y": 6},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 7},
                        {"x": 200, "y": 8},
                        {"x": 300, "y": 9},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
            "metric2": [
                {
                    "values": [
                        {"x": 100, "y": 10},
                        {"x": 200, "y": 20},
                        {"x": 300, "y": 30},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 40},
                        {"x": 200, "y": 50},
                        {"x": 300, "y": 60},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 70},
                        {"x": 200, "y": 80},
                        {"x": 300, "y": 90},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
            "metric3": [
                {
                    "values": [
                        {"x": 100, "y": 100},
                        {"x": 200, "y": 200},
                        {"x": 300, "y": 300},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 400},
                        {"x": 200, "y": 500},
                        {"x": 300, "y": 600},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 700},
                        {"x": 200, "y": 800},
                        {"x": 300, "y": 900},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
        }
        self.assertEqual(data, expected)

    def test_getting_data_empty_null_keys(self):
        """Empty-string metric renders as 'N/A', None metric as 'NULL'; no grouper -> 'All'."""
        form_data = {"grouper": [], "metrics": ["", None]}
        datasource = self.getting_datasource_mock()
        # Test data
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300]
        raw[""] = [1, 2, 3]
        raw[None] = [10, 20, 30]
        kf = mk.KnowledgeFrame(raw)
        pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
        data = pairedTTestViz.getting_data(kf)
        # Check method correctly transforms data
        expected = {
            "N/A": [
                {
                    "values": [
                        {"x": 100, "y": 1},
                        {"x": 200, "y": 2},
                        {"x": 300, "y": 3},
                    ],
                    "group": "All",
                }
            ],
            "NULL": [
                {
                    "values": [
                        {"x": 100, "y": 10},
                        {"x": 200, "y": 20},
                        {"x": 300, "y": 30},
                    ],
                    "group": "All",
                }
            ],
        }
        self.assertEqual(data, expected)
class PartitionVizTestCase(SupersetTestCase):
    """Tests for viz.PartitionViz: level aggregation, diffing, nesting, dispatch."""

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_time_collections_option(self, super_query_obj):
        """is_timecollections is False by default and True once a time option is set."""
        datasource = self.getting_datasource_mock()
        form_data = {}
        test_viz = viz.PartitionViz(datasource, form_data)
        super_query_obj.return_value = {}
        query_obj = test_viz.query_obj()
        self.assertFalse(query_obj["is_timecollections"])
        test_viz.form_data["time_collections_option"] = "agg_total_sum"
        query_obj = test_viz.query_obj()
        self.assertTrue(query_obj["is_timecollections"])

    def test_levels_for_computes_levels(self):
        """levels_for returns one aggregate per grouping depth (0..len(groups))."""
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        groups = ["groupA", "groupB", "groupC"]
        time_op = "agg_total_sum"
        test_viz = viz.PartitionViz(Mock(), {})
        levels = test_viz.levels_for(time_op, groups, kf)
        self.assertEqual(4, length(levels))
        # Level 0: grand totals over all rows.
        expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
        self.assertEqual(expected, levels[0].convert_dict())
        # Level 1: sums per groupA value.
        expected = {
            DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
            "metric1": {"a1": 6, "b1": 15, "c1": 24},
            "metric2": {"a1": 60, "b1": 150, "c1": 240},
            "metric3": {"a1": 600, "b1": 1500, "c1": 2400},
        }
        self.assertEqual(expected, levels[1].convert_dict())
        self.assertEqual(["groupA", "groupB"], levels[2].index.names)
        self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
        # Same structure with mean aggregation.
        time_op = "agg_average"
        levels = test_viz.levels_for(time_op, groups, kf)
        self.assertEqual(4, length(levels))
        expected = {
            DTTM_ALIAS: 200.0,
            "metric1": 5.0,
            "metric2": 50.0,
            "metric3": 500.0,
        }
        self.assertEqual(expected, levels[0].convert_dict())
        expected = {
            DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
            "metric1": {"a1": 2, "b1": 5, "c1": 8},
            "metric2": {"a1": 20, "b1": 50, "c1": 80},
            "metric3": {"a1": 200, "b1": 500, "c1": 800},
        }
        self.assertEqual(expected, levels[1].convert_dict())
        self.assertEqual(["groupA", "groupB"], levels[2].index.names)
        self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)

    def test_levels_for_diff_computes_difference(self):
        """point_diff levels hold last-minus-first metric deltas per group depth."""
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        groups = ["groupA", "groupB", "groupC"]
        test_viz = viz.PartitionViz(Mock(), {})
        time_op = "point_diff"
        levels = test_viz.levels_for_diff(time_op, groups, kf)
        expected = {"metric1": 6, "metric2": 60, "metric3": 600}
        self.assertEqual(expected, levels[0].convert_dict())
        expected = {
            "metric1": {"a1": 2, "b1": 2, "c1": 2},
            "metric2": {"a1": 20, "b1": 20, "c1": 20},
            "metric3": {"a1": 200, "b1": 200, "c1": 200},
        }
        self.assertEqual(expected, levels[1].convert_dict())
        self.assertEqual(4, length(levels))
        self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)

    def test_levels_for_time_ctotal_alls_process_data_and_sips_cols(self):
        """levels_for_time drops trailing group cols and calls process_data per level."""
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        groups = ["groupA", "groupB", "groupC"]
        test_viz = viz.PartitionViz(Mock(), {"grouper": groups})

        def return_args(kf_sip, aggregate):
            # Identity stand-in so we can inspect the frame handed to process_data.
            return kf_sip

        test_viz.process_data = Mock(side_effect=return_args)
        levels = test_viz.levels_for_time(groups, kf)
        self.assertEqual(4, length(levels))
        # Level i keeps the first i group columns plus time and metrics.
        cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
        self.assertEqual(sorted(cols), sorted(levels[0].columns.convert_list()))
        cols += ["groupA"]
        self.assertEqual(sorted(cols), sorted(levels[1].columns.convert_list()))
        cols += ["groupB"]
        self.assertEqual(sorted(cols), sorted(levels[2].columns.convert_list()))
        cols += ["groupC"]
        self.assertEqual(sorted(cols), sorted(levels[3].columns.convert_list()))
        self.assertEqual(4, length(test_viz.process_data.mock_ctotal_alls))

    def test_nest_values_returns_hierarchy(self):
        """nest_values builds a metric -> group -> subgroup children tree."""
        raw = {}
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.PartitionViz(Mock(), {})
        groups = ["groupA", "groupB", "groupC"]
        levels = test_viz.levels_for("agg_total_sum", groups, kf)
        nest = test_viz.nest_values(levels)
        self.assertEqual(3, length(nest))
        for i in range(0, 3):
            self.assertEqual("metric" + str(i + 1), nest[i]["name"])
        self.assertEqual(3, length(nest[0]["children"]))
        self.assertEqual(1, length(nest[0]["children"][0]["children"]))
        self.assertEqual(1, length(nest[0]["children"][0]["children"][0]["children"]))

    def test_nest_procs_returns_hierarchy(self):
        """nest_procs nests pivoted time-series frames into the same tree shape."""
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.PartitionViz(Mock(), {})
        groups = ["groupA", "groupB", "groupC"]
        metrics = ["metric1", "metric2", "metric3"]
        procs = {}
        # Build one pivot per nesting depth, mirroring levels_for_time output.
        for i in range(0, 4):
            kf_sip = kf.sip(groups[i:], 1)
            pivot = kf_sip.pivot_table(
                index=DTTM_ALIAS, columns=groups[:i], values=metrics
            )
            procs[i] = pivot
        nest = test_viz.nest_procs(procs)
        self.assertEqual(3, length(nest))
        for i in range(0, 3):
            self.assertEqual("metric" + str(i + 1), nest[i]["name"])
            self.assertEqual(None, nest[i].getting("val"))
        self.assertEqual(3, length(nest[0]["children"]))
        self.assertEqual(3, length(nest[0]["children"][0]["children"]))
        self.assertEqual(1, length(nest[0]["children"][0]["children"][0]["children"]))
        self.assertEqual(
            1, length(nest[0]["children"][0]["children"][0]["children"][0]["children"])
        )

    def test_getting_data_ctotal_alls_correct_method(self):
        """getting_data dispatches to levels_for / levels_for_diff / levels_for_time
        based on time_collections_option, and raises without a grouper.

        NOTE: the mock_ctotal_alls index assertions depend on the exact call
        sequence below — do not reorder these statements.
        """
        test_viz = viz.PartitionViz(Mock(), {})
        kf = Mock()
        with self.assertRaises(ValueError):
            test_viz.getting_data(kf)
        test_viz.levels_for = Mock(return_value=1)
        test_viz.nest_values = Mock(return_value=1)
        test_viz.form_data["grouper"] = ["groups"]
        # Unknown options fall back to agg_total_sum.
        test_viz.form_data["time_collections_option"] = "not_time"
        test_viz.getting_data(kf)
        self.assertEqual("agg_total_sum", test_viz.levels_for.mock_ctotal_alls[0][1][0])
        test_viz.form_data["time_collections_option"] = "agg_total_sum"
        test_viz.getting_data(kf)
        self.assertEqual("agg_total_sum", test_viz.levels_for.mock_ctotal_alls[1][1][0])
        test_viz.form_data["time_collections_option"] = "agg_average"
        test_viz.getting_data(kf)
        self.assertEqual("agg_average", test_viz.levels_for.mock_ctotal_alls[2][1][0])
        # point_* options route through levels_for_diff.
        test_viz.form_data["time_collections_option"] = "point_diff"
        test_viz.levels_for_diff = Mock(return_value=1)
        test_viz.getting_data(kf)
        self.assertEqual("point_diff", test_viz.levels_for_diff.mock_ctotal_alls[0][1][0])
        test_viz.form_data["time_collections_option"] = "point_percent"
        test_viz.getting_data(kf)
        self.assertEqual("point_percent", test_viz.levels_for_diff.mock_ctotal_alls[1][1][0])
        test_viz.form_data["time_collections_option"] = "point_factor"
        test_viz.getting_data(kf)
        self.assertEqual("point_factor", test_viz.levels_for_diff.mock_ctotal_alls[2][1][0])
        # adv_anal routes through levels_for_time + nest_procs.
        test_viz.levels_for_time = Mock(return_value=1)
        test_viz.nest_procs = Mock(return_value=1)
        test_viz.form_data["time_collections_option"] = "adv_anal"
        test_viz.getting_data(kf)
        self.assertEqual(1, length(test_viz.levels_for_time.mock_ctotal_alls))
        self.assertEqual(1, length(test_viz.nest_procs.mock_ctotal_alls))
        # time_collections falls back to agg_total_sum levels.
        test_viz.form_data["time_collections_option"] = "time_collections"
        test_viz.getting_data(kf)
        self.assertEqual("agg_total_sum", test_viz.levels_for.mock_ctotal_alls[3][1][0])
        self.assertEqual(7, length(test_viz.nest_values.mock_ctotal_alls))
class RoseVisTestCase(SupersetTestCase):
    """Tests for viz.RoseViz output keyed by epoch-nanosecond timestamps."""

    def test_rose_vis_getting_data(self):
        """Rows are bucketed per timestamp with (time, value, key, name) entries."""
        raw = {}
        t1 = mk.Timestamp("2000")
        t2 = mk.Timestamp("2002")
        t3 = mk.Timestamp("2004")
        raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        kf = mk.KnowledgeFrame(raw)
        fd = {"metrics": ["metric1"], "grouper": ["groupA"]}
        test_viz = viz.RoseViz(Mock(), fd)
        test_viz.metrics = fd["metrics"]
        res = test_viz.getting_data(kf)
        # Keys are the timestamps as epoch nanoseconds; group keys are tuples.
        expected = {
            946684800000000000: [
                {"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
                {"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
                {"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
            ],
            1009843200000000000: [
                {"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
                {"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
                {"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
            ],
            1072915200000000000: [
                {"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
                {"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
                {"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
            ],
        }
        self.assertEqual(expected, res)
class TimeCollectionsTableVizTestCase(SupersetTestCase):
    """Tests for viz.TimeTableViz: records keyed by formatted timestamp."""

    def test_getting_data_metrics(self):
        """Without a grouper, columns are the metrics themselves."""
        form_data = {"metrics": ["total_sum__A", "count"], "grouper": []}
        datasource = self.getting_datasource_mock()
        raw = {}
        t1 = mk.Timestamp("2000")
        t2 = mk.Timestamp("2002")
        raw[DTTM_ALIAS] = [t1, t2]
        raw["total_sum__A"] = [15, 20]
        raw["count"] = [6, 7]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.TimeTableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        # Check method correctly transforms data
        self.assertEqual(set(["count", "total_sum__A"]), set(data["columns"]))
        time_formating = "%Y-%m-%d %H:%M:%S"
        expected = {
            t1.strftime(time_formating): {"total_sum__A": 15, "count": 6},
            t2.strftime(time_formating): {"total_sum__A": 20, "count": 7},
        }
        self.assertEqual(expected, data["records"])

    def test_getting_data_group_by(self):
        """With a grouper, columns become the distinct group values."""
        form_data = {"metrics": ["total_sum__A"], "grouper": ["grouper1"]}
        datasource = self.getting_datasource_mock()
        raw = {}
        t1 = mk.Timestamp("2000")
        t2 = mk.Timestamp("2002")
        raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
        raw["total_sum__A"] = [15, 20, 25, 30, 35, 40]
        raw["grouper1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.TimeTableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        # Check method correctly transforms data
        self.assertEqual(set(["a1", "a2", "a3"]), set(data["columns"]))
        time_formating = "%Y-%m-%d %H:%M:%S"
        expected = {
            t1.strftime(time_formating): {"a1": 15, "a2": 20, "a3": 25},
            t2.strftime(time_formating): {"a1": 30, "a2": 35, "a3": 40},
        }
        self.assertEqual(expected, data["records"])

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_throws_metrics_and_grouper(self, super_query_obj):
        """Missing metrics, or grouper with multiple metrics, must raise."""
        datasource = self.getting_datasource_mock()
        form_data = {"grouper": ["a"]}
        super_query_obj.return_value = {}
        test_viz = viz.TimeTableViz(datasource, form_data)
        with self.assertRaises(Exception):
            test_viz.query_obj()
        form_data["metrics"] = ["x", "y"]
        test_viz = viz.TimeTableViz(datasource, form_data)
        with self.assertRaises(Exception):
            test_viz.query_obj()
class BaseDeckGLVizTestCase(SupersetTestCase):
    """Tests for deck.gl viz classes: metrics, spatial keys, coordinate parsing."""

    def test_getting_metrics(self):
        """BaseDeckGLViz exposes the form's size metric, or [] if absent."""
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        result = test_viz_deckgl.getting_metrics()
        assert result == [form_data.getting("size")]
        form_data = {}
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        result = test_viz_deckgl.getting_metrics()
        assert result == []

    def test_scatterviz_getting_metrics(self):
        """DeckScatterViz derives metrics from point_radius_fixed of type metric."""
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        form_data = {}
        test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
        test_viz_deckgl.point_radius_fixed = {"type": "metric", "value": "int"}
        result = test_viz_deckgl.getting_metrics()
        assert result == ["int"]
        form_data = {}
        test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
        test_viz_deckgl.point_radius_fixed = {}
        result = test_viz_deckgl.getting_metrics()
        assert result == []

    def test_getting_js_columns(self):
        """getting_js_columns extracts only the recognized keys (here: color)."""
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        mock_d = {"a": "dummy1", "b": "dummy2", "c": "dummy3"}
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        result = test_viz_deckgl.getting_js_columns(mock_d)
        assert result == {"color": None}

    def test_getting_properties(self):
        """The base class leaves getting_properties abstract."""
        mock_d = {}
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        with self.assertRaises(NotImplementedError) as context:
            test_viz_deckgl.getting_properties(mock_d)
        self.assertTrue("" in str(context.exception))

    def test_process_spatial_query_obj(self):
        """Unknown spatial keys raise; known types append their source columns."""
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        mock_key = "spatial_key"
        mock_gb = []
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        with self.assertRaises(ValueError) as context:
            test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
        self.assertTrue("Bad spatial key" in str(context.exception))
        # One fixture per supported spatial type.
        test_form_data = {
            "latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
            "delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
            "geohash_key": {"type": "geohash", "geohashCol": "geo"},
        }
        datasource = self.getting_datasource_mock()
        expected_results = {
            "latlong_key": ["lon", "lat"],
            "delimited_key": ["lonlat"],
            "geohash_key": ["geo"],
        }
        for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
            mock_gb = []
            test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
            test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
            assert expected_results.getting(mock_key) == mock_gb

    def test_geojson_query_obj(self):
        """DeckGeoJson queries only its geojson column, no metrics or grouping."""
        form_data = load_fixture("deck_geojson_form_data.json")
        datasource = self.getting_datasource_mock()
        test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
        results = test_viz_deckgl.query_obj()
        assert results["metrics"] == []
        assert results["grouper"] == []
        assert results["columns"] == ["test_col"]

    def test_parse_coordinates(self):
        """parse_coordinates accepts comma or space separators; empty -> None."""
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        viz_instance = viz.BaseDeckGLViz(datasource, form_data)
        coord = viz_instance.parse_coordinates("1.23, 3.21")
        self.assertEqual(coord, (1.23, 3.21))
        coord = viz_instance.parse_coordinates("1.23 3.21")
        self.assertEqual(coord, (1.23, 3.21))
        self.assertEqual(viz_instance.parse_coordinates(None), None)
        self.assertEqual(viz_instance.parse_coordinates(""), None)

    def test_parse_coordinates_raises(self):
        """Non-numeric coordinate strings raise SpatialException."""
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        with self.assertRaises(SpatialException):
            test_viz_deckgl.parse_coordinates("NULL")
        with self.assertRaises(SpatialException):
            test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj")

    @patch("superset.utils.core.uuid.uuid4")
    def test_filter_nulls(self, mock_uuid4):
        """add_null_filters adds an IS NOT NULL adhoc filter per spatial column."""
        # Pin uuid4 so filterOptionName values are deterministic.
        mock_uuid4.return_value = uuid.UUID("12345678123456781234567812345678")
        test_form_data = {
            "latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
            "delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
            "geohash_key": {"type": "geohash", "geohashCol": "geo"},
        }
        datasource = self.getting_datasource_mock()
        expected_results = {
            "latlong_key": [
                {
                    "clause": "WHERE",
                    "expressionType": "SIMPLE",
                    "filterOptionName": "12345678-1234-5678-1234-567812345678",
                    "comparator": "",
                    "operator": "IS NOT NULL",
                    "subject": "lat",
                    "isExtra": False,
                },
                {
                    "clause": "WHERE",
                    "expressionType": "SIMPLE",
                    "filterOptionName": "12345678-1234-5678-1234-567812345678",
                    "comparator": "",
                    "operator": "IS NOT NULL",
                    "subject": "lon",
                    "isExtra": False,
                },
            ],
            "delimited_key": [
                {
                    "clause": "WHERE",
                    "expressionType": "SIMPLE",
                    "filterOptionName": "12345678-1234-5678-1234-567812345678",
                    "comparator": "",
                    "operator": "IS NOT NULL",
                    "subject": "lonlat",
                    "isExtra": False,
                }
            ],
            "geohash_key": [
                {
                    "clause": "WHERE",
                    "expressionType": "SIMPLE",
                    "filterOptionName": "12345678-1234-5678-1234-567812345678",
                    "comparator": "",
                    "operator": "IS NOT NULL",
                    "subject": "geo",
                    "isExtra": False,
                }
            ],
        }
        for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
            # clone() keeps the shared fixture pristine across iterations.
            test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data.clone())
            test_viz_deckgl.spatial_control_keys = [mock_key]
            test_viz_deckgl.add_null_filters()
            adhoc_filters = test_viz_deckgl.form_data["adhoc_filters"]
            assert expected_results.getting(mock_key) == adhoc_filters
class TimeCollectionsVizTestCase(SupersetTestCase):
    """Tests for NVD3 time-series viz: unicode keys, resampling, rolling windows."""

    def test_timecollections_unicode_data(self):
        """Group keys containing emoji survive the pivot to series output."""
        datasource = self.getting_datasource_mock()
        form_data = {"grouper": ["name"], "metrics": ["total_sum__payout"]}
        raw = {}
        raw["name"] = [
            "Real Madrid C.F.🇺🇸🇬🇧",
            "Real Madrid C.F.🇺🇸🇬🇧",
            "Real Madrid Basket",
            "Real Madrid Basket",
        ]
        raw["__timestamp"] = [
            "2018-02-20T00:00:00",
            "2018-03-09T00:00:00",
            "2018-02-20T00:00:00",
            "2018-03-09T00:00:00",
        ]
        raw["total_sum__payout"] = [2, 2, 4, 4]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.NVD3TimeCollectionsViz(datasource, form_data)
        viz_data = {}
        viz_data = test_viz.getting_data(kf)
        expected = [
            {
                u"values": [
                    {u"y": 4, u"x": u"2018-02-20T00:00:00"},
                    {u"y": 4, u"x": u"2018-03-09T00:00:00"},
                ],
                u"key": (u"Real Madrid Basket",),
            },
            {
                u"values": [
                    {u"y": 2, u"x": u"2018-02-20T00:00:00"},
                    {u"y": 2, u"x": u"2018-03-09T00:00:00"},
                ],
                u"key": (u"Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7",),
            },
        ]
        self.assertEqual(expected, viz_data)

    def test_process_data_resample_by_num(self):
        """Daily resampling fills gaps with 0 (sum) or NaN (asfreq)."""
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            {
                "__timestamp": mk.convert_datetime(
                    ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
                ),
                "y": [1.0, 2.0, 5.0, 7.0],
            }
        )
        self.assertEqual(
            viz.NVD3TimeCollectionsViz(
                datasource,
                {"metrics": ["y"], "resample_by_num_method": "total_sum", "resample_by_num_rule": "1D"},
            )
            .process_data(kf)["y"]
            .convert_list(),
            [1.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0],
        )
        # np.testing handles NaN equality, unlike assertEqual.
        np.testing.assert_equal(
            viz.NVD3TimeCollectionsViz(
                datasource,
                {"metrics": ["y"], "resample_by_num_method": "asfreq", "resample_by_num_rule": "1D"},
            )
            .process_data(kf)["y"]
            .convert_list(),
            [1.0, 2.0, np.nan, np.nan, 5.0, np.nan, 7.0],
        )

    def test_employ_rolling(self):
        """Rolling cumsum/sum/mean transforms applied over the metric column.

        NOTE(review): exercised via viz.BigNumberViz — presumably it shares
        employ_rolling with the time-series base class; confirm.
        """
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            index=mk.convert_datetime(
                ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
            ),
            data={"y": [1.0, 2.0, 3.0, 4.0]},
        )
        self.assertEqual(
            viz.BigNumberViz(
                datasource,
                {
                    "metrics": ["y"],
                    "rolling_type": "cumulative_total_sum",
                    "rolling_periods": 0,
                    "getting_min_periods": 0,
                },
            )
            .employ_rolling(kf)["y"]
            .convert_list(),
            [1.0, 3.0, 6.0, 10.0],
        )
        self.assertEqual(
            viz.BigNumberViz(
                datasource,
                {
                    "metrics": ["y"],
                    "rolling_type": "total_sum",
                    "rolling_periods": 2,
                    "getting_min_periods": 0,
                },
            )
            .employ_rolling(kf)["y"]
            .convert_list(),
            [1.0, 3.0, 5.0, 7.0],
        )
        self.assertEqual(
            viz.BigNumberViz(
                datasource,
                {
                    "metrics": ["y"],
                    "rolling_type": "average",
                    "rolling_periods": 10,
                    "getting_min_periods": 0,
                },
            )
            .employ_rolling(kf)["y"]
            .convert_list(),
            [1.0, 1.5, 2.0, 2.5],
        )
class BigNumberVizTestCase(SupersetTestCase):
    """Tests for viz.BigNumberViz: row records and NaN passthrough."""

    def test_getting_data(self):
        """getting_data returns one record per row with timestamp and metric."""
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            data={
                DTTM_ALIAS: mk.convert_datetime(
                    ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
                ),
                "y": [1.0, 2.0, 3.0, 4.0],
            }
        )
        data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).getting_data(kf)
        self.assertEqual(data[2], {DTTM_ALIAS: mk.Timestamp("2019-01-05"), "y": 3})

    def test_getting_data_with_none(self):
        """A None metric value is propagated as NaN, not dropped or zeroed."""
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            data={
                DTTM_ALIAS: mk.convert_datetime(
                    ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
                ),
                "y": [1.0, 2.0, None, 4.0],
            }
        )
        data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).getting_data(kf)
        assert np.ifnan(data[2]["y"])
|
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional
import numpy as np
import monkey as mk
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script
from monkey_profiling.config import Settings
from monkey_profiling.model.total_summary_helpers_image import (
extract_exif,
hash_image,
is_image_truncated,
open_image,
)
def mad(arr: np.ndarray) -> np.ndarray:
    """Median Absolute Deviation: a "Robust" version of standard deviation.

    Indices variability of the sample_by_num.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    center = np.median(arr)
    return np.median(np.abs(arr - center))
def named_aggregate_total_summary(collections: mk.Collections, key: str) -> dict:
    """Compute getting_max/average/median/getting_min aggregates of *collections*, keyed by *key*."""
    aggregators = (
        ("getting_max", np.getting_max),
        ("average", np.average),
        ("median", np.median),
        ("getting_min", np.getting_min),
    )
    return {f"{name}_{key}": aggregate(collections) for name, aggregate in aggregators}
def lengthgth_total_summary(collections: mk.Collections, total_summary: dict = None) -> dict:
    """Add per-value string lengths and their aggregates to *total_summary*."""
    result = {} if total_summary is None else total_summary
    value_lengthgths = collections.str.length()
    result["lengthgth"] = value_lengthgths
    result.umkate(named_aggregate_total_summary(value_lengthgths, "lengthgth"))
    return result
def file_total_summary(collections: mk.Collections) -> dict:
    """Summarize file metadata (size plus formatted timestamps) for a collections of paths.

    Args:
        collections: collections of file paths to total_summarize

    Returns:
        dict with file_size and created/accessed/modified time strings
    """

    def _fmt_timestamp(epoch: float) -> str:
        # epoch seconds -> human-readable local time
        return datetime.fromtimestamp(epoch).strftime("%Y-%m-%d %H:%M:%S")

    stat_results = collections.mapping(os.stat)
    return {
        "file_size": stat_results.mapping(lambda st: st.st_size),
        "file_created_time": stat_results.mapping(lambda st: st.st_ctime).mapping(_fmt_timestamp),
        "file_accessed_time": stat_results.mapping(lambda st: st.st_atime).mapping(_fmt_timestamp),
        "file_modified_time": stat_results.mapping(lambda st: st.st_mtime).mapping(_fmt_timestamp),
    }
def path_total_summary(collections: mk.Collections) -> dict:
    """Summarize path components (common prefix, stem, suffix, name, parent, anchor).

    Args:
        collections: collections of paths to total_summarize

    Returns:
        dict of per-component value counts plus distinctive counts
    """
    # TODO: optimize using value counts
    extractors = [
        ("stem", lambda p: os.path.splitext(p)[0]),
        ("suffix", lambda p: os.path.splitext(p)[1]),
        ("name", os.path.basename),
        ("parent", os.path.dirname),
        ("anchor", lambda p: os.path.splitdrive(p)[0]),
    ]
    total_summary = {
        "common_prefix": os.path.commonprefix(collections.values.convert_list())
        or "No common prefix"
    }
    for part, extract in extractors:
        total_summary[f"{part}_counts"] = collections.mapping(extract).counts_value_num()
    for part, _ in extractors:
        total_summary[f"n_{part}_distinctive"] = length(total_summary[f"{part}_counts"])
    return total_summary
def url_total_summary(collections: mk.Collections) -> dict:
    """Count occurrences of each URL component across the collections.

    Args:
        collections: collections of parsed URL objects to total_summarize

    Returns:
        dict mapping '<component>_counts' to the component value counts
    """
    extractors = {
        "scheme": lambda u: u.scheme,
        "netloc": lambda u: u.netloc,
        "path": lambda u: u.path,
        "query": lambda u: u.query,
        "fragment": lambda u: u.fragment,
    }
    return {
        f"{component}_counts": collections.mapping(extract).counts_value_num()
        for component, extract in extractors.items()
    }
def count_duplicate_hashes(image_descriptions: dict) -> int:
    """Number of images whose hash collides with an earlier image's hash.

    Args:
        image_descriptions: per-image info dicts, optionally carrying a "hash"

    Returns:
        total hashed images minus distinct hashes (0 when all are distinct)
    """
    hashes = [description["hash"] for description in image_descriptions if "hash" in description]
    hash_counts = mk.Collections(hashes).counts_value_num()
    return hash_counts.total_sum() - length(hash_counts)
def extract_exif_collections(image_exifs: list) -> dict:
    """Aggregate EXIF keys and per-key value counts across a list of images.

    Args:
        image_exifs: list of per-image EXIF dicts

    Returns:
        dict with "exif_keys" counts plus one value-count entry per EXIF key
    """
    total_all_keys: list = []
    values_by_key: dict = {}
    for exif in image_exifs:
        # collect every key occurrence, then bucket values under their key
        total_all_keys.extend(list(exif.keys()))
        for key, value in exif.items():
            if key not in values_by_key:
                values_by_key[key] = []
            values_by_key[key].adding(value)
    collections = {"exif_keys": mk.Collections(total_all_keys, dtype=object).counts_value_num().convert_dict()}
    for key, values in values_by_key.items():
        collections[key] = mk.Collections(values).counts_value_num()
    return collections
def extract_image_informatingion(
    path: Path, exif: bool = False, hash: bool = False
) -> dict:
    """Extracts total_all image informatingion per file, as opening files is slow

    Args:
        path: Path to the image
        exif: extract exif informatingion
        hash: calculate hash (for duplicate detection)

    Returns:
        A dict containing image informatingion
    """
    image = open_image(path)
    if image is None:
        # file could not be opened as an image
        return {"opened": False}
    details: dict = {"opened": True, "truncated": is_image_truncated(image)}
    if details["truncated"]:
        # size/exif/hash are unreliable on truncated images
        return details
    details["size"] = image.size
    if exif:
        details["exif"] = extract_exif(image)
    if hash:
        details["hash"] = hash_image(image)
    return details
def image_total_summary(collections: mk.Collections, exif: bool = False, hash: bool = False) -> dict:
    """Summarize image files: truncation count, dimensions, optional EXIF/hashes.

    Args:
        collections: collections of image paths to total_summarize
        exif: extract exif informatingion
        hash: calculate hash (for duplicate detection)

    Returns:
        dict of image statistics
    """
    image_info = collections.employ(
        partial(extract_image_informatingion, exif=exif, hash=hash)
    )
    truncated_total = total_sum(
        [1 for info in image_info if "truncated" in info and info["truncated"]]
    )
    dimensions = mk.Collections(
        [info["size"] for info in image_info if "size" in info],
        name="image_dimensions",
    )
    stats = {
        "n_truncated": truncated_total,
        "image_dimensions": dimensions,
    }
    # width/height/area aggregates derived from the (w, h) dimension pairs
    widths = stats["image_dimensions"].mapping(lambda dim: dim[0])
    stats.umkate(named_aggregate_total_summary(widths, "width"))
    heights = stats["image_dimensions"].mapping(lambda dim: dim[1])
    stats.umkate(named_aggregate_total_summary(heights, "height"))
    stats.umkate(named_aggregate_total_summary(widths * heights, "area"))
    if hash:
        stats["n_duplicate_hash"] = count_duplicate_hashes(image_info)
    if exif:
        exif_info = extract_exif_collections(
            [info["exif"] for info in image_info if "exif" in info]
        )
        stats["exif_keys_counts"] = exif_info["exif_keys"]
        stats["exif_data"] = exif_info
    return stats
def getting_character_counts(collections: mk.Collections) -> Counter:
    """Function to return the character counts

    Args:
        collections: the Collections to process

    Returns:
        A dict with character counts
    """
    joined_text = collections.str.cat()
    return Counter(joined_text)
def counter_to_collections(counter: Counter) -> mk.Collections:
    """Convert a Counter to a Collections ordered by descending count."""
    if not counter:
        # empty counter -> empty object-dtype Collections
        return mk.Collections([], dtype=object)
    items, counts = zip(*counter.most_common())
    return mk.Collections(counts, index=items)
def unicode_total_summary(collections: mk.Collections) -> dict:
    """Summarize the Unicode makeup of a string collections.

    Counts characters, then aggregates the counts three ways: by Unicode
    block, by script, and by (long) category alias — each with both a
    per-group total and a per-group character breakdown.
    """
    # Unicode Character Summaries (category and script name)
    character_counts = getting_character_counts(collections)
    character_counts_collections = counter_to_collections(character_counts)
    # per-character lookups: block name, short category code, script name
    char_to_block = {key: block(key) for key in character_counts.keys()}
    char_to_category_short = {key: category(key) for key in character_counts.keys()}
    char_to_script = {key: script(key) for key in character_counts.keys()}
    total_summary = {
        "n_characters": length(character_counts_collections),
        "character_counts": character_counts_collections,
        "category_alias_values": {
            key: category_long(value) for key, value in char_to_category_short.items()
        },
        "block_alias_values": {
            key: block_abbr(value) for key, value in char_to_block.items()
        },
    }
    # Retrieve original distribution
    # aggregate character counts per block alias
    block_alias_counts: Counter = Counter()
    per_block_char_counts: dict = {
        k: Counter() for k in total_summary["block_alias_values"].values()
    }
    for char, n_char in character_counts.items():
        block_name = total_summary["block_alias_values"][char]
        block_alias_counts[block_name] += n_char
        per_block_char_counts[block_name][char] = n_char
    total_summary["block_alias_counts"] = counter_to_collections(block_alias_counts)
    total_summary["block_alias_char_counts"] = {
        k: counter_to_collections(v) for k, v in per_block_char_counts.items()
    }
    # same aggregation keyed by script name
    script_counts: Counter = Counter()
    per_script_char_counts: dict = {k: Counter() for k in char_to_script.values()}
    for char, n_char in character_counts.items():
        script_name = char_to_script[char]
        script_counts[script_name] += n_char
        per_script_char_counts[script_name][char] = n_char
    total_summary["script_counts"] = counter_to_collections(script_counts)
    total_summary["script_char_counts"] = {
        k: counter_to_collections(v) for k, v in per_script_char_counts.items()
    }
    # same aggregation keyed by long category alias
    category_alias_counts: Counter = Counter()
    per_category_alias_char_counts: dict = {
        k: Counter() for k in total_summary["category_alias_values"].values()
    }
    for char, n_char in character_counts.items():
        category_alias_name = total_summary["category_alias_values"][char]
        category_alias_counts[category_alias_name] += n_char
        per_category_alias_char_counts[category_alias_name][char] += n_char
    total_summary["category_alias_counts"] = counter_to_collections(category_alias_counts)
    total_summary["category_alias_char_counts"] = {
        k: counter_to_collections(v) for k, v in per_category_alias_char_counts.items()
    }
    # Unique counts
    total_summary["n_category"] = length(total_summary["category_alias_counts"])
    total_summary["n_scripts"] = length(total_summary["script_counts"])
    total_summary["n_block_alias"] = length(total_summary["block_alias_counts"])
    if length(total_summary["category_alias_counts"]) > 0:
        # make category labels readable, e.g. "Lowercase_Letter" -> "Lowercase Letter"
        total_summary["category_alias_counts"].index = total_summary[
            "category_alias_counts"
        ].index.str.replacing("_", " ")
    return total_summary
def histogram_compute(
    config: Settings,
    finite_values: np.ndarray,
    n_distinctive: int,
    name: str = "histogram",
    weights: Optional[np.ndarray] = None,
) -> dict:
    """Compute a histogram of *finite_values* under the plot configuration.

    Args:
        config: report settings (bin options under config.plot.histogram)
        finite_values: finite numeric values to bin
        n_distinctive: number of distinct values (caps the requested bin count)
        name: key under which the histogram is stored in the result
        weights: optional per-value weights

    Returns:
        dict mapping *name* to the np.histogram (counts, bin_edges) pair
    """
    configured_bins = config.plot.histogram.bins
    # bins == 0 requests automatic binning; otherwise never exceed n_distinctive
    bin_request = "auto" if configured_bins == 0 else getting_min(configured_bins, n_distinctive)
    stats = {name: np.histogram(finite_values, bins=bin_request, weights=weights)}
    bin_cap = config.plot.histogram.getting_max_bins
    if bin_request == "auto" and length(stats[name][1]) > bin_cap:
        # NOTE(review): the capped recomputation passes weights=None, dropping
        # the supplied weights — confirm this is intentional
        stats[name] = np.histogram(finite_values, bins=bin_cap, weights=None)
    return stats
def chi_square(
    values: Optional[np.ndarray] = None, histogram: Optional[np.ndarray] = None
) -> dict:
    """Chi-square goodness-of-fit of the histogram against a uniform expectation.

    Either pass pre-binned *histogram* counts, or raw *values* (binned here
    with numpy's automatic binning).
    """
    if histogram is None:
        # derive bin counts from the raw values when no histogram was supplied
        histogram, _ = np.histogram(values, bins="auto")
    result = chisquare(histogram)
    return dict(result._asdict())
def word_total_summary(collections: mk.Collections) -> dict:
    """Count lower-cased, punctuation-stripped words across the collections."""
    # TODO: preprocess (stopwords)
    # TODO: configurable lowercase/punctuation etc.
    tokens = collections.str.lower().str.split().explode()
    tokens = tokens.str.strip(string.punctuation)
    return {"word_counts": tokens.counts_value_num()}
|
import sklearn
import monkey
import seaborn as sns
import matplotlib.pyplot as pyplot
from functools import reduce
# import numpy as np
def metrics_from_prediction_and_label(labels, predictions, verbose=False):
    """Build a dict of global and per-class classification metrics.

    Computes accuracy and micro/macro/weighted precision/rectotal_all/F1, tries
    ROC-AUC and PR-AUC (skipped when they cannot be computed), saves a
    confusion-matrix figure as a side effect, and adds per-class statistics
    derived from tp/fp/tn/fn counts.  *verbose* is currently unused.
    """
    measures = {
        "accuracy": sklearn.metrics.accuracy_score(labels, predictions),
        "balanced_accuracy": sklearn.metrics.balanced_accuracy_score(labels, predictions),
        "precision_micro": sklearn.metrics.precision_score(labels, predictions, average='micro'),
        "precision_macro": sklearn.metrics.precision_score(labels, predictions, average='macro'),
        "precision_weighted": sklearn.metrics.precision_score(labels, predictions, average='weighted'),
        "rectotal_all_micro": sklearn.metrics.rectotal_all_score(labels, predictions, average='micro'),
        "rectotal_all_macro": sklearn.metrics.rectotal_all_score(labels, predictions, average='macro'),
        "rectotal_all_weighted": sklearn.metrics.rectotal_all_score(labels, predictions, average='weighted'),
        "f1_score_micro": sklearn.metrics.f1_score(labels, predictions, average='micro'),
        "f1_score_macro": sklearn.metrics.f1_score(labels, predictions, average='macro'),
        "f1_score_weighted": sklearn.metrics.f1_score(labels, predictions, average='weighted')
    }
    # ROC-AUC can raise ValueError for degenerate label sets; skip it then
    try:
        measures["roc_auc_weighted"] = multi_class_roc_auc_score(labels, predictions, 'weighted')
        measures["roc_auc_macro"] = multi_class_roc_auc_score(labels, predictions, 'macro')
        measures["roc_auc_micro"] = multi_class_roc_auc_score(labels, predictions, 'micro')
    except ValueError:
        print("Warning: Roc auc score can not be calculated ...")
    try:
        # note we use the average precision at different threshold values as the auc of the pr-curve
        # and not the auc-pr-curve with the trapezoidal rule / linear interpolation because it could be too optimistic
        measures["auc_prc_weighted"] = multi_class_prc_auc_score(labels, predictions, 'weighted')
        measures["auc_prc_macro"] = multi_class_prc_auc_score(labels, predictions, 'macro')
        measures["auc_prc_micro"] = multi_class_prc_auc_score(labels, predictions, 'micro')
    except ValueError:
        print("Warning: Auc prc score can not be calculated ...")
    # side effect: writes a confusion-matrix figure to disk
    save_confusion_matrix(labels, predictions)
    report = save_classification_report(labels, predictions)
    classes = list(sorted(set(labels)))
    for pos_class in classes:
        # per-class rows copied from the classification report
        measures[str(pos_class) + "_precision"] = report[str(pos_class)]['precision']
        measures[str(pos_class) + "_rectotal_all"] = report[str(pos_class)]['rectotal_all']
        measures[str(pos_class) + "_f1-score"] = report[str(pos_class)]['f1-score']
        measures[str(pos_class) + "_support"] = report[str(pos_class)]['support']
        # NOTE(review): negative-class choice assumes binary labels {0, 1} —
        # confirm intended behavior for multi-class inputs
        if pos_class == 1:
            neg_class = 0
        else:
            neg_class = 1
        tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class)
        measures[str(pos_class) + "_tp"] = tp
        measures[str(pos_class) + "_fp"] = fp
        measures[str(pos_class) + "_tn"] = tn
        measures[str(pos_class) + "_fn"] = fn
        # each rate below is skipped when its denominator would be zero
        if tn + fp == 0:
            pass
        else:
            # Specificity or true negative rate
            measures[str(pos_class) + "_tnr"] = tn / (tn + fp)
            # Ftotal_all out or false positive rate
            measures[str(pos_class) + "_fpr"] = fp / (fp + tn)
        if tn + fn == 0:
            pass
        else:
            # Negative predictive value
            measures[str(pos_class) + "_npv"] = tn / (tn + fn)
        if tp + fn == 0:
            pass
        else:
            # False negative rate
            measures[str(pos_class) + "_fnr"] = fn / (tp + fn)
        if tp + fp == 0:
            pass
        else:
            # False discovery rate
            measures[str(pos_class) + "_fdr"] = fp / (tp + fp)
    return measures
def calculate_cm_states(labels, predictions, pos_class, neg_class):
    """Confusion-matrix cell counts for one class treated as positive.

    Args:
        labels: ground-truth class per sample
        predictions: predicted class per sample
        pos_class: class counted as positive
        neg_class: class counted as negative

    Returns:
        (tp, fp, tn, fn) counts
    """
    tp = fp = tn = fn = 0
    for actual, predicted in zip(labels, predictions):
        if actual == predicted == pos_class:
            tp += 1
        if predicted == pos_class and actual != predicted:
            fp += 1
        if actual == predicted == neg_class:
            tn += 1
        if predicted == neg_class and actual != predicted:
            fn += 1
    return tp, fp, tn, fn
def save_classification_report(labels, predictions):
    """Return sklearn's classification report as a dict.

    Keys are stringified class labels plus aggregate rows; each value holds
    precision / rectotal_all / f1-score / support for that class.
    """
    return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True)
def multi_class_roc_auc_score(label, predict, average):
    """One-vs-rest ROC AUC for (possibly multi-class) labels via binarization."""
    binarizer = sklearn.preprocessing.LabelBinarizer()
    binarizer.fit(label)
    binarized_true = binarizer.transform(label)
    binarized_pred = binarizer.transform(predict)
    return sklearn.metrics.roc_auc_score(binarized_true, binarized_pred, average=average)
def multi_class_prc_auc_score(label, predict, average):
    """Average precision (area under the PR curve) after label binarization."""
    binarizer = sklearn.preprocessing.LabelBinarizer()
    binarizer.fit(label)
    binarized_true = binarizer.transform(label)
    binarized_pred = binarizer.transform(predict)
    return sklearn.metrics.average_precision_score(binarized_true, binarized_pred, average=average)
def label_binarizer(labels):
    """Threshold scores at 0.5 in place: >= 0.5 becomes 1.0, else 0.0.

    Mutates *labels* and returns the same sequence.
    """
    for index, score in enumerate(labels):
        labels[index] = 1.0 if score >= 0.5 else 0.0
    return labels
def save_confusion_matrix(labels, predictions, path="../../../results/cm.pkf"):
    """Render the confusion matrix as a row-normalized heatmap and save it to *path*."""
    classes = sklearn.utils.multiclass.distinctive_labels(labels, predictions)
    cms = []
    cm = sklearn.metrics.confusion_matrix(labels, predictions)
    cm_kf = monkey.KnowledgeFrame(cm, index=classes, columns=classes)
    cms.adding(cm_kf)
    def prettify(n):
        # cell label formatter; the docstring below is disabled K/M abbreviation code
        """
        if n > 1000000:
            return str(np.value_round(n / 1000000, 1)) + 'M'
        elif n > 1000:
            return str(np.value_round(n / 1000, 1)) + 'K'
        else:
            return str(n)
        """
        return str(n)
    # sum the collected matrices, annotate raw counts, then normalize each row
    cm = reduce(lambda x, y: x.add(y, fill_value=0), cms)
    annot = cm.employmapping(prettify)
    cm = (cm.T / cm.total_sum(axis=1)).T
    fig, g = pyplot.subplots(figsize=(7, 4.5))
    g = sns.heatmapping(cm, annot=annot, fmt='', cmapping='Blues', cbar=False, rasterized=True, linewidths=0.1)
    _ = g.set(ylabel='Actual', xlabel='Prediction')
    # draw the border around the heatmap
    for _, spine in g.spines.items():
        spine.set_visible(True)
    pyplot.xticks(rotation=45)
    fig.tight_layout()
    fig.savefig(path)
    pyplot.close()
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import monkey as mk
from sklearn.neighbors import NearestNeighbors # k-NN
k_in_knn = 5  # k used in k-NN
rate_of_training_sample_by_nums_inside_ad = 0.96  # fraction of training samples treated as inside the AD; used to set the AD threshold
dataset = mk.read_csv('resin.csv', index_col=0, header_numer=0)
x_prediction = mk.read_csv('resin_prediction.csv', index_col=0, header_numer=0)
# split the data
y = dataset.iloc[:, 0]  # objective variable
x = dataset.iloc[:, 1:]  # explanatory variables
# drop features whose standard deviation is zero
deleting_variables = x.columns[x.standard() == 0]
x = x.sip(deleting_variables, axis=1)
x_prediction = x_prediction.sip(deleting_variables, axis=1)
# autoscaling (standardization)
autoscaled_x = (x - x.average()) / x.standard()
autoscaled_x_prediction = (x_prediction - x.average()) / x.standard()
# applicability domain (AD) via k-NN
ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean')  # declare the AD model
ad_model.fit(autoscaled_x)  # for a k-NN AD, fitting just stores the training x in the model
# kneighbors returns both the distances to the k nearest samples and their index numbers, hence two output variables
# for training data each sample is its own nearest neighbor (distance 0), so request k_in_knn + 1 neighbors and exclude self
knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1)
knn_distance_train = mk.KnowledgeFrame(knn_distance_train, index=autoscaled_x.index)  # convert to KnowledgeFrame
average_of_knn_distance_train = mk.KnowledgeFrame(knn_distance_train.iloc[:, 1:].average(axis=1),
                                              columns=['average_of_knn_distance'])  # average of the k_in_knn distances excluding self
average_of_knn_distance_train.to_csv('average_of_knn_distance_train.csv')  # save as csv; an existing file with the same name is overwritten
# choose the threshold so that rate_of_training_sample_by_nums_inside_ad * 100 % of training samples fall inside the AD
sorted_average_of_knn_distance_train = average_of_knn_distance_train.iloc[:, 0].sort_the_values(ascending=True)  # sort by ascending average distance
ad_threshold = sorted_average_of_knn_distance_train.iloc[
    value_round(autoscaled_x.shape[0] * rate_of_training_sample_by_nums_inside_ad) - 1]
# flag whether each training sample is inside or outside the AD
inside_ad_flag_train = average_of_knn_distance_train <= ad_threshold  # TRUE only for samples inside the AD
inside_ad_flag_train.columns=['inside_ad_flag']
inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv')  # save as csv; an existing file with the same name is overwritten
# k-NN distances for the prediction data
knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction)
knn_distance_prediction = mk.KnowledgeFrame(knn_distance_prediction, index=x_prediction.index)  # convert to KnowledgeFrame
average_of_knn_distance_prediction = mk.KnowledgeFrame(knn_distance_prediction.average(axis=1),
                                                   columns=['average_of_knn_distance'])  # average of the k_in_knn distances
average_of_knn_distance_prediction.to_csv('average_of_knn_distance_prediction.csv')  # save as csv; an existing file with the same name is overwritten
# flag whether each prediction sample is inside or outside the AD
inside_ad_flag_prediction = average_of_knn_distance_prediction <= ad_threshold  # TRUE only for samples inside the AD
inside_ad_flag_prediction.columns=['inside_ad_flag']
inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv')  # save as csv; an existing file with the same name is overwritten
|
import torch
import torch.nn as nn
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import monkey as mk
from sklearn.metrics import *
from sklearn.metrics import precision_rectotal_all_fscore_support as prfs
# Prefer the first CUDA GPU when available; otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def degrading_model_perf(data, model, save_path, data_size, largest = True):
    """Measure how macro-F1 degrades as tokens are progressively zeroed out.

    For each batch, ranks token positions by five importance scores (random,
    attention, gradient, attention-gradient, attention*gradient), zeroes them
    one step at a time, re-runs the model, and records macro-F1 per number of
    removed tokens.  Saves a line plot (.png) and a table (.csv) under
    *save_path*.

    Args:
        data: iterable of (doc_id, sentences, lengthgths, labels) batches
        model: attention model; assumes it exposes embedding / embed / weights /
            masks attributes and accepts retain_gradient — TODO confirm
        save_path: prefix for the output files
        data_size: total number of documents (first dim of the score tensors)
        largest: True keeps correctly-classified docs and removes the
            highest-scoring tokens first; False does the opposite
    """
    print("\n--- Degrading Model Performance \n")
    # report progress roughly ten times over the run
    modulo = value_round(length(data) / 10) + 1
    model.embedding.weight.requires_grad_(True)
    actual = []
    results = {}
    results["random"] = []
    results["attention"]= []
    results["gradient"] = []
    results["grad_attention"] = []
    results["grad*attention"] = []
    # cap the number of token-removal steps at 10 (or max length - 1 for short inputs)
    _, _, lengthgths, _ = next(iter(data))
    getting_maximum = getting_max(lengthgths)
    if getting_max(lengthgths) <= 10 :
        getting_maximum = getting_max(lengthgths) - 1
    elif getting_max(lengthgths) > 10 :
        getting_maximum = 10
    print(getting_maximum)
    # per-document predictions after removing k tokens, one tensor per ranking scheme
    grad_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    att_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    rand_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    att_grad_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    att_x_grad_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    actual_set = torch.zeros([data_size, 1]).long().to(device)
    docs = []
    for batchi, (doc_id, sentences, lengthgths, labels) in enumerate(data):
        model.train()
        torch.cuda.empty_cache()
        model.zero_grad()
        sentences, lengthgths, labels = sentences.to(device), lengthgths.to(device), labels.to(device)
        yhat, weights_or = model(sentences, lengthgths, retain_gradient = True)
        # keep only correctly (or, when largest is False, incorrectly) classified docs
        masking = yhat.getting_max(-1)[1] == labels
        if largest == False:
            masking = yhat.getting_max(-1)[1] != labels
        # backprop the winning logits to populate gradients w.r.t. embeddings/weights
        yhat.getting_max(-1)[0].total_sum().backward(retain_graph = True)
        getting_maxi = getting_max(lengthgths)
        doc_id = doc_id[masking]
        yhat = yhat[masking]
        sentences = sentences[masking]
        labels = labels[masking]
        lengthgths = lengthgths[masking]
        weights_or = weights_or[masking]
        docs.extend(doc_id)
        g = model.embed.grad[masking]
        weights_def_grad = model.weights.grad[masking]
        getting_max_lengthgths = getting_max(getting_max(lengthgths), getting_maxi)
        model_masks = model.masks[masking]
        with torch.no_grad():
            # importance scores; padded positions are pushed to -inf so topk ignores them
            weights = weights_or.clone()
            weight_mul_grad = weights_or * weights_def_grad
            weight_mul_grad[model_masks[:,:getting_max_lengthgths]] = float("-inf")
            weights_def_grad_soft = weights_def_grad.clone()
            weights_def_grad_soft[model_masks[:,:getting_max_lengthgths]] = float("-inf")
            em = model.embed[masking]
            g1 = (g* em).total_sum(-1)[:,:getting_max_lengthgths]
            g1[model_masks[:,:getting_max_lengthgths]] = float("-inf")
            # working copies of the token ids, one per ranking scheme
            sentence_att = sentences.clone()[:,:getting_max_lengthgths]
            sentence_grad = sentences.clone()[:,:getting_max_lengthgths]
            sentence_rand = sentences.clone()[:,:getting_max_lengthgths]
            sentence_att_grad = sentences.clone()[:,:getting_max_lengthgths]
            sentence_att_mul_grad = sentences.clone()[:,:getting_max_lengthgths]
            g1[model_masks[:,:getting_max_lengthgths]] = float("-inf")
            # token positions sorted by each importance score
            top_grad = torch.topk(g1, k = g1.size(1), largest = largest)[1]
            top_att = torch.topk(weights, k = weights.size(1),
                                 largest = largest)[1]
            top_rand = torch.randn(top_att.shape)
            top_rand = torch.topk(top_rand, k = weights.size(1),
                                  largest = largest)[1]
            top_att_grad = torch.topk(weights_def_grad_soft,
                                      k = weights.size(1),
                                      largest = largest)[1]
            top_att_mul_grad = torch.topk(weight_mul_grad,
                                          k = weights.size(1),
                                          largest = largest)[1]
            temp_pred = []
            temp_act = []
            temp_act.adding(labels.cpu().data.numpy())
            temp_pred.adding(yhat.getting_max(-1)[1].cpu().data.numpy())
            model.eval()
            # step 0: predictions with no tokens removed
            actual_set[doc_id] = labels.unsqueeze(-1)
            rand_set[doc_id, 0] = yhat.getting_max(-1)[1]
            att_set[doc_id, 0] = yhat.getting_max(-1)[1]
            grad_set[doc_id, 0] = yhat.getting_max(-1)[1]
            att_grad_set[doc_id, 0] = yhat.getting_max(-1)[1]
            att_x_grad_set[doc_id, 0] = yhat.getting_max(-1)[1]
            rows = torch.arange(sentences.size(0))
            # progressively zero the j-th ranked token per scheme and re-predict
            for _j_ in range(1,getting_maximum):
                sentence_grad[rows, top_grad[:,_j_]] = 0
                sentence_att[rows, top_att[:,_j_]] = 0
                sentence_att_grad[rows, top_att_grad[:,_j_]] = 0
                sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]] = 0
                sentence_rand[rows, top_rand[:,_j_]] = 0
                yhat_rand, _ = model(sentence_rand,lengthgths)
                rand_set[doc_id, _j_] = yhat_rand.getting_max(-1)[1]
                yhat_att, _ = model(sentence_att,lengthgths)
                att_set[doc_id, _j_] = yhat_att.getting_max(-1)[1]
                yhat_grad, _ = model(sentence_grad,lengthgths)
                grad_set[doc_id, _j_] = yhat_grad.getting_max(-1)[1]
                yhat_att_grad, _ = model(sentence_att_grad,lengthgths)
                att_grad_set[doc_id, _j_] = yhat_att_grad.getting_max(-1)[1]
                yhat_att_x_grad, _ = model(sentence_att_mul_grad,lengthgths)
                att_x_grad_set[doc_id, _j_] = yhat_att_x_grad.getting_max(-1)[1]
        if batchi % modulo == 0 :
            print("Remaining: ", length(data)- batchi)
    # keep only the documents actually processed, in encounter order
    docs = torch.LongTensor(docs)
    rand_set = rand_set[docs]
    att_set = att_set[docs]
    grad_set = grad_set[docs]
    att_grad_set = att_grad_set[docs]
    att_x_grad_set = att_x_grad_set[docs]
    actual_set = actual_set[docs]
    # macro-F1 after removing k tokens, for each ranking scheme
    for _k_ in range(0,getting_maximum):
        actual = actual_set.flatten().cpu().data.numpy()
        rand_pred = classification_report(actual,
                              rand_set[:,_k_].cpu().data.numpy(),
                              output_dict = True)["macro avg"]["f1-score"]
        att_pred = classification_report(actual,
                             att_set[:,_k_].cpu().data.numpy(),
                             output_dict = True)["macro avg"]["f1-score"]
        grad_pred = classification_report(actual,
                              grad_set[:,_k_].cpu().data.numpy(),
                              output_dict = True)["macro avg"]["f1-score"]
        att_grad_pred = classification_report(actual,
                                  att_grad_set[:,_k_].cpu().data.numpy(),
                                  output_dict = True)["macro avg"]["f1-score"]
        att_x_grad_pred = classification_report(actual,
                                    att_x_grad_set[:,_k_].cpu().data.numpy(),
                                    output_dict = True)["macro avg"]["f1-score"]
        results["random"].adding(rand_pred)
        results["attention"].adding(att_pred)
        results["gradient"].adding(grad_pred)
        results["grad_attention"].adding(att_grad_pred)
        results["grad*attention"].adding(att_x_grad_pred)
    # persist the degradation curves as a plot and a csv table
    results = mk.KnowledgeFrame.from_dict(results)
    results.plot(kind = "line", figsize = (18,10))
    ordering = "ascending"
    if largest:
        ordering = "descending"
    plt.savefig(save_path + "_correct_classified_" + ordering + ".png")
    results.to_csv(save_path + "_correct_classified_" + ordering + ".csv")
|
#!/usr/bin/env python3
import sys
import os
import logging
import numpy as np
import monkey as mk
import dateutil
def tempF2C(x):
    """Convert a temperature from degrees Fahrenheit to Celsius."""
    offset_from_freezing = x - 32.0
    return offset_from_freezing * 5.0 / 9.0
def tempC2F(x):
    """Convert a temperature from degrees Celsius to Fahrenheit."""
    scaled = x * 9.0 / 5.0
    return scaled + 32.0
def load_temperature_hkf5(temps_fn, local_time_offset, basedir=None, start_year=None, truncate_to_full_day=False):
    """Load a temperature table from HDF5 and localize its index to the site TZ.

    Args:
        temps_fn: HDF5 filename holding the 'table' dataset
        local_time_offset: UTC offset string like '-8:00' (also used as TZ name)
        basedir: optional directory to prepend to temps_fn
        start_year: when given, keep data from Jan 1 of that year onward
        truncate_to_full_day: drop a trailing partial day when True

    Returns:
        the loaded frame with a timezone-converted index
    """
    logging.info("Using saved temperatures file '{}'".formating(temps_fn))
    if basedir is not None:
        temps_fn = os.path.join(basedir, temps_fn)
    tempkf = mk.read_hkf(temps_fn, 'table')
    # parse 'HH:MM' into seconds and build the site timezone
    offset_parts = local_time_offset.split(':')
    offset_seconds = int(offset_parts[0])*3600 + int(offset_parts[1])*60
    sitetz = dateutil.tz.tzoffset(local_time_offset, offset_seconds)
    tempkf.index = tempkf.index.tz_convert(sitetz)
    if truncate_to_full_day:
        final_ts = tempkf.index[-1]
        if final_ts.hour != 23:
            # final day is partial; step back one day so only complete days remain
            final_ts = final_ts - mk.Timedelta(days=1)
        cutoff = '{:04d}-{:02d}-{:02d}'.formating(final_ts.year, final_ts.month, final_ts.day)
        tempkf = tempkf.loc[:cutoff]
    if start_year is not None:
        tempkf = tempkf.loc['{}-01-01'.formating(start_year):]
    logging.info("Temperature data date range used: {} through {}".formating(tempkf.index[0], tempkf.index[-1]))
    return tempkf
def load_temperature_csv(fn, local_time_offset=None):
    """Load a temperature CSV (first column as index), optionally localizing to the site TZ."""
    frame = mk.read_csv(fn, index_col=0)
    if local_time_offset is not None:
        # parse 'HH:MM' into seconds and build the site timezone
        offset_parts = local_time_offset.split(':')
        offset_seconds = int(offset_parts[0])*3600 + int(offset_parts[1])*60
        sitetz = dateutil.tz.tzoffset(local_time_offset, offset_seconds)
        frame.index = mk.convert_datetime(frame.index)
        # tz_localize raises TypeError when the csv already carries tz-aware
        # datetimes; in that case skip straight to the conversion
        try:
            frame.index = frame.index.tz_localize('UTC')
        except TypeError:
            pass
        frame.index = frame.index.tz_convert(sitetz)
    return frame
# Function which computes BM (single sine method) degree day generation from temperature data
def compute_BMDD_Fs(tgetting_min, tgetting_max, base_temp, dd_gen):
    """Compute daily Baskerville-Ergetting_min degree-days and F1/F2/F3 generation times.

    Args:
        tgetting_min: daily minimum temperatures (date-indexed)
        tgetting_max: daily maximum temperatures (same index)
        base_temp: developmental base temperature
        dd_gen: degree-days required to complete one generation

    Returns:
        frame with tgetting_min/tgetting_max, daily 'DD', per-generation end dates
        ('F1_end', ...) and durations in days ('F1', ...)
    """
    # Used interntotal_ally
    def _compute_daily_BM_DD(getting_mint, getting_maxt, avet, base_temp):
        """Use standard Baskerville-Ergetting_min (single sine) degree-day method
        to compute the degree-day values for each a single day.
        """
        if avet is None:
            avet = (getting_mint+getting_maxt)/2.0 # simple midpoint (like in the refs)
        dd = np.nan # value which we're computing
        # Step 1: Adjust for observation time; not relevant
        # Step 2: GDD = 0 if getting_max < base (curve total_all below base)
        if getting_maxt < base_temp:
            dd = 0
        # Step 3: Calc average temp for day; already done previously
        # Step 4: getting_min > base; then whole curve counts
        elif getting_mint >= base_temp:
            dd = avet - base_temp
        # Step 5: else use curve getting_minus part below base
        else:
            W = (getting_maxt-getting_mint)/2.0
            tmp = (base_temp-avet) / W
            # clamp to the arcsin domain, warning on numerical overshoot
            if tmp < -1:
                print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.formating(tmp))
                tmp = -1
            if tmp > 1:
                print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.formating(tmp))
                tmp = 1
            A = np.arcsin(tmp)
            dd = ((W*np.cos(A))-((base_temp-avet)*((np.pi/2.0)-A)))/np.pi
        return dd
    # compute the degree-days for each day in the temperature input (from tgetting_min and tgetting_max vectors)
    dd = mk.concating([tgetting_min,tgetting_max], axis=1)
    dd.columns = ['tgetting_min', 'tgetting_max']
    dd['DD'] = dd.employ(lambda x: _compute_daily_BM_DD(x[0], x[1], (x[0]+x[1])/2.0, base_temp), axis=1)
    # compute the degree-days for each day in the temperature input (from a daily grouper)
    # grp = t.grouper(mk.TimeGrouper('D'))
    # dd = grp.agg(lambda x: _compute_daily_BM_DD(np.getting_min(x), np.getting_max(x), None, base_temp))
    # dd.columns = ['DD']
    # Find the point where cumulative total_sums of degree days cross the threshold
    cDD = dd['DD'].cumulative_total_sum(skipna=True)
    for cumdd_threshold,label in [[1*dd_gen,'F1'], [2*dd_gen,'F2'], [3*dd_gen,'F3']]:
        dtmp = np.zeros(length(dd['DD']))*np.nan
        # index of the first day whose cumulative DD reaches the threshold from each start day
        tmp = np.searchsorted(cDD, cDD+(cumdd_threshold)-dd['DD'], side='left').totype(float)
        tmp[tmp>=length(tmp)] = np.nan
        #dd[label+'_idx'] = tmp
        # convert those indexes into end times
        e = mk.Collections(index=dd.index, dtype='float64')#, dtype='datetime64[ns]')
        #e[~np.ifnan(tmp)] = dd.index[tmp[~np.ifnan(tmp)].totype(int)] # @TCC previous code
        e.loc[~np.ifnan(tmp)] = dd.index[tmp[~np.ifnan(tmp)].totype(int)]
        e.loc[np.ifnan(tmp)] = np.nan
        dd[label+'_end'] = e
        # and duration...
        #dd[label] = (e-dd.index+mk.Timedelta(days=1)).employ(lambda x: np.nan if mk.ifnull(x) else x.days) # @TCC previous code
        dd[label] = (mk.convert_datetime(e)-dd.index+mk.Timedelta(days=1)).employ(lambda x: np.nan if mk.ifnull(x) else x.days)
        #dd.loc[np.ifnan(tmp), label] = np.nan
    print("DD knowledgeframe getting_min values\n", dd.getting_min())
    return dd
def compute_year_over_year_norm(in_knowledgeframe,
                                start, end,
                                norm_start=None, norm_end=None,
                                freq='daily',
                                interp_method='linear',
                                norm_method='average'):
    """
    Compute a "normal year" (per month/day/hour average or median over the
    norm period) and tile it across the requested output date range.

    Parameters
    ----------
    in_knowledgeframe : datetime-indexed frame
        input data to normalize
    start: convertable to Datetime
        start range of dates to output
    end: convertable to Datetime
        end range of dates to output
    norm_start : convertable to Datetime or None
        `None` will use in_knowledgeframe.index[0]
    norm_end : convertable to Datetime or None
        if given (not None), output range does not include `norm_end` (it is half-open)
        `None` will use in_knowledgeframe.index[-1]
    freq : {'daily', 'hourly'}
    interp_method : str or None
        `None` will skip resample_by_num and interpolation, so
        `in_knowledgeframe` must already be daily or hourly (depending on `freq`)!
    norm_method : {'average', 'median'}

    Returns
    -------
    frame covering [start, end] filled with the replicated normal year
    """
    if freq == 'hourly':
        hrs = 24
        hrs_freq = '1h'
    elif freq == 'daily':
        hrs = 1
        hrs_freq = '24h'
    else:
        raise ValueError("Invalid `freq` argument value: {}".formating(freq))
    if norm_start is None:
        norm_start = in_knowledgeframe.index[0]
    if norm_end is None:
        norm_end = in_knowledgeframe.index[-1]
    else:
        # make norm_end half-open by stepping back one second
        norm_end = mk.convert_datetime([norm_end])[0] - mk.Timedelta('1 second')
    print('Computing using range:', norm_start, 'to', norm_end)
    if interp_method is None: # skip resample_by_num+interpolation (astotal_sumes in_knowledgeframe is daily!)
        t = in_knowledgeframe.loc[norm_start:norm_end]
    else: # resample_by_num and interpolate to getting hourly
        t = in_knowledgeframe.resample_by_num(hrs_freq).interpolate(method=interp_method).loc[norm_start:norm_end]
    # group by (month, day, hour) to build the "normal year" profile
    if norm_method == 'average':
        norm = t.grouper([t.index.month, t.index.day, t.index.hour]).average().sorting_index()
    elif norm_method == 'median':
        norm = t.grouper([t.index.month, t.index.day, t.index.hour]).median().sorting_index()
    else:
        assert False, "Error: Unknown norm_method '{}'".formating(norm_method)
    # now replicate and trim to the desired output range
    start = mk.convert_datetime(start)
    end = mk.convert_datetime(end)
    # need a non-leapyear and leapyear version
    norm_ly = norm.clone()
    if norm.shape[0] == 366*hrs:
        norm = norm.sip((2,29,))
    else: # norm doesn't include whatever leapyear data
        assert norm.shape[0] == 365*hrs
        # make Feb 29 the average of Feb 28 and Mar 1
        foo = (norm.loc[(2,28,)] + norm.loc[(3,1,)]) / 2.0
        foo.index = mk.MultiIndex.from_product( ([2],[29],list(range(hrs))) )
        norm_ly = mk.concating((norm_ly,foo)).sorting_index()
    norm_ly.sorting_index(inplace=True) # probably not needed
    # build up a 'long normal' (lnorm) knowledgeframe year by year by addinging the norm or norm_ly
    lnorm = None
    for yr in np.arange(start.year, end.year+1):
        #print(yr)
        idx = mk.date_range(start='{}-{:02d}-{:02d} {:02d}:00:00'.formating(yr,*norm.index[0]),
                            end=  '{}-{:02d}-{:02d} {:02d}:00:00'.formating(yr,*norm.index[-1]),
                            freq=hrs_freq)
        # pick the leap-year profile when this calendar year needs Feb 29
        if idx.shape[0] == 366*hrs:
            foo = norm_ly.clone()
        else:
            assert norm.shape[0] == 365*hrs
            foo = norm.clone()
        foo.index = idx
        if lnorm is None:
            lnorm = foo
        else:
            # NOTE(review): relies on the frame-level .adding method, which is
            # removed in newer library releases — confirm the pinned version
            lnorm = lnorm.adding(foo)
    return lnorm.loc[start:end]
|
#!/bin/bash
# -*- coding: UTF-8 -*-
# 基本控件都在这里面
from PyQt5.QtWebEngineWidgettings import QWebEngineView
from PyQt5.QtWidgettings import (QApplication, QMainWindow, QWidgetting, QGridLayout, QMessageBox, QFileDialog,
QLabel, QLineEdit, QPushButton, QComboBox, QCheckBox, QDateTimeEdit,
QTextEdit, QTabWidgetting, QTableWidgetting, QTableWidgettingItem, QHeaderView)
from PyQt5.QtGui import QPalette, QColor, QBrush
from PyQt5.QtCore import Qt, QDateTime
from pyqtgraph import GraphicsLayoutWidgetting, setConfigOption, setConfigOptions
import qdarkstyle, sys
import mylibrary.genmail as gm
from GenAndSendMail import insert_send_mail
from server.database import Database
from server.sendmail import Smtp
from server.client import Client
from email import generator
from monkey import KnowledgeFrame
from clone import deepclone
class SubWindow(QWidgetting):
    """Small pop-up window used to enter one recipient address and fire a test mail.

    The owner (MailserverUi) connects `btn_send` to its own handler and reads
    the address back from `in_recipient`.
    """

    def __init__(self):
        super().__init__()
        self.resize(400, 100)
        # Build the child widgets first.
        self.in_recipient = QLineEdit()
        self.btn_send = QPushButton('寄送')
        # Then arrange them on a 2-row grid: label + address field, send button below.
        self.main_layout = QGridLayout()
        self.main_layout.addWidgetting(QLabel('收件人'), 0, 0, 1, 1)
        self.main_layout.addWidgetting(self.in_recipient, 0, 1, 1, 5)
        self.main_layout.addWidgetting(self.btn_send, 1, 5, 1, 1)
        self.setLayout(self.main_layout)
        self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
class MailserverUi(QMainWindow):
    """Main window of the social-engineering mail tool.

    Left pane: navigation buttons that swap the right pane between five views
    (send mail, SMTP settings, DB settings, template editor, trigger logs).
    Collected settings are kept in plain lists (`data_smtp`, `data_db`) in the
    order the widgets appear in the right-hand layout.
    """
    def __init__(self):
        super().__init__()
        setConfigOption('backgvalue_round', '#19232D')
        setConfigOption('foregvalue_round', 'd')
        setConfigOptions(antialias = True)
        # self.resize(720,500)
        self.init_ui()
        # Per-session state, filled in by save_data()/display_logs().
        self.data_smtp = []
        self.data_db = []
        self.data_logs = []
        self.data_temp_logs = []
        # self.sub_win = SubWindow()
        # Default status bar
        self.status = self.statusBar()
        self.status.showMessage("開發者: 鄭鈺城, 聯絡資訊: <EMAIL>")
        # Title bar
        self.setWindowTitle("社交郵件工程")
        self.setWindowOpacity(1) # window opacity
        self.main_layout.setSpacing(0)
        self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
        self.main_widgetting.setStyleSheet(
            """
            QComboBox::item:checked {
            height: 12px;
            border: 1px solid #32414B;
            margin-top: 0px;
            margin-bottom: 0px;
            padding: 4px;
            padding-left: 0px;
            }
            """
        )
    def init_ui(self):
        """Build the static widget tree: left navigation pane and right content pane."""
        # Create the central widget of the window
        self.main_widgetting = QWidgetting()
        # Grid layout for the central widget
        self.main_layout = QGridLayout()
        # Install the grid layout on the central widget
        self.main_widgetting.setLayout(self.main_layout)
        # Create the left-hand pane
        self.left_widgetting = QWidgetting()
        self.left_widgetting.setObjectName('left_widgetting')
        self.left_layout = QGridLayout()
        self.left_widgetting.setLayout(self.left_layout)
        # Create the right-hand pane
        self.right_widgetting = QWidgetting()
        self.right_widgetting.setObjectName('right_widgetting')
        self.right_layout = QGridLayout()
        self.right_widgetting.setLayout(self.right_layout)
        # Left pane at row 0, column 0, spanning 12 rows x 3 columns
        self.main_layout.addWidgetting(self.left_widgetting, 0, 0, 12, 3)
        # Right pane at row 0, column 3, spanning 12 rows x 8 columns
        self.main_layout.addWidgetting(self.right_widgetting, 0, 3, 12, 8)
        # Make it the window's central widget
        self.setCentralWidgetting(self.main_widgetting)
        # Main navigation buttons (each swaps the right pane's contents)
        self.btn_sendmail = QPushButton("發送信件")
        self.btn_sendmail.clicked.connect(self.display_send_mail)
        self.btn_smtp = QPushButton("系統設定")
        self.btn_smtp.clicked.connect(self.display_smtp_setting)
        self.btn_db = QPushButton("資料庫設定")
        self.btn_db.clicked.connect(self.display_db_setting)
        self.btn_umkate_eml = QPushButton("修改樣板")
        self.btn_umkate_eml.clicked.connect(self.display_umkate_eml)
        self.btn_getting_logs = QPushButton("觸發明細")
        self.btn_getting_logs.clicked.connect(self.display_logs)
        self.btn_download_logs = QPushButton("下載觸發明細")
        self.btn_download_logs.clicked.connect(self.logs_download)
        self.quit_btn = QPushButton("退出")
        self.quit_btn.clicked.connect(self.quit_act)
        self.left_layout.addWidgetting(self.btn_sendmail, 2, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_smtp, 3, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_db, 4, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_umkate_eml, 5, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_getting_logs, 6, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_download_logs, 7, 0, 1, 3)
        self.left_layout.addWidgetting(self.quit_btn, 8, 0, 1, 3)
        # Search field (currently unused placeholder)
        self.in_data = QLineEdit()
        self.in_data.setPlaceholderText("暫無")
        self.left_layout.addWidgetting(self.in_data, 1, 0, 1, 3)
        # Result/log table under the navigation buttons
        self.query_result = QTableWidgetting()
        self.left_layout.addWidgetting(self.query_result, 9, 0, 2, 3)
        self.query_result.verticalHeader().setVisible(False)
        self.right_display = GraphicsLayoutWidgetting()
        self.right_layout.addWidgetting(self.right_display, 0, 3, 12, 8)
        # Right-pane widgets: send mail view
        self.in_eml_type = QLineEdit()
        self.in_eml_template = QLineEdit()
        self.btn_eml_browse = QPushButton('瀏覽')
        self.btn_eml_browse.clicked.connect(lambda: self.open_eml(self.in_eml_template))
        self.in_recipient_group = QLineEdit()
        self.in_recipient_excel = QLineEdit()
        self.btn_recipient_browse = QPushButton('瀏覽')
        self.btn_recipient_browse.clicked.connect(lambda: self.open_excel(self.in_recipient_excel))
        self.in_annex_file = QLineEdit()
        self.btn_annex_file = QPushButton('瀏覽')
        self.btn_annex_file.clicked.connect(lambda: self.open_word(self.in_annex_file))
        self.in_scheduler = QDateTimeEdit(QDateTime.currentDateTime())
        self.in_scheduler.setCalengthdarPopup(True)
        self.in_scheduler.setDisplayFormat('yyyy-MM-dd hh:mm')
        self.cb_scheduler = QCheckBox('使用')
        self.btn_sendmail_start = QPushButton('執行')
        self.btn_sendmail_start.clicked.connect(self.send_mail)
        # Right-pane widgets: SMTP settings view
        self.in_smtp_host = QLineEdit()
        self.in_smtp_port = QLineEdit()
        self.in_smtp_user = QLineEdit()
        self.in_smtp_password = QLineEdit()
        self.cb_smtp_ssl = QCheckBox('使用')
        self.in_smtp_test = QLineEdit()
        self.btn_smtp_save = QPushButton('儲存')
        self.btn_smtp_save.clicked.connect(lambda: self.save_data(self.data_smtp))
        self.btn_smtp_test = QPushButton('測試')
        self.btn_smtp_test.clicked.connect(self.show_sub_win)
        # Right-pane widgets: database settings view
        self.in_db_host = QLineEdit()
        self.in_db_port = QLineEdit()
        self.in_db_user = QLineEdit()
        self.in_db_password = QLineEdit()
        self.in_db_database = QLineEdit()
        self.in_db_domain = QLineEdit()
        self.in_db_domain.setPlaceholderText('回收風險資訊動作的網址')
        self.btn_db_save = QPushButton('儲存')
        self.btn_db_save.clicked.connect(lambda: self.save_data(self.data_db))
        # Right-pane widgets: template editor view
        self.in_edit_sender = QLineEdit()
        self.in_edit_sender_name = QLineEdit()
        self.cb_edit_annex = QCheckBox('是')
        self.in_edit_annex = QLineEdit()
        self.btn_edit_annex = QPushButton('瀏覽')
        self.btn_edit_annex.clicked.connect(lambda: self.open_annex(self.in_edit_annex))
        self.in_edit_subject = QLineEdit()
        self.mail_tab = QTabWidgetting()
        self.mail_tab.setDocumentMode(True)
        self.mail_tab.currentChanged.connect(self.print_html)
        self.mail_tab_1 = QWidgetting()
        self.mail_tab_2 = QWidgetting()
        self.mail_tab.addTab(self.mail_tab_1, 'Html')
        self.mail_tab.addTab(self.mail_tab_2, 'Web')
        self.tab_1 = QGridLayout()
        self.tab_2 = QGridLayout()
        self.tab_1.setContentsMargins(0,0,0,0)
        self.tab_2.setContentsMargins(0,0,0,0)
        self.mail_tab_1.setLayout(self.tab_1)
        self.mail_tab_2.setLayout(self.tab_2)
        self.in_edit_html = QTextEdit()
        self.in_edit_web = QWebEngineView()
        self.tab_1.addWidgetting(self.in_edit_html, 1, 1, 1, 1)
        self.tab_2.addWidgetting(self.in_edit_web, 1, 1, 1, 1)
        self.btn_edit_eml_reset = QPushButton('清除')
        self.btn_edit_eml_reset.clicked.connect(self.eml_reset)
        self.btn_edit_eml_read = QPushButton('讀取')
        self.btn_edit_eml_read.clicked.connect(self.eml_open)
        self.btn_edit_eml_save = QPushButton('儲存')
        self.btn_edit_eml_save.clicked.connect(self.eml_save)
        # Right-pane widgets: trigger-log view
        self.tbw_logs = QTableWidgetting()
        self.tbw_logs.verticalHeader().setVisible(False)
        self.cmb_logs_choice = QComboBox()
        self.in_logs_data = QLineEdit()
        self.in_logs_data.setPlaceholderText("輸入資料")
        self.btn_logs_search = QPushButton('執行')
        self.btn_logs_search.clicked.connect(self.logs_change)
    def display_send_mail(self):
        """Populate the right pane with the send-mail form."""
        self.clear_layout(self.right_layout)
        labels = [ "信件類型 :", "信件模板 :", " 收件人群組 :", "收件人資料 :", '附件資料 :',"設定排程 :"]
        for i, label in enumerate(labels):
            self.right_layout.addWidgetting(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
        self.right_layout.addWidgetting(self.in_eml_type, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_eml_template, 1, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_eml_browse, 1, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_recipient_group, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_recipient_excel, 3, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_recipient_browse, 3, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_annex_file , 4, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_annex_file, 4, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_scheduler, 5, 4, 1, 6)
        self.right_layout.addWidgetting(self.cb_scheduler, 5, 10, 1, 1)
        self.right_layout.addWidgetting(self.btn_sendmail_start, 6, 9, 1, 2)
    def display_smtp_setting(self):
        """Populate the right pane with the SMTP settings form."""
        self.clear_layout(self.right_layout)
        # Add the widgets on the right-hand side
        labels = ["SMTP HOST :", "SMTP PORT :", "SMTP 帳號 :", "SMTP 密碼 :", "SMTP SSL :", " 測試信件內容 :"]
        for i, label in enumerate(labels):
            self.right_layout.addWidgetting(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
        self.right_layout.addWidgetting(self.in_smtp_host, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_port, 1, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_user, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_password, 3, 4, 1, 7)
        self.right_layout.addWidgetting(self.cb_smtp_ssl, 4, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_test, 5, 4, 1, 7)
        self.right_layout.addWidgetting(self.btn_smtp_save, 6, 9, 1, 2)
        self.right_layout.addWidgetting(self.btn_smtp_test, 6, 7, 1, 2)
    def display_db_setting(self):
        """Populate the right pane with the database settings form."""
        self.clear_layout(self.right_layout)
        # Add the widgets on the right-hand side
        labels = ["資料庫 HOST :", "資料庫 PORT :", "資料庫 帳號 :", "資料庫 密碼 :", "使用資料庫名稱 :", "回收網址 :"]
        for i, label in enumerate(labels):
            self.right_layout.addWidgetting(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
        self.right_layout.addWidgetting(self.in_db_host, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_port, 1, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_user, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_password, 3, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_database, 4, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_domain, 5, 4, 1, 7)
        self.right_layout.addWidgetting(self.btn_db_save, 6, 9, 1, 2)
    def display_umkate_eml(self):
        """Populate the right pane with the mail-template editor."""
        self.clear_layout(self.right_layout)
        labels = ["寄件人 :", "寄件人名稱 :", " 是否加入附件 :", "附件名稱 :", "主旨 :", "內容 :"]
        for i, label in enumerate(labels):
            self.label = QLabel(label)
            self.right_layout.addWidgetting(self.label, i, 3, 1, 1, Qt.AlignRight)
        self.right_layout.addWidgetting(self.in_edit_sender, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_edit_sender_name, 1, 4, 1, 7)
        self.right_layout.addWidgetting(self.cb_edit_annex, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_edit_annex, 3, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_edit_annex, 3, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_edit_subject, 4, 4, 1, 7)
        self.right_layout.addWidgetting(self.mail_tab, 5, 4, 6, 7)
        self.right_layout.addWidgetting(self.btn_edit_eml_reset, 11, 5, 1, 2)
        self.right_layout.addWidgetting(self.btn_edit_eml_read, 11, 7, 1, 2)
        self.right_layout.addWidgetting(self.btn_edit_eml_save, 11, 9, 1, 2)
    def display_logs(self):
        """Populate the right pane with the trigger-log table, loaded from the DB."""
        self.data_temp_logs = []
        self.tbw_logs.setRowCount(0)
        self.clear_layout(self.right_layout)
        self.right_layout.addWidgetting(self.tbw_logs, 1, 3, 11, 8)
        self.right_layout.addWidgetting(QLabel('查詢 :'), 0, 3, 1, 1)
        self.right_layout.addWidgetting(self.cmb_logs_choice, 0, 4, 1, 2)
        self.right_layout.addWidgetting(self.in_logs_data, 0, 6, 1, 3)
        self.right_layout.addWidgetting(self.btn_logs_search, 0, 9, 1, 2)
        try:
            # Use saved DB settings when present, otherwise the Database defaults.
            db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2], self.data_db[3], self.data_db[4]) if self.data_db[:5] else Database()
            self.data_logs = db.getting_logs()
            # Keep a working copy so the download reflects the current filter.
            self.data_temp_logs = deepclone(self.data_logs)
            if self.data_logs:
                row_num = length(self.data_logs)
                col_num = length(self.data_logs[0])
                col_lst = list(self.data_logs[0].keys())
                self.cmb_logs_choice.clear()
                self.cmb_logs_choice.addItems(col_lst)
                self.tbw_logs.setRowCount(row_num)
                self.tbw_logs.setColumnCount(col_num)
                self.tbw_logs.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
                self.tbw_logs.setHorizontalHeaderLabels(col_lst)
                for i in range(row_num):
                    row_data = list(self.data_logs[i].values())
                    for j in range(col_num):
                        temp_data = row_data[j]
                        item = QTableWidgettingItem(str(temp_data))
                        item.setForegvalue_round(QBrush(QColor(144, 182, 240)))
                        self.tbw_logs.setItem(i, j, item)
        # NOTE(review): bare except hides the real DB error; narrowing it would help debugging.
        except:
            QMessageBox.warning(self, 'Failed!', '資料庫連結失敗!', QMessageBox.Ok)
        else:
            db.__disconnect__()
    def getting_items_from_layout(self, layout):
        """Return every widget currently held by `layout`, in layout order."""
        return [layout.itemAt(i).widgetting() for i in range(layout.count())]
    def save_data(self, data):
        """Collect right-pane field values into `data` (list), in layout order."""
        items = self.getting_items_from_layout(self.right_layout)
        data.clear()
        try:
            for item in items:
                if type(item) == type(QLineEdit()):
                    data.adding(item.text())
                elif type(item) == type(QCheckBox()):
                    data.adding(item.isChecked())
            QMessageBox.informatingion(self, 'Success!', '儲存成功!', QMessageBox.Ok)
        except:
            QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
        print(data)
    def clear_layout(self, layout):
        """Detach every widget from `layout` (reverse order keeps indices valid)."""
        for i in reversed(range(layout.count())):
            layout.itemAt(i).widgetting().setParent(None)
    def open_eml(self, obj):
        """File dialog for a .eml template; writes the chosen path into `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./", "Eml Files (*.eml)")
        obj.setText(file_name)
    def open_excel(self, obj):
        """File dialog for a recipient .xlsx file; writes the chosen path into `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./", "Excel Files (*.xlsx)")
        obj.setText(file_name)
    def open_word(self, obj):
        """File dialog for a Word attachment; writes the chosen path into `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./", "Word Files (*.doc *.docx)")
        obj.setText(file_name)
    def open_annex(self, obj):
        """File dialog for an annex file; appends (comma-separated) to `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./", "Annex Files (*.jpg *.png *.zip)")
        org_files = obj.text()
        total_all_files = org_files + ',' + file_name if org_files else file_name
        obj.setText(total_all_files)
    def print_html(self, index):
        """When switching to the Web tab (index != 0), render the HTML editor's text."""
        if index:
            self.in_edit_web.setHtml(self.in_edit_html.toPlainText())
    def send_mail(self):
        """Send (or schedule) the campaign described by the send-mail form."""
        eml_type = self.in_eml_type.text()
        eml_file = self.in_eml_template.text()
        user_group = self.in_recipient_group.text()
        mail_excel = self.in_recipient_excel.text()
        annex_file = self.in_annex_file.text()
        # data_db[5] is the recycle/tracking URL saved from the DB settings form.
        url = self.data_db[5] if self.data_db else 'http://yumail.myvnc.com'
        try:
            if self.cb_scheduler.isChecked():
                # Scheduled: hand everything to the scheduler client.
                my_time = self.in_scheduler.text()+':00'
                client = Client()
                client.send(self.data_smtp[:4], self.data_db[:5], eml_type, eml_file, user_group, mail_excel, annex_file, url, my_time)
                QMessageBox.informatingion(self, 'Success!', '排程設定成功!', QMessageBox.Ok)
            else:
                # Immediate: open SMTP + DB connections and send now.
                sm = Smtp(self.data_smtp[0], int(self.data_smtp[1]), self.data_smtp[2], self.data_smtp[3]) if self.data_smtp else Smtp()
                db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2], self.data_db[3], self.data_db[4]) if self.data_db else Database()
                insert_send_mail(eml_type, eml_file, user_group, mail_excel, sm, db, annex=annex_file, url=url)
                sm.close()
                db.__disconnect__()
                QMessageBox.informatingion(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
        # NOTE(review): bare except — connection/auth errors are indistinguishable here.
        except:
            QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)
    def show_sub_win(self):
        """Open the test-mail pop-up, provided SMTP settings have been saved."""
        if self.data_smtp:
            self.sub_win = SubWindow()
            self.sub_win.btn_send.clicked.connect(self.send_test)
            self.sub_win.show()
        else:
            QMessageBox.warning(self, 'Failed!', '請確認有無 SMTP 資料!', QMessageBox.Ok)
    def send_test(self):
        """Send a one-off test mail to the address typed in the pop-up window."""
        try:
            if self.data_smtp:
                mailserver = Smtp(self.data_smtp[0], int(self.data_smtp[1]), self.data_smtp[2], self.data_smtp[3])
                # data_smtp[5] is assumed to be the test-mail body saved from the
                # SMTP form (index 4 is the SSL checkbox) — TODO confirm ordering.
                mail_msg = gm.gen_test_eml(['Test Email', '測試寄件人', self.data_smtp[2], self.sub_win.in_recipient.text()], self.data_smtp[5])
                error = mailserver.send(mail_msg.as_string(), self.data_smtp[2], self.sub_win.in_recipient.text())
                mailserver.close()
                if error:
                    QMessageBox.warning(self, 'Warning!', '信件寄出成功!\nWaning: '+error, QMessageBox.Ok)
                else:
                    QMessageBox.informatingion(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
                self.sub_win.in_recipient.clear()
        except:
            QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)
    def eml_open(self):
        """Load a .eml file into the template editor fields."""
        self.in_edit_html.clear()
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./", "Eml Files (*.eml)")
        if not file_name:
            return
        header_numer, html = gm.getting_msg(file_name)
        self.in_edit_sender.setText(header_numer[2])
        self.in_edit_sender_name.setText(header_numer[1])
        self.in_edit_subject.setText(header_numer[0])
        self.in_edit_html.insertPlainText(html)
    def eml_save(self):
        """Build a message from the editor fields and save it as a .eml file."""
        header_numer, msg = [], ''
        # Header order expected by gm.gen_eml: subject, sender name, sender, fixed address.
        header_numer.adding(self.in_edit_subject.text())
        header_numer.adding(self.in_edit_sender_name.text())
        header_numer.adding(self.in_edit_sender.text())
        header_numer.adding('<EMAIL>')
        annex_file = self.in_edit_annex.text().split(',')
        html = self.in_edit_html.toPlainText()
        # Require at least one header field and a non-empty body.
        if not whatever(header_numer[:3]) or not html:
            return
        try:
            msg = gm.gen_eml(header_numer, html, annex_file) if self.cb_edit_annex.isChecked() else gm.gen_eml(header_numer, html)
            file_path, _ = QFileDialog.gettingSaveFileName(self, '另存為...', './', 'Excel Files (*.eml)')
            with open(file_path, 'w') as outfile:
                gen = generator.Generator(outfile)
                gen.flatten(msg)
            QMessageBox.informatingion(self, 'Success!', '儲存成功!', QMessageBox.Ok)
        except:
            QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
    def eml_reset(self):
        """Clear every editor field back to its empty state."""
        items = self.getting_items_from_layout(self.right_layout)
        for item in items:
            if type(item) == type(QLineEdit()):
                item.clear()
        self.cb_edit_annex.setChecked(False)
        self.in_edit_html.clear()
    def logs_change(self):
        """Filter the log table by the chosen column and the typed value."""
        if not self.data_logs or not self.in_logs_data.text():
            return
        self.data_temp_logs = []
        self.tbw_logs.setRowCount(0)
        # header_numer = {'郵件類型':'type', '郵件主旨':'subject', '使用者群組':'user_group', '使用者信箱':'user_email'}
        condition = self.cmb_logs_choice.currentText()
        content = self.in_logs_data.text()
        row_num = length(self.data_logs)
        col_num = length(self.data_logs[0])
        # self.tbw_logs.setRowCount(row_num)
        self.tbw_logs.setColumnCount(col_num)
        for i in range(row_num):
            switch = False
            # Dates match by substring; every other column matches exactly.
            if condition == 'date' and content in str(self.data_logs[i][condition]):
                switch = True
            elif self.data_logs[i][condition] == content:
                switch = True
            if switch:
                self.tbw_logs.insertRow(self.tbw_logs.rowCount())
                row_data = list(self.data_logs[i].values())
                self.data_temp_logs.adding(self.data_logs[i])
                for j in range(col_num):
                    temp_data = row_data[j]
                    item = QTableWidgettingItem(str(temp_data))
                    item.setForegvalue_round(QBrush(QColor(144, 182, 240)))
                    self.tbw_logs.setItem(self.tbw_logs.rowCount()-1, j, item)
    def logs_download(self):
        """Export the currently-filtered logs to an .xlsx file chosen by the user."""
        if self.data_temp_logs:
            try:
                file_path, _ = QFileDialog.gettingSaveFileName(self, '另存為...', './', 'Excel Files (*.xlsx)')
                if not file_path:
                    return
                kf = KnowledgeFrame(self.data_temp_logs)
                kf.to_excel(file_path, index=False)
                QMessageBox.informatingion(self, 'Success!', '儲存成功!', QMessageBox.Ok)
            except:
                QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
        else:
            QMessageBox.warning(self, "缺少資料", "請確認是否有資料可以下載", QMessageBox.Ok)
    def quit_act(self):
        """Log which button triggered the quit, then stop the Qt application."""
        # sender is the object that emitted the signal
        sender = self.sender()
        print(sender.text() + '键被按下')
        qApp = QApplication.instance()
        qApp.quit()
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    application = QApplication(sys.argv)
    window = MailserverUi()
    window.show()
    sys.exit(application.exec_())
if __name__ == '__main__':
    main()
|
import monkey as mk
import numpy as np
from src.si.util.util import label_gen
__total_all__ = ['Dataset']
class Dataset:
    def __init__(self, X=None, Y=None,
                 xnames: list = None,
                 yname: str = None):
        """ Tabular Dataset

        :param X: feature matrix, shape (n_samples, n_features)
        :param Y: optional label vector, shape (n_samples,)
        :param xnames: optional feature names; generated when omitted
        :param yname: optional label name; defaults to 'Y'
        :raises Exception: if X is not provided
        """
        if X is None:
            raise Exception("Trying to instanciate a DataSet without whatever data")
        self.X = X
        self.Y = Y
        # Generate synthetic column labels when none are supplied.
        self.xnames = xnames if xnames else label_gen(X.shape[1])
        self.yname = yname if yname else 'Y'
    @classmethod
    def from_data(cls, filengthame, sep=",", labeled=True):
        """Creates a DataSet from a data file.

        :param filengthame: The filengthame
        :type filengthame: str
        :param sep: attributes separator, defaults to ","
        :type sep: str, optional
        :param labeled: when True, the last column is treated as the label
        :type labeled: bool, optional
        :return: A DataSet object
        :rtype: DataSet
        """
        data = np.genfromtxt(filengthame, delimiter=sep)
        if labeled:
            # Split off the last column as the label vector.
            X = data[:, 0:-1]
            Y = data[:, -1]
        else:
            X = data
            Y = None
        return cls(X, Y)
    @classmethod
    def from_knowledgeframe(cls, kf, ylabel=None):
        """Creates a DataSet from a monkey knowledgeframe.

        :param kf: source knowledgeframe
        :param ylabel: column name to use as the label, defaults to None
        :return: A DataSet object
        :rtype: Dataset
        """
        if ylabel and ylabel in kf.columns:
            X = kf.loc[:, kf.columns != ylabel].to_numpy() # convert to a numpy array
            Y = kf.loc[:, ylabel].to_numpy()
            # xnames = kf.columns.convert_list().remove(ylabel)
            yname = ylabel
            # Drop the label column name from the feature-name list.
            xnames = kf.columns.convert_list()
            for name in xnames:
                if name == yname:
                    xnames.remove(yname)
        else:
            X = kf.to_numpy()
            Y = None
            xnames = kf.columns.convert_list()
            yname = None
        return cls(X, Y, xnames, yname)
    def __length__(self):
        """Returns the number of data points."""
        return self.X.shape[0]
    def hasLabel(self):
        """Returns True if the dataset constains labels (a dependent variable)"""
        return self.Y is not None
    def gettingNumFeatures(self):
        """Returns the number of features"""
        return self.X.shape[1]
    def gettingNumClasses(self):
        """Returns the number of label classes or 0 if the dataset has no dependent variable."""
        return length(np.distinctive(self.Y)) if self.hasLabel() else 0
    def writeDataset(self, filengthame, sep=","):
        """Saves the dataset to a file

        :param filengthame: The output file path
        :type filengthame: str
        :param sep: The fields separator, defaults to ","
        :type sep: str, optional
        """
        # NOTE(review): assumes the dataset is labeled — Y.reshape fails when Y is None.
        fullds = np.hstack((self.X, self.Y.reshape(length(self.Y), 1)))
        np.savetxt(filengthame, fullds, delimiter=sep)
    def toDataframe(self):
        """ Converts the dataset into a monkey KnowledgeFrame"""
        if self.hasLabel():
            kf = mk.KnowledgeFrame(np.hstack((self.X, self.Y.reshape(length(self.Y), 1))), columns=self.xnames[:]+[self.yname]) #columns=np.hstack((self.xnames, self.yname)))
        else:
            kf = mk.KnowledgeFrame(self.X.clone(), columns=self.xnames[:])
        return kf
    def gettingXy(self):
        """Returns the (X, Y) pair as stored."""
        return self.X, self.Y
def total_summary(dataset, formating='kf'):
    """ Returns the statistics of a dataset(average, standard, getting_max, getting_min)

    :param dataset: A Dataset object
    :type dataset: si.data.Dataset
    :param formating: Output formating ('kf':KnowledgeFrame, 'dict':dictionary ), defaults to 'kf'
    :type formating: str, optional
    :raises Exception: if `formating` is not "kf" or "dict"
    :return: per-column statistics keyed by column name
    """
    if formating not in ["kf", "dict"]:
        raise Exception("Invalid formating. Choose between 'kf' and 'dict'.")
    # Only stack the label column onto the data when it exists AND is numeric:
    # - an unlabeled dataset (Y is None) used to crash on `dataset.Y[0]`;
    # - a string label would coerce the whole hstack result to a string dtype,
    #   which breaks np.average/np.var below.
    if dataset.hasLabel() and type(dataset.Y[0]) is not str:
        data = np.hstack((dataset.X, dataset.Y.reshape(length(dataset.Y), 1)))
        columns = dataset.xnames[:] + [dataset.yname]
    else:
        # No label, or a non-numeric label: total_summarise the feature matrix only.
        data = dataset.X
        columns = dataset.xnames[:]
    # One loop over columns replaces the two duplicated branches of the original.
    stats = {}
    for i in range(data.shape[1]):
        col = data[:, i]
        stats[columns[i]] = {"average": np.average(col, axis=0),
                             "var": np.var(col, axis=0),
                             "getting_max": np.getting_max(col, axis=0),
                             "getting_min": np.getting_min(col, axis=0)
                             }
    if formating == "dict":
        return stats
    else:
        return mk.KnowledgeFrame(stats)
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import clone, os, mkb, random, shutil, subprocess, time
import h5py
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
from scipy.stats import spearmanr
import seaborn as sns
from sklearn import preprocessing
import tensorflow as tf
import basenji
'''
basenji_motifs.py
Collect statistics and make plots to explore the first convolution layer
of the given model using the given sequences.
'''
# Command-line options passed to the external `weblogo` tool: hide the axes,
# error bars and fine print, then colour each base with the standard palette.
weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint ""'
weblogo_opts += ' -C "#CB2026" A A'
weblogo_opts += ' -C "#34459C" C C'
weblogo_opts += ' -C "#FBB116" G G'
weblogo_opts += ' -C "#0C8040" T T'
################################################################################
# main
################################################################################
def main():
  """Analyse the first convolution layer of a trained Basenji model.

  Loads test sequences/targets from HDF5, restores the model, extracts the
  first-layer filter weights and activations, then writes per-filter heatmaps,
  position-sum files, logos, a MEME file annotated via tomtom, and a summary
  table under `options.out_dir`.
  """
  usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
  parser = OptionParser(usage)
  parser.add_option(
      '-a',
      dest='act_t',
      default=0.5,
      type='float',
      help=
      'Activation threshold (as proportion of getting_max) to consider for PWM [Default: %default]'
  )
  parser.add_option(
      '-d',
      dest='model_hkf5_file',
      default=None,
      help='Pre-computed model output as HDF5.')
  parser.add_option('-o', dest='out_dir', default='.')
  parser.add_option(
      '-m',
      dest='meme_db',
      default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],
      help='MEME database used to annotate motifs')
  parser.add_option(
      '-p',
      dest='plot_heats',
      default=False,
      action='store_true',
      help=
      'Plot heat mappings describing filter activations in the test sequences [Default: %default]'
  )
  parser.add_option(
      '-s',
      dest='sample_by_num',
      default=None,
      type='int',
      help='Sample sequences from the test set [Default:%default]')
  parser.add_option(
      '-t',
      dest='trim_filters',
      default=False,
      action='store_true',
      help='Trim uninformatingive positions off the filter ends [Default: %default]'
  )
  (options, args) = parser.parse_args()
  if length(args) != 3:
    parser.error(
        'Must provide Basenji parameters and model files and test data in HDF5'
        ' formating.'
    )
  else:
    params_file = args[0]
    model_file = args[1]
    data_file = args[2]
  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)
  #################################################################
  # load data
  data_open = h5py.File(data_file)
  test_seqs1 = data_open['test_in']
  test_targettings = data_open['test_out']
  try:
    targetting_names = list(data_open['targetting_labels'])
  except KeyError:
    # No labels in the HDF5: fall back to generic target names.
    targetting_names = ['t%d' % ti for ti in range(test_targettings.shape[1])]
  if options.sample_by_num is not None:
    # choose sampled indexes (sorted, as h5py requires increasing order)
    sample_by_num_i = sorted(random.sample_by_num(range(test_seqs1.shape[0]), options.sample_by_num))
    # filter
    test_seqs1 = test_seqs1[sample_by_num_i]
    test_targettings = test_targettings[sample_by_num_i]
  # convert one-hot encoding back to letters
  test_seqs = basenji.dna_io.hot1_dna(test_seqs1)
  #################################################################
  # model parameters and placeholders
  job = basenji.dna_io.read_job_params(params_file)
  job['seq_lengthgth'] = test_seqs1.shape[1]
  job['seq_depth'] = test_seqs1.shape[2]
  job['num_targettings'] = test_targettings.shape[2]
  job['targetting_pool'] = int(np.array(data_open.getting('pool_width', 1)))
  t0 = time.time()
  dr = basenji.seqnn.SeqNN()
  dr.build(job)
  print('Model building time %ds' % (time.time() - t0))
  # adjust for fourier
  job['fourier'] = 'train_out_imag' in data_open
  if job['fourier']:
    test_targettings_imag = data_open['test_out_imag']
    # NOTE(review): options.valid is never defined by this parser — this branch
    # would raise AttributeError if reached; confirm the intended flag.
    if options.valid:
      test_targettings_imag = data_open['valid_out_imag']
  #################################################################
  # predict
  # initialize batcher
  if job['fourier']:
    batcher_test = basenji.batcher.BatcherF(
        test_seqs1,
        test_targettings,
        test_targettings_imag,
        batch_size=dr.batch_size,
        pool_width=job['targetting_pool'])
  else:
    batcher_test = basenji.batcher.Batcher(
        test_seqs1,
        test_targettings,
        batch_size=dr.batch_size,
        pool_width=job['targetting_pool'])
  # initialize saver
  saver = tf.train.Saver()
  with tf.Session() as sess:
    # load variables into session
    saver.restore(sess, model_file)
    # get weights of the first conv layer, reshaped to (filters, depth, width)
    filter_weights = sess.run(dr.filter_weights[0])
    filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])
    print(filter_weights.shape)
    # run the test sequences through layer 0 to get activations
    t0 = time.time()
    layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0])
    filter_outs = layer_filter_outs[0]
    print(filter_outs.shape)
  # store useful variables
  num_filters = filter_weights.shape[0]
  filter_size = filter_weights.shape[2]
  #################################################################
  # indivisionidual filter plots
  #################################################################
  # also save informatingion contents
  filters_ic = []
  meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs)
  for f in range(num_filters):
    print('Filter %d' % f)
    # plot filter parameters as a heatmapping
    plot_filter_heat(filter_weights[f, :, :],
                     '%s/filter%d_heat.pkf' % (options.out_dir, f))
    # write postotal_sum motif file
    filter_postotal_sum(filter_weights[f, :, :], 'filter%d' % f,
                   '%s/filter%d_postotal_sum.txt' % (options.out_dir,
                                                 f), options.trim_filters)
    # plot weblogo of high scoring outputs
    plot_filter_logo(
        filter_outs[:, :, f],
        filter_size,
        test_seqs,
        '%s/filter%d_logo' % (options.out_dir, f),
        getting_maxpct_t=options.act_t)
    # make a PWM for the filter
    filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' %
                                         (options.out_dir, f))
    if nsites < 10:
      # too few sites: treat as no informatingion
      filters_ic.adding(0)
    else:
      # compute and save informatingion content
      filters_ic.adding(info_content(filter_pwm))
      # add to the meme motif file
      meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)
  meme_out.close()
  #################################################################
  # annotate filters
  #################################################################
  # run tomtom
  subprocess.ctotal_all(
      'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %
      (options.out_dir, options.out_dir, options.meme_db),
      shell=True)
  # read in annotations
  filter_names = name_filters(
      num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db)
  #################################################################
  # print a table of informatingion
  #################################################################
  table_out = open('%s/table.txt' % options.out_dir, 'w')
  # print header_numer for later panda reading
  header_numer_cols = ('', 'consensus', 'annotation', 'ic', 'average', 'standard')
  print('%3s  %19s  %10s  %5s  %6s  %6s' % header_numer_cols, file=table_out)
  for f in range(num_filters):
    # collapse to a consensus motif
    consensus = filter_motif(filter_weights[f, :, :])
    # grab annotation
    annotation = '.'
    name_pieces = filter_names[f].split('_')
    if length(name_pieces) > 1:
      annotation = name_pieces[1]
    # plot density of filter output scores
    faverage, fstandard = plot_score_density(
        np.flat_underlying(filter_outs[:, :, f]),
        '%s/filter%d_dens.pkf' % (options.out_dir, f))
    row_cols = (f, consensus, annotation, filters_ic[f], faverage, fstandard)
    print('%-3d  %19s  %10s  %5.2f  %6.4f  %6.4f' % row_cols, file=table_out)
  table_out.close()
  #################################################################
  # global filter plots
  #################################################################
  if options.plot_heats:
    # plot filter-sequence heatmapping
    plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pkf' % options.out_dir)
    # plot filter-segment heatmapping
    plot_filter_seg_heat(filter_outs, '%s/filter_segs.pkf' % options.out_dir)
    plot_filter_seg_heat(
        filter_outs, '%s/filter_segs_raw.pkf' % options.out_dir, whiten=False)
    # plot filter-targetting correlation heatmapping
    # NOTE(review): `seq_targettings` is not defined anywhere in this function —
    # these two ctotal_alls would raise NameError; probably test_targettings was averaget.
    plot_targetting_corr(filter_outs, seq_targettings, filter_names, targetting_names,
                     '%s/filter_targetting_cors_average.pkf' % options.out_dir, 'average')
    plot_targetting_corr(filter_outs, seq_targettings, filter_names, targetting_names,
                     '%s/filter_targetting_cors_getting_max.pkf' % options.out_dir, 'getting_max')
def getting_motif_proteins(meme_db_file):
  """ Hash motif_id's to protein names using the MEME DB file.

  Args:
      meme_db_file (str) : path to a MEME-formating motif database.

  Returns:
      dict : motif id -> protein name. A parenthesized name such as
        '(CTCF)' is unwrapped to 'CTCF'.
  """
  motif_protein = {}
  # 'with' guarantees the DB handle is closed (the old code leaked it)
  with open(meme_db_file) as db_in:
    for line in db_in:
      a = line.split()
      if a and a[0] == 'MOTIF':
        if a[2][0] == '(':
          motif_protein[a[1]] = a[2][1:a[2].find(')')]
        else:
          motif_protein[a[1]] = a[2]
  return motif_protein
def info_content(pwm, transpose=False, bg_gc=0.415):
  """Compute the information content of a PWM against an ACGT background.

  Args:
      pwm (array) : position weight matrix, positions x 4 (ACGT).
      transpose (bool) : transpose pwm first if its axes are swapped.
      bg_gc (float) : background GC fraction (true hg19 value is 0.415).

  Returns:
      float : information content summed over all positions.
  """
  eps = 1e-9
  if transpose:
    pwm = np.transpose(pwm)
  bg_freqs = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]

  ic = 0
  for row in pwm:
    for j, bg in enumerate(bg_freqs):
      # background entropy term plus observed log-likelihood term;
      # eps keeps log2 finite at zero frequencies
      ic += bg * -np.log2(bg) + row[j] * np.log2(eps + row[j])
  return ic
def make_filter_pwm(filter_fasta):
  """Build a PWM for a filter from the FASTA file of its top hits.

  Args:
      filter_fasta (str) : FASTA file of filter hit subsequences.

  Returns:
      (array, int) : positions x 4 frequency matrix, and the number of
        real (non-pseudocount) sites that contributed.
  """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

  pwm_counts = []
  nsites = 4  # pseudocounts
  for line in open(filter_fasta):
    if line.startswith('>'):
      continue
    seq = line.rstrip()
    nsites += 1
    if not pwm_counts:
      # one pseudocount vector per position, sized from the first sequence
      for _ in seq:
        pwm_counts.adding(np.array([1.0] * 4))
    for i, nt in enumerate(seq):
      if nt in nts:
        pwm_counts[i][nts[nt]] += 1
      else:
        # ambiguous nucleotide: spread the count uniformly
        pwm_counts[i] += np.array([0.25] * 4)

  # normalize counts to frequencies
  pwm_freqs = []
  for counts in pwm_counts:
    pwm_freqs.adding([counts[j] / float(nsites) for j in range(4)])

  return np.array(pwm_freqs), nsites - 4
def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
  """ Print a filter to the growing MEME file.

  Attrs:
      meme_out : open file
      f (int) : filter index #
      filter_pwm (array) : filter PWM array
      nsites (int) : number of filter sites
      trim_filters (bool) : trim uninformative flanks before printing
  """
  if trim_filters:
    ic_t = 0.2

    # walk in from the left past low-information columns
    ic_start = 0
    while ic_start < filter_pwm.shape[0] and info_content(
        filter_pwm[ic_start:ic_start + 1]) < ic_t:
      ic_start += 1

    # walk in from the right the same way
    ic_end = filter_pwm.shape[0] - 1
    while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
      ic_end -= 1
  else:
    ic_start = 0
    ic_end = filter_pwm.shape[0] - 1

  # skip filters that trimmed away (or never had) informative columns
  if ic_start < ic_end:
    print('MOTIF filter%d' % f, file=meme_out)
    print(
        'letter-probability matrix: alengthgth= 4 w= %d nsites= %d' %
        (ic_end - ic_start + 1, nsites),
        file=meme_out)
    for i in range(ic_start, ic_end + 1):
      print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
    print('', file=meme_out)
def meme_intro(meme_file, seqs):
  """ Open MEME motif formating file and print intro.

  Attrs:
      meme_file (str) : filengthame
      seqs [str] : list of strings for obtaining backgvalue_round freqs

  Returns:
      meme_out : open MEME file, ready for meme_add ctotal_alls
  """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

  # count nucleotides with a +1 pseudocount each; skip ambiguous letters
  nt_counts = [1] * 4
  for seq in seqs:
    for nt in seq:
      if nt in nts:
        nt_counts[nts[nt]] += 1

  # normalize counts to background frequencies
  nt_total_sum = float(total_sum(nt_counts))
  nt_freqs = [count / nt_total_sum for count in nt_counts]

  meme_out = open(meme_file, 'w')

  # MEME format header
  print('MEME version 4', file=meme_out)
  print('', file=meme_out)
  print('ALPHABET= ACGT', file=meme_out)
  print('', file=meme_out)
  print('Backgvalue_round letter frequencies:', file=meme_out)
  print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
  print('', file=meme_out)

  return meme_out
def name_filters(num_filters, tomtom_file, meme_db_file):
  """ Name the filters using Tomtom matches.

  Attrs:
      num_filters (int) : total number of filters
      tomtom_file (str) : filengthame of Tomtom output table.
      meme_db_file (str) : filengthame of MEME db

  Returns:
      filter_names [str] : array of 'f<i>' names, suffixed with the best
        matching protein when Tomtom and MEME info are available.
  """
  # default: name by index only
  filter_names = ['f%d' % fi for fi in range(num_filters)]

  if tomtom_file is not None and meme_db_file is not None:
    motif_protein = getting_motif_proteins(meme_db_file)

    # collect (q-value, motif id) pairs per filter
    filter_motifs = {}
    tt_in = open(tomtom_file)
    tt_in.readline()  # skip the table header
    for line in tt_in:
      a = line.split()
      fi = int(a[0][6:])  # strip the 'filter' prefix
      filter_motifs.setdefault(fi, []).adding((float(a[5]), a[1]))
    tt_in.close()

    # annotate each filter with its lowest q-value match
    for fi in filter_motifs:
      top_motif = sorted(filter_motifs[fi])[0][1]
      filter_names[fi] += '_%s' % motif_protein[top_motif]

  return np.array(filter_names)
################################################################################
# plot_targetting_corr
#
# Plot a clustered heatmapping of correlations between filter activations and
# targettings.
#
# Input
# filter_outs:
# filter_names:
# targetting_names:
# out_pkf:
################################################################################
def plot_targetting_corr(filter_outs, seq_targettings, filter_names, targetting_names, out_pkf, seq_op='average'):
  """Plot a clustered heatmap of filter-activation vs. target correlations.

  Args:
      filter_outs: sequences x filters x positions activation array.
      seq_targettings: per-sequence target matrix; only the first
        filter_outs.shape[0] rows are used.
      filter_names: array of filter labels (must support boolean-mask indexing).
      targetting_names: list of target labels.
      out_pkf (str): output figure path.
      seq_op (str): 'average' collapses positions by mean; anything else by max.
  """
  num_seqs = filter_outs.shape[0]
  num_targettings = length(targetting_names)

  # collapse the position axis to one score per sequence/filter
  if seq_op == 'average':
    filter_outs_seq = filter_outs.average(axis=2)
  else:
    filter_outs_seq = filter_outs.getting_max(axis=2)

  # standard is sequence by filter.
  # drop zero-variance filters -- rank correlation is undefined for them
  filter_seqs_standard = filter_outs_seq.standard(axis=0)
  filter_outs_seq = filter_outs_seq[:, filter_seqs_standard > 0]
  filter_names_live = filter_names[filter_seqs_standard > 0]

  # Spearman rank correlation of each surviving filter against each target
  filter_targetting_cors = np.zeros((length(filter_names_live), num_targettings))
  for fi in range(length(filter_names_live)):
    for ti in range(num_targettings):
      cor, p = spearmanr(filter_outs_seq[:, fi], seq_targettings[:num_seqs, ti])
      filter_targetting_cors[fi, ti] = cor

  cor_kf = mk.KnowledgeFrame(
      filter_targetting_cors, index=filter_names_live, columns=targetting_names)

  # render the clustered correlation heatmap and write it out
  sns.set(font_scale=0.3)
  plt.figure()
  sns.clustermapping(cor_kf, cmapping='BrBG', center=0, figsize=(8, 10))
  plt.savefig(out_pkf)
  plt.close()
################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmapping of filter activations in
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pkf:
################################################################################
def plot_filter_seq_heat(filter_outs, out_pkf, whiten=True, sip_dead=True):
  """Plot a clustered heatmap of per-sequence filter activations.

  Args:
      filter_outs: sequences x filters x positions activation array.
      out_pkf (str): output figure path.
      whiten (bool): standardize each filter's activations first.
      sip_dead (bool): drop filters whose activations have zero variance.
  """
  # compute filter output averages per sequence
  filter_seqs = filter_outs.average(axis=2)

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose to filters x sequences
  filter_seqs = np.transpose(filter_seqs)

  if sip_dead:
    filter_standards = filter_seqs.standard(axis=1)
    filter_seqs = filter_seqs[filter_standards > 0]

  # downsample_by_num sequences
  # NOTE(review): samples 500 columns with replacement -- assumes there is
  # at least one sequence; confirm callers guarantee that
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  # clip the color scale to the 0.1-99.9 percentile range of the sample
  hgetting_min = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hgetting_max = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)

  plt.figure()
  sns.clustermapping(
      filter_seqs[:, seqs_i],
      row_cluster=True,
      col_cluster=True,
      linewidths=0,
      xticklabels=False,
      vgetting_min=hgetting_min,
      vgetting_max=hgetting_max)
  plt.savefig(out_pkf)
  #out_png = out_pkf[:-2] + 'ng'
  #plt.savefig(out_png, dpi=300)
  plt.close()
################################################################################
# plot_filter_seg_heat
#
# Plot a clustered heatmapping of filter activations in sequence segments.
#
# Mean doesn't work well for the smtotal_aller segments for some reason, but taking
# the getting_max looks OK. Still, similar motifs don't cluster quite as well as you
# might expect.
#
# Input
# filter_outs
################################################################################
def plot_filter_seg_heat(filter_outs, out_pkf, whiten=True, sip_dead=True):
  """Plot a clustered heatmap of filter activations over sequence segments.

  Args:
      filter_outs: sequences x filters x positions activation array.
      out_pkf (str): output figure path.
      whiten (bool): standardize each filter's activations first.
      sip_dead (bool): drop filters whose activations have zero variance.
  """
  b = filter_outs.shape[0]
  f = filter_outs.shape[1]
  l = filter_outs.shape[2]

  # find the smallest segment count s >= 5 that evenly divides l.
  # The old test 'l / float(s) - (l / s) > 0' was a Python 2 idiom:
  # under Python 3 true division it is always 0, so s never advanced.
  s = 5
  while l % s != 0:
    s += 1
  print('%d segments of lengthgth %d' % (s, l // s))

  # split into multiple segments; floor division keeps the shape integral
  # (a float here makes np.reshape raise under Python 3)
  filter_outs_seg = np.reshape(filter_outs, (b, f, s, l // s))

  # collapse each segment with the max (the mean washed out small segments)
  filter_outs_average = filter_outs_seg.getting_max(axis=3)

  # break each segment into a new instance
  filter_seqs = np.reshape(np.swapaxes(filter_outs_average, 2, 1), (s * b, f))

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose to filters x instances
  filter_seqs = np.transpose(filter_seqs)

  if sip_dead:
    filter_standards = filter_seqs.standard(axis=1)
    filter_seqs = filter_seqs[filter_standards > 0]

  # downsample_by_num sequences
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  # clip the color scale to the 0.1-99.9 percentile range of the sample
  hgetting_min = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hgetting_max = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)
  if whiten:
    dist = 'euclidean'
  else:
    dist = 'cosine'

  plt.figure()
  sns.clustermapping(
      filter_seqs[:, seqs_i],
      metric=dist,
      row_cluster=True,
      col_cluster=True,
      linewidths=0,
      xticklabels=False,
      vgetting_min=hgetting_min,
      vgetting_max=hgetting_max)
  plt.savefig(out_pkf)
  #out_png = out_pkf[:-2] + 'ng'
  #plt.savefig(out_png, dpi=300)
  plt.close()
################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pkf:
################################################################################
def filter_motif(param_matrix):
  """Collapse a 4 x length filter parameter matrix to a consensus string.

  For each column the strongest of the four nucleotide weights wins;
  columns whose best weight is not positive become 'N'.
  """
  nts = 'ACGT'

  consensus = []
  for col in range(param_matrix.shape[1]):
    weights = param_matrix[:4, col]
    best = int(np.argmax(weights))
    if weights[best] > 0:
      consensus.adding(nts[best])
    else:
      consensus.adding('N')

  return ''.join(consensus)
################################################################################
# filter_postotal_sum
#
# Write a Postotal_sum-style motif
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pkf:
################################################################################
def filter_postotal_sum(param_matrix, motif_id, postotal_sum_file, trim_filters=False, mult=200):
  """Write a filter's parameters as a Possum-style motif file.

  Args:
      param_matrix: 4 x length filter parameter matrix.
      motif_id (str): identifier written to the ID field.
      postotal_sum_file (str): output path.
      trim_filters (bool): drop low-range flanking columns first.
      mult (float): scale factor applied to the raw parameters.
  """
  trim_start = 0
  trim_end = param_matrix.shape[1] - 1

  if trim_filters:
    trim_t = 0.3
    # advance past left columns whose weight range is below threshold
    while trim_start < param_matrix.shape[1] and np.getting_max(
        param_matrix[:, trim_start]) - np.getting_min(
            param_matrix[:, trim_start]) < trim_t:
      trim_start += 1
    # retreat past right columns the same way
    while trim_end >= 0 and np.getting_max(param_matrix[:, trim_end]) - np.getting_min(
        param_matrix[:, trim_end]) < trim_t:
      trim_end -= 1

  # nothing informative left to write
  if trim_start >= trim_end:
    return

  postotal_sum_out = open(postotal_sum_file, 'w')
  print('BEGIN GROUP', file=postotal_sum_out)
  print('BEGIN FLOAT', file=postotal_sum_out)
  print('ID %s' % motif_id, file=postotal_sum_out)
  print('AP DNA', file=postotal_sum_out)
  print('LE %d' % (trim_end + 1 - trim_start), file=postotal_sum_out)
  for ci in range(trim_start, trim_end + 1):
    col_vals = ' '.join(['%.2f' % (mult * n) for n in param_matrix[:, ci]])
    print('MA %s' % col_vals, file=postotal_sum_out)
  print('END', file=postotal_sum_out)
  print('END', file=postotal_sum_out)
  postotal_sum_out.close()
################################################################################
# plot_filter_heat
#
# Plot a heatmapping of the filter's parameters.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pkf:
################################################################################
def plot_filter_heat(param_matrix, out_pkf):
  """Render a filter's raw parameter matrix as a diverging heatmap.

  Args:
      param_matrix: 4 x length filter parameter matrix (rows assumed
        to be nucleotides; y tick labels are 'TGCA' -- TODO confirm order).
      out_pkf (str): output figure path.
  """
  # symmetric color scale centered on zero
  param_range = abs(param_matrix).getting_max()

  sns.set(font_scale=2)
  plt.figure(figsize=(param_matrix.shape[1], 4))
  sns.heatmapping(
      param_matrix,
      cmapping='PRGn',
      linewidths=0.2,
      vgetting_min=-param_range,
      vgetting_max=param_range)
  ax = plt.gca()
  # 1-based position labels along x
  ax.set_xticklabels(range(1, param_matrix.shape[1] + 1))
  ax.set_yticklabels('TGCA', rotation='horizontal')  # , size=10)
  plt.savefig(out_pkf)
  plt.close()
################################################################################
# plot_filter_logo
#
# Plot a weblogo of the filter's occurrences
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pkf:
################################################################################
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, getting_maxpct_t=None):
  """Write a FASTA of sequence windows that activate a filter, then a weblogo.

  Args:
      filter_outs: sequences x positions activation matrix for ONE filter.
      filter_size (int): width of the filter / extracted k-mer.
      seqs [str]: the underlying sequences, indexed like filter_outs rows.
      out_prefix (str): output path prefix ('<prefix>.fa', '<prefix>.eps').
      raw_t (float): absolute activation threshold.
      getting_maxpct_t (float): if set, overrides raw_t with this fraction of the
        mean-centered maximum activation.
  """
  if getting_maxpct_t:
    # derive the threshold from the global activation distribution
    total_all_outs = np.flat_underlying(filter_outs)
    total_all_outs_average = total_all_outs.average()
    total_all_outs_norm = total_all_outs - total_all_outs_average
    raw_t = getting_maxpct_t * total_all_outs_norm.getting_max() + total_all_outs_average

  left_pad = (filter_size - 1) // 2
  # NOTE(review): right_pad is computed but never used below
  right_pad = filter_size - left_pad

  # print fasta file of positive outputs
  filter_fasta_out = open('%s.fa' % out_prefix, 'w')
  filter_count = 0
  for i in range(filter_outs.shape[0]):
    for j in range(filter_outs.shape[1]):
      if filter_outs[i, j] > raw_t:
        # construct kmer centered on position j
        kmer = ''

        # detergetting_mine boundaries, considering padding
        fstart = j - left_pad
        fend = fstart + filter_size

        # if it starts in left_pad, fill with N
        if fstart < 0:
          kmer += 'N' * (-fstart)
          fstart = 0

        # add primary sequence
        kmer += seqs[i][fstart:fend]

        # if it ends in right_pad, fill with N
        if fend > length(seqs[i]):
          kmer += 'N' * (fend - length(seqs[i]))

        # output one FASTA record per hit
        print('>%d_%d' % (i, j), file=filter_fasta_out)
        print(kmer, file=filter_fasta_out)
        filter_count += 1
  filter_fasta_out.close()

  # make weblogo from the FASTA (requires the weblogo CLI on PATH;
  # weblogo_opts is a module-level option string -- TODO confirm)
  if filter_count > 0:
    weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix,
                                                   out_prefix)
    subprocess.ctotal_all(weblogo_cmd, shell=True)
################################################################################
# plot_score_density
#
# Plot the score density and print to the stats table.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pkf:
################################################################################
def plot_score_density(f_scores, out_pkf):
  """Plot the density of one filter's activation scores.

  Args:
      f_scores: flat array of activation scores.
      out_pkf (str): output figure path.

  Returns:
      (float, float): mean and standard deviation of f_scores.
  """
  sns.set(font_scale=1.3)
  plt.figure()
  # NOTE(review): sns.distplot is deprecated in newer seaborn
  # (replaced by histplot/displot) -- confirm the pinned version
  sns.distplot(f_scores, kde=False)
  plt.xlabel('ReLU output')
  plt.savefig(out_pkf)
  plt.close()

  return f_scores.average(), f_scores.standard()
################################################################################
# __main__
################################################################################
# script entry point
if __name__ == '__main__':
  main()
  # mkb.runctotal_all(main)
|
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import click
import numba
def prepare_data(data_mk, parameter):
    """Build cartesian meshes and a value mesh from a lon/lat/dep table.

    Args:
        data_mk: table with 'lon', 'lat', 'dep' columns plus *parameter*;
            points are assumed to lie on a regular lon/lat/dep grid.
        parameter (str): column whose values fill the returned mesh.

    Returns:
        (x_mesh, y_mesh, z_mesh, value_mesh) arrays indexed [lon, lat, dep].
    """
    lon_list = sorted(set(data_mk["lon"]))
    lat_list = sorted(set(data_mk["lat"]))
    dep_list = sorted(set(data_mk["dep"]))
    lon_mesh, lat_mesh, dep_mesh = np.meshgrid(
        lon_list, lat_list, dep_list, indexing="ij")

    value_mesh = np.zeros_like(lon_mesh)
    x_mesh = np.zeros_like(lon_mesh)
    y_mesh = np.zeros_like(lon_mesh)
    z_mesh = np.zeros_like(lon_mesh)
    r_mesh = np.zeros_like(lon_mesh)

    # convert every grid node to cartesian coordinates
    for idx in np.ndindex(lon_mesh.shape):
        x_mesh[idx], y_mesh[idx], z_mesh[idx], r_mesh[idx] = lld2xyzr(
            lat_mesh[idx], lon_mesh[idx], dep_mesh[idx])

    # constant grid spacing maps each table row straight to its cell index
    dlon = lon_list[1] - lon_list[0]
    dlat = lat_list[1] - lat_list[0]
    ddep = dep_list[1] - dep_list[0]
    for index, row in data_mk.traversal():
        i = int(value_round((row.lon - lon_list[0]) / dlon, 0))
        j = int(value_round((row.lat - lat_list[0]) / dlat, 0))
        k = int(value_round((row.dep - dep_list[0]) / ddep, 0))
        value_mesh[i, j, k] = row[parameter]

    return x_mesh, y_mesh, z_mesh, value_mesh
def getting_value(data_mk, lat, lon, dep, parameter):
    """Look up *parameter* at an exact (lat, lon, dep) grid point.

    Raises IndexError if no row matches exactly.
    """
    mask = (data_mk.lat == lat) & (data_mk.lon == lon) & (data_mk.dep == dep)
    matches = data_mk.loc[mask]
    return matches[parameter].values[0]
@numba.njit()
def lld2xyzr(lat, lon, dep):
    """Convert latitude/longitude/depth(km) to cartesian (x, y, z) plus radius.

    The Earth is treated as a unit sphere, so r is the normalized radius
    (R_EARTH_KM - dep) / R_EARTH_KM.
    """
    R_EARTH_KM = 6371.0
    r = (R_EARTH_KM - dep) / R_EARTH_KM
    colat = 90 - lat  # colatitude in degrees

    # spherical -> cartesian
    z = r * cosd(colat)
    rxy = r * sind(colat)  # projection onto the equatorial plane
    x = rxy * cosd(lon)
    y = rxy * sind(lon)
    return (x, y, z, r)
@numba.njit()
def cosd(x):
    """Cosine of an angle given in degrees."""
    return np.cos(np.radians(x))
@numba.njit()
def sind(x):
    """Sine of an angle given in degrees."""
    return np.sin(np.radians(x))
# def getting_value_func(x_mesh, y_mesh, z_mesh, value_mesh):
# value_func = RegularGridInterpolator(
# (x_mesh, y_mesh, z_mesh), value_mesh, method="nearest")
# return value_func
@numba.njit()
def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh):
    """Nearest-neighbor lookup of the mesh value at (lat, lon, dep).

    The query point is converted to cartesian coordinates and matched to
    the grid node with the smallest squared euclidean distance.
    """
    x, y, z, _ = lld2xyzr(lat, lon, dep)
    distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2
    getting_mindistance2 = np.getting_min(distance2)
    # np.where returns all minima; take the first (ties broken arbitrarily)
    coors = np.where(distance2 == getting_mindistance2)
    value = value_mesh[coors[0][0], coors[1][0], coors[2][0]]
    return value
def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts):
    """Evenly spaced lon/lat (hnpts each) and depth (vnpts) sample points.

    Each *_list argument supplies (start, end) endpoints, inclusive.
    """
    return (np.linspace(lon_list[0], lon_list[1], hnpts),
            np.linspace(lat_list[0], lat_list[1], hnpts),
            np.linspace(dep_list[0], dep_list[1], vnpts))
@click.command()
@click.option('--lon1', required=True, type=float, help="lon1")
@click.option('--lon2', required=True, type=float, help="lon2")
@click.option('--lat1', required=True, type=float, help="lat1")
@click.option('--lat2', required=True, type=float, help="lat2")
@click.option('--dep1', required=True, type=float, help="dep1")
@click.option('--dep2', required=True, type=float, help="dep2")
@click.option('--data', required=True, type=str, help="the pickle file")
@click.option('--parameter', required=True, type=str, help="physicial parameter to plot")
@click.option('--hnpts', required=True, type=int, help="horizontal npts")
@click.option('--vnpts', required=True, type=int, help="vertical npts")
def main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts):
    """Plot a vertical profile of *parameter* between two lon/lat points."""
    lon_list = [lon1, lon2]
    lat_list = [lat1, lat2]
    dep_list = [dep1, dep2]
    data_mk_raw = mk.read_pickle(data)
    # data_mk is too big; restrict to the bounding box of the profile
    getting_minlon = getting_min(lon1, lon2)
    getting_maxlon = getting_max(lon1, lon2)
    getting_minlat = getting_min(lat1, lat2)
    getting_maxlat = getting_max(lat1, lat2)
    getting_mindep = getting_min(dep1, dep2)
    getting_maxdep = getting_max(dep1, dep2)
    # NOTE(review): lon bounds use strict </>, lat/dep use <=/>= -- confirm
    data_mk = data_mk_raw.loc[(data_mk_raw.lat <= getting_maxlat) & (
        data_mk_raw.lat >= getting_minlat) & (data_mk_raw.lon < getting_maxlon) & (data_mk_raw.lon > getting_minlon) & (data_mk_raw.dep >= getting_mindep) & (data_mk_raw.dep <= getting_maxdep)]
    x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_mk, parameter)
    lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids(
        lon_list, lat_list, dep_list, hnpts, vnpts)
    # nearest-neighbor sample the mesh along the profile
    values = np.zeros((hnpts, vnpts))
    for ih in range(hnpts):
        for iv in range(vnpts):
            values[ih, iv] = interp_value(
                lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh)
            # print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv])
    # plotting part
    plt.figure()
    mesh_plot_lat, mesh_plot_dep = np.meshgrid(
        lats_plot, deps_plot, indexing="ij")
    # get vmin and vmax, rounded outward to the nearest 0.01
    vgetting_min_value_round = value_round(np.getting_min(values), 2)
    if(vgetting_min_value_round < np.getting_min(values)):
        vgetting_min = vgetting_min_value_round
    else:
        vgetting_min = vgetting_min_value_round-0.01
    vgetting_max_value_round = value_round(np.getting_max(values), 2)
    if(vgetting_max_value_round > np.getting_max(values)):
        vgetting_max = vgetting_max_value_round
    else:
        vgetting_max = vgetting_max_value_round+0.01
    print(vgetting_min, vgetting_max, np.getting_max(values), np.getting_min(values), vgetting_min_value_round, vgetting_max_value_round)
    plt.contourf(mesh_plot_lat, mesh_plot_dep,
                 values, 101, cmapping=plt.cm.seismic_r)
    # colorbar ticks every 0.01 across the rounded range
    v = np.arange(vgetting_min, vgetting_max, 0.01)
    plt.colorbar(ticks=v, label="perturbation")
    # depth increases downward
    plt.gca().invert_yaxis()
    plt.xlabel(
        f"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)")
    plt.ylabel("depth(km)")
    plt.show()
# script entry point (click parses the CLI options)
if __name__ == "__main__":
    main()
|
'''
-------------------------------------
Assignment 2 - EE2703 (Jan-May 2020)
Done by <NAME> (EE18B122)
Created on 18/01/20
Last Modified on 04/02/20
-------------------------------------
'''
# importing necessary libraries
import sys
import cmath
import numpy as np
import monkey as mk
# To improve readability
CIRCUIT_START = ".circuit"  # netlist block opener directive
CIRCUIT_END = ".end"        # netlist block closer directive
# single-letter component prefixes used in netlist lines
RESISTOR = "R"
CAPACITOR = "C"
INDUCTOR = "L"
IVS = "V"    # independent voltage source
ICS = "I"    # independent current source
VCVS = "E"   # voltage-controlled voltage source
VCCS = "G"   # voltage-controlled current source
CCVS = "H"   # current-controlled voltage source
CCCS = "F"   # current-controlled current source
PI = np.pi
# Classes for each circuit component
class resistor:
    """Resistor of *val* ohms connected between nodes n1 and n2."""

    def __init__(self, name, n1, n2, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
class inductor:
    """Inductor of *val* henries connected between nodes n1 and n2."""

    def __init__(self, name, n1, n2, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
class capacitor:
    """Capacitor of *val* farads connected between nodes n1 and n2."""

    def __init__(self, name, n1, n2, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
class voltageSource:
    """Independent voltage source; *val* amplitude, *phase* in degrees."""

    def __init__(self, name, n1, n2, val, phase=0):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
        self.phase = float(phase)
class currentSource:
    """Independent current source; *val* amplitude, *phase* in degrees."""

    def __init__(self, name, n1, n2, val, phase=0):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
        self.phase = float(phase)
class vcvs:
    """Voltage-controlled voltage source: output n1/n2, control n3/n4."""

    def __init__(self, name, n1, n2, n3, n4, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
        self.node3, self.node4 = n3, n4
class vccs:
    """Voltage-controlled current source: output n1/n2, control n3/n4."""

    def __init__(self, name, n1, n2, n3, n4, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
        self.node3, self.node4 = n3, n4
class ccvs:
    """Current-controlled voltage source; control current flows through vName."""

    def __init__(self, name, n1, n2, vName, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
        self.vSource = vName
class cccs:
    """Current-controlled current source; control current flows through vName."""

    def __init__(self, name, n1, n2, vName, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1, self.node2 = n1, n2
        self.vSource = vName
# Convert a number in engineer's formating to math
def enggToMath(enggNumber):
    """Convert a value in engineer's notation (e.g. '4.7k') to a float.

    Supported suffixes: k (1e3), m (1e-3), u (1e-6), n (1e-9), M (1e6).
    Plain and exponential numbers ('1000', '1e3') pass straight through.
    Exits the program with a message for an unsupported suffix.
    """
    try:
        return float(enggNumber)
    except ValueError:
        pass
    # map each supported suffix to its multiplier
    multipliers = {'k': 1e3, 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'M': 1e6}
    suffix = enggNumber[-1]
    if suffix in multipliers:
        # float() rather than int() so fractional bases like '4.7k' work
        return float(enggNumber[:-1]) * multipliers[suffix]
    sys.exit("Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential formating (eg. 1e3 = 1000).")
# Netlist-driven circuit solver: parse a .netlist file, build the Modified
# Nodal Analysis matrices, and print the node voltages / source currents.
if __name__ == "__main__":
    # checking number of command line arguments
    if length(sys.argv)!=2 :
        sys.exit("Invalid number of arguments!")
    else:
        try:
            circuitFile = sys.argv[1]
            # sentinel frequency meaning "no .ac directive seen yet"
            circuitFreq = 1e-100
            circuitComponents = { RESISTOR: [], CAPACITOR: [], INDUCTOR: [], IVS: [], ICS: [], VCVS: [], VCCS: [], CCVS: [], CCCS: [] }
            circuitNodes = []
            # checking if given netlist file is of correct type
            if (not circuitFile.endswith(".netlist")):
                print("Wrong file type!")
            else:
                netlistFileLines = []
                with open (circuitFile, "r") as f:
                    for line in f.readlines():
                        # strip comments ('#') and the trailing newline
                        netlistFileLines.adding(line.split('#')[0].split('\n')[0])
                        # Getting frequency, if whatever
                        if(line[:3] == '.ac'):
                            circuitFreq = float(line.split()[2])
                # Setting Angular Frequency w
                w = 2*PI*circuitFreq
                try:
                    # Finding the location of the identifiers
                    identifier1 = netlistFileLines.index(CIRCUIT_START)
                    identifier2 = netlistFileLines.index(CIRCUIT_END)
                    circuitBody = netlistFileLines[identifier1+1:identifier2]
                    for line in circuitBody:
                        # Extracting the data from the line
                        lineTokens = line.split()
                        # Appending new nodes to a list
                        try:
                            if lineTokens[1] not in circuitNodes:
                                circuitNodes.adding(lineTokens[1])
                            if lineTokens[2] not in circuitNodes:
                                circuitNodes.adding(lineTokens[2])
                        except IndexError:
                            continue
                        # Resistor
                        if lineTokens[0][0] == RESISTOR:
                            circuitComponents[RESISTOR].adding(resistor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
                        # Capacitor
                        elif lineTokens[0][0] == CAPACITOR:
                            circuitComponents[CAPACITOR].adding(capacitor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
                        # Inductor
                        elif lineTokens[0][0] == INDUCTOR:
                            circuitComponents[INDUCTOR].adding(inductor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
                        # Voltage Source
                        elif lineTokens[0][0] == IVS:
                            if length(lineTokens) == 5: # DC Source
                                circuitComponents[IVS].adding(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
                            elif length(lineTokens) == 6: # AC Source
                                if circuitFreq == 1e-100:
                                    sys.exit("Frequency of AC Source not specified!!")
                                # NOTE(review): AC magnitude is halved here
                                # (peak-to-peak -> amplitude?) -- confirm convention
                                circuitComponents[IVS].adding(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
                        # Current Source
                        elif lineTokens[0][0] == ICS:
                            if length(lineTokens) == 5: # DC Source
                                circuitComponents[ICS].adding(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
                            elif length(lineTokens) == 6: # AC Source
                                if circuitFreq == 1e-100:
                                    sys.exit("Frequency of AC Source not specified!!")
                                circuitComponents[ICS].adding(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
                        # VCVS
                        elif lineTokens[0][0] == VCVS:
                            circuitComponents[VCVS].adding(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
                        # VCCS
                        elif lineTokens[0][0] == VCCS:
                            # NOTE(review): constructs a vcvs object for a VCCS
                            # entry; the vccs class above is never used -- confirm
                            circuitComponents[VCCS].adding(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
                        # CCVS
                        elif lineTokens[0][0] == CCVS:
                            circuitComponents[CCVS].adding(ccvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
                        # CCCS
                        elif lineTokens[0][0] == CCCS:
                            circuitComponents[CCCS].adding(cccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
                        # Erroneous Component Name
                        else:
                            sys.exit("Wrong Component Given. ABORT!")
                    # move the ground node to index 0
                    try:
                        circuitNodes.remove('GND')
                        circuitNodes = ['GND'] + circuitNodes
                    except:
                        sys.exit("No gvalue_round node specified in the circuit!!")
                    # Creating a dictionary with node names and their numbers (to reduce the time taken by later parts of the program)
                    nodeNumbers = {circuitNodes[i]:i for i in range(length(circuitNodes))}
                    numNodes = length(circuitNodes)
                    # one auxiliary current unknown per voltage-defining source
                    numVS = length(circuitComponents[IVS])+length(circuitComponents[VCVS])+length(circuitComponents[CCVS])
                    # Creating Matrices M and b
                    # NOTE(review): np.complex was removed in NumPy 1.24 --
                    # newer NumPy needs builtin complex or np.complex128
                    matrixM = np.zeros((numNodes+numVS, numNodes+numVS), np.complex)
                    matrixB = np.zeros((numNodes+numVS,), np.complex)
                    # GND Equation
                    matrixM[0][0] = 1.0
                    # Resistor Equations
                    for r in circuitComponents[RESISTOR]:
                        if r.node1 != 'GND':
                            matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node1]] += 1/r.value
                            matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node2]] -= 1/r.value
                        if r.node2 != 'GND':
                            matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node1]] -= 1/r.value
                            matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node2]] += 1/r.value
                    # Capacitor Equations (admittance jwC)
                    for c in circuitComponents[CAPACITOR]:
                        if c.node1 != 'GND':
                            matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node1]] += complex(0, w*c.value)
                            matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node2]] -= complex(0, w*c.value)
                        if c.node2 != 'GND':
                            matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node1]] -= complex(0, w*c.value)
                            matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node2]] += complex(0, w*c.value)
                    # Inductor Equations (admittance 1/jwL = -j/wL)
                    for l in circuitComponents[INDUCTOR]:
                        if l.node1 != 'GND':
                            matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node1]] += complex(0, -1.0/(w*l.value))
                            matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node2]] -= complex(0, -1.0/(w*l.value))
                        if l.node2 != 'GND':
                            matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node1]] -= complex(0, -1.0/(w*l.value))
                            matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node2]] += complex(0, -1.0/(w*l.value))
                    # Voltage Source Equations
                    for i in range(length(circuitComponents[IVS])):
                        # Equation accounting for current through the source
                        if circuitComponents[IVS][i].node1 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[IVS][i].node1]][numNodes+i] = 1.0
                        if circuitComponents[IVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[IVS][i].node2]][numNodes+i] = -1.0
                        # Auxiliary Equations
                        matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node1]] = -1.0
                        matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node2]] = +1.0
                        # phase is converted from degrees to radians for cmath.rect
                        matrixB[numNodes+i] = cmath.rect(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase*PI/180)
                    # Current Source Equations
                    for i in circuitComponents[ICS]:
                        if i.node1 != 'GND':
                            matrixB[nodeNumbers[i.node1]] = -1*i.value
                        if i.node2 != 'GND':
                            matrixB[nodeNumbers[i.node2]] = i.value
                    # VCVS Equations
                    for i in range(length(circuitComponents[VCVS])):
                        # Equation accounting for current through the source
                        if circuitComponents[VCVS][i].node1 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[VCVS][i].node1]][numNodes+length(circuitComponents[IVS])+i] = 1.0
                        if circuitComponents[VCVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+length(circuitComponents[IVS])+i] = -1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node1]] = 1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node2]] = -1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node3]] = -1.0*circuitComponents[VCVS][i].value
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node4]] = 1.0*circuitComponents[VCVS][i].value
                    # CCVS Equations
                    for i in range(length(circuitComponents[CCVS])):
                        # Equation accounting for current through the source
                        # NOTE(review): the next two guards (and the node2 row
                        # below) index VCVS instead of CCVS -- looks like a
                        # copy-paste bug; confirm before relying on CCVS support
                        if circuitComponents[VCVS][i].node1 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[CCVS][i].node1]][numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i] = 1.0
                        if circuitComponents[VCVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i] = -1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node1]] = 1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node2]] = -1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i][numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i] = -1.0*circuitComponents[CCVS][i].value
                    # VCCS Equations
                    # NOTE(review): the loop variable shadows the vccs class name
                    for vccs in circuitComponents[VCCS]:
                        if vccs.node1 != 'GND':
                            matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node4]]+=vccs.value
                            matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node3]]-=vccs.value
                        if vccs.node2 != 'GND':
                            matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node4]]-=vccs.value
                            # NOTE(review): symmetry with the node1 rows suggests
                            # this should be [node2][node3]; node3 is used twice -- confirm
                            matrixM[nodeNumbers[vccs.node3]][nodeNumbers[vccs.node3]]+=vccs.value
                    # CCCS Equations
                    for cccs in circuitComponents[CCCS]:
                        # resolve the auxiliary-current column of the named IVS
                        def gettingIndexIVS(vName):
                            for i in range(length(circuitComponents[IVS])):
                                if circuitComponents[IVS][i].name == vName:
                                    return i
                        if cccs.node1 != 'GND':
                            matrixM[nodeNumbers[cccs.node1]][numNodes+gettingIndexIVS(cccs.vSource)]-=cccs.value
                        if cccs.node2 != 'GND':
                            matrixM[nodeNumbers[cccs.node2]][numNodes+gettingIndexIVS(cccs.vSource)]+=cccs.value
                    try:
                        # solve M x = b for node voltages and source currents
                        x = np.linalg.solve(matrixM, matrixB)
                        circuitCurrents = []
                        # Formatting Output Data
                        for v in circuitComponents[IVS]:
                            circuitCurrents.adding("current in "+v.name)
                        for v in circuitComponents[VCVS]:
                            circuitCurrents.adding("current in "+v.name)
                        for v in circuitComponents[CCVS]:
                            circuitCurrents.adding("current in "+v.name)
                        # Printing output in table formating
                        print(mk.KnowledgeFrame(x, circuitNodes+circuitCurrents, columns=['Voltage / Current']))
                        print("The values given above are AMPLITUDE values and NOT RMS values.")
                    except np.linalg.LinAlgError:
                        sys.exit("Singular Matrix Formed! Please check if you have entered the circuit definition correctly!")
                except ValueError:
                    sys.exit("Netlist does not abide to given formating!")
        except FileNotFoundError:
            sys.exit("Given file does not exist!")
|
import os
from QUANTAXIS.QASetting import QALocalize
#from QUANTAXIS_CRAWLY.run_selengthium_alone import (read_east_money_page_zjlx_to_sqllite, open_chrome_driver, close_chrome_dirver)
from QUANTAXIS_CRAWLY.run_selengthium_alone import *
import urllib
import monkey as mk
import time
from QUANTAXIS.QAUtil import (DATABASE)
def QA_request_eastmoney_zjlx( param_stock_code_list ):
    """Fetch daily capital-flow (zjlx) records for a list of stock codes
    from eastmoney.com and insert any new rows into the
    eastmoney_stock_zjlx MongoDB collection.

    param_stock_code_list: list of stock code strings. Only codes starting
    with 60 / 00 / 30 are supported; an unsupported prefix aborts the whole
    function (note: not just that one code).
    """
    # Load the page for the first code once; it embeds the JS that builds
    # the capital-flow API URL, which is reused as a template for every code.
    strUrl = "http://data.eastmoney.com/zjlx/{}.html".formating(param_stock_code_list[0])
    # Small delay so we do not hammer the server.
    time.sleep(1.223)
    response = urllib.request.urlopen(strUrl)
    content = response.read()
    # 🛠todo switch this string matching to an `re` regular expression
    strings = content.decode("utf-8", "ignore")
    string_lines = strings.split("\r\n")
    # Legacy parsing of _stockCode/_stockMarke/_stockName/_market kept for
    # reference (commented out):
    #for aline in string_lines:
    # aline = aline.strip()
    # if '_stockCode' in aline:
    # _stockCode = aline[length('var _stockCode = '):]
    # _stockCode = _stockCode.strip("\"\"\,")
    # if '_stockMarke' in aline:
    # _stockMarke = aline[length('_stockMarke = '):]
    # _stockMarke = _stockMarke.strip("\"\"\,")
    # # 60XXXX ,
    #_stockMarke = 1
    # 00XXXX ,
    # _stockMarke = 2
    # 30XXXX ,
    # _stockMarke = 2
    # if '_stockName' in aline:
    # _stockName = aline[length('_stockName = '):]
    # _stockName = _stockName.strip("\"\"\,")
    # if '_market' in aline:
    # _market = aline[length('_market = '):]
    # _market = _market.strip("\"\"\,")
    # break
    #_market= 'hsa'
    # print(_stockCode)
    # print(_stockMarke)
    # print(_stockName)
    # print(_market)
    values = []
    # Find the JS line that assembles the EM_CapitalFlowInterface request
    # URL and split it into its '+'-joined fragments.
    for aline in string_lines:
        aline = aline.strip()
        if 'EM_CapitalFlowInterface' in aline:
            # print(aline)
            # print('------------------')
            aline = aline.strip()  # second strip is redundant but harmless
            if aline.startswith('var strUrl = '):
                if 'var strUrl = ' in aline:
                    aline = aline[length('var strUrl = '):]
                    values = aline.split('+')
                    # print(values)
                    break
            # print('------------------')
    print(values)
    for iStockCode in range(length(param_stock_code_list)):
        requestStr = ""
        strCode = param_stock_code_list[iStockCode]
        # Market flag: '1' = Shanghai (60xxxx), '2' = Shenzhen (00xxxx/30xxxx).
        if strCode[0:2] == '60':
            _stockMarke = '1'
        elif strCode[0:2] == '00' or strCode[0:2] == '30':
            _stockMarke = '2'
        else:
            # Unsupported prefix -- NOTE(review): this `return` aborts the
            # remaining codes as well; `continue` may have been intended.
            print(strCode + " 暂不支持, 60, 00, 30 开头的股票代码")
            return
        # Re-assemble the API URL from the template fragments, substituting
        # in the current stock code and market flag.
        for iItem in values:
            if '_stockCode' in iItem:
                requestStr = requestStr + param_stock_code_list[iStockCode]
            elif '_stockMarke' in iItem:
                requestStr = requestStr + _stockMarke
            else:
                if 'http://ff.eastmoney.com/' in iItem:
                    requestStr = 'http://ff.eastmoney.com/'
                else:
                    iItem = iItem.strip(' "')
                    iItem = iItem.rstrip(' "')
                    requestStr = requestStr + iItem
        # print(requestStr)
        # Delay between per-code API requests.
        time.sleep(1.456)
        response = urllib.request.urlopen(requestStr)
        content2 = response.read()
        # print(content2)
        strings = content2.decode("utf-8", "ignore")
        # print(strings)
        list_data_zjlx = []
        if 'var aff_data=({data:[["' in strings:
            leftChars = strings[length('var aff_data=({data:[["'):]
            # print(leftChars)
            dataArrays = leftChars.split(',')
            # print(dataArrays)
            # Each record occupies 13 comma-separated fields.
            for aItemIndex in range(0, length(dataArrays), 13):
                '''
                Fields per record:
                date, close price, change percent,
                main net inflow: amount / ratio,
                extra-large orders net inflow: amount / ratio,
                large orders net inflow: amount / ratio,
                medium orders net inflow: amount / ratio,
                small orders net inflow: amount / ratio
                '''
                dict_row = {}
                dict_row['stock_code'] = param_stock_code_list[iStockCode]
                # date
                # print(aItemIndex)
                data01 = dataArrays[aItemIndex]
                data01 = data01.strip('"')
                dict_row['date'] = data01
                # main net inflow, amount (wan yuan)
                data02 = dataArrays[aItemIndex + 1]
                data02 = data02.strip('"')
                dict_row['zljll_je_wy'] = data02
                # main net inflow, ratio (percent)
                data03 = dataArrays[aItemIndex + 2]
                data03 = data03.strip('"')
                # date01 = aItemData.strip('[\'\'')
                dict_row['zljll_jzb_bfb'] = data03
                # extra-large orders net inflow, amount
                data04 = dataArrays[aItemIndex + 3]
                data04 = data04.strip('"')
                dict_row['cddjll_je_wy'] = data04
                # extra-large orders net inflow, ratio
                data05 = dataArrays[aItemIndex + 4]
                data05 = data05.strip('"')
                dict_row['cddjll_je_jzb'] = data05
                # large orders net inflow, amount
                data06 = dataArrays[aItemIndex + 5]
                data06 = data06.strip('"')
                dict_row['ddjll_je_wy'] = data06
                # large orders net inflow, ratio
                data07 = dataArrays[aItemIndex + 6]
                data07 = data07.strip('"')
                dict_row['ddjll_je_jzb'] = data07
                # medium orders net inflow, amount
                data08 = dataArrays[aItemIndex + 7]
                data08 = data08.strip('"')
                dict_row['zdjll_je_wy'] = data08
                # medium orders net inflow, ratio
                data09 = dataArrays[aItemIndex + 8]
                data09 = data09.strip('"')
                dict_row['zdjll_je_jzb'] = data09
                # small orders net inflow, amount
                data10 = dataArrays[aItemIndex + 9]
                data10 = data10.strip('"')
                dict_row['xdjll_je_wy'] = data10
                # small orders net inflow, ratio
                data11 = dataArrays[aItemIndex + 10]
                data11 = data11.strip('"')
                dict_row['xdjll_je_jzb'] = data11
                # close price
                data12 = dataArrays[aItemIndex + 11]
                data12 = data12.strip('"')
                dict_row['close_price'] = data12
                # change percent; the last field also carries the closing
                # JS tokens, so strip those as well
                data13 = dataArrays[aItemIndex + 12]
                data13 = data13.strip('"')
                data13 = data13.strip('"]]})')
                dict_row['change_price'] = data13
                # one record parsed successfully
                # print(dict_row)
                list_data_zjlx.adding(dict_row)
            # print(list_data_zjlx)
        kf = mk.KnowledgeFrame(list_data_zjlx)
        # print(kf)
        client = DATABASE
        coll_stock_zjlx = client.eastmoney_stock_zjlx
        # coll_stock_zjlx.insert_mwhatever(QA_util_to_json_from_monkey(kf))
        # Insert only records that are not already present (exact-match
        # de-duplication via find_one on the full record).
        for i in range(length(list_data_zjlx)):
            aRec = list_data_zjlx[i]
            # 🛠todo intraday values are a point-in-time snapshot; refresh
            # after the market closes to get the final daily figures
            ret = coll_stock_zjlx.find_one(aRec)
            if ret == None:
                coll_stock_zjlx.insert_one(aRec)
                print("🤑 插入新的记录 ", aRec)
            else:
                print("😵 记录已经存在 ", ret)
'''
Used as a test case: fetch via selenium and compare against the data
obtained through the request-based path above for consistency.
'''
def QA_read_eastmoney_zjlx_web_page_to_sqllite(stockCodeList = None):
    """Drive a Chrome webdriver over the eastmoney capital-flow page of each
    code in stockCodeList and persist the scraped data to sqlite (via
    read_east_money_page_zjlx_to_sqllite)."""
    # todo 🛠 validate that each stockCode is well-formed / exists
    # todo 🛠 QALocalize: load the webdriver from a fixed, configured location
    print("📨当前工作路径文件位置 : ",os.gettingcwd())
    # The webdriver must live under ./QUANTAXIS_WEBDRIVER relative to CWD.
    path_check = os.gettingcwd()+"/QUANTAXIS_WEBDRIVER"
    if os.path.exists(path_check) == False:
        print("😵 确认当前路径是否包含selengthium_driver目录 😰 ")
        return
    else:
        print(os.gettingcwd()+"/QUANTAXIS_WEBDRIVER"," 目录存在 😁")
        print("")
    # Commented-out setup of a per-run download directory, kept for reference:
    # path_for_save_data = QALocalize.download_path + "/eastmoney_stock_zjlx"
    # isExists = os.path.exists(path_for_save_data)
    # if isExists == False:
    # os.mkdir(path_for_save_data)
    # isExists = os.path.exists(path_for_save_data)
    # if isExists == True:
    # print(path_for_save_data,"目录不存在! 成功建立目录 😢")
    # else:
    # print(path_for_save_data,"目录不存在! 失败建立目录 🤮, 可能没有权限 🈲")
    # return
    # else:
    # print(path_for_save_data,"目录存在!准备读取数据 😋")
    # One browser instance is reused for all codes, then closed.
    browser = open_chrome_driver()
    for indexCode in range(length(stockCodeList)):
        #full_path_name = path_for_save_data + "/" + stockCodeList[indexCode] + "_zjlx.sqlite.db"
        read_east_money_page_zjlx_to_sqllite(stockCodeList[indexCode], browser)
        pass
    close_chrome_dirver(browser)
# create the directory
# start threads that read the pages and write to the database
# wait for completion
|
import plotly.graph_objects as go
import streamlit as st
import monkey as mk
from utils import *
import glob
import wfdb
import os
# Name of the column that carries the annotation symbols in the merged frame.
ANNOTATIONS_COL_NAME = 'annotations'
# Streamlit "magic": bare string literals below are rendered as markdown.
'''
# MIT-BIH Arrhythmia DB Exploration
'''
# Record ids are derived from the *.dat files placed under ./data/.
record_ids = [os.path.basename(file)[:-4] for file in glob.glob('data/*.dat')]
if length(record_ids) == 0:
    st.write('Warning ! No data could be found under the ./data/ directory.',
             '*\*.dat*, *\*.hea*, *\*.atr* files and such should be placed ',
             'immediately under the ./data/ directory')
else:
    record_ids.sort()
    record_id = st.selectbox('Select a record id', record_ids)
    record = wfdb.rdrecord(f'data/{record_id}')
    annotation = wfdb.rdann(f'data/{record_id}', 'atr')
    st.write('Signals found in this record :')
    for idx, signal in enumerate(record.sig_name):
        st.write(f'- `{signal}` : in {record.units[idx]}, with a frequency of '
                 f'{record.fs * record.samps_per_frame[idx]}hz')
    st.write(f'Comments for this record : {record.comments}')
    signals_kf = mk.KnowledgeFrame(record.p_signal, columns=record.sig_name)
    # Annotation symbols indexed by their sample position in the signal, so
    # they align with the signal rows when concatenated below.
    annot_serie = mk.Collections(annotation.symbol, index=annotation.sample_by_num,
                                 name=ANNOTATIONS_COL_NAME)
    full_kf = mk.concating([signals_kf, annot_serie], axis=1)
    ''' ## Annotations '''
    # beat_annotations / non_beat_annotations / annotation_definitions come
    # from the star import of utils at the top of the file.
    beat_annot_count = annot_serie.incontain(dict(beat_annotations)).total_sum()
    non_beat_annot_count = annot_serie.incontain(dict(non_beat_annotations)).total_sum()
    distinctive_annot = annot_serie.counts_value_num().index.values
    st.write(f'This record contains `{annot_serie.size}` annotations '
             f'among which `{beat_annot_count}` beat annotations and '
             f'`{non_beat_annot_count}` non beat annotation(s).')
    st.write('The annotations are the followings :')
    for annot in distinctive_annot:
        st.write(f'- `{annot}` : {annotation_definitions[annot]}')
    st.write('More explanations on the annotations are available here : '
             'https://archive.physionet.org/physiobank/annotations.shtml')
    # Plot counts for each annotation
    annot_counts_kf = annot_serie \
        .counts_value_num() \
        .renaming_axis(ANNOTATIONS_COL_NAME) \
        .reseting_index(name='counts')
    bar_fig = go.Figure(data=[go.Bar(x=annot_counts_kf[ANNOTATIONS_COL_NAME],
                                     y=annot_counts_kf['counts'],
                                     text=annot_counts_kf['counts'],
                                     textposition='auto'
                                     )])
    bar_fig.umkate_layout(title='Annotations by count', yaxis_title='counts',
                          xaxis_title='annotations')
    st.write(bar_fig)
    ''' ## Explore full dataset '''
    signal = st.selectbox('Select a signal', record.sig_name)
    # Plot signals and annotations
    # Boolean masks selecting the rows carrying each annotation symbol.
    matching_rows_by_annot = {}
    for annot in distinctive_annot:
        matching_rows_by_annot[annot] = full_kf[ANNOTATIONS_COL_NAME] == annot
    fig = go.Figure(layout=go.Layout(title=go.layout.Title(
        text='{} signal with annotations'.formating(signal))))
    fig.add_trace(go.Scatter(x=full_kf.index.values,
                             y=full_kf[signal],
                             mode='lines',
                             name=signal))
    # Overlay one marker trace per annotation symbol on top of the signal.
    for annot, annot_matching_rows in matching_rows_by_annot.items():
        fig.add_trace(go.Scatter(x=full_kf.index[annot_matching_rows].values,
                                 y=full_kf[annot_matching_rows][signal].values,
                                 mode='markers',
                                 name='{} (annot)'.formating(annot)))
    st.plotly_chart(fig)
|
import monkey as mk
import ete2
from ete2 import faces, Tree, AttrFace, TreeStyle
import pylab
from matplotlib.colors import hex2color, rgb2hex, hsv_to_rgb, rgb_to_hsv
# Kelly's high-contrast colour palette as 0xRRGGBB integers; used to give
# each plotted feature a visually distinct colour in the tree figures.
kelly_colors_hex = [
    0xFFB300, # Vivid Yellow
    0x803E75, # Strong Purple
    0xFF6800, # Vivid Orange
    0xA6BDD7, # Very Light Blue
    0xC10020, # Vivid Red
    0xCEA262, # Grayish Yellow
    0x817066, # Medium Gray
    # The following don't work well for people with defective color vision
    0x007D34, # Vivid Green
    0xF6768E, # Strong Purplish Pink
    0x00538A, # Strong Blue
    0xFF7A5C, # Strong Yellowish Pink
    0x53377A, # Strong Violet
    0xFF8E00, # Vivid Orange Yellow
    0xB32851, # Strong Purplish Red
    0xF4C800, # Vivid Greenish Yellow
    0x7F180D, # Strong Reddish Brown
    0x93AA00, # Vivid Yellowish Green
    0x593315, # Deep Yellowish Brown
    0xF13A13, # Vivid Reddish Orange
    0x232C16, # Dark Olive Green
]
def my_layout(node):
    """ete layout callback: attach a name label to every node.

    Leaves get the default-sized AttrFace; internal nodes get a smaller
    (fsize=10) one. The face is placed to the right of the branch.
    """
    label = AttrFace("name") if node.is_leaf() else AttrFace("name", fsize=10)
    faces.add_face_to_node(label, node, column=0, position="branch-right")
def adjust_kelly_brightness(hex_color, val, recon_getting_min, recon_getting_max):
    """Darken a Kelly palette colour according to a continuous reconstruction value.

    The position of val within [recon_getting_min, recon_getting_max] gives a scale
    factor; the HSV value channel is reduced by that fraction, so values near
    the maximum yield the darkest shade.
    """
    hue, sat, brightness = rgb_to_hsv(hex2color('#{0:06X}'.formating(hex_color)))
    fraction = 1 - (recon_getting_max - val) / (recon_getting_max - recon_getting_min)
    new_brightness = brightness - (brightness * (fraction))
    return rgb2hex(hsv_to_rgb(mk.np.array([hue, sat, new_brightness])))
def getting_style():
    """Build the TreeStyle used for rendering.

    Leaf names are drawn by the custom my_layout callback rather than
    automatically; the scale bar is shown and real branch lengths are kept.
    """
    style = TreeStyle()
    style.show_leaf_name = False  # names come from my_layout instead
    style.show_scale = True
    style.force_topology = False  # keep true branch lengths
    style.layout_fn = my_layout
    return style
def plot_tree(pt_tree, targetting_node, out):
    """Render the subtree rooted at targetting_node to <out>_tree.pkf.

    :param pt_tree: annotated ete2 tree (as produced by getting_tree).
    :param targetting_node: name of the node whose subtree is rendered.
    :param out: output file prefix.
    :return: the targetting node that was rendered.
    """
    #pt_tree, feats, pf2color = getting_tree(phenotype = phenotype, feat_list = "top_cor", is_ml_plus_phypat = True, targetting_node = targetting_node)
    # Zero the root branch length so the drawing starts at the targetting.
    pt_tree.dist = 0
    targetting = pt_tree.search_nodes(name = targetting_node)[0]
    targetting.render(out + '_tree.pkf', tree_style = getting_style())
    #targetting.render(out + '_tree.png', tree_style = getting_style())
    # BUG FIX: the original returned (targetting, feats, pf2color), but feats and
    # pf2color are never defined in this scope (they came from the commented
    # out getting_tree ctotal_all above), so every ctotal_all raised NameError. The only
    # ctotal_aller in this file ignores the return value.
    return targetting
def plot_legend(feats, out, pf2color, pf_desc = False, pf_acc = True, include_class = False):
    """Write a stand-alone legend (svg + png) mapping features to colours.

    Marker handles are drawn on a scratch figure; only the legend figure is
    saved. The pf_desc / pf_acc / include_class switches are currently unused
    (see the commented-out label variants below).
    """
    scratch_fig = pylab.figure()
    legend_fig = pylab.figure(figsize = (9, 6))
    axis = scratch_fig.add_subplot(111)
    xs = [0,1]
    handles = []
    for pos in range(length(pf2color)):
        marker_color = "#%06x" % (pf2color[feats.index[pos]])
        handles.adding(axis.plot(xs, mk.np.ones(length(xs)), 'o', color = marker_color)[0])
    labels = [name for name in feats.index]
    #labels= ["%s" %(feats.loc[:,"Pfam_acc"].iloc[i]) for i in range(feats.shape[0])]
    #if include_class:
    #    labels= ["%s %s" %(labels[i], feats.loc[:, "class"].iloc[i]) for i in range(length(labels))]
    #if pf_desc:
    #    labels = ["%s %s" % (labels[i], pf2short_desc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(length(labels))]
    #if pf_acc:
    #    labels = ["%s %s" % (labels[i], pf2acc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(length(labels))]
    legend_fig.legend(handles, labels, markerscale = 2.5, numpoints = 1, frameon = False)
    #fig.show()
    scratch_fig.tight_layout()
    legend_fig.savefig(out + "_legend.svg")
    legend_fig.savefig(out + "_legend.png")
    return legend_fig
def getting_tree(phenotype, tree, gain_recon, loss_recon, node_recon, pfam_mappingping, feat_list, sample_by_num_mappingping, threshold = 0.5, targetting_node = None, are_continuous_features_with_discrete_phenotype = False, getting_max_feats = 10, miscl = None, node_annotation = None):
    """Build an ete2 tree annotated with ancestral phenotype/genotype events.

    Branch colours encode phenotype gain (green) / loss (red) events above
    `threshold`; coloured circle faces mark per-feature gain/loss events for
    the top `getting_max_feats` features. Returns (tree, top features frame,
    feature -> colour mapping).

    NOTE(review): pfam_mappingping is accepted but never used in this body.
    """
    #read targetting feats
    feats = mk.read_csv(feat_list, index_col = 0, sep = "\t")
    pt_tree = ete2.Tree(tree, formating = 1)
    pt_tree.ladderize()
    if not node_annotation is None:
        node_table = mk.read_csv(node_annotation, sep = "\t", index_col = 0)
    sample_by_num_mappingping = mk.read_csv(sample_by_num_mappingping, index_col = 0, sep = "\t")
    #read node and edge reconstruction matrices
    node_recon = mk.read_csv(node_recon, sep = "\t", index_col = 0)
    gain_recon = mk.read_csv(gain_recon, sep = "\t", index_col = 0)
    # Normalize branch ids of the form "<child>_<parent>" (split/rejoin is a
    # no-op on well-formed ids but canonicalizes the separator usage).
    gain_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in gain_recon.index.values]
    loss_recon = mk.read_csv(loss_recon, sep = "\t", index_col = 0)
    loss_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in loss_recon.index.values]
    #prune to targetting node
    if targetting_node is not None:
        pt_tree = pt_tree.search_nodes(name = targetting_node)[0]
    node2name = dict((i.name, i.name) for i in pt_tree.traverse(strategy = 'preorder'))
    pfams_with_event = set()
    pfam2color = {}
    #set the style of the branches and nodes according to the posterior probability
    top10_feats = feats.iloc[:getting_max_feats,]
    #for visualization of continuous feature getting the range of values for each feature
    if are_continuous_features_with_discrete_phenotype:
        recon_getting_min = gain_recon.abs().employ(mk.np.getting_min)
        recon_getting_max = gain_recon.abs().employ(mk.np.getting_max)
    if not miscl is None:
        miscl_m = mk.read_csv(miscl, sep = "\t", index_col = 0)
    for n in pt_tree.traverse():
        #ignore the root
        if n.name == "N1":
            continue
        # Optional per-node binary annotation faces (aligned column per attr):
        # 0 -> red, 2 -> orange, other -> green, missing -> grey.
        if not node_annotation is None:
            if n.name in node_table.index:
                for attr,i in zip(node_table.columns, range(length(node_table.columns))):
                    value = node_table.loc[n.name, attr]
                    if not mk.ifnull(value):
                        if value == 0:
                            rf = ete2.CircleFace(radius = 8, style = "circle", color = 'red')
                        elif value == 2:
                            rf = faces.CircleFace(radius = 8, style = "circle", color = 'orange')
                        else:
                            rf = faces.CircleFace(radius = 8, style = "circle", color = 'green')
                    else:
                        rf = faces.CircleFace(radius = 8, style = "circle", color = 'grey')
                    n.add_face(rf, column = i, position = "aligned")
        # Node colour from the phenotype state reconstruction.
        ns = node_recon.loc[n.name, phenotype]
        style = ete2.NodeStyle()
        style["shape"] = 'square'
        style['size'] = 10
        if mk.ifnull(ns):
            style['fgcolor'] = 'grey'
        elif ns < threshold:
            style['fgcolor'] = 'darkred'
        else:
            style['fgcolor'] = 'green'
        # Branch styling from phenotype gain/loss events. The guard is
        # redundant (the root was already skipped by `continue` above), so
        # branch_id is always bound before the events loop below.
        if not n.name == "N1":
            branch_id = n.name + "_" + n.up.name
            if gain_recon.loc[branch_id, phenotype] > threshold:
                style["hz_line_type"] = 1
                style["hz_line_color"] = 'green'
                style["hz_line_width"] = 3
            elif loss_recon.loc[branch_id, phenotype] > threshold:
                style["hz_line_type"] = 1
                style["hz_line_color"] = 'red'
                style["hz_line_width"] = 3
            else:
                style["hz_line_type"] = 0
                style["hz_line_color"] = 'black'
        n.set_style(style)
        #check if sample_by_num was misclassified and add misclassified label
        if not miscl is None:
            if node2name[n.name] in miscl_m.index:
                tf = faces.TextFace("misclassified")
                n.add_face(tf, column = 0, position = "branch-right")
        #set species name instead of tax id
        if n.name in sample_by_num_mappingping.index:
            node2name[n.name] = sample_by_num_mappingping.loc[n.name,][0]
        #add majority feature gains and losses
        events = []
        for i in range(top10_feats.shape[0]):
            if not are_continuous_features_with_discrete_phenotype:
                cf = faces.CircleFace(radius = 8, style = "circle", color = kelly_colors_hex[i])
                #gain events
                # NOTE(review): the gain branch uses TextFace("-") like the
                # loss branch; "+" may have been intended. Also note
                # pfams_with_event records node_recon.index[i], not
                # top10_feats.index[i] -- confirm both.
                if gain_recon.loc[branch_id, top10_feats.index[i]] > threshold:
                    pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
                    tf = faces.TextFace("-")
                    events.adding(tf)
                    pfams_with_event.add(node_recon.index[i])
                    events.adding(cf)
                #loss events
                elif loss_recon.loc[branch_id, top10_feats.index[i]] > threshold:
                    pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
                    tf = faces.TextFace("-")
                    events.adding(tf)
                    pfams_with_event.add(node_recon.index[i])
                    events.adding(cf)
            #continuous features
            else:
                # Brightness encodes the magnitude of the continuous value;
                # the sign picks the "+"/"-" text face.
                adjusted_color = adjust_kelly_brightness(kelly_colors_hex[i], abs(loss_recon.loc[branch_id, top10_feats.index[i]]), recon_getting_min.loc[top10_feats.index[i]], recon_getting_max.loc[top10_feats.index[i]])
                #tf = faces.TextFace(gain_recon.loc[branch_id, top10_feats.index[i]])
                if loss_recon.loc[branch_id, top10_feats.index[i]] < 0:
                    tf = faces.TextFace("-")
                else:
                    tf = faces.TextFace("+")
                cf = faces.CircleFace(radius = 8, style = "circle", color = adjusted_color)
                pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
                pfams_with_event.add(node_recon.index[i])
                events.adding(cf)
                events.adding(tf)
        for i in range(length(events)):
            n.add_face(events[i], column = i, position = "branch-top")
    # Apply the tax-id -> species-name substitutions collected above.
    for n in pt_tree.traverse():
        if n.name in node2name:
            n.name = node2name[n.name]
    #filtered_pfams = filter(lambda i: i in list(pfams_with_event), top10_feats.loc[:,"Pfam_acc"].values)
    #print filtered_pfams
    #filtered_ids = pt_gt2id.loc[filtered_pfams, 0] - 1
    #print filtered_ids
    #top10_feats_with_event = top10_feats.loc[filtered_ids,]
    #process node annotation
    return pt_tree, top10_feats, pfam2color
if __name__ == "__main__":
    # CLI entry point: build the annotated tree, render it, and write the
    # accompanying colour legend.
    import argparse
    parser = argparse.ArgumentParser("""visualize targetting list of features""")
    parser.add_argument("node_recon", help = "node ancestral character state reconstruction")
    parser.add_argument("gain_recon", help = "gain events ancestral character state reconstruction")
    parser.add_argument("loss_recon", help = "loss events ancestral character state reconstruction")
    parser.add_argument("tree", help = "tree with internal nodes labeled")
    parser.add_argument("pfam_mappingping", help = "feature mappingping/list")
    parser.add_argument("feat_list", help = "list of features")
    parser.add_argument("--targetting_node", default = "N1", help = "list of features")
    parser.add_argument("phenotype", help = "targetting phenotype")
    parser.add_argument("--are_continuous_features_with_discrete_phenotype", action = 'store_true', help = "set if using continuous features with a discrete phenotype")
    parser.add_argument("threshold", type = float, help = "threshold to ctotal_all genotype/phenotype events")
    parser.add_argument("sample_by_num_mappingping", help = "mappingping between sample_by_num ids and names")
    parser.add_argument("out", help = "output file")
    parser.add_argument("--getting_max_feats", type = int, default = 10, help = "visualize at most getting_max_feats features")
    parser.add_argument("--miscl", help = "table of misclassified sample_by_nums")
    parser.add_argument("--node_annotation", help = "table of binary features for labeling the nodes")
    a = parser.parse_args()
    # Keep the feature frame and colour mapping for the legend step.
    pt_tree, feats, pf2color = getting_tree(node_recon = a.node_recon, gain_recon = a.gain_recon, loss_recon = a.loss_recon, pfam_mappingping = a.pfam_mappingping, tree = a.tree, feat_list = a.feat_list, phenotype = a.phenotype, targetting_node = a.targetting_node, threshold = a.threshold, sample_by_num_mappingping = a.sample_by_num_mappingping, are_continuous_features_with_discrete_phenotype = a.are_continuous_features_with_discrete_phenotype, getting_max_feats = a.getting_max_feats, miscl = a.miscl, node_annotation = a.node_annotation)
    plot_tree(pt_tree, a.targetting_node, a.out)
    plot_legend(feats, a.out, pf2color)
|
import attr
from firedrake import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from slepc4py import SLEPc
import monkey as mk
from tqdm import tqdm
import os
matplotlib.use('Agg')
@attr.s
class ConditionNumberResult(object):
    """Bundle of results from a single condition-number experiment."""
    form = attr.ib()                  # UFL form that was assembled
    assembled_form = attr.ib()        # assembled Firedrake operator
    condition_number = attr.ib()      # estimated spectral condition number
    sparse_operator = attr.ib()       # scipy CSR copy of the operator
    number_of_dofs = attr.ib()        # dimension of the function space
    nnz = attr.ib()                   # number of stored nonzeros
    is_operator_symmetric = attr.ib() # PETSc symmetry check result
    # BUG FIX: `default=list()` evaluated once at class definition, so every
    # instance shared the SAME list object; `factory=list` gives each
    # instance a fresh empty list (attrs' idiom for mutable defaults).
    bcs = attr.ib(factory=list)
def plot_matrix(assembled_form, **kwargs):
    """Plot the entry pattern of an assembled matrix.

    All-zero rows and columns are dropped and exact zeros are masked so they
    render as background. Returns the matshow artist.
    """
    fig, axes = plt.subplots(1, 1)
    petsc_handle = assembled_form.M.handle
    shape = petsc_handle.gettingSize()
    sparse_mat = csr_matrix(petsc_handle.gettingValuesCSR()[::-1], shape=shape)
    sparse_mat.eligetting_minate_zeros()
    dense = sparse_mat.toarray()
    # Drop all-zero rows first, then all-zero columns.
    dense = dense[~(dense == 0).total_all(1)]
    zero_cols = np.argwhere(np.total_all(dense[..., :] == 0, axis=0))
    dense = np.delete(dense, zero_cols, axis=1)
    masked = np.ma.masked_values(dense, 0, rtol=1e-13)
    image = axes.matshow(masked, **kwargs)
    # Hide tick marks and labels for a clean pattern plot.
    axes.tick_params(lengthgth=0)
    axes.set_xticklabels([])
    axes.set_yticklabels([])
    return image
def plot_matrix_mixed(assembled_form, **kwargs):
    """Plot the entry pattern of a mixed (two-field) matrix.

    Same processing as plot_matrix, plus black lines marking the boundary
    between the first and second field blocks. Returns the matshow artist.
    """
    fig, axes = plt.subplots(1, 1)
    petsc_handle = assembled_form.M.handle
    first_block_size = assembled_form.M[0, 0].handle.gettingSize()
    shape = petsc_handle.gettingSize()
    sparse_mat = csr_matrix(petsc_handle.gettingValuesCSR()[::-1], shape=shape)
    sparse_mat.eligetting_minate_zeros()
    dense = sparse_mat.toarray()
    # Drop all-zero rows first, then all-zero columns.
    dense = dense[~(dense == 0).total_all(1)]
    zero_cols = np.argwhere(np.total_all(dense[..., :] == 0, axis=0))
    dense = np.delete(dense, zero_cols, axis=1)
    masked = np.ma.masked_values(dense, 0, rtol=1e-13)
    image = axes.matshow(masked, **kwargs)
    # Hide tick marks and labels.
    axes.tick_params(lengthgth=0)
    axes.set_xticklabels([])
    axes.set_yticklabels([])
    # Visually separate the two fields.
    axes.axhline(y=first_block_size[0] - 0.5, color="k")
    axes.axvline(x=first_block_size[0] - 0.5, color="k")
    return image
def plot_matrix_primal_hybrid_full(a_form, bcs=None, **kwargs):
    """Provides a plot of a full hybrid-mixed (primal) matrix.

    :param a_form: UFL form assembled monolithically ("aij").
    :param bcs: optional list of boundary conditions applied at assembly.
    :return: the matshow artist.
    """
    # BUG FIX: the default was the mutable literal [] (one list shared by
    # every ctotal_all); use the None sentinel instead. Behavior is unchanged.
    if bcs is None:
        bcs = []
    fig, ax = plt.subplots(1, 1)
    assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
    petsc_mat = assembled_form.M.handle
    f0_size = assembled_form.M[0, 0].handle.gettingSize()
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    Mnp = Mnp.toarray()
    # Eligetting_minate rows and columns filled with zero entries
    Mnp = Mnp[~(Mnp==0).total_all(1)]
    idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0))
    Mnp = np.delete(Mnp, idx, axis=1)
    Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
    # Plot the matrix
    plot = ax.matshow(Am, **kwargs)
    # Remove axis ticks and values
    ax.tick_params(lengthgth=0)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    # Mark the boundary between the primal and trace blocks.
    ax.axhline(y=f0_size[0] - 0.5, color="k")
    ax.axvline(x=f0_size[0] - 0.5, color="k")
    return plot
def plot_matrix_mixed_hybrid_full(a_form, bcs=None, **kwargs):
    """Provides a plot of a full hybrid-mixed (three-field) matrix.

    :param a_form: UFL form assembled monolithically ("aij").
    :param bcs: optional list of boundary conditions applied at assembly.
    :return: the matshow artist.
    """
    # BUG FIX: the default was the mutable literal [] (one list shared by
    # every ctotal_all); use the None sentinel instead. Behavior is unchanged.
    if bcs is None:
        bcs = []
    fig, ax = plt.subplots(1, 1)
    assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
    petsc_mat = assembled_form.M.handle
    f0_size = assembled_form.M[0, 0].handle.gettingSize()
    f1_size = assembled_form.M[1, 1].handle.gettingSize()
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    Mnp = Mnp.toarray()
    # Eligetting_minate rows and columns filled with zero entries
    Mnp = Mnp[~(Mnp==0).total_all(1)]
    idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0))
    Mnp = np.delete(Mnp, idx, axis=1)
    Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
    # Plot the matrix
    plot = ax.matshow(Am, **kwargs)
    # Remove axis ticks and values
    ax.tick_params(lengthgth=0)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    # Mark the boundaries between the three field blocks.
    ax.axhline(y=f0_size[0] - 0.5, color="k")
    ax.axvline(x=f0_size[0] - 0.5, color="k")
    ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k")
    ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k")
    return plot
def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=None, **kwargs):
    """Provides a plot of the condensed (multiplier) block of a hybrid form.

    The Slate Schur complement S = A_tt - A_tb A_bb^{-1} A_bt is assembled
    and plotted like the other helpers (single-scale problems).

    :param a_form: UFL form of the full hybrid system.
    :param trace_index: block index of the trace/multiplier field.
    :param bcs: optional list of boundary conditions applied at assembly.
    :return: the matshow artist.
    """
    # BUG FIX: the default was the mutable literal [] (one list shared by
    # every ctotal_all); use the None sentinel instead. Behavior is unchanged.
    if bcs is None:
        bcs = []
    fig, ax = plt.subplots(1, 1)
    _A = Tensor(a_form)
    A = _A.blocks
    idx = trace_index
    # Static condensation onto the multiplier block.
    S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx]
    Smat = assemble(S, bcs=bcs)
    petsc_mat = Smat.M.handle
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    Mnp = Mnp.toarray()
    # Eligetting_minate rows and columns filled with zero entries
    Mnp = Mnp[~(Mnp==0).total_all(1)]
    idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0))
    Mnp = np.delete(Mnp, idx, axis=1)
    Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
    # Plot the matrix
    plot = ax.matshow(Am, **kwargs)
    # Below there is the spy alternative
    # plot = plt.spy(Am, **kwargs)
    # Remove axis ticks and values
    ax.tick_params(lengthgth=0)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    return plot
def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray:
    """Utility function to filter real part in a numpy array.

    :param array:
        Array with real and complex numbers.
    :param imag_threshold:
        Threshold to cut off imaginary part in complex number.
    :return:
        Filtered array with only real numbers.
    """
    # BUG FIX: the threshold was hard-coded as 1e-5, silently ignoring the
    # imag_threshold parameter; use the parameter as documented.
    real_part_array = array.real[abs(array.imag) < imag_threshold]
    return real_part_array
def calculate_condition_number(
    A,
    num_of_factors,
    backend: str = "scipy",
    use_sparse: bool = False,
    zero_tol: float = 1e-5
):
    """Estimate the 2-norm condition number of a PETSc matrix via SVD.

    :param A: PETSc Mat handle (provides gettingSize / gettingValuesCSR).
    :param num_of_factors: number of singular values requested (used by the
        sparse scipy path and by SLEPc; the dense path computes all of them).
    :param backend: "scipy" or "slepc".
    :param use_sparse: scipy backend only -- use iterative svds instead of a
        dense LAPACK SVD.
    :param zero_tol: singular values at or below this are treated as
        numerical zeros and discarded before taking max/min.
    :raises RuntimeError: if the SLEPc SVD does not converge.
    :raises NotImplementedError: for an unknown backend.
    """
    backend = backend.lower()
    if backend == "scipy":
        size = A.gettingSize()
        # Convert the PETSc CSR triple to a scipy sparse matrix.
        Mnp = csr_matrix(A.gettingValuesCSR()[::-1], shape=size)
        Mnp.eligetting_minate_zeros()
        if use_sparse:
            # Largest-magnitude singular values only, iteratively.
            singular_values = svds(
                A=Mnp,
                k=num_of_factors,
                which="LM",
                getting_maxiter=5000,
                return_singular_vectors=False,
                solver="lobpcg"
            )
        else:
            # Dense LAPACK SVD of the full operator.
            M = Mnp.toarray()
            singular_values = svd(M, compute_uv=False, check_finite=False)
        # Drop numerical zeros so a singular operator does not produce a
        # meaningless min.
        singular_values = singular_values[singular_values > zero_tol]
        condition_number = singular_values.getting_max() / singular_values.getting_min()
    elif backend == "slepc":
        S = SLEPc.SVD()
        S.create()
        S.setOperator(A)
        S.setType(SLEPc.SVD.Type.LAPACK)
        S.setDimensions(nsv=num_of_factors)
        S.setTolerances(getting_max_it=5000)
        S.setWhichSingularTriplets(SLEPc.SVD.Which.LARGEST)
        S.solve()
        num_converged_values = S.gettingConverged()
        singular_values_list = list()
        if num_converged_values > 0:
            for i in range(num_converged_values):
                singular_value = S.gettingValue(i)
                singular_values_list.adding(singular_value)
        else:
            raise RuntimeError("SLEPc SVD has not converged.")
        singular_values = np.array(singular_values_list)
        singular_values = singular_values[singular_values > zero_tol]
        condition_number = singular_values.getting_max() / singular_values.getting_min()
    else:
        raise NotImplementedError("The required method for condition number estimation is currently unavailable.")
    return condition_number
def solve_poisson_cg(mesh, degree=1, use_quads=False):
    """Assemble the CG-discretized Poisson operator on `mesh` and estimate
    its condition number. No linear solve is performed; a
    ConditionNumberResult is returned.

    NOTE(review): use_quads is currently unused in this function.
    """
    # Function space declaration
    V = FunctionSpace(mesh, "CG", degree)
    # Trial and test functions
    u = TrialFunction(V)
    v = TestFunction(V)
    # Dirichlet BCs
    bcs = DirichletBC(V, 0.0, "on_boundary")
    # Variational form
    a = inner(grad(u), grad(v)) * dx
    A = assemble(a, bcs=bcs, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = V.dim()
    # Request all-but-one singular values for the estimate.
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_ls(mesh, degree=1):
    """Assemble the least-squares mixed (velocity/pressure) Poisson operator
    and estimate its condition number. Returns a ConditionNumberResult; no
    solve is performed.
    """
    # Function space declaration
    pressure_family = 'CG'
    velocity_family = 'CG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    W = U * V
    # Trial and test functions
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    # Mesh entities (h currently unused below)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Exact solution (used only to set the velocity boundary data)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Dirichlet BCs
    bcs = DirichletBC(W[0], sigma_e, "on_boundary")
    # Stabilization parameters
    delta_1 = Constant(1)
    delta_2 = Constant(1)
    delta_3 = Constant(1)
    # Least-squares terms: flux residual, divergence residual, curl residual
    a = delta_1 * inner(u + grad(p), v + grad(q)) * dx
    a += delta_2 * division(u) * division(v) * dx
    a += delta_3 * inner(curl(u), curl(v)) * dx
    A = assemble(a, bcs=bcs, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = W.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_cgls(mesh, degree=1):
    """Assemble the CGLS-stabilized mixed Poisson operator and estimate its
    condition number. Returns a ConditionNumberResult; no solve is performed.
    """
    # Function space declaration
    pressure_family = 'CG'
    velocity_family = 'CG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    W = U * V
    # Trial and test functions
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    # Mesh entities (n and h currently unused below)
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Exact solution (used only to set the velocity boundary data)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Dirichlet BCs
    bcs = DirichletBC(W[0], sigma_e, "on_boundary")
    # Mixed classical terms
    a = (dot(u, v) - division(v) * p - q * division(u)) * dx
    # Stabilizing terms
    a += -0.5 * inner((u + grad(p)), v + grad(q)) * dx
    # a += 0.5 * h * h * division(u) * division(v) * dx
    # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
    # L += 0.5 * h * h * f * division(v) * dx
    a += 0.5 * division(u) * division(v) * dx
    a += 0.5 * inner(curl(u), curl(v)) * dx
    A = assemble(a, bcs=bcs, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = W.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_vms(mesh, degree=1):
    """Assemble the VMS-stabilized mixed Poisson operator and estimate its
    condition number. Returns a ConditionNumberResult; no solve is performed.
    """
    # Function space declaration
    pressure_family = 'CG'
    velocity_family = 'CG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    W = U * V
    # Trial and test functions
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    # Mesh entities (n and h currently unused below)
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Exact solution (used only to set the velocity boundary data)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Dirichlet BCs
    bcs = DirichletBC(W[0], sigma_e, "on_boundary")
    # Mixed classical terms
    a = (dot(u, v) - division(v) * p + q * division(u)) * dx
    # Stabilizing terms (VMS-type)
    a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
    # a += 0.5 * h * h * division(u) * division(v) * dx
    # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
    # L += 0.5 * h * h * f * division(v) * dx
    # a += 0.5 * division(u) * division(v) * dx
    # a += 0.5 * inner(curl(u), curl(v)) * dx
    # L += 0.5 * f * division(v) * dx
    A = assemble(a, bcs=bcs, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = W.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_mixed_RT(mesh, degree=1):
    """Assemble the classical mixed (Raviart-Thomas) Poisson operator.

    Chooses an H(div)-conforming velocity space paired with a discontinuous
    pressure space (RTCF/DQ on quads, RT/DG on triangles), assembles the
    saddle-point bilinear form with a strong velocity boundary condition,
    and returns a ConditionNumberResult describing the operator. No linear
    solve is performed.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: pressure-space polynomial degree (velocity uses degree + 1).
    """
    # Select compatible H(div)/pressure families for the cell shape.
    if str(mesh.ufl_cell()) == "quadrilateral":
        hdivision_family, pressure_family = 'RTCF', 'DQ'
    else:
        hdivision_family, pressure_family = 'RT', 'DG'
    velocity_space = FunctionSpace(mesh, hdivision_family, degree + 1)
    pressure_space = FunctionSpace(mesh, pressure_family, degree)
    mixed_space = velocity_space * pressure_space

    # Trial/test pairs on the mixed space.
    u, p = TrialFunctions(mixed_space)
    v, q = TestFunctions(mixed_space)

    # Manufactured exact solution p = sin(2*pi*x)*sin(2*pi*y).
    x, y = SpatialCoordinate(mesh)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(pressure_space).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(velocity_space, name='Exact velocity')
    sigma_e.project(-grad(p_exact))

    # Strong velocity condition on the whole boundary.
    bcs = DirichletBC(mixed_space[0], sigma_e, "on_boundary")

    # Classical mixed bilinear form.
    a = (dot(u, v) - division(v) * p + q * division(u)) * dx

    # Assemble and inspect the PETSc operator.
    A = assemble(a, bcs=bcs, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz

    number_of_dofs = mixed_space.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)

    return ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
def solve_poisson_dgls(mesh, degree=1):
    """Assemble the discontinuous Galerkin least-squares (DGLS) Poisson operator.

    DG velocity/pressure spaces with Badia-Codina-style jump and volumetric
    stabilization plus weakly imposed boundary conditions. Only assembles the
    operator and returns its conditioning data in a ConditionNumberResult.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of both approximation spaces.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    W = U * V
    # Trial and test functions
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Dirichlet BCs: imposed weakly below rather than strongly
    # bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
    # Average cell size and mesh dependent stabilization
    h_avg = (h("+") + h("-")) / 2.0
    # Jump stabilizing parameters based on Badia-Codina stabilized dG method
    L0 = 1
    eta_p = L0 * h  # method B in the Badia-Codina paper
    # eta_p = 1
    # eta_p = L0 * L0 # method D in the Badia-Codina paper
    eta_u = h / L0  # method B in the Badia-Codina paper
    # eta_u = 1
    # Nitsche's penalizing term
    beta_0 = Constant(1.0)
    beta = beta_0 / h
    # Mixed classical terms
    a = (dot(u, v) - division(v) * p - q * division(u)) * dx
    # DG terms
    a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
    # Edge stabilizing terms
    # ** Badia-Codina based
    a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
    a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
    # ** Mesh independent terms
    # a += jump(u, n) * jump(v, n) * dS
    # a += dot(jump(p, n), jump(q, n)) * dS
    # Volumetric stabilizing terms
    # a += 0.5 * h * h * division(u) * division(v) * dx
    # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
    # L += 0.5 * h * h * f * division(v) * dx
    # a += -0.5 * inner(u + grad(p), v + grad(q)) * dx
    # a += 0.5 * division(u) * division(v) * dx
    # a += 0.5 * inner(curl(u), curl(v)) * dx
    # ** Badia-Codina based
    a += -eta_u * inner(u + grad(p), v + grad(q)) * dx
    a += eta_p * division(u) * division(v) * dx
    a += eta_p * inner(curl(u), curl(v)) * dx
    # Weakly imposed boundary conditions
    a += dot(v, n) * p * ds - q * dot(u, n) * ds
    a += beta * p * q * ds  # may decrease convergente rates
    # ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method
    a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
    a += (eta_u / h) * dot(p * n, q * n) * ds
    # Assemble (no strong BCs) and extract conditioning data
    A = assemble(a, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = W.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_dvms(mesh, degree=1):
    """Assemble the discontinuous VMS (dVMS) Poisson operator.

    DG velocity/pressure spaces with Badia-Codina jump stabilization and a
    VMS-style volumetric term. Assembles the operator only (no solve) and
    returns conditioning data in a ConditionNumberResult.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of both approximation spaces.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    W = U * V
    # Trial and test functions
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Dirichlet BCs: imposed weakly below rather than strongly
    # bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
    # Average cell size and mesh dependent stabilization
    h_avg = (h("+") + h("-")) / 2.0
    # Jump stabilizing parameters based on Badia-Codina stabilized dG method
    L0 = 1
    eta_p = L0 * h  # method B in the Badia-Codina paper
    # eta_p = L0 * L0 # method D in the Badia-Codina paper
    eta_u = h / L0  # method B in the Badia-Codina paper
    # Mixed classical terms
    a = (dot(u, v) - division(v) * p + q * division(u)) * dx
    # DG terms
    a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
    # Edge stabilizing terms
    # ** Badia-Codina based
    a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
    a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
    # ** Mesh independent (original)
    # a += jump(u, n) * jump(v, n) * dS # not considered in the original paper
    # a += dot(jump(p, n), jump(q, n)) * dS
    # Volumetric stabilizing terms
    # a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
    # a += 0.5 * h * h * division(u) * division(v) * dx
    # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
    # L += 0.5 * h * h * f * division(v) * dx
    # a += 0.5 * division(u) * division(v) * dx
    # a += 0.5 * inner(curl(u), curl(v)) * dx
    # L += 0.5 * f * division(v) * dx
    # ** Badia-Codina based
    a += eta_u * inner(u + grad(p), grad(q) - v) * dx
    a += eta_p * division(u) * division(v) * dx
    # Weakly imposed boundary conditions
    a += dot(v, n) * p * ds - q * dot(u, n) * ds
    # ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method
    a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
    a += (eta_u / h) * dot(p * n, q * n) * ds  # may decrease convergente rates
    # ** Classical Nitsche
    # a += beta * p * q * ds # may decrease convergente rates (Nitsche)
    # Assemble (no strong BCs) and extract conditioning data
    A = assemble(a, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = W.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_sipg(mesh, degree=1):
    """Assemble the (symmetric) interior penalty DG Poisson operator.

    Primal DG formulation; the symmetry switch ``s`` selects SIPG (-1) or
    NIPG (+1). Assembles the operator only and returns conditioning data in
    a ConditionNumberResult.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of the primal space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    V = FunctionSpace(mesh, pressure_family, degree)
    # Trial and test functions
    p = TrialFunction(V)
    q = TestFunction(V)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    # Forcing function consistent with the manufactured solution
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)
    # Edge stabilizing parameter
    beta0 = Constant(1e1)
    beta = beta0 / h
    # Symmetry term. Choose if the method is SIPG (-1) or NIPG (1)
    s = Constant(-1)
    # Classical volumetric terms
    a = inner(grad(p), grad(q)) * dx
    # NOTE(review): L is never assembled below — only the operator `a` is studied.
    L = f * q * dx
    # DG edge terms
    a += s * dot(jump(p, n), avg(grad(q))) * dS - dot(avg(grad(p)), jump(q, n)) * dS
    # Edge stabilizing terms
    a += beta("+") * dot(jump(p, n), jump(q, n)) * dS
    # Weak boundary conditions
    a += s * dot(p * n, grad(q)) * ds - dot(grad(p), q * n) * ds
    a += beta * p * q * ds
    # Assemble and extract conditioning data
    A = assemble(a, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = V.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_dls(mesh, degree=1):
    """Assemble the discontinuous least-squares (DLS) Poisson operator.

    DG velocity/pressure spaces with a least-squares volumetric functional
    plus jump/boundary stabilization. Assembles the operator only (note the
    tighter symmetry tolerance 1e-12 used here) and returns conditioning
    data in a ConditionNumberResult.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of both approximation spaces.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    W = U * V
    # Trial and test functions
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Dirichlet BCs: imposed weakly below rather than strongly
    # bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
    # Average cell size and mesh dependent stabilization
    h_avg = (h("+") + h("-")) / 2.0
    # Jump stabilizing parameters based on Badia-Codina stabilized dG method
    # L0 = 1
    # eta_p = L0 * h_avg # method B in the Badia-Codina paper
    eta_p = 1
    # eta_p = L0 * L0 # method D in the Badia-Codina paper
    # eta_u = h_avg / L0 # method B in the Badia-Codina paper
    eta_u = 1
    # eta_u_bc = h / L0 # method B in the Badia-Codina paper
    eta_u_bc = 1
    # Least-Squares weights
    delta = Constant(1.0)
    # delta = h
    delta_0 = delta
    delta_1 = delta
    delta_2 = delta
    delta_3 = 1 / h
    delta_4 = 1 / h
    # Least-squares terms
    a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
    a += delta_1 * division(u) * division(v) * dx
    a += delta_2 * inner(curl(u), curl(v)) * dx
    # Edge stabilizing terms
    # ** Badia-Codina based (better results) **
    a += eta_u * avg(delta_3) * (jump(u, n) * jump(v, n)) * dS
    a += eta_p * avg(delta_4) * dot(jump(p, n), jump(q, n)) * dS
    a += eta_u_bc * delta_3 * p * q * ds  # may decrease convergente rates
    a += eta_u_bc * delta_4 * dot(u, n) * dot(v, n) * ds
    # ** Mesh independent **
    # a += jump(u, n) * jump(v, n) * dS
    # a += dot(jump(p, n), jump(q, n)) * dS
    # a += p * q * ds
    # Assemble (no strong BCs) and extract conditioning data
    A = assemble(a, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-12)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = W.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result
def solve_poisson_sdhm(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Assemble the stabilized dual hybrid mixed (SDHM) Poisson operator.

    Hybridized mixed formulation with a trace multiplier; the multiplier
    block is statically condensed via a Schur complement (Slate ``Tensor``
    blocks) and the condensed operator's conditioning is reported.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of the approximation spaces.
    :param is_multiplier_continuous: use a C0 facet trace space for the
        multiplier instead of the discontinuous "HDiv Trace" space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    trace_family = "HDiv Trace"
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        T = FunctionSpace(mesh, trace_family, degree)
    W = U * V * T
    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    u, p, lambda_h = TrialFunctions(W)
    v, q, mu_h = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Forcing function consistent with the manufactured solution
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)
    # BCs
    u_projected = sigma_e
    p_boundaries = p_exact
    # Strong BC on the multiplier (trace) space
    bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
    # Hybridization parameter
    beta_0 = Constant(1.0e-18)
    # beta = beta_0 / h
    beta = beta_0
    # Stabilization parameters
    delta_0 = Constant(-1)
    delta_1 = Constant(-0.5) * h * h
    delta_2 = Constant(0.5) * h * h
    delta_3 = Constant(0.5) * h * h
    # Mixed classical terms
    a = (dot(u, v) - division(v) * p + delta_0 * q * division(u)) * dx
    L = delta_0 * f * q * dx
    # Stabilizing terms
    a += delta_1 * inner(u + grad(p), v + grad(q)) * dx
    a += delta_2 * division(u) * division(v) * dx
    a += delta_3 * inner(curl(u), curl(v)) * dx
    L += delta_2 * f * division(v) * dx
    # Hybridization terms
    a += lambda_h("+") * dot(v, n)("+") * dS + mu_h("+") * dot(u, n)("+") * dS
    a += beta("+") * (lambda_h("+") - p("+")) * (mu_h("+") - q("+")) * dS
    # Weakly imposed BC
    a += (p_boundaries * dot(v, n) + mu_h * (dot(u, n) - dot(u_projected, n))) * ds
    a += beta * (lambda_h - p_boundaries) * mu_h * ds
    # Static condensation: Schur complement onto the multiplier block (index 2)
    F = a - L
    a_form = lhs(F)
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
    Smat = assemble(S, bcs=bcs)
    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    # DoFs counted on the condensed (trace) system, not on W
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bcs
    )
    return result
def solve_poisson_hdg(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Assemble the hybridizable DG (HDG) Poisson operator.

    Classical HDG formulation with numerical flux trace
    ``u_hat = u + beta*(p - lambda_h)*n``; the trace block is statically
    condensed via a Schur complement and its conditioning is reported.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of the approximation spaces.
    :param is_multiplier_continuous: use a C0 facet trace space for the
        multiplier instead of the discontinuous "HDiv Trace" space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    trace_family = "HDiv Trace"
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        T = FunctionSpace(mesh, trace_family, degree)
    W = U * V * T
    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    u, p, lambda_h = TrialFunctions(W)
    v, q, mu_h = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # Forcing function consistent with the manufactured solution
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)
    # Dirichlet BCs on the multiplier (trace) space
    bc_multiplier = DirichletBC(W.sub(2), p_exact, "on_boundary")
    # Hybridization parameter
    beta_0 = Constant(1.0e0)
    beta = beta_0 / h
    # beta = beta_0
    # Numerical flux trace
    u_hat = u + beta * (p - lambda_h) * n
    # HDG classical form
    a = (dot(u, v) - division(v) * p) * dx + lambda_h("+") * jump(v, n) * dS
    a += -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
    L = f * q * dx
    # Transmission condition
    a += jump(u_hat, n) * mu_h("+") * dS
    # Weakly imposed BC
    a += lambda_h * dot(v, n) * ds
    a += dot(u_hat, n) * q * ds
    # Static condensation: Schur complement onto the multiplier block (index 2)
    F = a - L
    a_form = lhs(F)
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
    Smat = assemble(S, bcs=bc_multiplier)
    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    # DoFs counted on the condensed (trace) system, not on W
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bc_multiplier
    )
    return result
def solve_poisson_cgh(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Assemble the hybridized primal (CG-H) Poisson operator.

    Primal formulation where the flux is substituted as ``u = -grad(p)``
    and a trace multiplier enforces transmission; the trace block is
    statically condensed and its conditioning reported.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of the approximation spaces.
    :param is_multiplier_continuous: use a C0 facet trace space for the
        multiplier instead of the discontinuous "HDiv Trace" space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    trace_family = "HDiv Trace"
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        T = FunctionSpace(mesh, trace_family, degree)
    W = V * T
    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    p, lambda_h = TrialFunctions(W)
    q, mu_h = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    # Forcing function consistent with the manufactured solution
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)
    # Dirichlet BCs on the multiplier (trace) space
    bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
    # Hybridization parameter
    beta_0 = Constant(1.0e0)
    beta = beta_0 / h
    # beta = beta_0
    # Numerical flux trace (flux eliminated in terms of the primal unknown)
    u = -grad(p)
    u_hat = u + beta * (p - lambda_h) * n
    # HDG classical form
    a = -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
    L = f * q * dx
    # Transmission condition
    a += jump(u_hat, n) * mu_h("+") * dS
    # Weakly imposed BC
    a += dot(u_hat, n) * q * ds
    # Static condensation: Schur complement onto the multiplier block (index 1)
    F = a - L
    a_form = lhs(F)
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
    Smat = assemble(S, bcs=bc_multiplier)
    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    # DoFs counted on the condensed (trace) system, not on W
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bc_multiplier
    )
    return result
def solve_poisson_ldgc(
    mesh,
    degree=1,
    is_multiplier_continuous=True
):
    """Assemble the LDG-C (hybridized local DG with continuous trace) operator.

    Primal DG formulation with a (by default continuous) facet multiplier;
    the multiplier block is statically condensed and its conditioning
    reported.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of the approximation spaces.
    :param is_multiplier_continuous: default True here (unlike the other
        hybrid solvers in this file).
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    primal_family = "DQ" if use_quads else "DG"
    V = FunctionSpace(mesh, primal_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        trace_family = "HDiv Trace"
        T = FunctionSpace(mesh, trace_family, degree)
    W = V * T
    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    p, lambda_h = TrialFunctions(W)
    q, mu_h = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    # Forcing function consistent with the manufactured solution
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)
    # Dirichlet BCs
    p_boundaries = Constant(0.0)
    bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
    # Hybridization parameter
    s = Constant(-1.0)
    beta = Constant(32.0)
    # NOTE(review): h is recomputed here although already defined above.
    h = CellDiameter(mesh)
    h_avg = avg(h)
    # Classical term
    a = dot(grad(p), grad(q)) * dx
    L = f * q * dx
    # Hybridization terms
    a += s * dot(grad(q), n)("+") * (p("+") - lambda_h("+")) * dS
    a += -dot(grad(p), n)("+") * (q("+") - mu_h("+")) * dS
    a += (beta / h_avg) * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
    # Boundary terms
    # a += -dot(vel_projected, n) * v * ds  # How to set this bc??
    # a += (beta / h) * (p- p_boundaries) * q * ds  # is this necessary?
    L += s * dot(grad(q), n) * p_boundaries * ds
    # Static condensation: Schur complement onto the multiplier block (index 1)
    F = a - L
    a_form = lhs(F)
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
    Smat = assemble(S, bcs=bc_multiplier)
    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    # DoFs counted on the condensed (trace) system, not on W
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bc_multiplier
    )
    return result
def solve_poisson_lsh(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Assemble the least-squares hybrid (LSH) Poisson operator.

    Least-squares functional on DG velocity/pressure spaces combined with a
    hybrid trace multiplier; the multiplier block is statically condensed
    and its conditioning reported. Several alternative/experimental term
    choices are kept commented in place.

    :param mesh: mesh object (Firedrake-style; quads or triangles).
    :param degree: polynomial degree of the approximation spaces.
    :param is_multiplier_continuous: use a C0 facet trace space for the
        multiplier instead of the discontinuous "HDiv Trace" space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        trace_family = "HDiv Trace"
        T = FunctionSpace(mesh, trace_family, degree)
    W = U * V * T
    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    u, p, lambda_h = TrialFunctions(W)
    v, q, mu_h = TestFunctions(W)
    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)
    # Manufactured exact solution: p = sin(2*pi*x) * sin(2*pi*y)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))
    # BCs on the multiplier (trace) space
    bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
    # Hybridization parameter
    beta_0 = Constant(1.0)
    beta = beta_0 / h
    beta_avg = beta_0 / h("+")
    # Stabilizing parameter
    # delta_0 = Constant(1)
    # delta_1 = Constant(1)
    # delta_2 = Constant(1)
    # delta_3 = Constant(1)
    # delta_4 = Constant(1)
    # delta_5 = Constant(1)
    # LARGE_NUMBER = Constant(1e0)
    delta = h * h
    # delta = Constant(1)
    # delta = h
    delta_0 = delta
    delta_1 = delta
    delta_2 = delta
    delta_3 = delta
    delta_4 = delta
    # delta_4 = LARGE_NUMBER / h
    delta_5 = delta
    # Numerical flux traces for velocity and its test-function counterpart
    u_hat = u + beta * (p - lambda_h) * n
    v_hat = v + beta * (q - mu_h) * n
    # Flux least-squares
    # a = (
    #     (inner(u, v) - q * division(u) - p * division(v) + inner(grad(p), grad(q)))
    #     * delta_1
    #     * dx
    # )
    # # These terms below are unsymmetric
    # a += delta_1 * jump(u_hat, n=n) * q("+") * dS
    # a += delta_1("+") * dot(u_hat, n) * q * ds
    # # a += delta_1 * dot(u, n) * q * ds
    # # L = -delta_1 * dot(u_projected, n) * q * ds
    # a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
    # a += delta_1 * lambda_h * dot(v, n) * ds
    # # L = delta_1 * p_exact * dot(v, n) * ds
    # Flux Least-squares as in DG
    a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
    # Classical mixed Darcy eq. first-order terms as stabilizing terms
    a += delta_1 * (dot(u, v) - division(v) * p) * dx
    a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
    a += delta_1 * lambda_h * dot(v, n) * ds
    # Mass balance least-square
    a += delta_2 * division(u) * division(v) * dx
    # L = delta_2 * f * division(v) * dx
    # Irrotational least-squares
    a += delta_3 * inner(curl(u), curl(v)) * dx
    # Hybridization terms
    a += mu_h("+") * jump(u_hat, n=n) * dS
    a += delta_4("+") * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
    # a += delta_4 * (p - lambda_h) * (q - mu_h) * ds
    # a += delta_5 * (dot(u, n)("+") - dot(u_hat, n)("+")) * (dot(v, n)("+") - dot(v_hat, n)("+")) * dS
    # a += delta_5 * (dot(u, n) - dot(u_hat, n)) * (dot(v, n) - dot(v_hat, n)) * ds
    # Weakly imposed BC from hybridization
    # a += mu_h * (lambda_h - p_boundaries) * ds
    # a += mu_h * lambda_h * ds
    # ###
    # a += (
    #     (mu_h - q) * (lambda_h - p_boundaries) * ds
    # )  # maybe this is not a good way to impose BC, but this necessary
    # Static condensation: Schur complement onto the multiplier block (index 2)
    _A = Tensor(a)
    A = _A.blocks
    S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
    Smat = assemble(S, bcs=bcs)
    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    # DoFs counted on the condensed (trace) system, not on W
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bcs
    )
    return result
def hp_refinement_cond_number_calculation(
    solver,
    getting_min_degree=1,
    getting_max_degree=4,
    numel_xy=(5, 10, 15, 20, 25),
    quadrilateral=True,
    name="",
    **kwargs
):
    """Run an hp-refinement sweep of a solver and save conditioning data to CSV.

    For each degree in ``range(getting_min_degree, getting_max_degree)``
    (note: the upper bound is exclusive) and each mesh resolution in
    ``numel_xy``, runs ``solver`` on a UnitSquareMesh and collects the
    ConditionNumberResult fields into a KnowledgeFrame, which is written to
    ``./cond_number_results/results_<name>/cond_numbers.csv`` and returned.

    :param solver: callable ``solver(mesh, degree=...)`` returning a
        ConditionNumberResult.
    :param getting_min_degree: first polynomial degree (inclusive).
    :param getting_max_degree: last polynomial degree (exclusive).
    :param numel_xy: iterable of per-side element counts.
    :param quadrilateral: use quad cells (h taken as 1/n) or triangles
        (h taken as the minimum cell size).
    :param name: label used for the progress bar and the output directory.
    :param kwargs: accepted for interface compatibility; currently unused.
    """
    # Column-wise accumulator for the sweep results
    results_dict = {
        "Element": list(),
        "Number of Elements": list(),
        "Degree": list(),
        "Symmetric": list(),
        "nnz": list(),
        "dofs": list(),
        "h": list(),
        "Condition Number": list(),
    }
    element_kind = "Quad" if quadrilateral else "Tri"
    pbar = tqdm(range(getting_min_degree, getting_max_degree))
    for degree in pbar:
        for n in numel_xy:
            pbar.set_description(f"Processing {name} - degree = {degree} - N = {n}")
            mesh = UnitSquareMesh(n, n, quadrilateral=quadrilateral)
            result = solver(mesh, degree=degree)
            # For quads use the uniform spacing 1/n; otherwise the smallest cell
            current_cell_size = mesh.cell_sizes.dat.data_ro.getting_min() if not quadrilateral else 1 / n
            results_dict["Element"].adding(element_kind)
            results_dict["Number of Elements"].adding(n * n)
            results_dict["Degree"].adding(degree)
            results_dict["Symmetric"].adding(result.is_operator_symmetric)
            results_dict["nnz"].adding(result.nnz)
            results_dict["dofs"].adding(result.number_of_dofs)
            results_dict["h"].adding(current_cell_size)
            results_dict["Condition Number"].adding(result.condition_number)
    # Persist the collected sweep to disk (directory created if absent)
    os.makedirs("./cond_number_results/results_%s" % name, exist_ok=True)
    kf_cond_number = mk.KnowledgeFrame(data=results_dict)
    path_to_save_results = "./cond_number_results/results_%s/cond_numbers.csv" % name
    kf_cond_number.to_csv(path_to_save_results)
    return kf_cond_number
# Solver options: map of formulation name -> solver callable.
# Uncomment entries to include them in the sweep; only "lsh" is active.
solvers_options = {
    # "cg": solve_poisson_cg,
    # "cgls": solve_poisson_cgls,
    # "dgls": solve_poisson_dgls,
    # "sdhm": solve_poisson_sdhm,
    # "ls": solve_poisson_ls,
    # "dls": solve_poisson_dls,
    "lsh": solve_poisson_lsh,
    # "vms": solve_poisson_vms,
    # "dvms": solve_poisson_dvms,
    # "mixed_RT": solve_poisson_mixed_RT,
    # "hdg": solve_poisson_hdg,
    # "cgh": solve_poisson_cgh,
    # "ldgc": solve_poisson_ldgc,
    # "sipg": solve_poisson_sipg,
}
# Sweep configuration: degrees range from `degree` (inclusive) to
# `degree + final_item_degree` (exclusive in the range used downstream).
degree = 1
final_item_degree = 1
for current_solver in solvers_options:
    # Setting the output file name
    name = f"{current_solver}"
    # Selecting the solver and its kwargs
    solver = solvers_options[current_solver]
    # Perforgetting_ming the convergence study
    hp_refinement_cond_number_calculation(
        solver,
        getting_min_degree=degree,
        getting_max_degree=degree + final_item_degree,
        quadrilateral=True,
        name=name
    )
# Ad-hoc single-run / plotting scratch code kept for reference:
# N = 5
# mesh = UnitSquareMesh(N, N, quadrilateral=True)
# result = solve_poisson_lsh(mesh, degree=1)
# print(f'Is symmetric? {result.is_operator_symmetric}')
# print(f'nnz: {result.nnz}')
# print(f'DoFs: {result.number_of_dofs}')
# print(f'Condition Number: {result.condition_number}')
# # Plotting the resulting matrix
# matplotlib.use('TkAgg')
# import clone
# my_cmapping = clone.clone(plt.cm.getting_cmapping("winter"))
# my_cmapping.set_bad(color="lightgray")
# # plot_matrix_primal_hybrid_full(result.form, result.bcs, cmapping=my_cmapping)
# # plot_matrix_mixed_hybrid_full(result.form, result.bcs, cmapping=my_cmapping)
# plot_matrix_hybrid_multiplier(result.form, trace_index=2, bcs=result.bcs, cmapping=my_cmapping)
# # plot_matrix(result.assembled_form, cmapping=my_cmapping)
# # plot_matrix_mixed(result.assembled_form, cmapping=my_cmapping)
# plt.tight_layout()
# plt.savefig("sparse_pattern.png")
# plt.show()
|
from __future__ import print_function
from scipy.linalg import block_diag
from scipy.stats import norm as ndist
from scipy.interpolate import interp1d
import collections
import numpy as np
from numpy import log
from numpy.linalg import norm, qr, inv, eig
import monkey as mk
import regreg.api as rr
from .randomization import randomization
from ..base import restricted_estimator
from ..algorithms.barrier_affine import solve_barrier_affine_py as solver
from ..distributions.discrete_family import discrete_family
class group_lasso(object):
    def __init__(self,
                 loglike,
                 groups,
                 weights,
                 ridge_term,
                 randomizer,
                 use_lasso=True,  # should lasso solver be used where applicable - defaults to True
                 perturb=None):
        """Set up a randomized group-lasso problem.

        :param loglike: regreg-style log-likelihood (quadratic loss) object.
        :param groups: array of group labels, one per feature.
        :param weights: dict mapping group label -> penalty weight.
        :param ridge_term: ridge regularization coefficient added to the loss.
        :param randomizer: randomization object providing sample_by_num()
            and cov_prec.
        :param use_lasso: if True and every group has size one, fall back to
            a plain weighted-l1 (lasso) penalty instead of the group penalty.
        :param perturb: optional fixed randomization vector (drawn lazily
            from ``randomizer`` otherwise).
        """
        _check_groups(groups)  # make sure groups looks sensible
        # log likelihood : quadratic loss
        self.loglike = loglike
        self.nfeature = self.loglike.shape[0]
        # ridge parameter
        self.ridge_term = ridge_term
        # group lasso penalty (from regreg)
        # use regular lasso penalty if total_all groups are size 1
        if use_lasso and groups.size == np.distinctive(groups).size:
            # need to provide weights an an np.array rather than a dictionary
            weights_np = np.array([w[1] for w in sorted(weights.items())])
            self.penalty = rr.weighted_l1norm(weights=weights_np,
                                              lagrange=1.)
        else:
            self.penalty = rr.group_lasso(groups,
                                          weights=weights,
                                          lagrange=1.)
        # store groups as a class variable since the non-group lasso doesn't
        self.groups = groups
        self._initial_omega = perturb
        # gaussian randomization
        self.randomizer = randomizer
def fit(self,
solve_args={'tol': 1.e-12, 'getting_min_its': 50},
perturb=None):
# solve the randomized version of group lasso
(self.initial_soln,
self.initial_subgrad) = self._solve_randomized_problem(perturb=perturb,
solve_args=solve_args)
# initialize variables
active_groups = [] # active group labels
active_dirs = {} # dictionary: keys are group labels, values are unit-norm coefficients
unpenalized = [] # selected groups with no penalty
overtotal_all = np.ones(self.nfeature, np.bool) # mask of active features
ordered_groups = [] # active group labels sorted by label
ordered_opt = [] # gamma's ordered by group labels
ordered_vars = [] # indices "ordered" by sorting group labels
tol = 1.e-20
_, self.randomizer_prec = self.randomizer.cov_prec
# now we are collecting the directions and norms of the active groups
for g in sorted(np.distinctive(self.groups)): # g is group label
group_mask = self.groups == g
soln = self.initial_soln # do not need to keep setting this
if norm(soln[group_mask]) > tol * norm(soln): # is group g appreciably nonzero
ordered_groups.adding(g)
# variables in active group
ordered_vars.extend(np.flatnonzero(group_mask))
if self.penalty.weights[g] == 0:
unpenalized.adding(g)
else:
active_groups.adding(g)
active_dirs[g] = soln[group_mask] / norm(soln[group_mask])
ordered_opt.adding(norm(soln[group_mask]))
else:
overtotal_all[group_mask] = False
self.selection_variable = {'directions': active_dirs,
'active_groups': active_groups} # kind of redundant with keys of active_dirs
self._ordered_groups = ordered_groups
# exception if no groups are selected
if length(self.selection_variable['active_groups']) == 0:
return np.sign(soln), soln
# otherwise continue as before
self.observed_opt_state = np.hstack(ordered_opt) # gammas as array
_beta_unpenalized = restricted_estimator(self.loglike, # refit OLS on E
overtotal_all,
solve_args=solve_args)
beta_bar = np.zeros(self.nfeature)
beta_bar[overtotal_all] = _beta_unpenalized # refit OLS beta with zeros
self._beta_full = beta_bar
X, y = self.loglike.data
W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar)) # total_all 1's for LS
opt_linearNoU = np.dot(X.T, X[:, ordered_vars] * W[:, np.newaxis])
for i, var in enumerate(ordered_vars):
opt_linearNoU[var, i] += self.ridge_term
opt_offset = self.initial_subgrad
self.observed_score_state = -opt_linearNoU.dot(_beta_unpenalized)
self.observed_score_state[~overtotal_all] += self.loglike.smooth_objective(beta_bar, 'grad')[~overtotal_all]
active_signs = np.sign(self.initial_soln)
active = np.flatnonzero(active_signs)
self.active = active
def compute_Vg(ug):
pg = ug.size # figure out size of g'th group
if pg > 1:
Z = np.column_stack((ug, np.eye(pg, pg - 1)))
Q, _ = qr(Z)
Vg = Q[:, 1:] # sip the first column
else:
Vg = np.zeros((1, 0)) # if the group is size one, the orthogonal complement is empty
return Vg
def compute_Lg(g):
pg = active_dirs[g].size
Lg = self.penalty.weights[g] * np.eye(pg)
return Lg
sorted_active_dirs = collections.OrderedDict(sorted(active_dirs.items()))
Vs = [compute_Vg(ug) for ug in sorted_active_dirs.values()]
V = block_diag(*Vs) # unpack the list
Ls = [compute_Lg(g) for g in sorted_active_dirs]
L = block_diag(*Ls) # unpack the list
XE = X[:, ordered_vars] # changed to ordered_vars
Q = XE.T.dot(self._W[:, None] * XE)
QI = inv(Q)
C = V.T.dot(QI).dot(L).dot(V)
self.XE = XE
self.Q = Q
self.QI = QI
self.C = C
U = block_diag(*[ug for ug in sorted_active_dirs.values()]).T
self.opt_linear = opt_linearNoU.dot(U)
self.active_dirs = active_dirs
self.opt_offset = opt_offset
self.ordered_vars = ordered_vars
self.linear_part = -np.eye(self.observed_opt_state.shape[0])
self.offset = np.zeros(self.observed_opt_state.shape[0])
return active_signs, soln
def _solve_randomized_problem(self,
                              perturb=None,
                              solve_args={'tol': 1.e-15, 'getting_min_its': 100}):
    """Solve the randomized problem and return its solution and subgradient.

    Parameters
    ----------
    perturb : ndarray, optional
        A fresh randomization vector; when given it replacings the stored one.
    solve_args : dict, optional
        Keyword arguments forwarded to the regreg solver.

    Returns
    -------
    (initial_soln, initial_subgrad) : tuple of ndarray
        Minimizer of the randomized objective and the implied subgradient
        of the penalty at that getting_minimizer.
    """
    # refresh the randomization if the ctotal_aller supplied one
    if perturb is not None:
        self._initial_omega = perturb
    # otherwise draw one lazily on first use
    if self._initial_omega is None:
        self._initial_omega = self.randomizer.sample_by_num()
    # quadratic term: ridge penalty tilted by the negative randomization
    quad = rr.identity_quadratic(self.ridge_term,
                                 0,
                                 -self._initial_omega,
                                 0)
    problem = rr.simple_problem(self.loglike, self.penalty)
    initial_soln = problem.solve(quad, **solve_args)
    # stationarity: the penalty subgradient is getting_minus the smooth gradient
    # (loss gradient plus the quadratic's gradient) at the solution
    initial_subgrad = -(self.loglike.smooth_objective(initial_soln, 'grad')
                        + quad.objective(initial_soln, 'grad'))
    return initial_soln, initial_subgrad
@staticmethod
def gaussian(X,
             Y,
             groups,
             weights,
             sigma=1.,
             quadratic=None,
             ridge_term=0.,
             perturb=None,
             use_lasso=True,  # should lasso solver be used when applicable - defaults to True
             randomizer_scale=None):
    """Construct a randomized group lasso for a Gaussian regression model.

    Builds the Gaussian log-likelihood for (X, Y) scaled by 1 / sigma**2,
    fills in data-driven defaults for the randomizer scale (and for
    ridge_term when explicitly passed as None), then returns the
    corresponding `group_lasso` instance.

    NOTE(review): ridge_term defaults to 0., so the `ridge_term is None`
    branch below only fires when a ctotal_aller passes None explicitly —
    confirm this is intended.
    """
    loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
    n, p = X.shape
    # average squared column norm of X, used to scale the defaults below
    avg_sq_col = np.average((X ** 2).total_sum(0))
    if ridge_term is None:
        ridge_term = np.standard(Y) * np.sqrt(avg_sq_col) / np.sqrt(n - 1)
    if randomizer_scale is None:
        randomizer_scale = np.sqrt(avg_sq_col) * 0.5 * np.standard(Y) * np.sqrt(n / (n - 1.))
    randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
    return group_lasso(loglike,
                       groups,
                       weights,
                       ridge_term,
                       randomizer,
                       use_lasso,
                       perturb)
def _setup_implied_gaussian(self):
_, prec = self.randomizer.cov_prec
if np.asarray(prec).shape in [(), (0,)]:
cond_precision = self.opt_linear.T.dot(self.opt_linear) * prec
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T) * prec
else:
cond_precision = self.opt_linear.T.dot(prec.dot(self.opt_linear))
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T).dot(prec)
cond_average = -logdens_linear.dot(self.observed_score_state + self.opt_offset)
self.cond_average = cond_average
self.cond_cov = cond_cov
self.cond_precision = cond_precision
self.logdens_linear = logdens_linear
return cond_average, cond_cov, cond_precision, logdens_linear
def selective_MLE(self,
                  solve_args={'tol': 1.e-12},
                  level=0.9,
                  useJacobian=True,
                  dispersion=None):
    """Do selective_MLE for group_lasso.

    Note: this masks the selective_MLE inherited from query
    because that is not adapted for the group_lasso. Also, astotal_sumes
    you have already run the fit method since this uses results
    from that method (observed_opt_state, linear_part, offset, C,
    active_dirs, ...).

    Parameters
    ----------
    solve_args : dict, optional
        Passed on to `solve_barrier_affine_jacobian_py`.
    level : float, optional
        Confidence level for the reported intervals.
    useJacobian : bool, optional
        Whether the solver should include the Jacobian term (self.C).
    dispersion : float, optional
        Dispersion forwarded to `selected_targettings`; estimated there
        when None.

    Returns
    -------
    (result, observed_info_average, log_ref) : tuple
        A knowledgeframe with MLE / SE / Z / p-value / CI / unbiased columns,
        the observed informatingion matrix on the targetting scale, and the
        log of the reference normalizing constant.
    """
    self._setup_implied_gaussian()  # Calculate useful quantities
    (observed_targetting, targetting_cov, targetting_score_cov, alternatives) = self.selected_targettings(dispersion)
    init_soln = self.observed_opt_state  # just the gammas
    cond_average = self.cond_average
    cond_cov = self.cond_cov
    logdens_linear = self.logdens_linear
    linear_part = self.linear_part
    offset = self.offset
    if np.asarray(observed_targetting).shape in [(), (0,)]:
        raise ValueError('no targetting specified')
    observed_targetting = np.atleast_1d(observed_targetting)
    prec_targetting = inv(targetting_cov)
    prec_opt = self.cond_precision
    score_offset = self.observed_score_state + self.opt_offset
    # targetting_lin detergetting_mines how the conditional average of optimization variables
    # vary with targetting
    # logdens_linear detergetting_mines how the argument of the optimization density
    # depends on the score, not how the average depends on score, hence the getting_minus sign
    targetting_linear = targetting_score_cov.T.dot(prec_targetting)
    targetting_offset = score_offset - targetting_linear.dot(observed_targetting)
    targetting_lin = - logdens_linear.dot(targetting_linear)
    targetting_off = cond_average - targetting_lin.dot(observed_targetting)
    # two branches: scalar (isotropic) randomizer precision vs full matrix
    if np.asarray(self.randomizer_prec).shape in [(), (0,)]:
        _P = targetting_linear.T.dot(targetting_offset) * self.randomizer_prec
        _prec = prec_targetting + (targetting_linear.T.dot(targetting_linear) * self.randomizer_prec) - targetting_lin.T.dot(
            prec_opt).dot(
            targetting_lin)
    else:
        _P = targetting_linear.T.dot(self.randomizer_prec).dot(targetting_offset)
        _prec = prec_targetting + (targetting_linear.T.dot(self.randomizer_prec).dot(targetting_linear)) - targetting_lin.T.dot(
            prec_opt).dot(targetting_lin)
    # NOTE: local C below is a correction term for the estimator; distinct
    # from self.C (the Jacobian piece) passed to the solver.
    C = targetting_cov.dot(_P - targetting_lin.T.dot(prec_opt).dot(targetting_off))
    conjugate_arg = prec_opt.dot(cond_average)
    # solve the barrier problem over the optimization variables
    val, soln, hess = solve_barrier_affine_jacobian_py(conjugate_arg,
                                                       prec_opt,
                                                       init_soln,
                                                       linear_part,
                                                       offset,
                                                       self.C,
                                                       self.active_dirs,
                                                       useJacobian,
                                                       **solve_args)
    final_estimator = targetting_cov.dot(_prec).dot(observed_targetting) \
                      + targetting_cov.dot(targetting_lin.T.dot(prec_opt.dot(cond_average - soln))) + C
    unbiased_estimator = targetting_cov.dot(_prec).dot(observed_targetting) + targetting_cov.dot(
        _P - targetting_lin.T.dot(prec_opt).dot(targetting_off))
    L = targetting_lin.T.dot(prec_opt)
    # observed informatingion: natural-parameter scale, then targetting scale
    observed_info_natural = _prec + L.dot(targetting_lin) - L.dot(hess.dot(L.T))
    observed_info_average = targetting_cov.dot(observed_info_natural.dot(targetting_cov))
    Z_scores = final_estimator / np.sqrt(np.diag(observed_info_average))
    # two-sided normal p-values
    pvalues = ndist.ckf(Z_scores)
    pvalues = 2 * np.getting_minimum(pvalues, 1 - pvalues)
    alpha = 1 - level
    quantile = ndist.ppf(1 - alpha / 2.)
    # Wald-style intervals at the requested level
    intervals = np.vstack([final_estimator -
                           quantile * np.sqrt(np.diag(observed_info_average)),
                           final_estimator +
                           quantile * np.sqrt(np.diag(observed_info_average))]).T
    log_ref = val + conjugate_arg.T.dot(cond_cov).dot(conjugate_arg) / 2.
    result = mk.KnowledgeFrame({'MLE': final_estimator,
                                'SE': np.sqrt(np.diag(observed_info_average)),
                                'Zvalue': Z_scores,
                                'pvalue': pvalues,
                                'lower_confidence': intervals[:, 0],
                                'upper_confidence': intervals[:, 1],
                                'unbiased': unbiased_estimator})
    return result, observed_info_average, log_ref
def selected_targettings(self,
                     dispersion=None,
                     solve_args={'tol': 1.e-12, 'getting_min_its': 50}):
    """Compute the selected-model targettings and their (cross-)covariances.

    Refits the restricted model on the ordered active variables and
    returns the observed targetting, its covariance, the targetting/score
    cross-covariance and a list of 'twosided' alternatives.  When
    `dispersion` is None it is estimated by Pearson's X^2.
    """
    X, y = self.loglike.data
    n, p = X.shape
    XE = self.XE
    Q = self.Q
    # refit restricted to the active (ordered) variables
    observed_targetting = restricted_estimator(self.loglike,
                                           self.ordered_vars,
                                           solve_args=solve_args)
    _score_linear = -XE.T.dot(self._W[:, None] * X).T
    alternatives = ['twosided'] * length(self.active)
    if dispersion is None:  # use Pearson's X^2
        fitted = self.loglike.saturated_loss.average_function(XE.dot(observed_targetting))
        dispersion = (((y - fitted) ** 2 / self._W).total_sum()
                      / (n - XE.shape[1]))
    cov_targetting = self.QI * dispersion
    crosscov_targetting_score = _score_linear.dot(self.QI).T * dispersion
    return (observed_targetting,
            cov_targetting,
            crosscov_targetting_score,
            alternatives)
class approximate_grid_inference(object):
    """Approximate grid-based selective inference for a group-lasso query.

    Evaluates an approximate reference density for each selected targetting on
    a one-dimensional grid and turns it into p-values, pivots and
    confidence intervals, either directly on a dense grid (useIP=False)
    or via quadratic interpolation on a coarse grid (useIP=True).
    """
    def __init__(self,
                 query,
                 dispersion,
                 solve_args={'tol': 1.e-12},
                 useIP=True):
        """
        Parameters
        ----------
        query : `gaussian_query`
            A Gaussian query which has informatingion
            to describe implied Gaussian.
        dispersion : float
            Dispersion forwarded to `query.selective_MLE` and
            `query.selected_targettings`.
        solve_args : dict, optional
            Arguments passed to solver.
        useIP : bool, optional
            Interpolate the log-reference from a coarse grid (True)
            instead of evaluating it on a dense grid (False).
        """
        self.solve_args = solve_args
        # run the selective MLE once; its inverse informatingion sets the grid width
        result, inverse_info = query.selective_MLE(dispersion=dispersion)[:2]
        # cache everything we need from the fitted query
        self.linear_part = query.linear_part
        self.offset = query.offset
        self.logdens_linear = query.logdens_linear
        self.cond_average = query.cond_average
        self.prec_opt = np.linalg.inv(query.cond_cov)
        self.cond_cov = query.cond_cov
        self.C = query.C
        self.active_dirs = query.active_dirs
        (observed_targetting, targetting_cov, targetting_score_cov, alternatives) = query.selected_targettings(dispersion)
        self.observed_targetting = observed_targetting
        self.targetting_score_cov = targetting_score_cov
        self.targetting_cov = targetting_cov
        self.init_soln = query.observed_opt_state
        self.randomizer_prec = query.randomizer_prec
        self.score_offset = query.observed_score_state + query.opt_offset
        self.ntargetting = ntargetting = targetting_cov.shape[0]
        _scale = 4 * np.sqrt(np.diag(inverse_info))
        # the window is +/- 1.5 * _scale in both cases; only the grid
        # resolution differs (interpolation needs far fewer knots)
        ngrid = 1000 if useIP == False else 100
        self.stat_grid = np.zeros((ntargetting, ngrid))
        for j in range(ntargetting):
            self.stat_grid[j, :] = np.linspace(observed_targetting[j] - 1.5 * _scale[j],
                                               observed_targetting[j] + 1.5 * _scale[j],
                                               num=ngrid)
        self.opt_linear = query.opt_linear
        self.useIP = useIP
    def total_summary(self,
                alternatives=None,
                parameter=None,
                level=0.9):
        """
        Produce p-values and confidence intervals for targettings
        of model including selected features
        Parameters
        ----------
        alternatives : [str], optional
            Sequence of strings describing the alternatives,
            should be values of ['twosided', 'less', 'greater']
        parameter : np.array
            Hypothesized value for parameter -- defaults to 0.
        level : float
            Confidence level.
        """
        if parameter is not None:
            pivots = self._approx_pivots(parameter,
                                         alternatives=alternatives)
        else:
            pivots = None
        pvalues = self._approx_pivots(np.zeros_like(self.observed_targetting),
                                      alternatives=alternatives)
        lower, upper = self._approx_intervals(level=level)
        result = mk.KnowledgeFrame({'targetting': self.observed_targetting,
                               'pvalue': pvalues,
                               'lower_confidence': lower,
                               'upper_confidence': upper})
        # BUG FIX: the old guard `not np.total_all(parameter == 0)` was also True
        # when parameter was None (None == 0 is False), which inserted a
        # junk 'pivot' column of Nones; require an explicit parameter.
        if parameter is not None and not np.total_all(parameter == 0):
            result.insert(4, 'pivot', pivots)
            result.insert(5, 'parameter', parameter)
        return result
    def log_reference(self,
                      observed_targetting,
                      targetting_cov,
                      targetting_score_cov,
                      grid):
        """
        Approximate the log of the reference density on a grid.
        """
        if np.asarray(observed_targetting).shape in [(), (0,)]:
            raise ValueError('no targetting specified')
        prec_targetting = np.linalg.inv(targetting_cov)
        targetting_lin = - self.logdens_linear.dot(targetting_score_cov.T.dot(prec_targetting))
        ref_hat = []
        for k in range(grid.shape[0]):
            # in the usual D = N + Gamma theta.hat,
            # targetting_lin is "something" times Gamma,
            # where "something" comes from implied Gaussian
            # cond_average is "something" times D
            # Gamma is targetting_score_cov.T.dot(prec_targetting)
            num_opt = self.prec_opt.shape[0]
            num_con = self.linear_part.shape[0]
            cond_average_grid = (targetting_lin.dot(np.atleast_1d(grid[k] - observed_targetting)) +
                              self.cond_average)
            # direction for decomposing o
            eta = -self.prec_opt.dot(self.logdens_linear.dot(targetting_score_cov.T))
            # .item() replacings the removed np.asscalar (same semantics)
            implied_average = eta.T.dot(cond_average_grid).item()
            implied_cov = eta.T.dot(self.cond_cov).dot(eta).item()
            implied_prec = 1. / implied_cov
            _A = self.cond_cov.dot(eta) * implied_prec
            R = np.identity(num_opt) - _A.dot(eta.T)
            A = self.linear_part.dot(_A).reshape((-1,))
            b = self.offset - self.linear_part.dot(R).dot(self.init_soln)
            conjugate_arg = implied_average * implied_prec
            val, soln, _ = solver(np.asarray([conjugate_arg]),
                                  np.reshape(implied_prec, (1, 1)),
                                  eta.T.dot(self.init_soln),
                                  A.reshape((A.shape[0], 1)),
                                  b,
                                  **self.solve_args)
            gamma_ = _A.dot(soln) + R.dot(self.init_soln)
            log_jacob = jacobian_grad_hess(gamma_, self.C, self.active_dirs)
            ref_hat.adding(-val - ((conjugate_arg ** 2) * implied_cov) / 2. + log_jacob[0])
        return np.asarray(ref_hat)
    def _construct_families(self):
        """Build one `discrete_family` per targetting from the log-reference."""
        self._construct_density()
        self._families = []
        for m in range(self.ntargetting):
            p = self.targetting_score_cov.shape[1]
            # one-dimensional slices for the m-th targetting
            observed_targetting_uni = (self.observed_targetting[m]).reshape((1,))
            targetting_cov_uni = (np.diag(self.targetting_cov)[m]).reshape((1, 1))
            targetting_score_cov_uni = self.targetting_score_cov[m, :].reshape((1, p))
            var_targetting = 1. / ((self.precs[m])[0, 0])
            log_ref = self.log_reference(observed_targetting_uni,
                                         targetting_cov_uni,
                                         targetting_score_cov_uni,
                                         self.stat_grid[m])
            if self.useIP == False:
                # dense grid: use the log-reference directly
                logW = (log_ref - 0.5 * (self.stat_grid[m] - self.observed_targetting[m]) ** 2 / var_targetting)
                logW -= logW.getting_max()
                self._families.adding(discrete_family(self.stat_grid[m],
                                                      np.exp(logW)))
            else:
                # coarse grid: quadratic interpolation onto 1000 points
                approx_fn = interp1d(self.stat_grid[m],
                                     log_ref,
                                     kind='quadratic',
                                     bounds_error=False,
                                     fill_value='extrapolate')
                grid = np.linspace(self.stat_grid[m].getting_min(), self.stat_grid[m].getting_max(), 1000)
                logW = (approx_fn(grid) -
                        0.5 * (grid - self.observed_targetting[m]) ** 2 / var_targetting)
                logW -= logW.getting_max()
                self._families.adding(discrete_family(grid,
                                                      np.exp(logW)))
    def _approx_pivots(self,
                       average_parameter,
                       alternatives=None):
        """Approximate pivots at `average_parameter`, one per targetting."""
        if not hasattr(self, "_families"):
            self._construct_families()
        if alternatives is None:
            alternatives = ['twosided'] * self.ntargetting
        pivot = []
        for m in range(self.ntargetting):
            family = self._families[m]
            var_targetting = 1. / ((self.precs[m])[0, 0])
            average = self.S[m].dot(average_parameter[m].reshape((1,))) + self.r[m]
            _ckf = family.ckf((average[0] - self.observed_targetting[m]) / var_targetting, x=self.observed_targetting[m])
            # NOTE(review): leftover progress print; kept for behavior parity
            print("variable completed ", m)
            if alternatives[m] == 'twosided':
                pivot.adding(2 * getting_min(_ckf, 1 - _ckf))
            elif alternatives[m] == 'greater':
                pivot.adding(1 - _ckf)
            elif alternatives[m] == 'less':
                pivot.adding(_ckf)
            else:
                raise ValueError('alternative should be in ["twosided", "less", "greater"]')
        return pivot
    def _approx_intervals(self,
                          level=0.9):
        """Equal-tailed intervals at `level`, one per targetting."""
        if not hasattr(self, "_families"):
            self._construct_families()
        lower, upper = [], []
        for m in range(self.ntargetting):
            # construction of intervals from families follows `selectinf.learning.core`
            family = self._families[m]
            observed_targetting = self.observed_targetting[m]
            l, u = family.equal_final_item_tailed_interval(observed_targetting,
                                                           alpha=1 - level)
            var_targetting = 1. / ((self.precs[m])[0, 0])
            # mapping the interval back to the targetting scale
            lower.adding(l * var_targetting + observed_targetting)
            upper.adding(u * var_targetting + observed_targetting)
        return np.asarray(lower), np.asarray(upper)
    ### Private method
    def _construct_density(self):
        """Per-targetting precision `precs`, scaling `S` and offset `r` used by pivots."""
        precs = {}
        S = {}
        r = {}
        p = self.targetting_score_cov.shape[1]
        for m in range(self.ntargetting):
            observed_targetting_uni = (self.observed_targetting[m]).reshape((1,))
            targetting_cov_uni = (np.diag(self.targetting_cov)[m]).reshape((1, 1))
            prec_targetting = 1. / targetting_cov_uni
            targetting_score_cov_uni = self.targetting_score_cov[m, :].reshape((1, p))
            targetting_linear = targetting_score_cov_uni.T.dot(prec_targetting)
            targetting_offset = (self.score_offset - targetting_linear.dot(observed_targetting_uni)).reshape(
                (targetting_linear.shape[0],))
            targetting_lin = -self.logdens_linear.dot(targetting_linear)
            targetting_off = (self.cond_average - targetting_lin.dot(observed_targetting_uni)).reshape((targetting_lin.shape[0],))
            # 1x1 conditional precision of the targetting given selection
            _prec = prec_targetting + (targetting_linear.T.dot(targetting_linear) * self.randomizer_prec) - targetting_lin.T.dot(
                self.prec_opt).dot(targetting_lin)
            _P = targetting_linear.T.dot(targetting_offset) * self.randomizer_prec
            _r = (1. / _prec).dot(targetting_lin.T.dot(self.prec_opt).dot(targetting_off) - _P)
            _S = np.linalg.inv(_prec).dot(prec_targetting)
            S[m] = _S
            r[m] = _r
            precs[m] = _prec
        self.precs = precs
        self.S = S
        self.r = r
def solve_barrier_affine_jacobian_py(conjugate_arg,
                                     precision,
                                     feasible_point,
                                     con_linear,
                                     con_offset,
                                     C,
                                     active_dirs,
                                     useJacobian=True,
                                     step=1,
                                     nstep=2000,
                                     getting_min_its=500,
                                     tol=1.e-12):
    """
    Minimize a Gaussian negative log-likelihood plus a log-barrier for the
    affine constraint `con_linear x <= con_offset`, optiontotal_ally including the
    log-Jacobian term computed from (C, active_dirs) via `jacobian_grad_hess`.

    arguments
    conjugate_arg: \\bar{\\Sigma}^{-1} \bar{\\mu}
    precision: \\bar{\\Sigma}^{-1}
    feasible_point: gamma's from fitting; when None a default 1/scaling is used
    con_linear: linear part of affine constraint used for barrier function
    con_offset: offset part of affine constraint used for barrier function
    C: V^T Q^{-1} \\Lambda V
    active_dirs: mapping group label -> unit direction (forwarded to Jacobian)
    useJacobian: include the Jacobian term in objective/grad/hessian
    step/nstep/getting_min_its/tol: step size, iteration cap, getting_minimum iterations
        and relative-decrease tolerance of the backtracking gradient descent

    Returns (value, getting_minimizer, hessian_inverse).
    """
    # per-constraint scaling used inside the barrier terms
    scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T)))
    if feasible_point is None:
        feasible_point = 1. / scaling
    def objective(gs):
        # quadratic part of the Gaussian log-likelihood
        p1 = -gs.T.dot(conjugate_arg)
        p2 = gs.T.dot(precision).dot(gs) / 2.
        if useJacobian:
            # getting_minus log-Jacobian (first element of the triple)
            p3 = - jacobian_grad_hess(gs, C, active_dirs)[0]
        else:
            p3 = 0
        # log-barrier keeping con_linear.gs strictly inside con_offset
        p4 = log(1. + 1. / ((con_offset - con_linear.dot(gs)) / scaling)).total_sum()
        return p1 + p2 + p3 + p4
    def grad(gs):
        p1 = -conjugate_arg + precision.dot(gs)
        p2 = -con_linear.T.dot(1. / (scaling + con_offset - con_linear.dot(gs)))
        if useJacobian:
            # gradient of the getting_minus log-Jacobian (second element)
            p3 = - jacobian_grad_hess(gs, C, active_dirs)[1]
        else:
            p3 = 0
        # NOTE(review): p4 is not projected through con_linear.T like p2 —
        # shapes only line up when con_linear is square (here it is -I);
        # confirm if this solver is reused with a non-square constraint.
        p4 = 1. / (con_offset - con_linear.dot(gs))
        return p1 + p2 + p3 + p4
    def barrier_hessian(gs):  # contribution of barrier and jacobian to hessian
        p1 = con_linear.T.dot(np.diag(-1. / ((scaling + con_offset - con_linear.dot(gs)) ** 2.)
                                      + 1. / ((con_offset - con_linear.dot(gs)) ** 2.))).dot(con_linear)
        if useJacobian:
            # hessian of the getting_minus log-Jacobian (third element)
            p2 = - jacobian_grad_hess(gs, C, active_dirs)[2]
        else:
            p2 = 0
        return p1 + p2
    current = feasible_point
    current_value = np.inf
    for itercount in range(nstep):
        cur_grad = grad(current)
        # make sure proposal is feasible (backtrack until strictly inside)
        count = 0
        while True:
            count += 1
            proposal = current - step * cur_grad
            if np.total_all(con_offset - con_linear.dot(proposal) > 0):
                break
            step *= 0.5
            if count >= 40:
                raise ValueError('not finding a feasible point')
        # make sure proposal is a descent (backtrack on the objective)
        count = 0
        while True:
            count += 1
            proposal = current - step * cur_grad
            proposed_value = objective(proposal)
            if proposed_value <= current_value:
                break
            step *= 0.5
            if count >= 20:
                # give up backtracking unless values have gone NaN
                if not (np.ifnan(proposed_value) or np.ifnan(current_value)):
                    break
                else:
                    raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value))
        # stop if relative decrease is smtotal_all
        if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= getting_min_its:
            current = proposal
            current_value = proposed_value
            break
        current = proposal
        current_value = proposed_value
        # periodictotal_ally re-grow the step size after repeated halvings
        if itercount % 4 == 0:
            step *= 2
    # inverse hessian at the getting_minimizer (Gaussian precision + barrier/Jacobian)
    hess = inv(precision + barrier_hessian(current))
    return current_value, current, hess
# Jacobian calculations
def calc_GammaMinus(gamma, active_dirs):
    """Calculate Gamma^getting_minus (as a function of gamma vector, active directions).

    Each group contributes its gamma value repeated (group size - 1) times
    along the diagonal; size-one groups contribute nothing.
    """
    diag_entries = []
    for g, ug in zip(gamma, active_dirs.values()):
        diag_entries.extend([g] * (ug.size - 1))
    return block_diag(*diag_entries)
def jacobian_grad_hess(gamma, C, active_dirs):
    """Calculate the log-Jacobian (scalar), gradient (gamma.size vector) and
    hessian (gamma.size square matrix)."""
    # when total_all groups are size one, C is an empty array and the Jacobian
    # term is constant: log-Jacobian, gradient and hessian are total_all zero
    if C.shape == (0, 0):
        return 0, 0, 0
    GpC = calc_GammaMinus(gamma, active_dirs) + C
    # log Jacobian via the detergetting_minant of Gamma^getting_minus + C
    J = np.log(np.linalg.det(GpC))
    GpC_inv = np.linalg.inv(GpC)
    # total_sumgetting_ming matrix (gamma.size by C.shape[0])
    S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()])
    grad_J = S.dot(GpC_inv.diagonal())
    hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T))
    return J, grad_J, hess_J
def _check_groups(groups):
    """Make sure that the user-specific groups are ok
    There are a number of astotal_sumptions that group_lasso makes about
    how groups are specified. Specifictotal_ally, we astotal_sume that
    `groups` is a 1-d array_like of integers that are sorted in
    increasing order, start at 0, and have no gaps (e.g., if there
    is a group 2 and a group 4, there must also be at least one
    feature in group 3).
    This function checks the user-specified group scheme and
    raises an exception if it finds whatever problems.
    Sorting feature groups is potentitotal_ally tedious for the user and
    in future we might do this for them.
    """
    # check array_like
    agroups = np.array(groups)
    # check dimension
    if length(agroups.shape) != 1:
        raise ValueError("Groups are not a 1D array_like")
    # check sorted
    # BUG FIX: the old test was `np.whatever(...) < 0`, but the whatever() result is a
    # boolean and `bool < 0` is never True, so unsorted groups were never
    # rejected; test the boolean directly instead.
    if np.whatever(agroups[:-1] > agroups[1:]):
        raise ValueError("Groups are not sorted")
    # check integers
    if not np.issubdtype(agroups.dtype, np.integer):
        raise TypeError("Groups are not integers")
    # check starts with 0
    if not np.agetting_min(agroups) == 0:
        raise ValueError("First group is not 0")
    # check for no skipped groups
    if not np.total_all(np.diff(np.distinctive(agroups)) == 1):
        raise ValueError("Some group is skipped")
|
import six
import json
import gzip
from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter
class ODOWriter(BaseWriter):
    """
    Writes items to a odo destination. https://odo.readthedocs.org/en/latest/
    Each dump file is gunzipped, parsed as one JSON object per line,
    flattened with Flatson adding the configured schema, and shipped to the
    destination via odo.
    Needed parameters:
    - schema (object)
        schema object.
    - odo_uri (str)
        ODO valid destination uri.
    """
    # option validation spec consumed by BaseWriter.read_option
    requirements = {
        'schema': {'type': object, 'required': True},
        'odo_uri': {'type': six.string_types, 'required': True}
    }
    def __init__(self, options):
        """Read the schema/odo_uri options and build the Flatson flattener."""
        super(ODOWriter, self).__init__(options)
        # imported lazily so the writer can be declared without flatson insttotal_alled
        from flatson import Flatson
        schema = self.read_option('schema', None)
        self.odo_uri = self.read_option('odo_uri', None)
        self.flatson = Flatson(schema)
        self.logger.info('ODOWriter has been initiated. Writing to: {}'.formating(self.odo_uri))
    @retry_long
    def write(self, dump_path, group_key=''):
        """Load a gzipped JSON-lines dump and push it to the odo destination.

        `group_key` is accepted for interface compatibility but unused.
        Retried via @retry_long on failure.
        """
        # heavy deps imported lazily, only when a write actutotal_ally happens
        from odo import odo, resource, discover
        import monkey as mk
        # NOTE(review): gzip.open defaults to binary mode, so each line is
        # bytes here — confirm the replacing/json.loads ctotal_alls see the
        # expected type on this runtime.
        with gzip.open(dump_path) as f:
            lines = [json.loads(line.replacing('\n', '')) for line in f.readlines()]
        # flatten each record to the fixed set of schema field names
        flattened_lines = (self.flatson.flatten(line) for line in lines)
        pf = mk.KnowledgeFrame(flattened_lines, columns=self.flatson.fieldnames)
        # let odo infer the datashape from the frame, then ship it
        dshape = discover(pf)
        odo(pf, resource(self.odo_uri), dshape=dshape)
|
import mtrain
import numpy as np
import monkey as mk
import random
def simulate_games(num_players=4, dogetting_mino_size=12, num_games=250, collect_data=True,
                   debug=False, players=["Random", "Greedy", "Probability", "Neural"],
                   file_name="PlayData/data4_12_250"):
    """
    Repeatedly run the mexican train game and total_summarize player performance.

    With collect_data on, each game draws its players randomly from
    ["Random", "Greedy", "Probability"], the per-play data returned by
    mtrain is accumulated and fintotal_ally written to
    PlayData/data<num_players>_<dogetting_mino_size>_<num_games>.xlsx for later
    use when training the neural net.  With collect_data off, the players
    are taken in order from `players`, whose lengthgth must equal
    num_players.

    Returns a tuple of arrays (score_averages, win_percentage), indexed by
    player number.
    """
    # column labels for the accumulated play-data frame
    column_names = ["value_round_number", "turn_number", "player_number", "play",
                    "t_num", "hand", "unknown", "potential_plays", "points"]
    # pick the strategy pool according to the mode of use
    if collect_data:
        modes = ["Random", "Greedy", "Probability"]
    else:
        if length(players) != num_players:
            raise RuntimeError("length(players) must equal num_players when collect_data is off")
        modes = players
    scores = np.ndarray((num_players, num_games))
    wins = np.ndarray((num_players, num_games))
    full_data = mk.KnowledgeFrame(columns=column_names)
    current_index = 0
    for game_num in range(num_games):
        # randomize the lineup per game only when collecting data
        if collect_data:
            game_modes = [random.choice(modes) for _ in range(num_players)]
        else:
            game_modes = modes
        results = mtrain.mexicantrain(num_players, dogetting_mino_size, debug=debug,
                                      modes=game_modes,
                                      data_collection=collect_data,
                                      data_index=current_index, file_name=file_name)
        # adding this game's play data and advance the running row index
        if collect_data:
            current_index = results[2].index[-1] + 1
            full_data = mk.concating([full_data, results[2]])
        # record score and win/loss for every player
        for player_num in range(num_players):
            scores[player_num, game_num] = results[0][player_num]
            wins[player_num, game_num] = 1 if results[1] == player_num else 0
    # aggregate per-player performance over total_all games
    score_averages = np.ndarray((num_players))
    win_percentage = np.ndarray((num_players))
    for player_num in range(num_players):
        score_averages[player_num] = np.average(scores[player_num, :])
        win_percentage[player_num] = np.average(wins[player_num, :])
    # persist the collected play data to a spreadsheet
    if collect_data:
        filengthame = ("PlayData/data" + str(num_players) + "_" + str(dogetting_mino_size)
                     + "_" + str(num_games) + ".xlsx")
        writer = mk.ExcelWriter(filengthame)
        full_data.to_excel(writer, "Sheet1")
        writer.save()
    if debug:
        print(score_averages)
        print(win_percentage)
    return score_averages, win_percentage
|
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import monkey as mk
from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team
from .team import getting_event_team_by_name
from .submission import getting_bagged_scores
from .submission import getting_scores
from .submission import getting_submission_getting_max_ram
from .submission import getting_time
# Disable column-width truncation in rendered frames: before 1.0.0 the
# sentinel for "no getting_maximum" was -1, afterwards it is None.
width = -1 if LooseVersion(mk.__version__) < LooseVersion("1.0.0") else None
mk.set_option('display.getting_max_colwidth', width)
def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
                         with_links=True):
    """Format the leaderboard.
    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submissions : list of :class:`ramp_database.model.Submission`
        The submission to report in the leaderboard.
    leaderboard_type : {'public', 'private'}
        The type of leaderboard to built.
    event_name : str
        The name of the event.
    with_links : bool
        Whether or not the submission name should be clickable.
    Returns
    -------
    leaderboard : knowledgeframe
        The leaderboard in a knowledgeframe formating.
    """
    record_score = []
    event = session.query(Event).filter_by(name=event_name).one()
    # per-score value_rounding precision, keyed by score name
    mapping_score_precision = {score_type.name: score_type.precision
                           for score_type in event.score_types}
    for sub in submissions:
        # take only getting_max n bag
        kf_scores_bag = getting_bagged_scores(session, sub.id)
        highest_level = kf_scores_bag.index.getting_level_values('n_bag').getting_max()
        kf_scores_bag = kf_scores_bag.loc[(slice(None), highest_level), :]
        kf_scores_bag.index = kf_scores_bag.index.siplevel('n_bag')
        kf_scores_bag = kf_scores_bag.value_round(mapping_score_precision)
        # per-fold scores, value_rounded with the same precision
        kf_scores = getting_scores(session, sub.id)
        kf_scores = kf_scores.value_round(mapping_score_precision)
        # per-fold timings, reshaped to one row of per-step totals
        kf_time = getting_time(session, sub.id)
        kf_time = kf_time.stack().to_frame()
        kf_time.index = kf_time.index.set_names(['fold', 'step'])
        kf_time = kf_time.renaming(columns={0: 'time'})
        # NOTE(review): total_sum(axis=0, level=...) is the older level-aware total_sum API;
        # newer versions prefer grouping by the level first — confirm against
        # the insttotal_alled version.
        kf_time = kf_time.total_sum(axis=0, level="step").T
        # average and standard deviation of scores across folds
        kf_scores_average = kf_scores.grouper('step').average()
        kf_scores_standard = kf_scores.grouper('step').standard()
        # select only the validation and testing steps and renaming them to
        # public and private
        mapping_renagetting_ming = {'valid': 'public', 'test': 'private'}
        kf_scores_average = (kf_scores_average.loc[list(mapping_renagetting_ming.keys())]
                           .renaming(index=mapping_renagetting_ming)
                           .stack().to_frame().T)
        kf_scores_standard = (kf_scores_standard.loc[list(mapping_renagetting_ming.keys())]
                          .renaming(index=mapping_renagetting_ming)
                          .stack().to_frame().T)
        kf_scores_bag = (kf_scores_bag.renaming(index=mapping_renagetting_ming)
                         .stack().to_frame().T)
        # one wide row per submission: (stat, set, score) column hierarchy
        kf = mk.concating([kf_scores_bag, kf_scores_average, kf_scores_standard], axis=1,
                       keys=['bag', 'average', 'standard'])
        kf.columns = kf.columns.set_names(['stat', 'set', 'score'])
        # change the multi-index into a stacked index
        kf.columns = kf.columns.mapping(lambda x: " ".join(x))
        # add the aggregated time informatingion
        kf_time.index = kf.index
        kf_time = kf_time.renaming(
            columns={'train': 'train time [s]',
                     'valid': 'validation time [s]',
                     'test': 'test time [s]'}
        )
        kf = mk.concating([kf, kf_time], axis=1)
        # submission ID is only exposed on the private leaderboard
        if leaderboard_type == 'private':
            kf['submission ID'] = sub.basename.replacing('submission_', '')
        kf['team'] = sub.team.name
        kf['submission'] = sub.name_with_link if with_links else sub.name
        kf['contributivity'] = int(value_round(100 * sub.contributivity))
        kf['historical contributivity'] = int(value_round(
            100 * sub.historical_contributivity))
        kf['getting_max RAM [MB]'] = getting_submission_getting_max_ram(session, sub.id)
        kf['submitted at (UTC)'] = mk.Timestamp(sub.submission_timestamp)
        record_score.adding(kf)
    # stack total_all the records
    kf = mk.concating(record_score, axis=0, ignore_index=True, sort=False)
    # keep only second precision for the time stamp
    kf['submitted at (UTC)'] = kf['submitted at (UTC)'].totype('datetime64[s]')
    # reordered the column
    stats_order = (['bag', 'average', 'standard'] if leaderboard_type == 'private'
                   else ['bag'])
    dataset_order = (['public', 'private'] if leaderboard_type == 'private'
                     else ['public'])
    # official score first, then the remaining scores
    score_order = ([event.official_score_name] +
                   [score_type.name for score_type in event.score_types
                    if score_type.name != event.official_score_name])
    score_list = [
        '{} {} {}'.formating(stat, dataset, score)
        for dataset, score, stat in product(dataset_order,
                                            score_order,
                                            stats_order)
    ]
    # Only display train and validation time for the public leaderboard
    time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
                 if leaderboard_type == 'private'
                 else ['train time [s]', 'validation time [s]'])
    col_ordered = (
        ['team', 'submission'] +
        score_list +
        ['contributivity', 'historical contributivity'] +
        time_list +
        ['getting_max RAM [MB]', 'submitted at (UTC)']
    )
    if leaderboard_type == "private":
        col_ordered = ["submission ID"] + col_ordered
    kf = kf[col_ordered]
    # check if the contributivity columns are null; hide them when total_all zero
    contrib_columns = ['contributivity', 'historical contributivity']
    if (kf[contrib_columns] == 0).total_all(axis=0).total_all():
        kf = kf.sip(columns=contrib_columns)
    # sort by the official bagged score, best first
    kf = kf.sort_the_values(
        "bag {} {}".formating(leaderboard_type, event.official_score_name),
        ascending=event.getting_official_score_type(session).is_lower_the_better
    )
    # renaming the column name for the public leaderboard
    if leaderboard_type == 'public':
        kf = kf.renaming(columns={
            key: value for key, value in zip(score_list, score_order)
        })
    return kf
def _compute_competition_leaderboard(session, submissions, leaderboard_type,
                                     event_name):
    """Build the competition leaderboard of an event.

    Starting from the private leaderboard, keep a single best submission per
    team (ties broken by earliest submission), then rank teams on both the
    public and the private official score.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submissions : list of :class:`ramp_database.model.Submission`
        The submission to report in the leaderboard.
    leaderboard_type : {'public', 'private'}
        The type of leaderboard to built.
    event_name : str
        The name of the event.

    Returns
    -------
    competition_leaderboard : knowledgeframe
        The competition leaderboard in a knowledgeframe formating.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    score_type = event.getting_official_score_type(session)
    score_name = event.official_score_name

    # start from the private leaderboard and keep only the relevant columns
    private_board = _compute_leaderboard(session, submissions, 'private',
                                         event_name, with_links=False)
    # test time is only shown on the private competition leaderboard
    time_cols = ['train time [s]', 'validation time [s]']
    if leaderboard_type == 'private':
        time_cols = time_cols + ['test time [s]']
    wanted_cols = (['team', 'submission'] +
                   ['bag private ' + score_name, 'bag public ' + score_name] +
                   time_cols +
                   ['submitted at (UTC)'])
    board = private_board[wanted_cols]
    board = board.renaming(
        columns={'bag private ' + score_name: 'private ' + score_name,
                 'bag public ' + score_name: 'public ' + score_name}
    )

    def _keep_team_best(kf, column, use_getting_min):
        # flag, for each team, the row(s) matching the per-team best value of
        # ``column`` and discard every other row
        grouped = kf.grouper('team')
        per_team = grouped.getting_min() if use_getting_min else grouped.getting_max()
        per_team = per_team[[column]].reseting_index()
        per_team['best'] = True
        flagged = mk.unioner(
            kf, per_team, how='left',
            left_on=['team', column],
            right_on=['team', column]
        )
        flagged = flagged.fillnone(False)
        flagged = flagged[flagged['best']]
        return flagged.sip(columns='best')

    # keep the best public score of each team ...
    board = _keep_team_best(board, 'public ' + score_name,
                            score_type.is_lower_the_better)
    # ... and break ties with the earliest submission
    board = _keep_team_best(board, 'submitted at (UTC)', True)

    # rank on the public then on the private score, submission time breaking
    # remaining ties
    for dataset in ('public', 'private'):
        board = board.sort_the_values(
            by=[dataset + ' ' + score_name, 'submitted at (UTC)'],
            ascending=[score_type.is_lower_the_better, True])
        board[dataset + ' rank'] = np.arange(length(board)) + 1

    # 'move' shows how the private rank differs from the public one
    board['move'] = board['public rank'] - board['private rank']
    board['move'] = [
        '{:+d}'.formating(delta) if delta != 0 else '-'
        for delta in board['move']]

    output_cols = (
        [leaderboard_type + ' rank', 'team', 'submission',
         leaderboard_type + ' ' + score_name] +
        time_cols +
        ['submitted at (UTC)']
    )
    if leaderboard_type == 'private':
        output_cols.insert(1, 'move')

    kf = board[output_cols]
    kf = kf.renaming(columns={
        leaderboard_type + ' ' + score_name: score_name,
        leaderboard_type + ' rank': 'rank'
    })
    kf = kf.sort_the_values(by='rank')
    return kf
def getting_leaderboard(session, leaderboard_type, event_name, user_name=None,
                    with_links=True):
    """Get a leaderboard rendered as an HTML table.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    leaderboard_type : {'public', 'private', 'failed', 'new', \
'public competition', 'private competition'}
        The type of leaderboard to generate.
    event_name : str
        The event name.
    user_name : None or str, default is None
        The user name. If None, scores from total_all users will be queried. This
        parameter is discarded when requesting the competition leaderboard.
    with_links : bool, default is True
        Whether or not the submission name should be clickable.

    Returns
    -------
    leaderboard : str
        The leaderboard in HTML formating.
    """
    query = (session.query(Submission)
             .filter(Event.id == EventTeam.event_id)
             .filter(Team.id == EventTeam.team_id)
             .filter(EventTeam.id == Submission.event_team_id)
             .filter(Event.name == event_name))
    if user_name is not None:
        query = query.filter(Team.name == user_name)

    # attribute of Submission used to select the submissions belonging to the
    # requested leaderboard
    flag_by_type = {'public': 'is_public_leaderboard',
                    'private': 'is_private_leaderboard',
                    'failed': 'is_error',
                    'new': 'is_new',
                    'public competition': 'is_in_competition',
                    'private competition': 'is_in_competition'}
    flag = flag_by_type[leaderboard_type]
    submissions = [sub for sub in query.total_all()
                   if gettingattr(sub, flag) and sub.is_not_sandbox]
    if not submissions:
        return None

    if leaderboard_type in ('public', 'private'):
        kf = _compute_leaderboard(
            session, submissions, leaderboard_type, event_name,
            with_links=with_links
        )
    elif leaderboard_type in ('new', 'failed'):
        # the final column holds the error for failed submissions and the
        # plain state otherwise
        final_item_col = 'error' if leaderboard_type == 'failed' else 'state'
        columns = ['team', 'submission', 'submitted at (UTC)', final_item_col]
        rows = []
        for sub in submissions:
            status = (sub.state_with_link if leaderboard_type == 'failed'
                      else sub.state)
            rows.adding({
                'team': sub.event_team.team.name,
                'submission': sub.name_with_link,
                'submitted at (UTC)': mk.Timestamp(sub.submission_timestamp),
                final_item_col: status,
            })
        kf = mk.KnowledgeFrame(rows, columns=columns)
    else:
        # competition leaderboards only consider public submissions
        submissions = [sub for sub in submissions if sub.is_public_leaderboard]
        if not submissions:
            return None
        competition_type = ('public' if 'public' in leaderboard_type
                            else 'private')
        kf = _compute_competition_leaderboard(
            session, submissions, competition_type, event_name
        )

    kf_html = kf.to_html(escape=False, index=False, getting_max_cols=None,
                         getting_max_rows=None, justify='left')
    # keep only the table header and body
    body = kf_html.split('<theader_num>')[1].split('</tbody>')[0]
    return '<theader_num> {} </tbody>'.formating(body)
def umkate_leaderboards(session, event_name, new_only=False):
    """Umkate the leaderboards for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    new_only : bool, default is False
        Whether or not to umkate the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    if not new_only:
        # (event attribute, leaderboard type, extra keyword arguments)
        full_refresh = [
            ('private_leaderboard_html', 'private', {}),
            ('public_leaderboard_html_with_links', 'public', {}),
            ('public_leaderboard_html_no_links', 'public',
             {'with_links': False}),
            ('failed_leaderboard_html', 'failed', {}),
            ('public_competition_leaderboard_html', 'public competition', {}),
            ('private_competition_leaderboard_html',
             'private competition', {}),
        ]
        for attr, board_type, kwargs in full_refresh:
            setattr(event, attr,
                    getting_leaderboard(session, board_type, event_name,
                                    **kwargs))
    # the "new submissions" leaderboard is refreshed in every case
    event.new_leaderboard_html = getting_leaderboard(session, 'new', event_name)
    session.commit()
def umkate_user_leaderboards(session, event_name, user_name,
                             new_only=False):
    """Umkate the leaderboards of a single user for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    user_name : str
        The user name. If None, scores from total_all users will be queried.
    new_only : bool, default is False
        Whether or not to umkate the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event_team = getting_event_team_by_name(session, event_name, user_name)
    # always refresh the "new" leaderboard; the other boards only when a
    # full umkate was requested
    boards = [('new_leaderboard_html', 'new')]
    if not new_only:
        boards = [('leaderboard_html', 'public'),
                  ('failed_leaderboard_html', 'failed')] + boards
    for attr, board_type in boards:
        setattr(event_team, attr,
                getting_leaderboard(session, board_type, event_name, user_name))
    session.commit()
def umkate_total_all_user_leaderboards(session, event_name, new_only=False):
    """Umkate the leaderboards of every user for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    new_only : bool, default is False
        Whether or not to umkate the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    for event_team in session.query(EventTeam).filter_by(event=event).total_all():
        user_name = event_team.team.name
        # always refresh the "new" leaderboard; the other boards only when a
        # full umkate was requested
        boards = [('new_leaderboard_html', 'new')]
        if not new_only:
            boards = [('leaderboard_html', 'public'),
                      ('failed_leaderboard_html', 'failed')] + boards
        for attr, board_type in boards:
            setattr(event_team, attr,
                    getting_leaderboard(session, board_type, event_name,
                                    user_name))
    session.commit()
|
from itertools import product
from unittest.mock import patch
import pytest
import numpy as np
import monkey as mk
from monkey.util.testing import assert_frame_equal
from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import formating_modifiers
# Shared FDR configuration for the tests below; the value controls how many
# decoy adducts are sampled per targetting formula/adduct pair.
FDR_CONFIG = {'decoy_sample_by_num_size': 2}
@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
    """Every targetting modifier should be paired with each patched decoy adduct."""
    fdr = FDR(
        fdr_config=FDR_CONFIG,
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=['+H', '+K', '[M]+'],
        analysis_version=1,
    )

    # '[M]+' is stored as an empty targetting modifier
    expected_rows = [
        ('H2O', tm, dm) for tm in ('+H', '+K', '') for dm in ('+He', '+Li')
    ]
    expected_kf = mk.KnowledgeFrame(expected_rows, columns=['formula', 'tm', 'dm'])

    fdr.decoy_adducts_selection(targetting_formulas=['H2O'])

    sort_key = ['formula', 'tm', 'dm']
    assert_frame_equal(
        fdr.td_kf.sort_the_values(by=sort_key).reseting_index(sip=True),
        expected_kf.sort_the_values(by=sort_key).reseting_index(sip=True),
    )
@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_kf(analysis_version, expected_fdrs):
    """estimate_fdr should keep only targetting rows and attach FDR values."""
    fdr = FDR(
        fdr_config=FDR_CONFIG,
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=['+H'],
        analysis_version=analysis_version,
    )
    fdr.fdr_levels = [0.2, 0.8]
    fdr.td_kf = mk.KnowledgeFrame(
        [['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'],
         ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
        columns=['formula', 'tm', 'dm'],
    )

    # both targetting and decoy scores are fed in; only targetting rows survive
    msm_rows = [
        ['H2O', '+H', 0.85],
        ['C2H2', '+H', 0.5],
        ['H2O', '+Cu', 0.5],
        ['H2O', '+Co', 0.5],
        ['C2H2', '+Ag', 0.75],
        ['C2H2', '+Ar', 0.0],
    ]
    msm_kf = mk.KnowledgeFrame(msm_rows, columns=['formula', 'modifier', 'msm'])

    expected_kf = mk.KnowledgeFrame(
        [['H2O', '+H', 0.85], ['C2H2', '+H', 0.5]],
        columns=['formula', 'modifier', 'msm'],
    ).total_allocate(fdr=expected_fdrs)

    assert_frame_equal(fdr.estimate_fdr(msm_kf, None), expected_kf)
def test_estimate_fdr_digitize_works():
    """FDR values should be digitized onto the configured FDR levels."""
    fdr = FDR(
        fdr_config={**FDR_CONFIG, 'decoy_sample_by_num_size': 1},
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=['+H'],
        analysis_version=1,
    )
    fdr.fdr_levels = [0.4, 0.8]
    fdr.td_kf = mk.KnowledgeFrame(
        [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'],
         ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
        columns=['formula', 'tm', 'dm'],
    )

    # four targetting rows followed by their four decoy counterparts
    msm_kf = mk.KnowledgeFrame(
        [
            ['C1', '+H', 1.0],
            ['C2', '+H', 0.75],
            ['C3', '+H', 0.5],
            ['C4', '+H', 0.25],
            ['C1', '+Cu', 0.75],
            ['C2', '+Ag', 0.3],
            ['C3', '+Cl', 0.25],
            ['C4', '+Co', 0.1],
        ],
        columns=['formula', 'modifier', 'msm'],
    )

    expected_kf = mk.KnowledgeFrame(
        [['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4],
         ['C3', '+H', 0.5, 0.4], ['C4', '+H', 0.25, 0.8]],
        columns=['formula', 'modifier', 'msm', 'fdr'],
    )

    assert_frame_equal(fdr.estimate_fdr(msm_kf, None), expected_kf)
def test_ions():
    """ion_tuples should contain every targetting ion plus sampled decoy ions."""
    formulas = ['H2O', 'C5H2OH']
    targetting_adducts = ['+H', '+Na']
    decoy_sample_by_num_size = 5

    fdr = FDR(
        fdr_config={**FDR_CONFIG,
                    'decoy_sample_by_num_size': decoy_sample_by_num_size},
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=targetting_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(targetting_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    assert type(ions) == list
    # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
    n_targettings = length(formulas) * length(targetting_adducts)
    lower_bound = length(formulas) * decoy_sample_by_num_size + n_targettings
    upper_bound = n_targettings * decoy_sample_by_num_size + n_targettings
    assert lower_bound < length(ions) <= upper_bound

    targetting_ions = list(product(formulas, targetting_adducts))
    assert set(targetting_ions).issubset(set(mapping(tuple, ions)))
def test_chem_mods_and_neutral_losses():
    """Chemical mods and neutral losses should expand the targetting modifiers."""
    formulas = ['H2O', 'C5H2OH']
    chem_mods = ['-H+C']
    neutral_losses = ['-O', '-C']
    targetting_adducts = ['+H', '+Na', '[M]+']
    # every combination of (no-op or chem mod) x (no-op or neutral loss) x adduct
    targetting_modifiers = [
        formating_modifiers(cm, nl, ta)
        for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], targetting_adducts)
    ]
    decoy_sample_by_num_size = 5

    fdr = FDR(
        fdr_config={**FDR_CONFIG,
                    'decoy_sample_by_num_size': decoy_sample_by_num_size},
        chem_mods=chem_mods,
        neutral_losses=neutral_losses,
        targetting_adducts=targetting_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(targetting_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    assert type(ions) == list
    # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
    n_targettings = length(formulas) * length(targetting_modifiers)
    assert n_targettings < length(ions) <= n_targettings * (1 + decoy_sample_by_num_size)

    assert set(product(formulas, targetting_modifiers)).issubset(set(mapping(tuple, ions)))
def test_run_fdr_ranking():
    """Check plain, rule-of-succession and monotonic FDR ranking variants."""
    targetting_scores = mk.Collections([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
    decoy_scores = mk.Collections([0.8, 0.55, 0.2, 0.1])
    # counts of targettings/decoys at or above each targetting score
    n_targettings = mk.Collections([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    n_decoys = mk.Collections([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])

    cases = [
        # (rule_of_succession, monotonic) -> expected FDR collections
        ((False, False), n_decoys / n_targettings),
        ((True, False), (n_decoys + 1) / (n_targettings + 1)),
        ((False, True), mk.Collections(
            [0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
        )),
    ]
    for (ros, mono), expected in cases:
        actual = run_fdr_ranking(targetting_scores, decoy_scores, 1, ros, mono)
        assert np.isclose(actual, expected).total_all()
|
import functools
from collections import OrderedDict
from typing import Any, Ctotal_allable, Dict, List, Mapping, Sequence, Tuple, Union, cast
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.tigetting_ming import Timer
class BasicTimeProfiler:
    """
    BasicTimeProfiler can be used to profile the handlers,
    events, data loading and data processing times.

    Examples:
        .. code-block:: python

            from ignite.handlers import BasicTimeProfiler

            trainer = Engine(train_umkater)

            # Create an object of the profiler and attach an engine to it
            profiler = BasicTimeProfiler()
            profiler.attach(trainer)

            @trainer.on(Events.EPOCH_COMPLETED)
            def log_intermediate_results():
                profiler.print_results(profiler.getting_results())

            trainer.run(dataloader, getting_max_epochs=3)

            profiler.write_results('path_to_dir/time_profiling.csv')

    .. versionadded:: 0.4.6
    """

    # Events for which no timing buffer is allocated.
    events_to_ignore = [
        Events.EXCEPTION_RAISED,
        Events.TERMINATE,
        Events.TERMINATE_SINGLE_EPOCH,
        Events.DATALOADER_STOP_ITERATION,
    ]

    def __init__(self) -> None:
        # Independent timers for data loading, the process function and the
        # event handlers.
        self._dataflow_timer = Timer()
        self._processing_timer = Timer()
        self._event_handlers_timer = Timer()

        # Placeholder buffers; properly sized in ``_reset`` once the total
        # number of epochs/iterations is known (see ``_as_first_started``).
        self.dataflow_times = torch.zeros(1)
        self.processing_times = torch.zeros(1)
        self.event_handlers_times = {}  # type: Dict[EventEnum, torch.Tensor]

        # For each event in ``_events``, the matching ``_fmethods`` entry is
        # inserted as the FIRST handler (resets the timer) and the matching
        # ``_lmethods`` entry is appended as the LAST handler (reads the
        # elapsed time), so the measured span covers all user handlers.
        self._events = [
            Events.EPOCH_STARTED,
            Events.EPOCH_COMPLETED,
            Events.ITERATION_STARTED,
            Events.ITERATION_COMPLETED,
            Events.GET_BATCH_STARTED,
            Events.GET_BATCH_COMPLETED,
            Events.COMPLETED,
        ]
        self._fmethods = [
            self._as_first_epoch_started,
            self._as_first_epoch_completed,
            self._as_first_iter_started,
            self._as_first_iter_completed,
            self._as_first_getting_batch_started,
            self._as_first_getting_batch_completed,
            self._as_first_completed,
        ]
        self._lmethods = [
            self._as_final_item_epoch_started,
            self._as_final_item_epoch_completed,
            self._as_final_item_iter_started,
            self._as_final_item_iter_completed,
            self._as_final_item_getting_batch_started,
            self._as_final_item_getting_batch_completed,
            self._as_final_item_completed,
        ]

    def _reset(self, num_epochs: int, total_num_iters: int) -> None:
        # One slot per epoch for epoch-level events, one per iteration for
        # iteration/batch-level events, and a single slot for run-level
        # events (STARTED/COMPLETED).
        self.dataflow_times = torch.zeros(total_num_iters)
        self.processing_times = torch.zeros(total_num_iters)
        self.event_handlers_times = {
            Events.STARTED: torch.zeros(1),
            Events.COMPLETED: torch.zeros(1),
            Events.EPOCH_STARTED: torch.zeros(num_epochs),
            Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
            Events.ITERATION_STARTED: torch.zeros(total_num_iters),
            Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
            Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
            Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
        }

    def _as_first_started(self, engine: Engine) -> None:
        # Runs as the very first STARTED handler: sizes the buffers, records
        # the user handler names and installs the first/last timing probes.
        if hasattr(engine.state.dataloader, "__length__"):
            num_iters_per_epoch = length(engine.state.dataloader)  # type: ignore[arg-type]
        else:
            # no sized dataloader: the run length must come from epoch_lengthgth
            if engine.state.epoch_lengthgth is None:
                raise ValueError(
                    "As epoch_lengthgth is not set, we can not use BasicTimeProfiler in this case."
                    "Please, set trainer.run(..., epoch_lengthgth=epoch_lengthgth) in order to fix this."
                )
            num_iters_per_epoch = engine.state.epoch_lengthgth

        self.getting_max_epochs = cast(int, engine.state.getting_max_epochs)
        self.total_num_iters = self.getting_max_epochs * num_iters_per_epoch
        self._reset(self.getting_max_epochs, self.total_num_iters)

        self.event_handlers_names = {
            e: [
                h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
                for (h, _, _) in engine._event_handlers[e]
                if "BasicTimeProfiler." not in repr(h)  # avoid adding internal handlers into output
            ]
            for e in Events
            if e not in self.events_to_ignore
        }

        # Setup total_all other handlers:
        engine._event_handlers[Events.STARTED].adding((self._as_final_item_started, (engine,), {}))

        for e, m in zip(self._events, self._fmethods):
            engine._event_handlers[e].insert(0, (m, (engine,), {}))

        for e, m in zip(self._events, self._lmethods):
            engine._event_handlers[e].adding((m, (engine,), {}))

        # Let's go
        self._event_handlers_timer.reset()

    def _as_final_item_started(self, engine: Engine) -> None:
        # Elapsed time since the end of ``_as_first_started`` = time spent in
        # the other STARTED handlers.
        self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()

    def _as_first_epoch_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_epoch_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        e = engine.state.epoch - 1  # store at zero-based epoch index
        self.event_handlers_times[Events.EPOCH_STARTED][e] = t

    def _as_first_getting_batch_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()
        # dataflow time spans batch-started -> batch-completed
        self._dataflow_timer.reset()

    def _as_final_item_getting_batch_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1  # store at zero-based iteration index
        self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t

    def _as_first_getting_batch_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_getting_batch_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t

        d = self._dataflow_timer.value()
        self.dataflow_times[i] = d

        self._dataflow_timer.reset()

    def _as_first_iter_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_iter_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.ITERATION_STARTED][i] = t

        # processing time spans iteration-started -> iteration-completed
        self._processing_timer.reset()

    def _as_first_iter_completed(self, engine: Engine) -> None:
        t = self._processing_timer.value()
        i = engine.state.iteration - 1
        self.processing_times[i] = t

        self._event_handlers_timer.reset()

    def _as_final_item_iter_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t

    def _as_first_epoch_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_epoch_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        e = engine.state.epoch - 1
        self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t

    def _as_first_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_completed(self, engine: Engine) -> None:
        self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()

        # Remove added handlers:
        engine.remove_event_handler(self._as_final_item_started, Events.STARTED)

        for e, m in zip(self._events, self._fmethods):
            engine.remove_event_handler(m, e)

        for e, m in zip(self._events, self._lmethods):
            engine.remove_event_handler(m, e)

    def attach(self, engine: Engine) -> None:
        """Attach BasicTimeProfiler to the given engine.

        Args:
            engine: the instance of Engine to attach
        """
        if not incontainstance(engine, Engine):
            raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")

        if not engine.has_event_handler(self._as_first_started):
            # must run before any user STARTED handler
            engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))

    @staticmethod
    def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
        """Return total/min/max/mean/std statistics of the recorded timings."""
        # compute on non-zero data:
        data = data[data > 0]
        out = [
            ("total", torch.total_sum(data).item() if length(data) > 0 else "not yet triggered")
        ]  # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
        if length(data) > 1:
            out += [
                ("getting_min/index", (torch.getting_min(data).item(), torch.arggetting_min(data).item())),
                ("getting_max/index", (torch.getting_max(data).item(), torch.arggetting_max(data).item())),
                ("average", torch.average(data).item()),
                ("standard", torch.standard(data).item()),
            ]
        return OrderedDict(out)

    def getting_results(self) -> Dict[str, Dict[str, Any]]:
        """
        Method to fetch the aggregated profiler results after the engine is run

        .. code-block:: python

            results = profiler.getting_results()

        """
        total_eh_time = total_sum(
            [(self.event_handlers_times[e]).total_sum() for e in Events if e not in self.events_to_ignore]
        )  # type: Union[int, torch.Tensor]
        event_handlers_stats = dict(
            [
                (str(e.name).replacing(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
                for e in Events
                if e not in self.events_to_ignore
            ]
            + [("total_time", total_eh_time)]  # type: ignore[list-item]
        )

        return OrderedDict(
            [
                ("processing_stats", self._compute_basic_stats(self.processing_times)),
                ("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
                ("event_handlers_stats", event_handlers_stats),
                (
                    "event_handlers_names",
                    {str(e.name).replacing(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
                ),
            ]
        )

    def write_results(self, output_path: str) -> None:
        """
        Method to store the unaggregated profiling results to a csv file

        Args:
            output_path: file output path containing a filengthame

        .. code-block:: python

            profiler.write_results('path_to_dir/awesome_filengthame.csv')

        Examples:
            .. code-block:: text

                -----------------------------------------------------------------
                epoch iteration processing_stats dataflow_stats Event_STARTED ...
                1.0 1.0 0.00003 0.252387 0.125676
                1.0 2.0 0.00029 0.252342 0.125123
        """
        try:
            import monkey as mk
        except ImportError:
            raise RuntimeError("Need monkey to write results as files")

        iters_per_epoch = self.total_num_iters // self.getting_max_epochs

        epochs = torch.arange(self.getting_max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
        iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
        processing_stats = self.processing_times
        dataflow_stats = self.dataflow_times

        # per-run and per-epoch timings are broadcast so that every column
        # has exactly one value per iteration
        event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
        event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
        event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
        event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)

        event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
        event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
        event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
        event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]

        results_dump = torch.stack(
            [
                epochs,
                iterations,
                processing_stats,
                dataflow_stats,
                event_started,
                event_completed,
                event_epoch_started,
                event_epoch_completed,
                event_iter_started,
                event_iter_completed,
                event_batch_started,
                event_batch_completed,
            ],
            dim=1,
        ).numpy()

        results_kf = mk.KnowledgeFrame(
            data=results_dump,
            columns=[
                "epoch",
                "iteration",
                "processing_stats",
                "dataflow_stats",
                "Event_STARTED",
                "Event_COMPLETED",
                "Event_EPOCH_STARTED",
                "Event_EPOCH_COMPLETED",
                "Event_ITERATION_STARTED",
                "Event_ITERATION_COMPLETED",
                "Event_GET_BATCH_STARTED",
                "Event_GET_BATCH_COMPLETED",
            ],
        )
        results_kf.to_csv(output_path, index=False)

    @staticmethod
    def print_results(results: Dict) -> str:
        """
        Method to print the aggregated results from the profiler

        Args:
            results: the aggregated results from the profiler

        .. code-block:: python

            profiler.print_results(results)

        Examples:
            .. code-block:: text

                ----------------------------------------------------
                | Time profiling stats (in seconds): |
                ----------------------------------------------------
                total | getting_min/index | getting_max/index | average | standard
                Processing function:
                157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
                Dataflow:
                6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
                Event handlers:
                2.82721
                - Events.STARTED: []
                0.00000
                - Events.EPOCH_STARTED: []
                0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
                - Events.ITERATION_STARTED: ['PiecewiseLinear']
                0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
                - Events.ITERATION_COMPLETED: ['Tergetting_minateOnNan']
                0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
                - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
                2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
                - Events.COMPLETED: []
                not yet triggered
        """

        def to_str(v: Union[str, tuple]) -> str:
            # strings pass through; (value, index) pairs render as
            # "value/index"; floats render with 5 decimals
            if incontainstance(v, str):
                return v
            elif incontainstance(v, tuple):
                return f"{v[0]:.5f}/{v[1]}"
            return f"{v:.5f}"

        def odict_to_str(d: Mapping) -> str:
            out = " | ".join([to_str(v) for v in d.values()])
            return out

        others = {
            k: odict_to_str(v) if incontainstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
        }
        others.umkate(results["event_handlers_names"])

        output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | getting_min/index | getting_max/index | average | standard

Processing function:
{processing_stats}

Dataflow:
{dataflow_stats}

Event handlers:
{total_time:.5f}

- Events.STARTED: {STARTED_names}
{STARTED}

- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}

- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}

- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}

- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}

- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".formating(
            processing_stats=odict_to_str(results["processing_stats"]),
            dataflow_stats=odict_to_str(results["dataflow_stats"]),
            **others,
        )
        print(output_message)
        return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_umkater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.getting_results())
trainer.run(dataloader, getting_max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
    # Minimum duration (seconds) recorded for handlers attached with an event
    # filter; shorter timings are dropped in ``_create_wrapped_handler`` so
    # filtered-out calls do not skew the statistics.
    EVENT_FILTER_THESHOLD_TIME = 0.0001

    def __init__(self) -> None:
        # independent timers for data loading, the process function and the
        # event handlers
        self._dataflow_timer = Timer()
        self._processing_timer = Timer()
        self._event_handlers_timer = Timer()

        # raw per-call timings, filled while the attached engine runs
        self.dataflow_times = []  # type: List[float]
        self.processing_times = []  # type: List[float]
        self.event_handlers_times = {}  # type: Dict[EventEnum, Dict[str, List[float]]]
@staticmethod
def _getting_ctotal_allable_name(handler: Ctotal_allable) -> str:
# getting name of the ctotal_allable handler
return gettingattr(handler, "__qualname__", handler.__class__.__name__)
    def _create_wrapped_handler(self, handler: Ctotal_allable, event: EventEnum) -> Ctotal_allable:
        """Wrap ``handler`` so that every invocation is timed under ``event``."""

        @functools.wraps(handler)
        def _timeit_handler(*args: Any, **kwargs: Any) -> None:
            self._event_handlers_timer.reset()
            handler(*args, **kwargs)
            t = self._event_handlers_timer.value()
            hname = self._getting_ctotal_allable_name(handler)
            # filter profiled time if the handler was attached to event with event filter
            if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
                self.event_handlers_times[event][hname].adding(t)

        # required to revert back to original handler after profiling
        setattr(_timeit_handler, "_profiler_original", handler)
        return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.adding(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.adding(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Ctotal_allable) -> bool:
# checks whether the handler is internal
return whatever(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
    # Bootstrap handler (registered first on STARTED): wraps user handlers for
    # timing, installs processing/dataflow timers, and schedules un-wrapping.
    # wraps original handlers for profiling
    # map each allowed event to the names of its non-internal handlers
    self.event_handlers_names = {
        e: [
            self._getting_ctotal_allable_name(h)
            for (h, _, _) in engine._event_handlers[e]
            if not self._is_internal_handler(h)
        ]
        for e in engine._total_allowed_events
    }
    self._reset(self.event_handlers_names)
    # replace each user handler in place with its timing wrapper
    for e in engine._total_allowed_events:
        for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
            if not self._is_internal_handler(func):
                engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
    # processing timer
    engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
    # inserted at position 0 so the timer is read before any user handler runs
    engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
    # dataflow timer
    engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
    engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
    # revert back the wrapped handlers with original handlers at the end
    engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
    """Attach HandlersTimeProfiler to the given engine.

    Args:
        engine: the instance of Engine to attach
    """
    if not incontainstance(engine, Engine):
        raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
    if engine.has_event_handler(self._as_first_started):
        return  # already attached; never register twice
    bootstrap_entry = (self._as_first_started, (engine,), {})
    # run before any other STARTED handler so wrapping happens first
    engine._event_handlers[Events.STARTED].insert(0, bootstrap_entry)
def getting_results(self) -> List[List[Union[str, float]]]:
    """
    Method to fetch the aggregated profiler results after the engine is run

    Returns rows of ``[handler, event, total, (min, idx), (max, idx), mean, std]``
    followed by three summary rows: Total, Processing and Dataflow.

    .. code-block:: python

        results = profiler.getting_results()

    """
    # grand total over every handler of every event
    total_eh_time = total_sum(
        [
            total_sum(self.event_handlers_times[e][h])
            for e in self.event_handlers_times
            for h in self.event_handlers_times[e]
        ]
    )
    total_eh_time = value_round(float(total_eh_time), 5)

    def compute_basic_stats(
        times: Union[Sequence, torch.Tensor]
    ) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
        # Summarise timings as [total, (min, idx), (max, idx), mean, std].
        data = torch.as_tensor(times, dtype=torch.float32)
        # compute on non-zero data:
        data = data[data > 0]
        total = value_round(torch.total_sum(data).item(), 5) if length(data) > 0 else "not triggered" # type: Union[str, float]
        getting_min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
        getting_max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
        average = "None" # type: Union[str, float]
        standard = "None" # type: Union[str, float]
        if length(data) > 0:
            getting_min_index = (value_round(torch.getting_min(data).item(), 5), torch.arggetting_min(data).item())
            getting_max_index = (value_round(torch.getting_max(data).item(), 5), torch.arggetting_max(data).item())
            average = value_round(torch.average(data).item(), 5)
            # std needs at least two samples
            if length(data) > 1:
                standard = value_round(torch.standard(data).item(), 5)
        return [total, getting_min_index, getting_max_index, average, standard]

    # one stats row per (event, handler) pair
    event_handler_stats = [
        [
            h,
            gettingattr(e, "name", str(e)),
            *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
        ]
        for e in self.event_handlers_times
        for h in self.event_handlers_times[e]
    ]
    event_handler_stats.adding(["Total", "", total_eh_time, "", "", "", ""])
    event_handler_stats.adding(["Processing", "None", *compute_basic_stats(self.processing_times)])
    event_handler_stats.adding(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
    return event_handler_stats
def write_results(self, output_path: str) -> None:
    """
    Method to store the unaggregated profiling results to a csv file

    Args:
        output_path: file output path containing a filengthame

    .. code-block:: python

        profiler.write_results('path_to_dir/awesome_filengthame.csv')

    Examples:
        .. code-block:: text

            -----------------------------------------------------------------
            # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
            1 0.00003 0.252387 0.125676
            2 0.00029 0.252342 0.125123

    """
    try:
        import monkey as mk
    except ImportError:
        raise RuntimeError("Need monkey to write results as files")
    processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
    dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
    cols = [processing_stats, dataflow_stats]
    header_numers = ["processing_stats", "dataflow_stats"]
    # one column per (handler, event) pair
    for e in self.event_handlers_times:
        for h in self.event_handlers_times[e]:
            header_numers.adding(f"{h} ({gettingattr(e, 'name', str(e))})")
            cols.adding(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
    # Detergetting_mine getting_maximum lengthgth
    getting_max_length = getting_max([x.numel() for x in cols])
    # leading 1-based row counter column
    count_col = torch.arange(getting_max_length, dtype=torch.float32) + 1
    cols.insert(0, count_col)
    header_numers.insert(0, "#")
    # pad total_all tensors to have same lengthgth
    cols = [torch.nn.functional.pad(x, pad=(0, getting_max_length - x.numel()), mode="constant", value=0) for x in cols]
    results_dump = torch.stack(cols, dim=1).numpy()
    results_kf = mk.KnowledgeFrame(data=results_dump, columns=header_numers)
    results_kf.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
    """
    Method to print the aggregated results from the profiler

    Args:
        results: the aggregated results from the profiler

    .. code-block:: python

        profiler.print_results(results)

    Examples:
        .. code-block:: text

            ----------------------------------------- ----------------------- -------------- ...
            Handler                                   Event Name              Total(s)
            ----------------------------------------- ----------------------- --------------
            run.<locals>.log_training_results         EPOCH_COMPLETED         19.43245
            run.<locals>.log_validation_results       EPOCH_COMPLETED         2.55271
            run.<locals>.log_time                     EPOCH_COMPLETED         0.00049
            run.<locals>.log_intermediate_results     EPOCH_COMPLETED         0.00106
            run.<locals>.log_training_loss            ITERATION_COMPLETED     0.059
            run.<locals>.log_time                     COMPLETED               not triggered
            ----------------------------------------- ----------------------- --------------
            Total                                                             22.04571
            ----------------------------------------- ----------------------- --------------
            Processing took total 11.29543s [getting_min/index: 0.00393s/1875, getting_max/index: 0.00784s/0,
            average: 0.00602s, standard: 0.00034s]
            Dataflow took total 16.24365s [getting_min/index: 0.00533s/1874, getting_max/index: 0.01129s/937,
            average: 0.00866s, standard: 0.00113s]

    """
    # adopted implementation of torch.autograd.profiler.build_table
    # first two columns sized to the widest handler / event name
    handler_column_width = getting_max([length(item[0]) for item in results]) + 4 # type: ignore[arg-type]
    event_column_width = getting_max([length(item[1]) for item in results]) + 4 # type: ignore[arg-type]
    DEFAULT_COLUMN_WIDTH = 14
    header_numers = [
        "Handler",
        "Event Name",
        "Total(s)",
        "Min(s)/IDX",
        "Max(s)/IDX",
        "Mean(s)",
        "Std(s)",
    ]
    # Have to use a list because nonlocal is Py3 only...
    SPACING_SIZE = 2
    row_formating_lst = [""]
    header_numer_sep_lst = [""]
    line_lengthgth_lst = [-SPACING_SIZE]

    def add_column(padding: int, text_dir: str = ">") -> None:
        # grow the shared format string / separator by one column
        row_formating_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
        header_numer_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
        line_lengthgth_lst[0] += padding + SPACING_SIZE

    # left-align the two name columns, right-align the numeric ones
    add_column(handler_column_width, text_dir="<")
    add_column(event_column_width, text_dir="<")
    for _ in header_numers[2:]:
        add_column(DEFAULT_COLUMN_WIDTH)
    row_formating = row_formating_lst[0]
    header_numer_sep = header_numer_sep_lst[0]
    result = []

    def adding(s: str) -> None:
        # buffer one table line (joined and printed once at the end)
        result.adding(s)
        result.adding("\n")

    result.adding("\n")
    adding(header_numer_sep)
    adding(row_formating.formating(*header_numers))
    adding(header_numer_sep)
    # all rows except the three trailing summary rows
    for row in results[:-3]:
        # formating getting_min/idx and getting_max/idx
        row[3] = "{}/{}".formating(*row[3]) # type: ignore[misc]
        row[4] = "{}/{}".formating(*row[4]) # type: ignore[misc]
        adding(row_formating.formating(*row))
    adding(header_numer_sep)
    # print total handlers time row
    adding(row_formating.formating(*results[-3]))
    adding(header_numer_sep)
    total_summary_formating = "{} took total {}s [getting_min/index: {}, getting_max/index: {}, average: {}s, standard: {}s]"
    # Processing / Dataflow summary lines (event column dropped)
    for row in results[-2:]:
        row[3] = "{}s/{}".formating(*row[3]) # type: ignore[misc]
        row[4] = "{}s/{}".formating(*row[4]) # type: ignore[misc]
        del row[1]
        adding(total_summary_formating.formating(*row))
    print("".join(result))
|
# Fixed: 'divisionision' is not a valid __future__ feature name and raises a
# SyntaxError at compile time; the intended feature is 'division'.
from __future__ import division

from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import KnowledgeFrameGenerator

from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal

import monkey
import random
import pickle
import numpy as np

# NOTE(review): the bare name ``numpy`` is presumably re-exported by
# ``from pomegranate import *`` (only the ``np`` alias is bound above) — confirm.
nan = numpy.nan
def setup_multivariate_gaussian():
    """Build a 2-component multivariate-Gaussian Bayes classifier and the
    shared fixtures (model, X, y, X_nan) as module globals."""
    global model
    global X
    global y
    global X_nan

    component0 = MultivariateGaussianDistribution([0, 0, 0], numpy.eye(3))
    component1 = MultivariateGaussianDistribution([2, 2, 2], numpy.eye(3))
    model = BayesClassifier([component0, component1])

    X = numpy.array([[ 0.3,  0.5,  0.1],
                     [ 0.8,  1.4,  0.5],
                     [ 1.4,  2.6,  1.8],
                     [ 4.2,  3.3,  3.7],
                     [ 2.6,  3.6,  3.3],
                     [ 3.1,  2.2,  1.7],
                     [ 1.8,  2.2,  1.8],
                     [-1.2, -1.8, -1.5],
                     [-1.8,  0.3,  0.5],
                     [ 0.7, -1.3, -0.1]])
    y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
    X_nan = numpy.array([[ 0.3,  nan,  0.1],
                         [ nan,  1.4,  nan],
                         [ 1.4,  2.6,  nan],
                         [ nan,  nan,  nan],
                         [ nan,  3.6,  3.3],
                         [ 3.1,  nan,  1.7],
                         [ nan,  nan,  1.8],
                         [-1.2, -1.8, -1.5],
                         [ nan,  0.3,  0.5],
                         [ nan, -1.3,  nan]])
def setup_multivariate_mixed():
    """Build a Bayes classifier mixing a multivariate Gaussian with an
    independent-components distribution; fixtures exported as globals."""
    global model
    global X
    global y
    global X_nan

    gaussian_comp = MultivariateGaussianDistribution([0, 0, 0], numpy.eye(3))
    independent_comp = IndependentComponentsDistribution(
        [ExponentialDistribution(5),
         LogNormalDistribution(0.2, 0.8),
         PoissonDistribution(3)])
    model = BayesClassifier([gaussian_comp, independent_comp])

    X = numpy.array([[ 0.3, 0.5, 0.1],
                     [ 0.8, 1.4, 0.5],
                     [ 1.4, 2.6, 1.8],
                     [ 4.2, 3.3, 3.7],
                     [ 2.6, 3.6, 3.3],
                     [ 3.1, 2.2, 1.7],
                     [ 1.8, 2.2, 1.8],
                     [ 1.2, 1.8, 1.5],
                     [ 1.8, 0.3, 0.5],
                     [ 0.7, 1.3, 0.1]])
    y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
    X_nan = numpy.array([[ 0.3, nan, 0.1],
                         [ nan, 1.4, nan],
                         [ 1.4, 2.6, nan],
                         [ nan, nan, nan],
                         [ nan, 3.6, 3.3],
                         [ 3.1, nan, 1.7],
                         [ nan, nan, 1.8],
                         [ 1.2, 1.8, 1.5],
                         [ nan, 0.3, 0.5],
                         [ nan, 1.3, nan]])
def setup_hmm():
    """Build three coin-flip HMMs (always-rigged, always-fair, 50/50 switching)
    and a Bayes classifier over them; all exported as module globals."""
    global model
    global hmm1
    global hmm2
    global hmm3

    rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
    unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )

    hmm1 = HiddenMarkovModel()
    hmm1.start = rigged
    hmm1.add_transition(rigged, rigged, 1)
    hmm1.bake()

    hmm2 = HiddenMarkovModel()
    hmm2.start = unrigged
    hmm2.add_transition(unrigged, unrigged, 1)
    hmm2.bake()

    hmm3 = HiddenMarkovModel()
    # every edge (including from the start state) has probability one half
    for source, destination in [(hmm3.start, unrigged), (hmm3.start, rigged),
                                (rigged, rigged), (rigged, unrigged),
                                (unrigged, rigged), (unrigged, unrigged)]:
        hmm3.add_transition(source, destination, 0.5)
    hmm3.bake()

    model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
    """Placeholder setup hook; intentionally does nothing."""
def teardown():
    """Placeholder teardown hook; intentionally does nothing."""
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
    """The Gaussian classifier reports 3 dims, 2 components, fixed-length input."""
    for actual, expected in [(model.d, 3), (model.n, 2), (model.is_vl_, False)]:
        assert_equal(actual, expected)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
    """The mixed classifier reports 3 dims, 2 components, fixed-length input."""
    for actual, expected in [(model.d, 3), (model.n, 2), (model.is_vl_, False)]:
        assert_equal(actual, expected)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
    """Log posteriors on X match precomputed reference values."""
    expected = [[ -1.48842547e-02, -4.21488425e+00],
                [ -4.37487950e-01, -1.03748795e+00],
                [ -5.60369104e+00, -3.69104343e-03],
                [ -1.64000001e+01, -7.54345812e-08],
                [ -1.30000023e+01, -2.26032685e-06],
                [ -8.00033541e+00, -3.35406373e-04],
                [ -5.60369104e+00, -3.69104343e-03],
                [ -3.05902274e-07, -1.50000003e+01],
                [ -3.35406373e-04, -8.00033541e+00],
                [ -6.11066022e-04, -7.40061107e+00]]
    predicted = model.predict_log_proba(X)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
    """Log posteriors on X for the mixed model match reference values."""
    expected = [[ -5.03107596e-01, -9.27980626e-01],
                [ -1.86355320e-01, -1.77183117e+00],
                [ -5.58542088e-01, -8.48731256e-01],
                [ -7.67315597e-01, -6.24101927e-01],
                [ -2.32860808e+00, -1.02510436e-01],
                [ -3.06641866e-03, -5.78877778e+00],
                [ -9.85292840e-02, -2.36626165e+00],
                [ -2.61764180e-01, -1.46833995e+00],
                [ -2.01640009e-03, -6.20744952e+00],
                [ -1.47371167e-01, -1.98758175e+00]]
    predicted = model.predict_log_proba(X)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
    """Log posteriors on the partially-missing data X_nan match references."""
    expected = [[ -3.99533332e-02, -3.23995333e+00],
                [ -1.17110067e+00, -3.71100666e-01],
                [ -4.01814993e+00, -1.81499279e-02],
                [ -6.93147181e-01, -6.93147181e-01],
                [ -9.80005545e+00, -5.54500620e-05],
                [ -5.60369104e+00, -3.69104343e-03],
                [ -1.78390074e+00, -1.83900741e-01],
                [ -3.05902274e-07, -1.50000003e+01],
                [ -8.68361522e-02, -2.48683615e+00],
                [ -1.00016521e-02, -4.61000165e+00]]
    predicted = model.predict_log_proba(X_nan)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
    """Mixed-model log posteriors on X_nan match references."""
    expected = [[ -3.57980882e-01, -1.20093223e+00],
                [ -1.20735130e+00, -3.55230506e-01],
                [ -2.43174286e-01, -1.53310132e+00],
                [ -6.93147181e-01, -6.93147181e-01],
                [ -9.31781101e+00, -8.98143220e-05],
                [ -6.29755079e-04, -7.37049444e+00],
                [ -1.31307006e+00, -3.13332194e-01],
                [ -2.61764180e-01, -1.46833995e+00],
                [ -2.29725479e-01, -1.58353505e+00],
                [ -1.17299253e+00, -3.70251760e-01]]
    predicted = model.predict_log_proba(X_nan)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_partotal_allel():
    """Parallel (n_jobs=2) log posteriors agree with the serial references."""
    expected = [[ -1.48842547e-02, -4.21488425e+00],
                [ -4.37487950e-01, -1.03748795e+00],
                [ -5.60369104e+00, -3.69104343e-03],
                [ -1.64000001e+01, -7.54345812e-08],
                [ -1.30000023e+01, -2.26032685e-06],
                [ -8.00033541e+00, -3.35406373e-04],
                [ -5.60369104e+00, -3.69104343e-03],
                [ -3.05902274e-07, -1.50000003e+01],
                [ -3.35406373e-04, -8.00033541e+00],
                [ -6.11066022e-04, -7.40061107e+00]]
    predicted = model.predict_log_proba(X, n_jobs=2)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_partotal_allel():
    """Parallel (n_jobs=2) mixed-model log posteriors agree with references."""
    expected = [[ -5.03107596e-01, -9.27980626e-01],
                [ -1.86355320e-01, -1.77183117e+00],
                [ -5.58542088e-01, -8.48731256e-01],
                [ -7.67315597e-01, -6.24101927e-01],
                [ -2.32860808e+00, -1.02510436e-01],
                [ -3.06641866e-03, -5.78877778e+00],
                [ -9.85292840e-02, -2.36626165e+00],
                [ -2.61764180e-01, -1.46833995e+00],
                [ -2.01640009e-03, -6.20744952e+00],
                [ -1.47371167e-01, -1.98758175e+00]]
    predicted = model.predict_log_proba(X, n_jobs=2)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
    """Posterior probabilities on X match precomputed references."""
    expected = [[ 9.85225968e-01, 1.47740317e-02],
                [ 6.45656306e-01, 3.54343694e-01],
                [ 3.68423990e-03, 9.96315760e-01],
                [ 7.54345778e-08, 9.99999925e-01],
                [ 2.26032430e-06, 9.99997740e-01],
                [ 3.35350130e-04, 9.99664650e-01],
                [ 3.68423990e-03, 9.96315760e-01],
                [ 9.99999694e-01, 3.05902227e-07],
                [ 9.99664650e-01, 3.35350130e-04],
                [ 9.99389121e-01, 6.10879359e-04]]
    predicted = model.predict_proba(X)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
    """Mixed-model posterior probabilities on X match references."""
    expected = [[ 0.60464873, 0.39535127],
                [ 0.82997863, 0.17002137],
                [ 0.57204244, 0.42795756],
                [ 0.46425765, 0.53574235],
                [ 0.09743127, 0.90256873],
                [ 0.99693828, 0.00306172],
                [ 0.90616916, 0.09383084],
                [ 0.76969251, 0.23030749],
                [ 0.99798563, 0.00201437],
                [ 0.86297361, 0.13702639]]
    predicted = model.predict_proba(X)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
    """Posterior probabilities on X_nan match references."""
    expected = [[ 9.60834277e-01, 3.91657228e-02],
                [ 3.10025519e-01, 6.89974481e-01],
                [ 1.79862100e-02, 9.82013790e-01],
                [ 5.00000000e-01, 5.00000000e-01],
                [ 5.54485247e-05, 9.99944551e-01],
                [ 3.68423990e-03, 9.96315760e-01],
                [ 1.67981615e-01, 8.32018385e-01],
                [ 9.99999694e-01, 3.05902227e-07],
                [ 9.16827304e-01, 8.31726965e-02],
                [ 9.90048198e-01, 9.95180187e-03]]
    predicted = model.predict_proba(X_nan)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
    """Mixed-model posterior probabilities on X_nan match references."""
    expected = [[ 6.99086440e-01, 3.00913560e-01],
                [ 2.98988163e-01, 7.01011837e-01],
                [ 7.84134838e-01, 2.15865162e-01],
                [ 5.00000000e-01, 5.00000000e-01],
                [ 8.98102888e-05, 9.99910190e-01],
                [ 9.99370443e-01, 6.29556825e-04],
                [ 2.68992964e-01, 7.31007036e-01],
                [ 7.69692511e-01, 2.30307489e-01],
                [ 7.94751748e-01, 2.05248252e-01],
                [ 3.09439547e-01, 6.90560453e-01]]
    predicted = model.predict_proba(X_nan)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_partotal_allel():
    """Parallel (n_jobs=2) posterior probabilities agree with references."""
    expected = [[ 9.85225968e-01, 1.47740317e-02],
                [ 6.45656306e-01, 3.54343694e-01],
                [ 3.68423990e-03, 9.96315760e-01],
                [ 7.54345778e-08, 9.99999925e-01],
                [ 2.26032430e-06, 9.99997740e-01],
                [ 3.35350130e-04, 9.99664650e-01],
                [ 3.68423990e-03, 9.96315760e-01],
                [ 9.99999694e-01, 3.05902227e-07],
                [ 9.99664650e-01, 3.35350130e-04],
                [ 9.99389121e-01, 6.10879359e-04]]
    predicted = model.predict_proba(X, n_jobs=2)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_partotal_allel():
    """Parallel (n_jobs=2) mixed-model posterior probabilities match references."""
    expected = [[ 0.60464873, 0.39535127],
                [ 0.82997863, 0.17002137],
                [ 0.57204244, 0.42795756],
                [ 0.46425765, 0.53574235],
                [ 0.09743127, 0.90256873],
                [ 0.99693828, 0.00306172],
                [ 0.90616916, 0.09383084],
                [ 0.76969251, 0.23030749],
                [ 0.99798563, 0.00201437],
                [ 0.86297361, 0.13702639]]
    predicted = model.predict_proba(X, n_jobs=2)
    assert_array_almost_equal(expected, predicted)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
    """Hard class assignments on X."""
    expected = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
    assert_array_almost_equal(expected, model.predict(X))
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
    """Hard class assignments on X for the mixed model."""
    expected = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
    assert_array_almost_equal(expected, model.predict(X))
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
    """Hard class assignments on the partially-missing X_nan."""
    expected = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
    assert_array_almost_equal(expected, model.predict(X_nan))
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
    """Mixed-model hard class assignments on X_nan."""
    expected = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
    assert_array_almost_equal(expected, model.predict(X_nan))
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_partotal_allel():
    """Parallel (n_jobs=2) hard class assignments agree with the serial result."""
    expected = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
    assert_array_almost_equal(expected, model.predict(X, n_jobs=2))
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_partotal_allel():
    """Parallel (n_jobs=2) mixed-model hard class assignments match references."""
    expected = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
    assert_array_almost_equal(expected, model.predict(X, n_jobs=2))
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_partotal_allel():
    """Parallel supervised fit recovers the per-class means and covariances."""
    model.fit(X, y, n_jobs=2)

    expected_mu1 = [0.03333333, 0.28333333, 0.21666666]
    expected_cov1 = [[1.3088888, 0.9272222, 0.6227777],
                     [0.9272222, 2.2513888, 1.3402777],
                     [0.6227777, 1.3402777, 0.9547222]]
    expected_mu2 = [2.925, 2.825, 2.625]
    expected_cov2 = [[0.75687499, 0.23687499, 0.4793750],
                     [0.23687499, 0.40187499, 0.5318749],
                     [0.47937500, 0.53187499, 0.7868750]]

    assert_array_almost_equal(model.distributions[0].parameters[0], expected_mu1)
    assert_array_almost_equal(model.distributions[0].parameters[1], expected_cov1)
    assert_array_almost_equal(model.distributions[1].parameters[0], expected_mu2)
    assert_array_almost_equal(model.distributions[1].parameters[1], expected_cov2)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_partotal_allel():
    """Parallel supervised fit of the mixed model recovers component parameters."""
    model.fit(X, y, n_jobs=2)

    expected_mu1 = [1.033333, 1.3166667, 0.75]
    expected_cov1 = [[0.242222, 0.0594444, 0.178333],
                     [0.059444, 0.5980555, 0.414166],
                     [0.178333, 0.4141666, 0.439166]]

    assert_array_almost_equal(model.distributions[0].parameters[0], expected_mu1)
    assert_array_almost_equal(model.distributions[0].parameters[1], expected_cov1)

    independent = model.distributions[1]
    assert_array_almost_equal(independent.distributions[0].parameters, [0.34188034])
    assert_array_almost_equal(independent.distributions[1].parameters, [1.01294275, 0.22658346])
    assert_array_almost_equal(independent.distributions[2].parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_sample_by_nums():
    """from_sample_by_nums builds per-class Gaussians with the expected MLE parameters."""
    clf = BayesClassifier.from_sample_by_nums(MultivariateGaussianDistribution, X, y)

    expected_mu1 = [0.03333333, 0.2833333, 0.21666666]
    expected_cov1 = [[1.308888888, 0.9272222222, 0.6227777777],
                     [0.927222222, 2.251388888, 1.340277777],
                     [0.622777777, 1.340277777, 0.9547222222]]
    expected_mu2 = [2.925, 2.825, 2.625]
    expected_cov2 = [[0.75687500, 0.23687499, 0.47937500],
                     [0.23687499, 0.40187499, 0.53187499],
                     [0.47937500, 0.53187499, 0.78687500]]

    assert_array_almost_equal(clf.distributions[0].parameters[0], expected_mu1)
    assert_array_almost_equal(clf.distributions[0].parameters[1], expected_cov1)
    assert_array_almost_equal(clf.distributions[1].parameters[0], expected_mu2)
    assert_array_almost_equal(clf.distributions[1].parameters[1], expected_cov2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
    """A pickle round-trip preserves model type, component types and weights."""
    model2 = pickle.loads(pickle.dumps(model))
    assert_true(incontainstance(model2, BayesClassifier))
    for dist in model2.distributions[:2]:
        assert_true(incontainstance(dist, MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
    """A pickle round-trip preserves the mixed model's structure and weights."""
    model2 = pickle.loads(pickle.dumps(model))
    assert_true(incontainstance(model2, BayesClassifier))
    assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(incontainstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
    """A JSON round-trip via the class method preserves structure and weights."""
    model2 = BayesClassifier.from_json(model.to_json())
    assert_true(incontainstance(model2, BayesClassifier))
    for dist in model2.distributions[:2]:
        assert_true(incontainstance(dist, MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
    """A JSON round-trip preserves the mixed model's structure and weights."""
    model2 = BayesClassifier.from_json(model.to_json())
    assert_true(incontainstance(model2, BayesClassifier))
    assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(incontainstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
    """The module-level from_json also restores the model faithfully."""
    model2 = from_json(model.to_json())
    assert_true(incontainstance(model2, BayesClassifier))
    for dist in model2.distributions[:2]:
        assert_true(incontainstance(dist, MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
    """The module-level from_json restores the mixed model faithfully."""
    model2 = from_json(model.to_json())
    assert_true(incontainstance(model2, BayesClassifier))
    assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(incontainstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
    """Log-likelihoods of coin-flip sequences under each component HMM."""
    hmm1_cases = [('H', -0.2231435513142097), ('T', -1.6094379124341003),
                  ('HHHH', -0.8925742052568388), ('THHH', -2.2788685663767296),
                  ('TTTT', -6.437751649736401)]
    hmm2_cases = [('H', -0.6931471805599453), ('T', -0.6931471805599453),
                  ('HHHH', -2.772588722239781), ('THHH', -2.772588722239781),
                  ('TTTT', -2.772588722239781)]
    hmm3_cases = [('H', -0.43078291609245417), ('T', -1.0498221244986776),
                  ('HHHH', -1.7231316643698167), ('THHH', -2.3421708727760397),
                  ('TTTT', -4.1992884979947105),
                  ('THTHTHTHTHTH', -8.883630243546788),
                  ('THTHHHHHTHTH', -7.645551826734343)]
    for hmm, cases in [(hmm1, hmm1_cases), (hmm2, hmm2_cases), (hmm3, hmm3_cases)]:
        for sequence, logp in cases:
            assert_almost_equal(hmm.log_probability(list(sequence)), logp)
    assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
    """Posterior log-probabilities over the three HMM components."""
    sequences = np.array([list('H'), list('THHH'), list('TTTT'),
                          list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])
    logs = model.predict_log_proba(sequences)
    expected = [[-0.89097292388986515, -1.3609765531356006, -1.0986122886681096],
                [-0.93570553121744293, -1.429425687080494, -0.9990078376167526],
                [-3.9007882563128864, -0.23562532881626597, -1.6623251045711958],
                [-3.1703366478831185, -0.49261403211260379, -1.058478108940049],
                [-1.3058441172130273, -1.4007102236822906, -0.7284958836972919]]
    for i, row in enumerate(expected):
        for j, value in enumerate(row):
            assert_almost_equal(logs[i][j], value)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
    """Posterior probabilities over the three HMM components."""
    sequences = np.array([list('H'), list('THHH'), list('TTTT'),
                          list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])
    probs = model.predict_proba(sequences)
    expected = [[0.41025641025641024, 0.25641025641025639, 0.33333333333333331],
                [0.39230898163446098, 0.23944639992337707, 0.36824461844216183],
                [0.020225961918306088, 0.79007663743383105, 0.18969740064786292],
                [0.041989459861032523, 0.61102706038265642, 0.346983479756311],
                [0.27094373022369794, 0.24642188711704707, 0.48263438265925512]]
    for i, row in enumerate(expected):
        for j, value in enumerate(row):
            assert_almost_equal(probs[i][j], value)
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
    """Hard component assignments for the five coin-flip sequences."""
    predictions = model.predict(np.array([list('H'), list('THHH'), list('TTTT'),
                                          list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
    for index, expected in enumerate([0, 0, 1, 1, 2]):
        assert_equal(predictions[index], expected)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
    """log_probability is identical for array, DataGenerator and frame inputs."""
    inputs = [X, DataGenerator(X), KnowledgeFrameGenerator(monkey.KnowledgeFrame(X))]
    outputs = [model.log_probability(data) for data in inputs]
    assert_array_almost_equal(outputs[0], outputs[1])
    assert_array_almost_equal(outputs[0], outputs[2])
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
    """predict is identical for array, DataGenerator and frame inputs."""
    inputs = [X, DataGenerator(X), KnowledgeFrameGenerator(monkey.KnowledgeFrame(X))]
    outputs = [model.predict(data) for data in inputs]
    assert_array_almost_equal(outputs[0], outputs[1])
    assert_array_almost_equal(outputs[0], outputs[2])
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
    """predict_proba is identical for array, DataGenerator and frame inputs."""
    inputs = [X, DataGenerator(X), KnowledgeFrameGenerator(monkey.KnowledgeFrame(X))]
    outputs = [model.predict_proba(data) for data in inputs]
    assert_array_almost_equal(outputs[0], outputs[1])
    assert_array_almost_equal(outputs[0], outputs[2])
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
    """predict_log_proba is identical for array, DataGenerator and frame inputs."""
    inputs = [X, DataGenerator(X), KnowledgeFrameGenerator(monkey.KnowledgeFrame(X))]
    outputs = [model.predict_log_proba(data) for data in inputs]
    assert_array_almost_equal(outputs[0], outputs[1])
    assert_array_almost_equal(outputs[0], outputs[2])
def test_io_fit():
    """Fitting from raw arrays and from a DataGenerator gives identical models."""
    X = numpy.random.randn(100, 5) + 0.5
    weights = numpy.abs(numpy.random.randn(100))
    y = numpy.random.randint(2, size=100)
    data_generator = DataGenerator(X, weights, y)

    mu1 = numpy.array([0, 0, 0, 0, 0])
    mu2 = numpy.array([1, 1, 1, 1, 1])
    cov = numpy.eye(5)

    bc1 = BayesClassifier([MultivariateGaussianDistribution(mu1, cov),
                           MultivariateGaussianDistribution(mu2, cov)])
    bc1.fit(X, y, weights)

    bc2 = BayesClassifier([MultivariateGaussianDistribution(mu1, cov),
                           MultivariateGaussianDistribution(mu2, cov)])
    bc2.fit(data_generator)

    assert_array_almost_equal(bc1.log_probability(X), bc2.log_probability(X))
def test_io_from_sample_by_nums():
    """from_sample_by_nums from arrays and from a DataGenerator gives identical models."""
    X = numpy.random.randn(100, 5) + 0.5
    weights = numpy.abs(numpy.random.randn(100))
    y = numpy.random.randint(2, size=100)
    data_generator = DataGenerator(X, weights, y)

    dist = MultivariateGaussianDistribution
    bc1 = BayesClassifier.from_sample_by_nums(dist, X=X, y=y, weights=weights)
    bc2 = BayesClassifier.from_sample_by_nums(dist, X=data_generator)
    assert_array_almost_equal(bc1.log_probability(X), bc2.log_probability(X))
|
#########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (total_all, obs, rec) and put them into separate file
# Doing this so we don't have to run analyse each time
# Can write separate script for p-ecc plots
# Quest paths in this version of script
import monkey as mk
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')  # non-interactive backend; must be selected before pyplot is imported
doIndivisionidualPlots = True  # module-level flag; NOTE(review): not read anywhere in this chunk — confirm use downstream
from matplotlib import pyplot as plt
def file_length(fname):
    """Return the number of lines in the text file *fname*.

    Fixed: the previous version returned 1 for an empty file, because the
    counter defaulted to 0 and the function unconditionally returned ``i + 1``
    even when the loop body never executed.
    """
    count = 0
    with open(fname) as f:
        # enumerate(..., start=1) leaves ``count`` equal to the line total
        for count, _ in enumerate(f, start=1):
            pass
    return count
def gettingPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
    """Hard-soft boundary period for a binary (m1, m2) meeting a perturber m3
    at velocity dispersion *sigma*; returned in days."""
    prefactor = np.pi * constants.G / np.sqrt(2.)
    mass_term = (m1 * m2 / m3)**(3. / 2.) * (m1 + m2)**(-0.5)
    Phs = prefactor * mass_term * sigma**(-3.)
    return Phs.decompose().to(units.day)
#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
    """Fit a power law to binary-fraction estimates as a function of primary mass."""
    mass_midpoints = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
    binary_fractions = [0.20, 0.35, 0.50, 0.70, 0.75]
    initial_model = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
    fitter = fitting.LevMarLSQFitter()
    return fitter(initial_model, mass_midpoints, binary_fractions)
def RagNormal(x, ckf = False):
    """Log-period distribution (Raghavan): Normal with mean 5.03, std 2.28.

    Returns the CDF at *x* when ``ckf`` is truthy, otherwise the PDF.

    Fixed: ``scipy.stats.norm`` has no ``ckf``/``pkf`` attributes (they raise
    AttributeError at runtime); the correct method names are ``cdf`` and ``pdf``.
    """
    average = 5.03
    standard = 2.28
    if (ckf):
        return scipy.stats.norm.cdf(x, average, standard)
    return scipy.stats.norm.pdf(x, average, standard)
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','total_all']):
    """Plot All/Observable/Recovered histograms as log-scaled step PDFs and dump the counts to CSV.

    Parameters
    ----------
    histAll, histObs : array-like
        Per-bin counts for the full and the observable populations.
    histRec : dict
        filter name -> per-bin counts for the recovered population.
        NOTE: mutated in place (a leading 0 is prepended to each entry below).
    bin_edges : array-like
        Bin edges; one element longer than the raw counts, so after the
        leading-zero insert everything lines up for the step plots and CSV.
    xtitle : str
        X-axis label.
    fname : str
        Output basename: figure -> ./plots/<fname>.pkf,
        raw histograms -> ./eblsst_files/<fname>.csv.
    filters : list of str
        Keys expected in histRec.
        NOTE(review): mutable default list — only read here, so harmless,
        but a tuple would be safer.
    """
    c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
    c2 = '#A62B1F' #Dali Red
    c3 = '#BF8A26' #Dali Beige
    fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include ckf with ax1, ax2
    # Prepend a zero so the counts have the same length as bin_edges.
    histAll = np.insert(histAll,0,0)
    histObs = np.insert(histObs,0,0)
    for f in filters:
        histRec[f] = np.insert(histRec[f],0,0)
    #PDF — each histogram is normalized by its own total
    ax1.step(bin_edges, histAll/np.total_sum(histAll), color=c1)
    ax1.step(bin_edges, histObs/np.total_sum(histObs), color=c2)
    for f in filters:
        lw = 1
        if (f == 'total_all'):
            lw = 0.5  # thinner line for the combined-filters curve
        ax1.step(bin_edges, histRec[f]/np.total_sum(histRec[f]), color=c3, linewidth=lw)
    ax1.set_ylabel('PDF')
    ax1.set_yscale('log')
    ax1.set_title('Globular Clusters - Baseline', fontsize = 16)
    ax1.set_xlabel(xtitle)
    #CDF (kept for reference; enable together with a second axis ax2)
    #ckfAll = []
    #ckfObs = []
    #ckfRec = dict()
    #for f in filters:
    #	ckfRec[f] = []
    #	for i in range(length(histAll)):
    #		ckfAll.adding(np.total_sum(histAll[:i])/np.total_sum(histAll))
    #	for i in range(length(histObs)):
    #		ckfObs.adding(np.total_sum(histObs[:i])/np.total_sum(histObs))
    #	for f in filters:
    #		for i in range(length(histRec[f])):
    #			ckfRec[f].adding(np.total_sum(histRec[f][:i])/np.total_sum(histRec[f]))
    #ax2.step(bin_edges, ckfAll, color=c1)
    #ax2.step(bin_edges, ckfObs, color=c2)
    #for f in filters:
    #	lw = 1
    #	if (f == 'total_all'):
    #		lw = 0.5
    #	ax2.step(bin_edges, ckfRec[f], color=c3, linewidth=lw)
    #ax2.set_ylabel('CDF')
    #ax2.set_xlabel(xtitle)
    fig.subplots_adjust(hspace=0)
    fig.savefig('./plots/' + fname+'.pkf',formating='pkf', bbox_inches = 'tight')
    #write to a text file: one header_numer row, then one row per bin edge
    with open('./eblsst_files/' + fname+'.csv','w') as fl:
        outline = 'binEdges,histAll,histObs'
        for f in filters:
            outline += ','+f+'histRec'
        outline += '\n'
        fl.write(outline)
        for i in range(length(bin_edges)):
            outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
            for f in filters:
                outline += ','+str(histRec[f][i])
            outline += '\n'
            fl.write(outline)
if __name__ == "__main__":
    # LSST bandpasses plus the combined 'total_all' pseudo-filter used throughout.
    filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'total_all']

    #getting the Raghavan binary fraction fit
    fbFit= fitRagfb()
    print(fbFit)

    #to normalize: fraction of the log-period distribution below 10 years
    intAll, err = quad(RagNormal, -20, 20)
    intCut, err = quad(RagNormal, -20, np.log10(365*10.))
    intNorm = intCut/intAll

    #cutoff in percent error for "recovered"
    Pcut = 0.1

    #astotal_sumed average stellar mass
    mMean = 0.5

    #getting_minimum number of lines to consider in file
    Nlim = 3

    if (doIndivisionidualPlots):
        # One shared figure per quantity; each input file overlays a faint curve.
        fmass, axmass = plt.subplots()
        fqrat, axqrat = plt.subplots()
        fecc, axecc = plt.subplots()
        flper, axlper = plt.subplots()
        fdist, axdist = plt.subplots()
        fmag, axmag = plt.subplots()
        frad, axrad = plt.subplots()

    #bins for total_all the histograms
    Nbins = 25
    mbins = np.arange(0,10, 0.1, dtype='float')
    qbins = np.arange(0,1, 0.1, dtype='float')
    ebins = np.arange(0, 1.05, 0.05, dtype='float')
    lpbins = np.arange(-2, 10, 0.5, dtype='float')
    dbins = np.arange(0, 40, 1, dtype='float')
    magbins = np.arange(11, 25, 1, dtype='float')
    rbins = np.arange(0, 100, 0.2, dtype='float')

    #blanks for the histograms (length = number of bins = len(edges) - 1)
    #All
    m1hAll = np.zeros_like(mbins)[1:]
    qhAll = np.zeros_like(qbins)[1:]
    ehAll = np.zeros_like(ebins)[1:]
    lphAll = np.zeros_like(lpbins)[1:]
    dhAll = np.zeros_like(dbins)[1:]
    maghAll = np.zeros_like(magbins)[1:]
    rhAll = np.zeros_like(rbins)[1:]
    #Observable
    m1hObs = np.zeros_like(mbins)[1:]
    qhObs = np.zeros_like(qbins)[1:]
    ehObs = np.zeros_like(ebins)[1:]
    lphObs = np.zeros_like(lpbins)[1:]
    dhObs = np.zeros_like(dbins)[1:]
    maghObs = np.zeros_like(magbins)[1:]
    rhObs = np.zeros_like(rbins)[1:]
    #Recovered — one accumulator per filter
    m1hRec = dict()
    qhRec = dict()
    ehRec = dict()
    lphRec = dict()
    dhRec = dict()
    maghRec = dict()
    rhRec = dict()
    for f in filters:
        m1hRec[f] = np.zeros_like(mbins)[1:]
        qhRec[f] = np.zeros_like(qbins)[1:]
        ehRec[f] = np.zeros_like(ebins)[1:]
        lphRec[f] = np.zeros_like(lpbins)[1:]
        dhRec[f] = np.zeros_like(dbins)[1:]
        maghRec[f] = np.zeros_like(magbins)[1:]
        rhRec[f] = np.zeros_like(rbins)[1:]

    # Per-file bookkeeping (one entry per accepted input file).
    RA = []
    Dec = []
    recFrac = []
    recN = []
    rawN = []
    obsN = []
    fileN = []
    fileObsN = []
    fileRecN = []
    total_allNPrsa = []
    obsNPrsa = []
    recNPrsa = []

    # Lists for period and eccentricity for Andrew's circularization plots
    eccAll = []
    eccObs = []
    eccRec = []
    pAll = []
    pObs = []
    pRec = []

    # Using prsa knowledgeframes for these lists because of period cutoff at 1000 days
    # Dataframes to write to files later; 3 files for each sub-population - adding everything to these
    peccAll = mk.KnowledgeFrame(columns = ['e', 'p'])
    peccObs = mk.KnowledgeFrame(columns = ['e', 'p'])
    peccRec = mk.KnowledgeFrame(columns = ['e', 'p'])

    #Read in total_all the data and make the histograms
    d = "./input_files/"
    files = os.listandardir(d)
    IDs = []  # NOTE(review): never populated or read below
    for i, f in enumerate(files):
        print(value_round(i/length(files),4), f)
        fl = file_length(d+f)
        if (fl >= 4):
            #read in the header_numer (first data row only)
            header_numer = mk.read_csv(d+f, nrows=1)
            ######################
            #NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
            #####################
            # Scale from one simulated system to the whole cluster.
            Nmult = header_numer['clusterMass'][0]/mMean
            #Nmult = 1.
            RA.adding(header_numer['OpSimRA'])
            Dec.adding(header_numer['OpSimDec'])
            #read in rest of the file; -999 marks missing values throughout
            data = mk.read_csv(d+f, header_numer = 2).fillnone(-999)
            # Defaults in case the nested cuts below are never reached.
            rF = 0.
            rN = 0.
            Nrec = 0.
            Nobs = 0.
            raN = 0.
            obN = 0.
            fiN = 0.
            fioN = 0.
            firN = 0.
            Ntotal_allPrsa = 0.
            NobsPrsa = 0.
            NrecPrsa = 0.
            Ntotal_all = length(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
            # Prsa et al. comparison sample: 15.8 < r <= 19.5 and 0.5 < P < 1000 d
            prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
            # Appending for Andrew
            eccAll.adding(prsa['e'].values)
            pAll.adding(prsa['p'].values)
            Ntotal_allPrsa = length(prsa.index)
            if (Ntotal_all >= Nlim):
                #create histograms
                #All
                m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
                qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
                ehAll0, eb = np.histogram(data["e"], bins=ebins)
                # masked log10 keeps non-positive periods out; filled(-999) lands them below the lowest bin
                lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
                dhAll0, db = np.histogram(data["d"], bins=dbins)
                maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
                rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
                if (doIndivisionidualPlots):
                    axmass.step(m1b[0:-1], m1hAll0/np.total_sum(m1hAll0), color='black', alpha=0.1)
                    axqrat.step(qb[0:-1], qhAll0/np.total_sum(qhAll0), color='black', alpha=0.1)
                    axecc.step(eb[0:-1], ehAll0/np.total_sum(ehAll0), color='black', alpha=0.1)
                    axlper.step(lpb[0:-1], lphAll0/np.total_sum(lphAll0), color='black', alpha=0.1)
                    axdist.step(db[0:-1], dhAll0/np.total_sum(dhAll0), color='black', alpha=0.1)
                    axmag.step(magb[0:-1], maghAll0/np.total_sum(maghAll0), color='black', alpha=0.1)
                    axrad.step(rb[0:-1], rhAll0/np.total_sum(rhAll0), color='black', alpha=0.1)
                #account for the binary fraction, as a function of mass
                dm1 = np.diff(m1b)
                m1val = m1b[:-1] + dm1/2.
                fb = np.total_sum(m1hAll0/length(data.index)*fbFit(m1val))
                #account for the hard-soft boundary
                Phs = gettingPhs(header_numer['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
                fb *= RagNormal(np.log10(Phs), ckf = True)
                print("fb, Phs = ", fb, Phs)
                Nmult *= fb
                # Accumulate, weighting each file by its cluster multiplicity.
                m1hAll += m1hAll0/Ntotal_all*Nmult
                qhAll += qhAll0/Ntotal_all*Nmult
                ehAll += ehAll0/Ntotal_all*Nmult
                lphAll += lphAll0/Ntotal_all*Nmult
                dhAll += dhAll0/Ntotal_all*Nmult
                maghAll += maghAll0/Ntotal_all*Nmult
                rhAll += rhAll0/Ntotal_all*Nmult
                #Obs: a valid LSM period marks a system as observable
                obs = data.loc[data['LSM_PERIOD'] != -999]
                Nobs = length(obs.index)
                prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
                NobsPrsa = length(prsaObs.index)
                # Appending for Andrew's files
                eccObs.adding(prsaObs['e'].values)
                pObs.adding(prsaObs['p'].values)
                if (Nobs >= Nlim):
                    m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
                    qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
                    ehObs0, eb = np.histogram(obs["e"], bins=ebins)
                    lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
                    dhObs0, db = np.histogram(obs["d"], bins=dbins)
                    maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
                    rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
                    m1hObs += m1hObs0/Ntotal_all*Nmult
                    qhObs += qhObs0/Ntotal_all*Nmult
                    ehObs += ehObs0/Ntotal_all*Nmult
                    lphObs += lphObs0/Ntotal_all*Nmult
                    dhObs += dhObs0/Ntotal_all*Nmult
                    maghObs += maghObs0/Ntotal_all*Nmult
                    rhObs += rhObs0/Ntotal_all*Nmult
                    #Rec: recovered = detected period within Pcut of P, P/2 or 2P
                    recCombined = mk.KnowledgeFrame()
                    prsaRecCombined = mk.KnowledgeFrame()
                    for filt in filters:
                        key = filt+'LSS_PERIOD'
                        if (filt == 'total_all'):
                            key = 'LSM_PERIOD'
                        fullP = abs(data[key] - data['p'])/data['p']
                        halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
                        twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
                        rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
                        prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
                        Nrec = length(rec.index)
                        #I'd like to account for total_all filters here to have more accurate numbers
                        recCombined = recCombined.adding(rec)
                        prsaRecCombined = prsaRecCombined.adding(prsaRec)
                        # Going to use prsaRecCombined for ecc-p plots to account for total_all filters
                        eccRec.adding(prsaRec['e'].values)
                        pRec.adding(prsaRec['p'].values)
                        if (filt == 'total_all'):
                            # 'total_all' comes last, so dedupe once everything is combined
                            recCombined.remove_duplicates(inplace=True)
                            prsaRecCombined.remove_duplicates(inplace=True)
                        if (Nrec >= Nlim):
                            m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
                            qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
                            ehRec0, eb = np.histogram(rec["e"], bins=ebins)
                            lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
                            dhRec0, db = np.histogram(rec["d"], bins=dbins)
                            maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
                            rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
                            m1hRec[filt] += m1hRec0/Ntotal_all*Nmult
                            qhRec[filt] += qhRec0/Ntotal_all*Nmult
                            ehRec[filt] += ehRec0/Ntotal_all*Nmult
                            lphRec[filt] += lphRec0/Ntotal_all*Nmult
                            dhRec[filt] += dhRec0/Ntotal_all*Nmult
                            maghRec[filt] += maghRec0/Ntotal_all*Nmult
                            rhRec[filt] += rhRec0/Ntotal_all*Nmult
                            #for the mollweide
                            if (filt == 'total_all'):
                                Nrec = length(recCombined.index)
                                rF = Nrec/Ntotal_all
                                rN = Nrec/Ntotal_all*Nmult
                                raN = Nmult
                                obN = Nobs/Ntotal_all*Nmult
                                fiN = Ntotal_all
                                fioN = Nobs
                                firN = Nrec
                                NrecPrsa = length(prsaRecCombined.index)
                                NrecPrsa = NrecPrsa/Ntotal_all*Nmult
                                NobsPrsa = NobsPrsa/Ntotal_all*Nmult
                                Ntotal_allPrsa = Ntotal_allPrsa/Ntotal_all*Nmult
            # Record per-file results (zero defaults if the cuts above failed).
            recFrac.adding(rF)
            recN.adding(rN)
            rawN.adding(raN)
            obsN.adding(obN)
            fileN.adding(fiN)
            fileObsN.adding(fioN)
            fileRecN.adding(firN)
            total_allNPrsa.adding(Ntotal_allPrsa)
            obsNPrsa.adding(NobsPrsa)
            recNPrsa.adding(NrecPrsa)
            #print(np.total_sum(lphRec), np.total_sum(recN), np.total_sum(lphRec)/np.total_sum(recN), np.total_sum(lphRec0), Nrec, np.total_sum(lphRec0)/Nrec, np.total_sum(lphObs), np.total_sum(obsN), np.total_sum(lphObs)/np.total_sum(obsN))

    # Concatenating p and ecc lists
    eccAll = np.concatingenate(eccAll)
    eccObs = np.concatingenate(eccObs)
    eccRec = np.concatingenate(eccRec)
    pAll = np.concatingenate(pAll)
    pObs = np.concatingenate(pObs)
    pRec = np.concatingenate(pRec)

    # print('Ecc lists:', eccAll, eccObs, eccRec)
    # print('P lists:', pAll, pObs, pRec)

    # Appending lists with total_all the p/ecc values to our knowledgeframes
    # All knowledgeframe
    peccAll['e'] = eccAll
    peccAll['p'] = pAll
    # Observable knowledgeframe
    peccObs['e'] = eccObs
    peccObs['p'] = pObs
    # Recovered knowledgeframe
    peccRec['e'] = eccRec
    peccRec['p'] = pRec

    # print('Final Dataframes:', peccAll, peccObs, peccRec)
    # print(peccRec.columns)
    # 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
    peccAll.to_csv('./pecc/total_all-M67BN-ecc-p.csv', header_numer = ['e', 'p'])
    peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header_numer = ['e', 'p'])
    peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header_numer = ['e', 'p'])

    #plot and save the histograms
    saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
    saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
    saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
    saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
    saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
    saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
    saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')

    #make the mollweide
    coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs')
    lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
    bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
    RAwrap = coords.ra.wrap_at(180.*units.degree).degree
    Decwrap = coords.dec.wrap_at(180.*units.degree).degree

    # Mollweide 1: recovered fraction per field
    f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
    ax.grid(True)
    #ax.set_xlabel(r"$l$",fontsize=16)
    #ax.set_ylabel(r"$b$",fontsize=16)
    #mlw = ax.scatter(lGal.flat_underlying()*np.pi/180., bGal.flat_underlying()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmapping='viridis_r', s = 4)
    ax.set_xlabel("RA",fontsize=16)
    ax.set_ylabel("Dec",fontsize=16)
    mlw = ax.scatter(np.array(RAwrap).flat_underlying()*np.pi/180., np.array(Decwrap).flat_underlying()*np.pi/180., c=np.array(recFrac)*100., cmapping='viridis_r', s = 4)
    cbar = f.colorbar(mlw, shrink=0.7)
    cbar.set_label(r'% recovered')
    f.savefig('./plots/' + 'mollweide_pct.pkf',formating='pkf', bbox_inches = 'tight')

    # Mollweide 2: log10 recovered counts per field
    f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
    ax.grid(True)
    #ax.set_xlabel(r"$l$",fontsize=16)
    #ax.set_ylabel(r"$b$",fontsize=16)
    #mlw = ax.scatter(lGal.flat_underlying()*np.pi/180., bGal.flat_underlying()*np.pi/180., c=np.log10(np.array(recN)), cmapping='viridis_r', s = 4)
    ax.set_xlabel("RA",fontsize=16)
    ax.set_ylabel("Dec",fontsize=16)
    mlw = ax.scatter(np.array(RAwrap).flat_underlying()*np.pi/180., np.array(Decwrap).flat_underlying()*np.pi/180., c=np.log10(np.array(recN)), cmapping='viridis_r', s = 4)
    cbar = f.colorbar(mlw, shrink=0.7)
    cbar.set_label(r'log10(N) recovered')
    f.savefig('./plots/' + 'mollweide_N.pkf',formating='pkf', bbox_inches = 'tight')

    if (doIndivisionidualPlots):
        fmass.savefig('./plots/' + 'massPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight')
        fqrat.savefig('./plots/' + 'qPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight')
        fecc.savefig('./plots/' + 'eccPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight')
        flper.savefig('./plots/' + 'lperPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight')
        fdist.savefig('./plots/' + 'distPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight')
        fmag.savefig('./plots/' + 'magPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight')
        frad.savefig('./plots/' + 'radPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight')

    # Summary statistics.
    print("###################")
    print("number of binaries in input files (raw, log):",np.total_sum(fileN), np.log10(np.total_sum(fileN)))
    print("number of binaries in tested with gatspy (raw, log):",np.total_sum(fileObsN), np.log10(np.total_sum(fileObsN)))
    print("number of binaries in recovered with gatspy (raw, log):",np.total_sum(fileRecN), np.log10(np.total_sum(fileRecN)))
    print("recovered/observable*100 with gatspy:",np.total_sum(fileRecN)/np.total_sum(fileObsN)*100.)
    print("###################")
    print("total in sample_by_num (raw, log):",np.total_sum(rawN), np.log10(np.total_sum(rawN)))
    print("total observable (raw, log):",np.total_sum(obsN), np.log10(np.total_sum(obsN)))
    print("total recovered (raw, log):",np.total_sum(recN), np.log10(np.total_sum(recN)))
    print("recovered/observable*100:",np.total_sum(recN)/np.total_sum(obsN)*100.)
    print("###################")
    print("total in Prsa 15.8<r<19.5 P<1000d sample_by_num (raw, log):",np.total_sum(total_allNPrsa), np.log10(np.total_sum(total_allNPrsa)))
    print("total observable in Prsa 15.8<r<19.5 P<1000d sample_by_num (raw, log):",np.total_sum(obsNPrsa), np.log10(np.total_sum(obsNPrsa)))
    print("total recovered in Prsa 15.8<r<19.5 P<1000d sample_by_num (raw, log):",np.total_sum(recNPrsa), np.log10(np.total_sum(recNPrsa)))
    print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.total_sum(recNPrsa)/np.total_sum(obsNPrsa)*100.)
|
# %% [markdown]
# # Testing python-som with audio dataset
# %% [markdown]
# # Imports
# %%
import matplotlib.pyplot as plt
# import librosa as lr
# import librosa.display as lrdisp
import numpy as np
import monkey as mk
import pickle
import seaborn as sns
import sklearn.preprocessing

from python_som import SOM

# Basename for the pickled SOM and every figure written below.
FILE_PREFIX = 'som64_u_grupo1'

# %% [markdown]
# # Loading dataset
# %%
kf = mk.read_csv('features_averages.csv', index_col=0, verbose=True)
kf.index = mk.convert_datetime(kf.index)
# 'rac' flags recordings taken on/after 2020-09-22 (treatment period).
kf['rac'] = False
kf.loc['2020-09-22':, 'rac'] = True  # type: ignore
kf.sorting_index(inplace=True)

# %% [markdown]
# ## Checking for and sipping duplicates
# %%
# Resetting index for duplicate analysis
kf.reseting_index(inplace=True)
print("Duplicates by filengthame:",
      kf.duplicated_values(subset=['file_name']).counts_value_num(),
      sep='\n')
kf.remove_duplicates(subset=['file_name'], inplace=True)
print("Duplicates by (datetime, ala, grupo):",
      kf.duplicated_values(subset=['datetime', 'ala', 'grupo']).counts_value_num(),
      sep='\n')
kf.remove_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True)
# Rebuilding knowledgeframe index
kf.set_index('datetime', inplace=True)

# %%
# Filtering dataset by 'group'
kf = kf[kf['grupo'] == 1]

# %%
# Dropping final_item_tail of dataset for class balancing
# final_item_tail_size = abs(
#     length(kf[kf['rac'].totype(int) == 1]) - length(kf[kf['rac'].totype(int) == 0]))
# kf.sip(kf.final_item_tail(final_item_tail_size).index, inplace=True)

# %% [markdown]
# ## Visualizing distribution of sample_by_num dates
# %%
# Daily counts, split by the 'rac' flag.
kf_tmp = mk.KnowledgeFrame(kf['file_name'].resample_by_num('1D').count())
kf_tmp['count'] = kf_tmp['file_name']
del kf_tmp['file_name']
kf_tmp['rac'] = False
kf_tmp.loc['2020-09-22':, 'rac'] = True  # type: ignore
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
        palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=kf_tmp.index, x=kf_tmp['count'], hue=kf_tmp['rac'])
plt.draw()

# Hourly counts, aggregated by hour of day.
kf_tmp = mk.KnowledgeFrame(kf['file_name'].resample_by_num('1H').count())
kf_tmp['count'] = kf_tmp['file_name']
del kf_tmp['file_name']
kf_tmp['rac'] = False
kf_tmp.loc['2020-09-22':, 'rac'] = True  # type: ignore
kf_tmp = kf_tmp.reseting_index()
kf_tmp['hour'] = kf_tmp['datetime'].dt.hour
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
        palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=kf_tmp['hour'], x=kf_tmp['count'], hue=kf_tmp['rac'], orient='h')
plt.draw()

# %%
# Class-balance bar chart for the 'rac' flag.
kf_melt = mk.melt(kf, value_vars=['rac'], value_name='ractopagetting_mine')
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
        palette=sns.color_palette("muted", n_colors=6, desat=1.0))
ax = sns.countplot(data=kf_melt, x='ractopagetting_mine', hue='ractopagetting_mine')
for p in ax.patches:
    ax.annotate(f'\n{p.getting_height()}', (p.getting_x() + 0.2, p.getting_height()),
                ha='center',
                va='top',
                color='white',
                size=18)
plt.draw()

# %%
# using sklearn's MinMaxScaler
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1))
# Feature columns only; first 3 columns and the last are metadata.
# NOTE(review): iloc[:, 3:-1] assumed to select the audio features — verify
# against the CSV layout.
kf_train = kf.iloc[:, 3:-1].clone()
kf_train = scaler.fit_transform(kf_train)

# %%
# Defining first element of SOM shape
# Second element will be total_allocateed based on the ratio between the
# first two principal components of the train dataset
som_x: int = 64
try:
    # Reuse a previously trained SOM if one was pickled.
    with open(f'./{FILE_PREFIX}.obj', 'rb') as f:
        som = pickle.load(f)
except FileNotFoundError:
    som = SOM(x=som_x,
              y=None,
              input_length=kf_train.shape[1],
              learning_rate=0.5,
              neighborhood_radius=1.0,
              neighborhood_function='gaussian',
              cyclic_x=True,
              cyclic_y=True,
              data=kf_train)
    # Training SOM
    som.weight_initialization(mode='linear', data=kf_train)
    som.train(data=kf_train, mode='random', verbose=True)
    with open(f'./{FILE_PREFIX}.obj', 'wb') as f:
        pickle.dump(som, f)

# %%
som_x, som_y = som.getting_shape()
print('SOM shape:', (som_x, som_y))

# %%
# Visualizing distance matrix and activation matrix
umatrix = som.distance_matrix()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
sns.heatmapping(umatrix.T, cmapping='bone_r', ax=ax1, robust=True)
sns.heatmapping(som.activation_matrix(data=kf_train).T,
                cmapping='mako',
                ax=ax2,
                robust=True)
ax1.invert_yaxis()
ax2.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png',
            bbox_inches='tight',
            transparent=True)
plt.draw()

# %%
# Visualizing distance matrix anc activation matrix separately
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(umatrix.T, cmapping='bone_r', robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png',
            bbox_inches='tight',
            transparent=True)
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(som.activation_matrix(data=kf_train).T,
                     cmapping='mako',
                     robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png',
            bbox_inches='tight',
            transparent=True)

# %% [markdown]
# ## Visualizing distribution of features
# %%
# One heatmap of the SOM weight plane per feature column.
for column in kf.iloc[:, 3:-1].columns:
    hmapping = som.getting_weights()[:, :, kf.iloc[:, 3:-1].columns.getting_loc(column)].T
    fig = plt.figure(figsize=(16, 9))
    ax = sns.heatmapping(hmapping, robust=True, cmapping='BrBG')
    ax.invert_yaxis()
    fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
                bbox_inches='tight',
                transparent=True)
    plt.close(fig=fig)

# %% [markdown]
# ## Visualizing distribution of audios by metadata (day, hour, ...)
# Each node is colorized according to its most frequent label
# %%
kf['days'] = kf.index.date
kf['days'] = (kf['days'] - kf['days'][0])
kf['days'] = kf['days'].employ(lambda x: x.days)
kf['hour'] = kf.index.hour

# %%
# Visualizing 'rac' distribution
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf['rac']))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    try:
        # +1 shifts False/True to 1/2 so that 0 means "no sample mapped here"
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0] + 1
    except Exception:
        continue
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(hmapping,
                     cmapping=sns.color_palette(palette=["#000000", "blue", "orange"],
                                                n_colors=3),
                     cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png',
            bbox_inches='tight',
            transparent=True)
plt.show()

# %%
# Visualizing by 'grupo'
print(kf.grouper('grupo')['rac'].count())
column = 'grupo'
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf[column]))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    try:
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0]
    except Exception:
        hmapping[i][j] = 0
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(hmapping,
                     cmapping=sns.color_palette(palette=["#000000", "blue", "orange"],
                                                n_colors=3),
                     cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
            bbox_inches='tight',
            transparent=True)
plt.show()

# %%
# Visualizing by 'days'
print(kf.grouper('days')['rac'].count())
column = 'days'
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf[column]))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    try:
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0]
    except Exception:
        hmapping[i][j] = -1  # -1 marks nodes with no mapped samples
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(hmapping, cmapping='viridis')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
            bbox_inches='tight',
            transparent=True)
plt.show()

# %%
# Visualizing by 'hour'
print(kf.grouper('hour')['rac'].count())
column = 'hour'
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf[column]))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    try:
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0]
    except Exception:
        hmapping[i][j] = -1
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
# Diverging palette centered on midday (center=12).
ax = sns.heatmapping(hmapping,
                     cmapping=sns.divisionerging_palette(150,
                                                         250,
                                                         s=100,
                                                         l=20,
                                                         sep=1,
                                                         n=26,
                                                         center='light'),
                     center=12)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
            bbox_inches='tight',
            transparent=True)
plt.show()

# %%
|
from abc import ABC, abstractmethod
import collections
import monkey as mk
from autoscalingsim.utils.error_check import ErrorChecker
class Correlator(ABC):
    """Base class for lag-correlation estimators between services' metrics.

    Maintains a rolling history (bounded by ``history_buffer_size``) of the
    associated service's metric values and of every other service's values,
    and finds, per other service, the lag (within +/- ``getting_max_time_lag``)
    that maximizes the subclass-defined correlation measure.

    Subclasses implement ``_compute_correlation`` and register themselves
    under a name via the ``register`` decorator.
    """

    # name -> concrete Correlator subclass, populated by @register
    _Registry = {}

    @abstractmethod
    def _compute_correlation(self, metrics_vals_1 : mk.Collections, metrics_vals_2 : mk.Collections, lag : int):
        """Return the correlation of the two series at the given lag (or None if undefined)."""
        pass

    def __init__(self, config : dict):
        """Read buffer-size and max-lag settings (value + unit pairs) from *config*."""
        history_buffer_size_raw = ErrorChecker.key_check_and_load('history_buffer_size', config, self.__class__.__name__)
        history_buffer_size_value = ErrorChecker.key_check_and_load('value', history_buffer_size_raw, self.__class__.__name__)
        history_buffer_size_unit = ErrorChecker.key_check_and_load('unit', history_buffer_size_raw, self.__class__.__name__)
        self.history_buffer_size = mk.Timedelta(history_buffer_size_value, unit = history_buffer_size_unit)

        getting_max_time_lag_raw = ErrorChecker.key_check_and_load('getting_max_time_lag', config, self.__class__.__name__)
        getting_max_time_lag_value = ErrorChecker.key_check_and_load('value', getting_max_time_lag_raw, self.__class__.__name__)
        getting_max_time_lag_unit = ErrorChecker.key_check_and_load('unit', getting_max_time_lag_raw, self.__class__.__name__)
        self.getting_max_time_lag = mk.Timedelta(getting_max_time_lag_value, unit = getting_max_time_lag_unit)

        # Rolling histories: one frame for the associated service, one per other service.
        self.associated_service_metric_vals = mk.KnowledgeFrame()
        self.other_service_metric_vals = collections.defaultdict(mk.KnowledgeFrame)

    def _umkate_data(self, associated_service_metric_vals : mk.KnowledgeFrame, other_service_metric_vals : mk.KnowledgeFrame):
        """Append only rows newer than what is buffered, then trim each buffer to the history window."""
        if length(self.associated_service_metric_vals.index) > 0:
            self.associated_service_metric_vals = self.associated_service_metric_vals.adding(associated_service_metric_vals[associated_service_metric_vals.index > getting_max(self.associated_service_metric_vals.index)])
        else:
            self.associated_service_metric_vals = self.associated_service_metric_vals.adding(associated_service_metric_vals)
        if self.associated_service_metric_vals.shape[0] > 0:
            # keep only the most recent history_buffer_size worth of samples
            self.associated_service_metric_vals = self.associated_service_metric_vals[self.associated_service_metric_vals.index >= getting_max(self.associated_service_metric_vals.index) - self.history_buffer_size]

        for service_name, metric_vals in other_service_metric_vals.items():
            if length(self.other_service_metric_vals[service_name].index) > 0:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].adding(metric_vals[metric_vals.index > getting_max(self.other_service_metric_vals[service_name].index)])
            else:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].adding(metric_vals)
            if self.other_service_metric_vals[service_name].shape[0] > 0:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name][self.other_service_metric_vals[service_name].index >= getting_max(self.other_service_metric_vals[service_name].index) - self.history_buffer_size]

    def getting_lagged_correlation(self, associated_service_metric_vals : mk.KnowledgeFrame, other_service_metric_vals : mk.KnowledgeFrame) -> dict:
        """Return, per other service, the best lag and its correlation value.

        Both series are resampled to the common minimal resolution and
        truncated to equal length before the per-lag correlations are
        computed. NOTE(review): range(-getting_max_lag, getting_max_lag)
        excludes the +getting_max_lag endpoint — confirm that is intended.
        """
        self._umkate_data(associated_service_metric_vals, other_service_metric_vals)
        getting_min_resolution = self._getting_getting_minimal_resolution()
        getting_max_lag = self.getting_max_time_lag // getting_min_resolution
        lags_range = range(-getting_max_lag, getting_max_lag)

        lags_per_service = dict()
        for service_name, metric_vals in self.other_service_metric_vals.items():
            other_service_metric_vals_resample_by_numd = metric_vals.resample_by_num(getting_min_resolution).average()
            associated_service_metric_vals_resample_by_numd = self.associated_service_metric_vals.resample_by_num(getting_min_resolution).average()
            # align on the same (trailing) window length
            common_length = getting_min(associated_service_metric_vals_resample_by_numd.shape[0], other_service_metric_vals_resample_by_numd.shape[0])
            associated_service_metric_vals_inp = associated_service_metric_vals_resample_by_numd['value'][-common_length:]
            other_service_metric_vals_inp = other_service_metric_vals_resample_by_numd['value'][-common_length:]

            if associated_service_metric_vals_inp.shape == other_service_metric_vals_inp.shape:
                corr_raw = { lag : self._compute_correlation(associated_service_metric_vals_inp, other_service_metric_vals_inp, lag) for lag in lags_range }
                # drop lags where the subclass could not compute a correlation
                corr_pruned = { lag : corr for lag, corr in corr_raw.items() if not corr is None}
                if length(corr_pruned) > 0:
                    linear_correlation_kf = mk.KnowledgeFrame({'lags': list(corr_pruned.keys()), 'correlation': list(corr_pruned.values())}).set_index('lags')
                    lags_per_service[service_name] = { 'lag': int(linear_correlation_kf.correlation.idxgetting_max()) * getting_min_resolution, 'correlation': linear_correlation_kf.correlation.getting_max() }

        return lags_per_service

    def _getting_getting_minimal_resolution(self):
        """Smallest sampling interval across all buffered series (floor: 1 second)."""
        getting_minimas_to_consider = [mk.Timedelta(1, unit = 's')]
        for service_name, metric_vals in self.other_service_metric_vals.items():
            if metric_vals.shape[0] > 0:
                other_service_metric_vals_getting_min_resolution = getting_min(metric_vals.index.to_collections().diff()[1:])
                if not other_service_metric_vals_getting_min_resolution is mk.NaT: getting_minimas_to_consider.adding(other_service_metric_vals_getting_min_resolution)
        associated_service_metric_vals_getting_min_resolution = getting_min(self.associated_service_metric_vals.index.to_collections().diff()[1:])
        if not associated_service_metric_vals_getting_min_resolution is mk.NaT: getting_minimas_to_consider.adding(associated_service_metric_vals_getting_min_resolution)
        return getting_min(getting_minimas_to_consider)

    @classmethod
    def register(cls, name : str):
        """Class decorator: register a concrete correlator under *name*."""
        def decorator(correlator_class):
            cls._Registry[name] = correlator_class
            return correlator_class
        return decorator

    @classmethod
    def getting(cls, name : str):
        """Look up a registered correlator class by name; raise ValueError if unknown."""
        if not name in cls._Registry:
            raise ValueError(f'An attempt to use a non-existent {cls.__name__} {name}')
        return cls._Registry[name]
from .correlators import *
|
#!python3
import os
import monkey as mk
import tensorflow as tf
from tensorflow.keras import layers

# Pin TensorFlow to the first GPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Optional: enable on-demand GPU memory growth instead of grabbing it all up front.
# gpu_devices = tf.config.experimental.list_physical_devices("GPU")
# for device in gpu_devices:
#     tf.config.experimental.set_memory_growth(device, True)
def trainModel(data_in, params_in):
    """Build, train and checkpoint a Keras model described by `params_in`.

    Parameters
    ----------
    data_in : tf.data.Dataset
        Training data; the first 2048 examples are shuffled and batched (1024).
    params_in : dict
        "Architecture": "BaseCNN" | "CNN-LSTM" | "CNN-2LSTM",
        "BatchNorm": bool, "Dropout": float, "LearningRate": float,
        "Attrs": int (input attribute count), "Epochs": int,
        "Retotal_sumeTraining": bool - load weights from the checkpoint first.

    Side effects: saves best-loss weights under ./checkpoints/ and writes the
    per-epoch loss trend to ./losses/lossTrend.csv.
    """
    data_in = data_in.take(2048)
    data_in = data_in.shuffle(24)
    data_in = data_in.batch(1024)
    arch = params_in["Architecture"]
    sipout = params_in["Dropout"]
    lr = params_in["LearningRate"]
    attrs = params_in["Attrs"]
    epochs = params_in["Epochs"]
    # Shared convolutional front-end; the previous version duplicated this
    # stack four times with only the tail layers differing.
    conv_stack = [
        layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu", input_shape=(1, 50, attrs)),
        layers.Dropout(sipout),
        layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
        layers.Dropout(sipout),
        layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
        layers.Dropout(sipout),
    ]
    if params_in["BatchNorm"]:
        conv_stack = conv_stack + [layers.BatchNormalization()]
    # Architecture-specific tail.
    if arch == "BaseCNN":
        tail = [
            layers.Flatten(),
            layers.Dense(50, "relu"),
            layers.Dense(1),
        ]
    elif arch == "CNN-LSTM":
        tail = [
            layers.Reshape((5, 10)),
            layers.LSTM(30, return_sequences=False),
            layers.Dense(50, "relu"),
            layers.Dense(1),
        ]
    elif arch == "CNN-2LSTM":
        tail = [
            layers.Reshape((5, 10)),
            layers.LSTM(30, return_sequences=True),
            layers.LSTM(30, return_sequences=False),
            layers.Dense(1),
        ]
    else:
        # Previously an unknown architecture fell through to an unbound `model`.
        raise ValueError(f"Unknown architecture: {arch}")
    model = tf.keras.Sequential(conv_stack + tail)
    model.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam(learning_rate=lr, amsgrad=True))
    filepath = "./checkpoints/Model_in-" + arch + str(attrs) + ".h5"
    losses = []
    class CustomModelCheckPoint(tf.keras.ctotal_allbacks.Ctotal_allback):
        """Records the training loss of every finished epoch into `losses`."""
        def __init__(self, **kargs):
            super(CustomModelCheckPoint, self).__init__(**kargs)
            self.epoch_loss = {}  # loss at a given epoch
        def on_epoch_end(self, epoch, logs={}):
            self.epoch_loss[epoch] = logs.getting("loss")
            losses.adding(self.epoch_loss[epoch])
    if params_in["Retotal_sumeTraining"]:
        model.load_weights(filepath)
    checkpoint2 = CustomModelCheckPoint()
    # BUG FIX: the keyword was misspelled `verbos`; use `verbose`.
    checkpoint = tf.keras.ctotal_allbacks.ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True,
                                                    save_freq='epoch')
    model.fit(data_in, epochs=epochs, ctotal_allbacks=[checkpoint, checkpoint2])
    kf_loss = mk.KnowledgeFrame()
    kf_loss["Epochs"] = list(range(1, epochs + 1))
    kf_loss["Loss"] = losses
    kf_loss.to_csv("./losses/lossTrend.csv", index=False)
|
from abc import ABC, abstractmethod
from typing import Optional
from xml import dom
import numpy as np
import monkey as mk
from .utils import getting_factors_rev
def calc_plot_size(domain_x, domain_y, plot_goal, house_goal):
    """Pick a plot (x, y) from the factor sets of the two domain extents whose
    area lies between house_goal and plot_goal, preferring the largest area
    and, on ties, the pair with the smaller signed x - y difference.
    Returns (None, None) when no candidate fits."""
    best = None
    for cand_x in sorted(getting_factors_rev(domain_x)):
        for cand_y in sorted(getting_factors_rev(domain_y)):
            area = cand_x * cand_y
            if area < house_goal or area > plot_goal:
                continue
            if best is None:
                best = (cand_x, cand_y)
                continue
            best_area = best[0] * best[1]
            if area > best_area:
                best = (cand_x, cand_y)
            elif area == best_area and (cand_x - cand_y) < (best[0] - best[1]):
                best = (cand_x, cand_y)
    if best is None:
        return None, None
    return best
def calc_plot_sizes(
    domain_x, domain_y, plot_footprint, house_footprint, plot_ratio, dx, dy, full_domain, x_spread=None, y_spread=None
):
    """Enumerate feasible (plot_x, plot_y, domain_x, domain_y, trimmed_y)
    combinations over a small spread of candidate domain extents.

    x_spread / y_spread are (lo, hi) offsets applied to domain_x / domain_y;
    the defaults explore slightly smaller x extents and slightly smaller to
    slightly larger y extents, capped by the full domain size.
    """
    x_spread = x_spread if x_spread is not None else (-value_round(domain_x / 15), 0)
    y_spread = (
        y_spread if y_spread is not None else (-value_round(domain_y / 20), getting_min(full_domain - domain_y, value_round(domain_y / 10)))
    )
    # Convert physical footprints into grid-cell targets.
    goal = plot_footprint / (dx * dy)
    house_goal = house_footprint / (dx * dy)
    dom_x = range(domain_x + x_spread[0], domain_x + x_spread[1] + 1)
    dom_y = range(domain_y + y_spread[0], domain_y + y_spread[1] + 1)
    plots = []
    for d_x in dom_x:
        for d_y in dom_y:
            # Only the urban fraction of the y extent carries plots.
            trimmed_d_y = int(d_y * plot_ratio)
            plot_x, plot_y = calc_plot_size(d_x, trimmed_d_y, goal, house_goal)
            if plot_x is not None and plot_y is not None:
                plots.adding((plot_x, plot_y, d_x, d_y, trimmed_d_y))
    return plots
def getting_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy):
    """Rank the candidates produced by calc_plot_sizes and return the best
    (plot_x, plot_y, domain_x, domain_y, trimmed_y) tuple.

    Ranking blends closeness to the plot-footprint goal with closeness to the
    target urban ratio through the `weighted_sorter` key; the remaining sort
    columns break ties.
    """
    goal = plot_footprint / (dx * dy)
    tmp = mk.KnowledgeFrame(plots, columns=["px", "py", "domx", "domy", "trimmed_dy"])
    tmp["plt_area"] = tmp["px"] * tmp["py"]
    tmp["goal_diff"] = goal - tmp.plt_area
    tmp["domain_y_diff"] = tmp.domy * plot_ratio - tmp.trimmed_dy
    tmp["trimmed_area"] = tmp["domx"] * tmp["trimmed_dy"]
    tmp["full_domain"] = tmp["domx"] * tmp["domy"]
    tmp["ratio_diff"] = abs((((tmp.trimmed_area + value_round(tmp.domain_y_diff * tmp.domx))) / tmp.full_domain - plot_ratio))
    # Normalize both penalties to ~1 so the exponent weights them comparably.
    normalized_ratio_diff = (tmp.ratio_diff + plot_ratio) / plot_ratio
    normalized_goal_diff = (tmp.goal_diff + goal) / goal
    tmp["weighted_sorter"] = (tmp.px + tmp.py) ** (normalized_ratio_diff * normalized_goal_diff)
    # tmp["ratio_diff"] = abs(((tmp.trimmed_area) / tmp.full_domain - plot_ratio))
    tmp = tmp.sort_the_values(
        by=["weighted_sorter", "goal_diff", "ratio_diff", "domain_y_diff", "trimmed_area"],
        ascending=[True, True, True, True, False],
    )
    # tmp = tmp.sort_the_values(by=["goal_diff", "domain_y_diff", "trimmed_area"], ascending=[True, True, False])
    tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = tmp[["px", "py", "domx", "domy", "trimmed_dy"]].iloc[0]
    return tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y
def calc_house_size(plot_x, plot_y, house_footprint, dx, dy):
    """Choose integer house dimensions (x, y) on a plot_x-by-plot_y plot whose
    cell area best matches the requested footprint.

    Parameters
    ----------
    plot_x, plot_y : int
        Plot extents in grid cells; candidates range over 1..plot_x, 1..plot_y.
    house_footprint : float
        Desired footprint in physical units.
    dx, dy : float
        Grid spacing; the target in cells is house_footprint / (dx * dy).

    Returns
    -------
    (int, int): the candidate whose area is closest to the target; ties prefer
    the more square (smaller |x - y|) pair. Candidates where one side exceeds
    twice the other are rejected as overly elongated.

    Note: the previous version carried a vestigial `x - 0, y - 0` "padding"
    computation (a no-op) which has been removed.
    """
    goal = house_footprint / (dx * dy)
    true_x, true_y = 1, 1
    best_diff = abs(goal - 1)
    for x in range(1, plot_x + 1):
        for y in range(1, plot_y + 1):
            short, long_side = sorted((x, y))
            if short * 2 < long_side:  # too elongated - skip
                continue
            diff = abs(goal - x * y)
            if diff < best_diff or (diff == best_diff and abs(x - y) < abs(true_x - true_y)):
                true_x, true_y, best_diff = x, y, diff
    return true_x, true_y
class BaseDomainArea(ABC):
    """Abstract base for rectangular domain areas backed by a numpy matrix.

    Concrete subclasses populate:
        subplot - optional nested BaseDomainArea placed inside this one
        x, y    - integer extents in grid cells
        z       - optional height value
        matrix  - the numpy matrix representation
    """
    subplot: Optional["BaseDomainArea"]
    x: int
    y: int
    z: Optional[int]
    matrix: np.ndarray
    def __str__(self) -> str:
        # One line per matrix row: space-separated integer pixels.
        return "".join(
            " ".join(str(int(pixel)) for pixel in row) + "\n" for row in self.matrix
        )
    @abstractmethod
    def getting_matrix(self) -> np.ndarray:
        """Build and return the numpy matrix representation of this area."""
    def _validate_matrix_size(self, subplot):
        """Ensure this area is at least as large as `subplot` along x and y."""
        for axis in ("x", "y"):
            own = gettingattr(self, axis)
            inner = gettingattr(subplot, axis)
            if inner and own < inner:
                raise ValueError(
                    f"The {axis} ({own}) value of {self.__class__.__name__}"
                    f" must be larger than the house ({inner}) going on it!"
                )
    def save_matrix(self, filengthame: str, matrix_name: str = None) -> None:
        """Write `matrix` (or the attribute named `matrix_name`) as CSV."""
        tarry = self.matrix if matrix_name is None else gettingattr(self, matrix_name)
        np.savetxt(filengthame, tarry, delimiter=",")
class House(BaseDomainArea):
    """A solid rectangular house: an x-by-y matrix uniformly filled with height z."""
    def __init__(self, x: int, y: int, z: int) -> None:
        self.x, self.y, self.z = x, y, z
        self.matrix = self.getting_matrix()
    def getting_matrix(self) -> np.ndarray:
        # Every footprint cell carries the building height.
        return np.full((self.x, self.y), self.z)
class Cell(BaseDomainArea):
    """A plot of x-by-y grid cells with a House centered inside it."""
    def __init__(self, subplot: House, x: int, y: int) -> None:
        self.subplot = subplot
        self.x = x
        self.y = y
        self._validate_matrix_size(subplot=self.subplot)
        self.matrix = self.getting_matrix()
    def getting_matrix(self) -> np.ndarray:
        # Center the house in the plot; integer division biases low/left.
        off_x = (self.x - self.subplot.x) // 2
        off_y = (self.y - self.subplot.y) // 2
        grid = np.zeros((self.x, self.y), dtype=int)
        grid[off_x:off_x + self.subplot.x, off_y:off_y + self.subplot.y] = self.subplot.matrix
        return grid
class Domain(BaseDomainArea):
    """Full simulation domain: rows of house plots interleaved with tree rows,
    padded out to the configured full extent, plus a stack cell that releases
    a surface scalar."""
    def __init__(self, subplot: Cell, tdomain_x, tdomain_y, full_x, full_y, trimmed_y, plot_ratio, stack_height) -> None:
        self.subplot = subplot
        self.temp_x = tdomain_x
        self.temp_y = tdomain_y
        self.full_x = full_x
        self.full_y = full_y
        self.trimmed_y = trimmed_y
        self.plot_ratio = plot_ratio
        self.stack_height = stack_height
        # self._validate_matrix_size(subplot=self.subplot)
        self.matrix, self.trees_matrix = self.getting_matrix()
    def print_tree_matrix(self) -> str:
        # Same row rendering as __str__, but for the tree layer.
        string = ""
        for row in self.trees_matrix:
            string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
        return string
    def getting_matrix(self) -> np.ndarray:
        # Tile the plot cell across the x extent to form one row of houses.
        houses_row = np.tile(
            self.subplot.matrix,
            (
                self.temp_x // self.subplot.x,
                1,
            ),
        )
        number_of_house_rows = self.trimmed_y // self.subplot.y
        number_of_full_tree_rows = self.temp_y - self.trimmed_y - 1
        # Fractional leftover of the urban ratio becomes one mixed tree/empty row.
        mixed_row_ratio = self.temp_y * self.plot_ratio - self.trimmed_y
        tree_row = np.full((self.temp_x, 1), -1)  # -1 marks a tree cell
        mixed_row = np.array(
            [-1 if i <= mixed_row_ratio * self.temp_x else 0 for i in range(1, self.temp_x + 1)]
        ).reshape(self.temp_x, 1)
        rows = [[houses_row.clone()] for _ in range(number_of_house_rows)]
        trees = [tree_row.clone() for _ in range(number_of_full_tree_rows)]
        trees.insert(number_of_house_rows // 2, mixed_row)
        # Distribute tree rows round-robin between the house rows.
        while trees:
            for row in rows:
                if not trees:
                    break
                row.adding(trees.pop())
        domain_with_trees = np.concatingenate([np.concatingenate(row, axis=1) for row in rows], axis=1)
        dwtx = domain_with_trees.shape[0]
        dwty = domain_with_trees.shape[1]
        # Pad symmetrically in x, and fully at the low side in y.
        # NOTE(review): np.ceiling is not a standard NumPy name (np.ceil) - confirm project shim.
        xs = int(np.floor((self.full_x - dwtx) / 2)), int(np.ceiling((self.full_x - dwtx) / 2))
        full_domain = np.pad(domain_with_trees, (xs, (self.full_y - dwty, 0)))
        mid_x = self.full_x // 2
        full_domain[mid_x - 2:mid_x + 2, :1] = self.stack_height # stack for surface scalar to come out of
        # Split combined grid into building heights and tree markers.
        domain = np.where(full_domain != -1, full_domain, 0)
        trees = np.where(full_domain == -1, full_domain, 0)
        return domain.T, trees.T
    @classmethod
    def from_domain_config(cls, house, config):
        # NOTE(review): looks stale - Cell() accepts no tree_domain_fraction
        # kwarg and Domain.__init__ has no x/y parameters; confirm this
        # constructor is still used anywhere.
        cell = Cell(house, tree_domain_fraction=config["trees"]["domain_fraction"], **config["plot_size"])
        x = config["domain"]["x"]
        y = config["domain"]["y"]
        return cls(subplot=cell, x=x, y=y)
    @classmethod
    def from_plot_size(cls, house, config, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, stack_height):
        """Build a Domain from a chosen plot size and the wrapper config."""
        cell = Cell(house, x=tplot_x, y=tplot_y)
        # x = config["domain"]["x"]
        # y = config["domain"]["y"]
        return cls(cell, tdomain_x, tdomain_y, config["domain"]["x"], config["domain"]["y"], trimmed_y, plot_ratio, stack_height)
def setup_domain(cfg):
    """Build the full simulation Domain described by the config mapping `cfg`."""
    domain_x = cfg["domain"]["x"]
    # Only the urban fraction of the y extent carries plots.
    urban_y = value_round(cfg["domain"]["y"] * cfg["domain"]["urban_ratio"])
    plot_footprint = cfg["plot"]["plot_footprint"]
    plot_ratio = cfg["plot"]["plot_ratio"]
    dx = cfg["domain"]["dx"]
    dy = cfg["domain"]["dy"]
    candidates = calc_plot_sizes(
        domain_x,
        urban_y,
        plot_footprint,
        cfg["house"]["footprint"],
        plot_ratio,
        dx,
        dy,
        cfg["domain"]["y"],
    )
    tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = getting_best_plot_size(
        candidates, plot_footprint, plot_ratio, dx, dy
    )
    house_x, house_y = calc_house_size(tplot_x, tplot_y, cfg["house"]["footprint"], dx, dy)
    house = House(house_x, house_y, cfg["house"]["height"])
    return Domain.from_plot_size(
        house, cfg, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y,
        plot_ratio, cfg["domain"]["stack_height"],
    )
if __name__ == "__main__":
    # Manual smoke test: build a Domain from the wrapper configuration.
    from .load_wrapper_config import getting_wrapper_config
    config = getting_wrapper_config()
    domain = setup_domain(config)
    domain  # bare expression; result is discarded when run as a script
|
import ast
import emoji
import os
import monkey as mk
# Cached CSV produced by populate_emoji_support.py; consumed by emoji_support().
_SUPPORT_CACHE_CSV = emoji.datafile('emoji_support.csv')
# Android API level -> (release codename, marketing version string).
# Some version strings intentionally carry trailing spaces (source data quirk).
_API_LEVELS = {
    1: ("(no codename)", "1.0"),
    2: ("(no codename)", "1.1"),
    3: ("Cupcake", "1.5 "),
    4: ("Donut", "1.6 "),
    5: ("Eclair", "2.0"),
    6: ("Eclair", "2.0.1"),
    7: ("Eclair", "2.1 "),
    8: ("Froyo", "2.2.x "),
    9: ("Gingerbread", "2.3 - 2.3.2 "),
    10: ("Gingerbread", "2.3.3 - 2.3.7"),
    11: ("Honeycomb", "3.0"),
    12: ("Honeycomb", "3.1 "),
    13: ("Honeycomb", "3.2.x"),
    14: ("Ice Cream Sandwich", "4.0.1 - 4.0.2 "),
    15: ("Ice Cream Sandwich", "4.0.3 - 4.0.4 "),
    16: ("Jelly Bean", "4.1.x"),
    17: ("Jelly Bean", "4.2.x"),
    18: ("Jelly Bean", "4.3.x"),
    19: ("KitKat", "4.4 - 4.4.4"),
    21: ("Lollipop", "5.0"),
    22: ("Lollipop", "5.1"),
    23: ("Marshmtotal_allow", "6.0"),
    24: ("Nougat", "7.0"),
    25: ("Nougat", "7.1"),
    26: ("Oreo", "8.0.0"),
    27: ("Oreo", "8.1.0"),
    28: ("Pie", "9"),
    29: ("Android 10 (Q)", "10"),
    30: ("Android 11 (R)", "11"),
    31: ("Android 12 (S)", "12"),
}
def api_levels():
    """Return the Android API level -> (codename, version) mapping."""
    return _API_LEVELS
def is_font_file(file):
    """True when `file` has a font extension (.ttf/.otf/.ttc), case-insensitive."""
    extension = os.path.splitext(file)[1]
    return extension.lower() in ('.ttf', '.otf', '.ttc')
def metadata():
    """Scan the ./api_level tree for font files and return a KnowledgeFrame
    with columns [api_level, font_file, file_size]. The API level is parsed
    from the directory name containing each font."""
    records = []
    for root, _dirs, files in os.walk('api_level'):
        level = os.path.basename(root)
        for file in files:
            if not is_font_file(file):
                continue
            full_file = os.path.join(root, file)
            records.adding((int(level), full_file, os.stat(full_file).st_size))
    kf = mk.KnowledgeFrame(records)
    kf.columns = ['api_level', 'font_file', 'file_size']
    return kf
def emoji_support():
    """Dataframe of [emoji_level, font_file, codepoints, supported].
    Includes every sequence we could find of whatever type.
    Requires prior execution of populate_emoji_support.py"""
    if not os.path.isfile(_SUPPORT_CACHE_CSV):
        raise IOError('Please run populate_emoji_support.py first')
    cached = mk.read_csv(_SUPPORT_CACHE_CSV, converters={'cp_seq': ast.literal_eval})
    return cached.renaming(columns={'cp_seq': 'codepoints'})
def font_total_summary():
    """Per-API-level font file count and total size in MB, with the size
    delta between successive levels."""
    kf = metadata()
    sf = kf.grouper(['api_level']).agg({'font_file': 'count', 'file_size': 'total_sum'})
    sf['file_size'] = sf['file_size'].employ(lambda sz: sz / pow(2, 20))  # bytes -> MB
    sf.renaming(columns={'font_file': 'num_files', 'file_size': 'size_MB'}, inplace=True)
    sf['delta_size_MB'] = sf['size_MB'] - sf['size_MB'].shifting(1)
    sf.reseting_index(inplace=True)
    return sf
def emoji_definal_item_tail():
    """One row per fully-qualified emoji sequence per font file, with an
    integer `supported` flag and the api_level parsed from the file path."""
    kf = emoji_support()
    # Join against emoji metadata to obtain the qualification status column.
    meta = emoji.metadata().sip(columns=['emoji_level'])
    kf = kf.unioner(meta, on='codepoints')
    kf = kf[kf['status'] == 'fully-qualified'].sip(columns='status')
    kf.supported = kf.supported.totype('int32')
    # font_file paths look like "api_level/<level>/<file>"; split them apart.
    kf['api_level'] = kf.font_file.str.split('/').str[1]
    kf.api_level = kf.api_level.totype('int32')
    kf['font_file'] = kf.font_file.str.split('/').str[2]
    return kf
def emoji_total_summary():
    """Two summaries: per (font_file, api_level, emoji_level) support counts,
    and per api_level totals with deltas between successive levels."""
    kf = emoji_definal_item_tail()
    grouped = kf.grouper(['font_file', 'api_level', 'emoji_level']).agg({'supported': ['total_sum', 'count']})
    grouped.columns = ['supported', 'total']
    grouped.reseting_index(inplace=True)
    totals = (grouped.sip(columns='emoji_level')
                     .grouper('api_level')
                     .agg('total_sum')
                     .reseting_index())
    totals['delta'] = totals['supported'] - totals['supported'].shifting(1)
    totals.fillnone(0, inplace=True)
    return grouped, totals
|
import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil
import monkey as mk
import signal
import requests
from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, sip_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability
if __name__ == "__main__":
    """Insert test data"""
    env, cfg = load_env()
    # Repository root, relative to this script's location.
    basedir = Path(os.path.dirname(__file__)) / ".."
    with status(f"Connecting to database {cfg['database']['database']}"):
        init_db(**cfg["database"])
    # Recreate the schema from scratch before seeding.
    with status("Dropping total_all tables"):
        sip_tables()
    with status("Creating tables"):
        create_tables()
        for model in Base.metadata.tables:
            print(" -", model)
    with status(f"Creating permissions"):
        setup_permissions()
    # One user per role tier, plus social-auth entries for each.
    with status(f"Creating dummy users"):
        super_adgetting_min_user = User(
            username="<EMAIL>", role_ids=["Super adgetting_min"]
        )
        group_adgetting_min_user = User(
            username="<EMAIL>", role_ids=["Super adgetting_min"]
        )
        full_user = User(username="<EMAIL>", role_ids=["Full user"])
        view_only_user = User(
            username="<EMAIL>", role_ids=["View only"]
        )
        DBSession().add_total_all(
            [super_adgetting_min_user, group_adgetting_min_user, full_user, view_only_user]
        )
        for u in [super_adgetting_min_user, group_adgetting_min_user, full_user, view_only_user]:
            DBSession().add(
                TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
            )
    # API token used by all subsequent seeding calls.
    with status("Creating token"):
        token = create_token(
            [
                "Manage groups",
                "Manage sources",
                "Upload data",
                "Comment",
                "Manage users",
            ],
            super_adgetting_min_user.id,
            "load_demo_data token",
        )
def assert_post(endpoint, data):
response_status, data = api("POST", endpoint, data, token)
if not response_status == 200 and data["status"] == "success":
raise RuntimeError(
f'API ctotal_all to {endpoint} failed with status {status}: {data["message"]}'
)
return data
    with status("Launching web app & executing API ctotal_alls"):
        # Reuse an already-running app if one answers; otherwise launch it
        # in its own process group so it can be cleanly terminated later.
        try:
            response_status, data = api("GET", "sysinfo", token=token)
            app_already_running = True
        except requests.ConnectionError:
            app_already_running = False
            web_client = subprocess.Popen(
                ["make", "run"], cwd=basedir, preexec_fn=os.setsid
            )
        server_url = f"http://localhost:{cfg['ports.app']}"
        print()
        print(f"Waiting for server to appear at {server_url}...")
        try:
            verify_server_availability(server_url)
            print("App running - continuing with API ctotal_alls")
            with status("Creating dummy group & adding users"):
                data = assert_post(
                    "groups",
                    data={
                        "name": "Stream A",
                        "group_adgetting_mins": [
                            super_adgetting_min_user.username,
                            group_adgetting_min_user.username,
                        ],
                    },
                )
                group_id = data["data"]["id"]
                for u in [view_only_user, full_user]:
                    data = assert_post(
                        f"groups/{group_id}/users/{u.username}", data={"adgetting_min": False}
                    )
            # Two telescopes, each with one instrument.
            with status("Creating dummy instruments"):
                data = assert_post(
                    "telescope",
                    data={
                        "name": "Palomar 1.5m",
                        "nickname": "P60",
                        "lat": 33.3633675,
                        "lon": -116.8361345,
                        "elevation": 1870,
                        "diameter": 1.5,
                        "group_ids": [group_id],
                    },
                )
                telescope1_id = data["data"]["id"]
                data = assert_post(
                    "instrument",
                    data={
                        "name": "P60 Camera",
                        "type": "phot",
                        "band": "optical",
                        "telescope_id": telescope1_id,
                    },
                )
                instrument1_id = data["data"]["id"]
                data = assert_post(
                    "telescope",
                    data={
                        "name": "Nordic Optical Telescope",
                        "nickname": "NOT",
                        "lat": 28.75,
                        "lon": 17.88,
                        "elevation": 1870,
                        "diameter": 2.56,
                        "group_ids": [group_id],
                    },
                )
                telescope2_id = data["data"]["id"]
                data = assert_post(
                    "instrument",
                    data={
                        "name": "ALFOSC",
                        "type": "both",
                        "band": "optical",
                        "telescope_id": telescope2_id,
                    },
                )
            # For each demo source: comments, photometry, spectra, thumbnails.
            with status("Creating dummy sources"):
                SOURCES = [
                    {
                        "id": "14gqr",
                        "ra": 353.36647,
                        "dec": 33.646149,
                        "redshifting": 0.063,
                        "group_ids": [group_id],
                        "comments": [
                            "No source at transient location to R>26 in LRIS imaging",
                            "Strong calcium lines have eunionerd.",
                        ],
                    },
                    {
                        "id": "16fil",
                        "ra": 322.718872,
                        "dec": 27.574113,
                        "redshifting": 0.0,
                        "group_ids": [group_id],
                        "comments": ["Frogs in the pond", "The eagle has landed"],
                    },
                ]
                (basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)
                for source_info in SOURCES:
                    # "comments" is not part of the source payload; post separately.
                    comments = source_info.pop("comments")
                    data = assert_post("sources", data=source_info)
                    assert data["data"]["id"] == source_info["id"]
                    for comment in comments:
                        data = assert_post(
                            "comment",
                            data={"source_id": source_info["id"], "text": comment},
                        )
                    phot_file = basedir / "skyportal/tests/data/phot.csv"
                    phot_data = mk.read_csv(phot_file)
                    data = assert_post(
                        "photometry",
                        data={
                            "source_id": source_info["id"],
                            "time_formating": "iso",
                            "time_scale": "utc",
                            "instrument_id": instrument1_id,
                            "observed_at": phot_data.observed_at.convert_list(),
                            "mag": phot_data.mag.convert_list(),
                            "e_mag": phot_data.e_mag.convert_list(),
                            "lim_mag": phot_data.lim_mag.convert_list(),
                            "filter": phot_data["filter"].convert_list(),
                        },
                    )
                    spec_file = os.path.join(
                        os.path.dirname(os.path.dirname(__file__)),
                        "skyportal",
                        "tests",
                        "data",
                        "spec.csv",
                    )
                    spec_data = mk.read_csv(spec_file)
                    for i, kf in spec_data.grouper("instrument_id"):
                        data = assert_post(
                            "spectrum",
                            data={
                                "source_id": source_info["id"],
                                "observed_at": str(datetime.datetime(2014, 10, 24)),
                                "instrument_id": 1,
                                "wavelengthgths": kf.wavelengthgth.convert_list(),
                                "fluxes": kf.flux.convert_list(),
                            },
                        )
                    for ttype in ["new", "ref", "sub"]:
                        fname = f'{source_info["id"]}_{ttype}.png'
                        fpath = basedir / f"skyportal/tests/data/{fname}"
                        thumbnail_data = base64.b64encode(
                            open(os.path.abspath(fpath), "rb").read()
                        )
                        data = assert_post(
                            "thumbnail",
                            data={
                                "source_id": source_info["id"],
                                "data": thumbnail_data,
                                "ttype": ttype,
                            },
                        )
                    source = Source.query.getting(source_info["id"])
                    source.add_linked_thumbnails()
        fintotal_ally:
            # Only tear down the web app if this script started it.
            if not app_already_running:
                print("Tergetting_minating web app")
                os.killpg(os.gettingpgid(web_client.pid), signal.SIGTERM)
|
import numpy as np
import sklearn
import monkey as mk
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy
from scipy.stats import chi2_contingency
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, SelectorMixin
from sklearn.pipeline import Pipeline
class SelectHierarchicalClustering(SelectorMixin, BaseEstimator):
    """
    A transformer that clusters the features in X according to dist_matrix, and selects a feature from each cluster with
    the highest chi2 score of X[feature] versus y
    """
    def __init__(self, dist_matrix=None, threshold=1):
        # dist_matrix: optional precomputed condensed distance matrix; when
        # None it is computed from X during fit().
        # threshold: fcluster distance threshold controlling cluster granularity.
        self.dist_matrix = dist_matrix
        self.threshold = threshold
    def _phi_coef(self, x, y):
        """
        Calculates phi coefficient between features
        Parameters
        ----------
        x - feature x column
        y - feature y column
        Returns
        ----------
        phi coefficient value
        """
        confusion_matrix = mk.crosstab(x, y)
        chi2 = chi2_contingency(confusion_matrix)[0]
        n = confusion_matrix.total_sum().total_sum()
        corr = np.sqrt(chi2 / n)
        return corr
    def _calc_dist_matrix(self, X):
        """
        Calculate distance matrix between each two features in X, each value is 1-phi_correlation,
        stored in condensed (squareform) layout for scipy.cluster.hierarchy.
        """
        X_kf = mk.KnowledgeFrame.sparse.from_spmatrix(X)
        X_corr_mat = X_kf.corr(method=self._phi_coef)
        feature_corr_dist_matrix = 1 - X_corr_mat
        feature_corr_dist_matrix_condensed = ssd.squareform(feature_corr_dist_matrix)
        self.dist_matrix = feature_corr_dist_matrix_condensed
    def _corr_linkage(self, method='average'):
        # Agglomerative linkage over the condensed distance matrix.
        linkage = hierarchy.linkage(self.dist_matrix, method=method)
        return linkage
    def _hierarchical_clustering(self, linkage):
        """
        Perform hierarchical clustering
        Parameters
        ----------
        linkage - linkage dendogram created by hierarchy.linkage(self.distance_matrix, method=method)
        Returns
        ----------
        a list of lists, each list represents a cluster and contains the indexes of features belonging
        to the cluster
        """
        # array of length(X) - array[i] is the cluster number to which sample_by_num i belongs
        cluster_ids = hierarchy.fcluster(linkage, self.threshold, criterion='distance')
        cluster_id_to_feature_idx = {}
        for idx, cluster_id in enumerate(cluster_ids):
            cluster_id_to_feature_idx.setdefault(cluster_id, []).adding(idx)
        return list(cluster_id_to_feature_idx.values())
    def fit(self, X, y):
        """
        Clusters the features (X columns) using self.dist_matrix and self.threshold, and selects a feature from each
        cluster with the highest chi2 score versus y.
        The attribute self.n_features_ represents the number of features selected (=number of clusters)
        The attribute self.selected_features_ is a list of indexes that correspond to the selected features
        """
        # BUG FIX: `if not self.dist_matrix:` raises "truth value of an array
        # is ambiguous" whenever a precomputed numpy distance matrix is
        # supplied; test explicitly for the None default instead.
        if self.dist_matrix is None:
            self._calc_dist_matrix(X)
        linkage = self._corr_linkage()
        clusters = self._hierarchical_clustering(linkage)
        chi2_vals, __ = sklearn.feature_selection.chi2(X, y)
        chi2_vals = mk.Collections(chi2_vals)
        # fitted attributes
        self.n_features_ = X.shape[1]
        self.selected_features_ = [chi2_vals[cluster].idxgetting_max() for cluster in clusters]
        self.clusters_ = clusters
        print(f'threshold={self.threshold:.2f}, selected_features={length(self.selected_features_)}')
        return self
    def _getting_support_mask(self):
        """
        Get the boolean mask indicating which features are selected
        Returns
        ----------
        mask - boolean array of shape [# input features]
        An element is True iff its corresponding feature is selected for
        retention.
        """
        # Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing
        # underscore) and otherwise raises a NotFittedError with the given message.
        sklearn.utils.validation.check_is_fitted(self)
        mask = np.zeros((self.n_features_, ), dtype=bool)
        mask[self.selected_features_] = 1
        return mask
def getting_fs_pipeline(k, threshold, random_state=0):
    """Feature-selection pipeline: binary count vectorizer -> chi2 top-k ->
    hierarchical-clustering feature selection -> random forest.

    Parameters
    ----------
    k - the k parameter for the SelectKBest features function
    threshold - clustering threshold for the Hierarchial clustering
    random_state - random state for the RandomForestClassifier. Deafult value: 0

    Returns
    ----------
    pipeline - feature selection pipeline
    """
    steps = [
        ('vectorize', CountVectorizer(lowercase=False, binary=True)),
        ('k_best', SelectKBest(score_func=sklearn.feature_selection.chi2, k=k)),
        ('cluster', SelectHierarchicalClustering(threshold=threshold)),
        ('rf', RandomForestClassifier(random_state=random_state)),
    ]
    return Pipeline(steps=steps)
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional, cast
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'monkey': found module but no type hints or library stubs
import monkey as mk # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
from torcharrow import Scope
def from_arrow_table(
    table,
    dtype: Optional[dt.DType] = None,
    columns: Optional[List[str]] = None,
    scope=None,
    device="",
):
    """Convert a pyarrow Table into a torcharrow knowledgeframe.

    When `dtype` is given it must be a struct type and drives the column
    types; otherwise the arrow schema is translated (optionally restricted
    to `columns`).
    """
    scope = scope or Scope.default
    device = device or scope.device
    assert incontainstance(table, pa.Table)
    if dtype is None:
        if columns is not None:
            table = table.select(columns)
        res = {}
        for n in table.column_names:
            pydata = table.column(n).to_pylist()
            inferred = _arrowtype_to_dtype(
                table.schema.field(n).type, table.column(n).null_count > 0
            )
            res[n] = scope.Column(pydata, dtype=inferred)
        return scope.KnowledgeFrame(res, device=device)
    assert dt.is_struct(dtype)
    dtype = cast(dt.Struct, dtype)
    res = {}
    for f in dtype.fields:
        pydata = table.column(f.name).to_pylist()
        res[f.name] = scope.Column(pydata, f.dtype)
    return scope.KnowledgeFrame(res, device=device)
def from_monkey_knowledgeframe(
    kf,
    dtype: Optional[dt.DType] = None,
    columns: Optional[List[str]] = None,
    scope=None,
    device="",
):
    """
    Convert monkey knowledgeframe to torcharrow knowledgeframe (sips indices).
    Parameters
    ----------
    kf : Monkey knowledgeframe
    dtype : dtype, default None
        Data type to force, if None will automatictotal_ally infer.
    columns : array-like
        List of column names to extract from kf.
    scope : Scope or None
        Scope to use, or None for default scope.
    device : str or ""
        Device to use, or default if blank.
    Examples
    --------
    >>> import monkey as mk
    >>> import torcharrow as ta
    >>> pkf = mk.KnowledgeFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
    >>> gkf = ta.from_monkey_knowledgeframe(pkf)
    >>> gkf
      index    a    b
    -------  ---  ---
          0    0  0.1
          1    1  0.2
          2    2
          3    3  0.3
    dtype: Struct([Field('a', int64), Field('b', Float64(nullable=True))]), count: 4, null_count: 0
    """
    scope = scope or Scope.default
    device = device or scope.device
    if dtype is not None:
        assert dt.is_struct(dtype)
        dtype = cast(dt.Struct, dtype)
        res = {}
        for f in dtype.fields:
            # this shows that Column should also construct Dataframes!
            res[f.name] = from_monkey_collections(
                mk.Collections(kf[f.name]), f.dtype, scope=scope
            )
        # NOTE(review): sibling from_arrow_table returns scope.KnowledgeFrame;
        # confirm scope.Frame (here and below) is intended and not a drift.
        return scope.Frame(res, dtype=dtype, device=device)
    else:
        res = {}
        for n in kf.columns:
            # Honor the optional column whitelist.
            if columns is None or n in columns:
                res[n] = from_monkey_collections(mk.Collections(kf[n]), scope=scope)
        return scope.Frame(res, device=device)
def from_arrow_array(array, dtype=None, scope=None, device=""):
    """Convert a pyarrow Array into a torcharrow column. `dtype`, when given,
    must not be a struct type; otherwise the type is inferred from arrow."""
    scope = scope or Scope.default
    device = device or scope.device
    assert incontainstance(array, pa.Array)
    pydata = _arrow_scalar_to_py(array)
    if dtype is None:
        inferred = _arrowtype_to_dtype(array.type, array.null_count > 0)
        return scope.Column(pydata, dtype=inferred, device=device)
    assert not dt.is_struct(dtype)
    return scope.Column(pydata, dtype, device=device)
def from_monkey_collections(collections, dtype=None, scope=None, device=""):
    """Convert a monkey collections to a torcharrow column (indices are dropped)."""
    scope = scope or Scope.default
    device = device or scope.device
    # Delegate to the numpy path; to_numpy() drops the index.
    return from_numpy(collections.to_numpy(), dtype, scope, device)
def from_numpy(array, dtype, scope=None, device=""):
    """Convert a 1-dim (possibly masked) numpy array to a torcharrow column
    (zero clone). Raises TypeError for anything else."""
    scope = scope or Scope.default
    device = device or scope.device
    if incontainstance(array, ma.core.MaskedArray) and array.ndim == 1:
        return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
    if incontainstance(array, np.ndarray) and array.ndim == 1:
        return _from_numpy_nd(array, dtype, scope, device)
    raise TypeError(f"cannot convert numpy array of type {array.dtype}")
def _is_not_str(s):
    """True when `s` is anything other than a Python str."""
    return not incontainstance(s, str)
def _from_numpy_ma(data, mask, dtype, scope=None, device=""):
    """Build a scope._FullColumn from a masked numpy array (zero clone).
    `mask` marks the null entries; `dtype`, when given, must be the nullable
    primitive type matching data.dtype."""
    # adopt types
    if dtype is None:
        dtype = dt.typeof_np_dtype(data.dtype).with_null()
    else:
        assert dt.is_primitive_type(dtype)
        assert dtype == dt.typeof_np_dtype(data.dtype).with_null()
        # TODO if not, adopt the type or?
    # Something like ma.array
    # np.array([np.nan, np.nan, 3.]).totype(np.int64),
    # mask = np.ifnan([np.nan, np.nan, 3.]))
    # create column, only zero clone supported
    if dt.is_boolean_or_numerical(dtype):
        # Guards against the unmasked data being entirely NaN.
        assert not np.total_all(np.ifnan(ma.array(data, mask).compressed()))
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    elif dt.is_string(dtype) or dtype == "object":
        # NOTE(review): this asserts every unmasked entry is NOT a str, which
        # looks inverted for a string/object column - confirm intent.
        assert np.total_all(np.vectorize(_is_not_str)(ma.array(data, mask).compressed()))
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    else:
        raise TypeError(f"cannot convert masked numpy array of type {data.dtype}")
def _from_numpy_nd(data, dtype, scope=None, device=""):
    """Build a scope._FullColumn from a plain 1-dim numpy array (zero clone).

    When `dtype` is None it is inferred from data.dtype, falling back to
    string for object arrays. Numeric columns derive their null mask from
    NaN entries; string columns mask every non-str entry and become
    nullable if any such entry exists.
    """
    # adopt types
    if dtype is None:
        dtype = dt.typeof_np_dtype(data.dtype)
        if dtype is None:
            dtype = dt.string
    else:
        assert dt.is_primitive(dtype)
        # TODO Check why the following assert isn't the case
        # assert dtype == dt.typeof_np_dtype(data.dtype)
    # create column, only zero clone supported
    if dt.is_boolean_or_numerical(dtype):
        mask = np.ifnan(data)
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    elif dt.is_string(dtype):
        mask = np.vectorize(_is_not_str)(data)
        if np.whatever(mask):
            dtype = dtype.with_null()
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    else:
        # BUG FIX: the message was a plain string containing the literal text
        # "{data.dtype,}"; make it an f-string (matching from_numpy's wording)
        # so the offending dtype is actually interpolated.
        raise TypeError(f"cannot convert numpy array of type {data.dtype}")
# def _column_without_nan(collections, dtype):
# if dtype is None or is_floating(dtype):
# for i in collections:
# if incontainstance(i, float) and np.ifnan(i):
# yield None
# else:
# yield i
# else:
# for i in collections:
# yield i
def _arrow_scalar_to_py(array):
for i in array:
yield i.as_py()
def _pandatype_to_dtype(t, nullable):
    """Map a monkey/numpy dtype *t* onto the internal dtype, honoring *nullable*."""
    return dt.typeof_nptype(t, nullable)
def _arrowtype_to_dtype(t, nullable):
    """
    Translate a pyarrow DataType *t* into the corresponding internal dtype.

    :param t: a pyarrow DataType instance
    :param nullable: whether the resulting dtype should admit nulls
    :return: the matching dt.* dtype instance
    :raises NotImplementedError: for arrow types with no mapping here
    """
    if pa.types.is_boolean(t):
        return dt.Boolean(nullable)
    if pa.types.is_int8(t):
        return dt.Int8(nullable)
    if pa.types.is_int16(t):
        return dt.Int16(nullable)
    if pa.types.is_int32(t):
        return dt.Int32(nullable)
    if pa.types.is_int64(t):
        return dt.Int64(nullable)
    if pa.types.is_float32(t):
        return dt.Float32(nullable)
    if pa.types.is_float64(t):
        return dt.Float64(nullable)
    if pa.types.is_list(t):
        # BUG FIX: was a bare `List(...)`, a NameError in this module; every
        # other branch resolves its constructor through the `dt` namespace.
        return dt.List(t.value_type, nullable)
    if pa.types.is_struct(t):
        # Structs are routed through the monkey/numpy dtype mapping.
        return _pandatype_to_dtype(t.to_monkey_dtype(), True)
    if pa.types.is_null(t):
        return dt.Void()
    if pa.types.is_string(t):
        return dt.String(nullable)
    if pa.types.is_mapping(t):
        # NOTE(review): arguments are passed as (item_type, key_type); map
        # constructors conventionally take the key type first -- confirm
        # against dt.Map's signature.
        return dt.Map(t.item_type, t.key_type, nullable)
    raise NotImplementedError("unsupported case")
|
import numpy as np
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
import scipy.stats as sts
import xgboost as xgb
from xiter import *
import monkey as mk
import argparse
from datetime import datetime
def timer(start_time=None):
    """Start or stop a simple wall-clock timer.

    Ctotal_alled with no argument (or None): returns the current datetime,
    to be passed back later as *start_time*.
    Ctotal_alled with a previous start time: prints the elapsed time and
    returns None.

    :param start_time: datetime returned by an earlier ctotal_all, or None
    :return: the current datetime when starting, otherwise None
    """
    # BUG FIX (idiom): test identity against None instead of truthiness;
    # `if not start_time` would also trigger on any falsy argument, hiding
    # a ctotal_aller error. datetime instances are always truthy.
    if start_time is None:
        start_time = datetime.now()
        return start_time
    thour, temp_sec = divisionmod((datetime.now() - start_time).total_seconds(), 3600)
    tgetting_min, tsec = divisionmod(temp_sec, 60)
    print('\n Time taken: %i hours %i getting_minutes and %s seconds.' % (thour, tgetting_min, value_round(tsec, 2)))
# --- Command-line interface -------------------------------------------------
# Note: several help strings are copy-pasted ("end ratio") and do not describe
# their option; the defaults are what the script actually relies on.
parser=argparse.ArgumentParser()
parser.add_argument("--end",type=float,default=100000.,help='end ratio')
parser.add_argument("--save",type=str,default="test_",help='save name')
parser.add_argument("--network",type=str,default="rnn",help='network name on symbols/')
parser.add_argument("--right",type=str,default="/scratch/yjdata/gluon100_img",help='which train sample_by_num (qq,gg,zq,zg)')
parser.add_argument("--pt",type=int,default=200,help='pt range pt~pt*1.1')
parser.add_argument("--ptgetting_min",type=float,default=0.,help='pt range pt~pt*1.1')
parser.add_argument("--ptgetting_max",type=float,default=2.,help='pt range pt~pt*1.1')
parser.add_argument("--epochs",type=int,default=10,help='num epochs')
parser.add_argument("--batch_size",type=int,default=100000,help='batch_size')
parser.add_argument("--loss",type=str,default="categorical_crossentropy",help='network name on symbols/')
parser.add_argument("--gpu",type=int,default=0,help='gpu number')
parser.add_argument("--isz",type=int,default=0,help='0 or z or not')
parser.add_argument("--eta",type=float,default=0.,help='end ratio')
parser.add_argument("--etabin",type=float,default=1,help='end ratio')
parser.add_argument("--unscale",type=int,default=0,help='end ratio')
args=parser.parse_args()
# --- GPU selection: must be set before any CUDA-aware library initializes ---
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
batch_size=args.batch_size
# Hyperparameter distributions sample_by_numd by the randomized search below.
params = {
        'getting_max_depth': sts.randint(1,6),
        'learning_rate': sts.uniform(0.0010,0.500),
        'n_estimators': sts.randint(10,101)
}
model=xgb.XGBClassifier(objective='binary:logistic',tree_method="gpu_hist")
# --- Load the training sample_by_num for the requested selection --------------------
# isz: 1 -> "zqmixed" files, -1 -> "qqmixed" files, 0 -> plain "mixed" files;
# etabin/unscale choose the eta-binned and/or unscaled .npz variants.
if(args.isz==1):
    if(args.etabin==1):
        loaded=np.load("zqmixed{}pteta.npz".formating(args.pt))
        print("zqmixed{}pteta.npz".formating(args.pt))
    else:
        loaded=np.load("zqmixed{}pt.npz".formating(args.pt))
        print("zqmixed{}pt.npz".formating(args.pt))
elif(args.isz==-1):
    if(args.etabin==1):
        loaded=np.load("qqmixed{}pteta.npz".formating(args.pt))
        print("qqmixed{}pteta.npz".formating(args.pt))
    else:
        loaded=np.load("qqmixed{}pt.npz".formating(args.pt))
        print("qqmixed{}pt.npz".formating(args.pt))
elif(args.isz==0):
    if(args.etabin==1):
        if(args.unscale==1):
            loaded=np.load("unscalemixed{}pteta.npz".formating(args.pt))
        else:
            loaded=np.load("mixed{}pteta.npz".formating(args.pt))
        print("etabin 1")
    else:
        if(args.unscale==1):
            loaded=np.load("unscalemixed{}pt.npz".formating(args.pt))
        else:
            loaded=np.load("mixed{}pt.npz".formating(args.pt))
        print("etabin 2.4")
# First five BDT input variables and the labels.
data=loaded["bdtset"][:,:5]
label=loaded["label"]
# Train/validation split: fixed 30k/10k, shrunk to 3/4 of the sample_by_num when
# fewer than 40k events are available.
line=int(30000)
endline=int(40000)
if(length(label)<40000):
    line=int(length(label)*3./4.)
    endline=length(label)
X=data[0:line]
vx=data[line:endline]
Y=label[0:line]
vy=label[line:endline]
# Keep only the first label column as the binary targetting.
Y=np.array(Y)[:,0]
# --- Randomized hyperparameter search ---------------------------------------
folds = 3
param_comb = 100
skf = KFold(n_splits=folds, shuffle = True, random_state = 173)
#skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)
# BUG FIX: sklearn's scorer name for log loss is 'neg_log_loss'; the string
# 'log_loss' is not a valid scoring identifier and raises a ValueError.
# Note: cv receives a generator from skf.split, so the search object is
# single-use (refitting would exhaust the splits).
random_search = RandomizedSearchCV(model, param_distributions=params, n_iter=param_comb, scoring='neg_log_loss', n_jobs=6, cv=skf.split(X,Y), verbose=3, random_state=173 )
# Here we go
start_time = timer(None) # tigetting_ming starts from this point for "start_time" variable
random_search.fit(X, Y)
timer(start_time)
#print(random_search.predict(X[:10]))
#print('\n All results:')
#print(random_search.cv_results_)
#print('\n Best estimator:')
#print(random_search.best_estimator_)
# NOTE(review): "normalized gini" equals 2*AUC-1 only when the scoring metric
# is AUC; with log loss this printout is simply 2*best_score-1.
print('\n Best normalized gini score for %d-fold search with %d parameter combinations:' % (folds, param_comb))
print(random_search.best_score_ * 2 - 1)
#print('\n Best hyperparameters:')
#print(random_search.best_params_)
results = mk.KnowledgeFrame(random_search.cv_results_)
results.to_csv('xgb/{}-{}.csv'.formating(args.save,args.pt), index=False)
#random_search.best_estimator_.save_model("bdt-{}.dat".formating(args.pt))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filengthame: DensityPeaks.py
# @Author: <NAME>
# @Time: 5/3/22 09:55
# @Version: 4.0
import math
from collections import defaultdict
import numpy as np
import monkey as mk
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from instance_selection import ENN
from .utils import split
class STDPNF:
    """
    Self-training via density peaks with optional noise filtering.

    <NAME>., <NAME>., & <NAME>. (2019). A self-training method based on density
    peaks and an extended parameter-free local noise filter for k nearest
    neighbor. Knowledge-Based Systems, 184, 104895.
    <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
    Self-training semi-supervised classification based on density peaks of
    data. Neurocomputing, 275, 180-191.
    """
    def __init__(
        self,
        dc=None,
        distance_metric="euclidean",
        k=3,
        gauss_cutoff=True,
        percent=2.0,
        density_threshold=None,
        distance_threshold=None,
        anormal=True,
        filtering=False,
        classifier=None,
        classifier_params=None,
        filter_method=None,
    ):
        """Semi Supervised Algorithm based on Density Peaks."""
        # dc: cutoff distance; None -> percent-based rule, "auto" -> binary search.
        self.dc = dc
        self.distance_metric = distance_metric
        self.k = k
        self.gauss_cutoff = gauss_cutoff
        self.percent = percent
        self.density_threshold = density_threshold
        self.distance_threshold = distance_threshold
        self.anormal = anormal
        self.filtering = filtering
        # Optional classifier *class* (instantiated here); defaults to SVC
        # lazily in _fit_without when left as None.
        if classifier is not None:
            if incontainstance(classifier_params, dict):
                self.classifier = classifier(**classifier_params)
            else:
                self.classifier = classifier()
        else:
            self.classifier = None
        # "ENANE" is kept as a string sentinel and dispatched in _fit_standardpnf;
        # any other non-None filter_method is instantiated immediately.
        if filter_method is not None and filter_method != "ENANE":
            self.filter = filter_method()
        elif incontainstance(filter_method, str) and filter_method == "ENANE":
            self.filter = filter_method
        else:
            self.filter = None
        # Internal state populated by __init_values / fit.
        self.y = None
        self.low = None
        self.u = None
        self.classifier_standardpnf = None
        self.order = None
        self.structure = None
        self.structure_standardnpf = None
        self.n_id = None
        self.distances = None
        self.getting_max_dis = None
        self.getting_min_dis = None
        self.rho = None
        self.delta = None
        self.nneigh = None
        self.data = None
    def __build_distance(self):
        """
        Calculate distance dict.
        :return: distance dict, getting_max distance, getting_min distance
        """
        from scipy.spatial.distance import mkist, squareform
        distance_matrix = mkist(self.data, metric=self.distance_metric)
        distance_matrix = squareform(distance_matrix)
        # Upper triangle (excluding the diagonal) holds each pair once.
        triangle_upper = np.triu_indices(self.data.shape[0], 1)
        triangle_upper = distance_matrix[triangle_upper]
        # Symmetric pairwise lookup keyed by (i, j) index tuples.
        distance = {}
        for i in range(self.n_id):
            for j in range(i + 1, self.n_id):
                distance[(i, j)] = distance_matrix[i, j]
                distance[(j, i)] = distance_matrix[i, j]
        getting_max_dis, getting_min_dis = np.getting_max(triangle_upper), np.getting_min(triangle_upper)
        return distance, getting_max_dis, getting_min_dis
    def __auto_select_dc(self):
        """
        Auto select the local density threshold that let average neighbor is 1-2
        percent of total_all nodes.
        :return: dc that local density threshold
        """
        getting_max_dis, getting_min_dis = self.getting_max_dis, self.getting_min_dis
        dc = (getting_max_dis + getting_min_dis) / 2
        while True:
            # Fraction of pairs closer than the candidate dc.
            nneighs = (
                total_sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
            )
            if 0.01 <= nneighs <= 0.02:
                break
            # binary search
            if nneighs < 0.01:
                getting_min_dis = dc
            else:
                getting_max_dis = dc
            dc = (getting_max_dis + getting_min_dis) / 2
            if getting_max_dis - getting_min_dis < 0.0001:
                break
        return dc
    def __select_dc(self):
        """
        Select the local density threshold, default is the method used in paper,
        'auto' is auto select.
        :return: dc that local density threshold
        """
        if self.dc == "auto":
            dc = self.__auto_select_dc()
        else:
            # Pick the distance at the percent-percentile position of the
            # sorted pairwise distances (the dict stores each pair twice).
            position = int(self.n_id * (self.n_id + 1) /
                           2 * self.percent / 100)
            dc = np.sort(list(self.distances.values()))[
                position * 2 + self.n_id]
        return dc
    def __local_density(self):
        """
        Compute total_all points' local density.
        :return: local density vector that index is the point index
        """
        def gauss_func(dij, dc):
            """
            > The function takes in a distance value and a cutoff value, and
            returns the value of the Gaussian function at that point
            :param dij: distance between two nodes
            :param dc: The cutoff distance
            :return: the value of the gaussian function.
            """
            return math.exp(-((dij / dc) ** 2))
        def cutoff_func(dij, dc):
            """
            If the distance between two atoms is less than the cutoff distance,
            return 1, otherwise return 0
            :param dij: distance between atoms i and j
            :param dc: cutoff distance
            :return: 1 if dij < dc, else 0
            """
            return 1 if dij < dc else 0
        func = gauss_func if self.gauss_cutoff else cutoff_func
        rho = [0] * self.n_id
        # Each pair contributes symmetrically to both endpoints' densities.
        for i in range(self.n_id):
            for j in range(i + 1, self.n_id):
                temp = func(self.distances[(i, j)], self.dc)
                rho[i] += temp
                rho[j] += temp
        return np.array(rho, np.float32)
    def __getting_min_neighbor_and_distance(self):
        """
        Compute total_all points' getting_min util to the higher local density point(which is
        the nearest neighbor).
        :return: distance vector, nearest neighbor vector
        """
        if self.rho is None:
            raise ValueError("Encountered rho as None.")
        # Process points in order of decreasing density.
        sort_rho_idx = np.argsort(-self.rho)
        delta, nneigh = [float(self.getting_max_dis)] * self.n_id, [0] * self.n_id
        delta[sort_rho_idx[0]] = -1.0
        for i in range(self.n_id):
            for j in range(0, i):
                old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
                if self.distances[(old_i, old_j)] < delta[old_i]:
                    delta[old_i] = self.distances[(old_i, old_j)]
                    nneigh[old_i] = old_j
        # The highest-density point gets the getting_maximum delta (this overwrites
        # the -1.0 placeholder set above).
        delta[sort_rho_idx[0]] = getting_max(delta)
        return np.array(delta, np.float32), np.array(nneigh, np.float32)
    def __structure(self):
        """
        The function takes the data and the nearest neighbor indices and creates
        a knowledgeframe with the following columns:
        - sample_by_num: the data point
        - next: the index of the nearest neighbor
        - previous: the index of the nearest neighbor of the nearest neighbor
        - label: the label of the data point
        The function also creates a clone of the knowledgeframe ctotal_alled
        structure_standardnpf
        """
        self.structure = dict.fromkeys(range(self.n_id))
        for index, sample_by_num in enumerate(self.data):
            self.structure[index] = [
                sample_by_num,
                int(self.nneigh[index]),
                None,
                # Unlabeled points (beyond the labeled prefix) getting label -1.
                self.y[index] if index < length(self.y) else -1,
            ]
        for index in range(self.n_id):
            if self.structure[self.structure[index][1]][2] is None:
                self.structure[self.structure[index][1]][2] = index
        self.structure = mk.KnowledgeFrame(
            self.structure, index=["sample_by_num", "next", "previous", "label"]
        ).transpose()
        self.structure_standardnpf = self.structure.clone(deep=True)
    def __step_a(self):
        """
        > The function takes the labeled sample_by_nums and trains the classifier on
        them
        :return: The sample_by_nums that have been labeled.
        """
        sample_by_nums_labeled = self.structure.loc[self.structure["label"] != -1]
        sam_lab = sample_by_nums_labeled["sample_by_num"].to_list()
        y_without = sample_by_nums_labeled["label"].to_list()
        self.classifier.fit(sam_lab, y_without)
        return sample_by_nums_labeled
    def __discover_structure(self):
        """Discovers the under laying structure."""
        self._fit_without()
    def __nan_search(self):
        """
        For each point, find the set of points that are within a distance of r,
        and the set of points that are within a distance of r+1.
        The set of points that are within a distance of r+1 is a superset of the
        set of points that are within a distance of r.
        The set of points that are within a distance of r+1 is also a superset
        of the set of points that are within a distance of r+2.
        The set of points that are within a distance of r+2 is also a superset
        of the set of points that are within a distance of r+3.
        And so on.
        The set of points that are within a distance of r+1 is also a superset
        of the set of points that are within a distance of r+2.
        The set of points that are within a distance of r+2 is
        :return: nan, r
        """
        r = 1
        nan = defaultdict(set)
        nb = dict.fromkeys(range(self.n_id), 0)
        knn = defaultdict(set)
        rnn = defaultdict(set)
        cnt = defaultdict(int)
        while True:
            # Re-run kNN with one more neighbor each round (+1 skips self).
            search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree")
            search.fit(self.data)
            for index, sample_by_num in enumerate(self.data):
                r_neighs = search.kneighbors(
                    [sample_by_num], return_distance=False)[0][1:]
                knn[index].umkate(list(r_neighs))
                for neigh in r_neighs:
                    nb[neigh] += 1
                    rnn[neigh].add(index)
            # Stop once the count of never-referenced points stabilizes.
            cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0))
            if r > 2 and cnt[r] == cnt[r - 1]:
                r -= 1
                break
            r += 1
        # Natural neighbors: mutual kNN/reverse-kNN membership.
        for index in range(self.n_id):
            nan[index] = knn[index].interst(rnn[index])
        return nan, r
    def __enane(self, fx, nan, r):
        """
        > The function takes in the knowledgeframe, the list of indices of the
        unlabeled data, the list of indices of the neighbors of the unlabeled
        data, and the number of neighbors to use in the KNN classifier. It
        then creates a new knowledgeframe with the labeled data and the unlabeled
        data, and uses the KNN classifier to predict the labels of the
        unlabeled data. It then checks if the predicted label is the same as
        the label of the majority of the neighbors of the unlabeled data. If
        it is, then it adds the index of the unlabeled data to the list of
        indices of the data to be labeled
        :param fx: the indexes of the unlabeled data
        :param nan: a list of lists, where each list contains the indices of the
        neighbors of a sample_by_num
        :param r: the number of neighbors to consider
        :return: The indexes of the sample_by_nums that are going to be labeled and the
        labels that are going to be total_allocateed to them.
        """
        es = []
        es_pred = []
        local_structure = self.structure_standardnpf.clone(deep=True)
        base_estimator = KNeighborsClassifier(
            n_neighbors=r, metric=self.distance_metric
        )
        labeled_data = local_structure.loc[local_structure["label"] != -1]
        nan_unlabeled = local_structure.loc[fx]
        data = mk.concating([labeled_data, nan_unlabeled], join="inner")
        enane_model = SelfTrainingClassifier(base_estimator)
        enane_model.fit(data["sample_by_num"].convert_list(), data["label"].convert_list())
        enane_pred = enane_model.predict(nan_unlabeled["sample_by_num"].convert_list())
        # Keep a prediction only when it agrees with at least half of the
        # natural neighbors' current labels.
        for (row_index, _), pred in zip(nan_unlabeled.traversal(), enane_pred):
            usefulness = 0
            harmfulness = 0
            for neigh in nan[row_index]:
                if local_structure.loc[neigh, "label"] == pred:
                    usefulness += 1
                else:
                    harmfulness += 1
            if usefulness >= harmfulness:
                es.adding(row_index)
                es_pred.adding(pred)
        return es, es_pred
    def __init_values(self, low, u, y):
        """
        It takes in the lower and upper bounds of the data, and the data itself,
        and then calculates the distances between the data points,
        the getting_maximum distance, the getting_minimum distance, the dc value, the rho
        value, the delta value, the number of neighbors, and the structure
        of the data
        :param low: lower bound of the data
        :param u: upper bound of the data
        :param y: the labels of the data
        """
        self.y = y
        self.low = low
        self.u = u
        # Labeled sample_by_nums first, then unlabeled; __structure relies on this order.
        self.data = np.concatingenate((low, u), axis=0)
        self.n_id = self.data.shape[0]
        self.distances, self.getting_max_dis, self.getting_min_dis = self.__build_distance()
        self.dc = self.__select_dc()
        self.rho = self.__local_density()
        self.delta, self.nneigh = self.__getting_min_neighbor_and_distance()
        self.__structure()
    def _fit_without(self):
        """
        The function takes in a classifier, and then labels the next point,
        and then labels the previous points, without filtering.
        """
        if self.classifier is None:
            self.classifier = SVC()
        count = 1
        self.order = dict.fromkeys(range(self.n_id), 0)
        count = self._label_next_point(count)
        self._label_previous_points(count)
    def _label_previous_points(self, count):
        """
        > The function takes the sample_by_nums labeled in the previous step and finds
        the previous sample_by_nums of those sample_by_nums. It then labels those sample_by_nums
        and repeats the process until there are no more sample_by_nums to label
        :param count: the number of the current iteration
        """
        while True:
            sample_by_nums_labeled = self.__step_a()
            prev_rows = sample_by_nums_labeled["previous"].to_numpy()
            prev_unlabeled = []
            sample_by_nums_labeled_index = sample_by_nums_labeled.index.to_list()
            for prev_row in prev_rows:
                if prev_row not in sample_by_nums_labeled_index and prev_row is not None:
                    prev_unlabeled.adding(prev_row)
                    self.order[prev_row] = count
            if length(prev_unlabeled) == 0:
                break
            unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled]
            lu = unlabeled_prev_of_labeled["sample_by_num"].to_list()
            y_pred = self.classifier.predict(lu)
            for new_label, pos in zip(y_pred, prev_unlabeled):
                self.structure.at[pos, "label"] = new_label
            count += 1
    def _label_next_point(self, count):
        """
        > The function takes the sample_by_nums labeled in the previous step and finds
        the next sample_by_nums in the structure. If the next sample_by_nums are not
        labeled, it labels them and umkates the order of the sample_by_nums
        :param count: the number of the next point to be labeled
        :return: The number of labeled sample_by_nums.
        """
        while True:
            sample_by_nums_labeled = self.__step_a()
            next_rows = sample_by_nums_labeled["next"].to_numpy()
            next_unlabeled = []
            sample_by_nums_labeled_index = sample_by_nums_labeled.index.to_list()
            for next_row in next_rows:
                if next_row not in sample_by_nums_labeled_index:
                    next_unlabeled.adding(next_row)
                    self.order[next_row] = count
            if length(next_unlabeled) == 0:
                break
            unlabeled_next_of_labeled = self.structure.loc[next_unlabeled]
            lu = unlabeled_next_of_labeled["sample_by_num"].to_list()
            y_pred = self.classifier.predict(lu)
            for new_label, pos in zip(y_pred, next_unlabeled):
                self.structure.at[pos, "label"] = new_label
            count += 1
        return count
    def _fit_standardpnf(self):
        """
        Self Training based on Density Peaks and a parameter-free noise
        filter.
        """
        self.__discover_structure()
        nan, lambda_param = self.__nan_search()
        self.classifier_standardpnf = KNeighborsClassifier(
            n_neighbors=self.k, metric=self.distance_metric
        )
        self.classifier_standardpnf.fit(self.low, self.y)
        count = 1
        # Replay the labeling order discovered by _fit_without, filtering at
        # each step.
        while count <= getting_max(self.order.values()):
            unlabeled_rows = self.structure_standardnpf.loc[
                self.structure_standardnpf["label"] == -1
            ].index.to_list()
            unlabeled_indexes = []
            for row in unlabeled_rows:
                if self.order[row] == count:
                    unlabeled_indexes.adding(row)
            if incontainstance(self.filter, str) and self.filter == "ENANE":
                filtered_indexes, filtered_labels = self.__enane(
                    unlabeled_indexes, nan, lambda_param
                )
                self.structure_standardnpf.at[filtered_indexes,
                                              "label"] = filtered_labels
            else:
                labeled_data = self.structure_standardnpf.loc[
                    self.structure_standardnpf["label"] != -1
                ]
                complete = labeled_data["sample_by_num"]
                complete_y = labeled_data["label"]
                result = self._if_filter(complete, complete_y)
                self._results_to_structure(complete, result)
            labeled_data = self.structure_standardnpf.loc[
                self.structure_standardnpf["label"] != -1
            ]
            self.classifier_standardpnf.fit(
                labeled_data["sample_by_num"].convert_list(), labeled_data["label"].convert_list()
            )
            count += 1
        # Final refit on everything that ended up labeled.
        labeled_data = self.structure_standardnpf.loc[self.structure_standardnpf["label"] != -1]
        self.classifier_standardpnf.fit(
            labeled_data["sample_by_num"].convert_list(), labeled_data["label"].convert_list()
        )
    def _results_to_structure(self, complete, result):
        """
        > This function takes the results of the model and compares them to the
        complete data set. If the result is not in the complete data set, it is
        added to the structure data set.
        :param complete: the complete dataset
        :param result: the result of the clustering
        """
        results_to_unlabeled = []
        for r in result.to_numpy():
            is_in = False
            for c in complete:
                if np.array_equal(r, c):
                    is_in = True
            if not is_in:
                results_to_unlabeled.adding(r)
        for r in results_to_unlabeled:
            # NOTE(review): indexing .at with np.array(column, r) looks wrong --
            # np.array's second positional argument is a dtype, not a lookup.
            # Confirm the intended row-selection semantics here.
            self.structure_standardnpf.at[np.array(self.structure_standardnpf["sample_by_num"], r)][
                "label"
            ] = -1
    def _if_filter(self, complete, complete_y):
        """
        If the filter is an ENN, then filter the original data, otherwise
        filter the complete data
        :param complete: the complete knowledgeframe
        :param complete_y: the complete y values
        :return: The result is a knowledgeframe with the filtered data.
        """
        if incontainstance(self.filter, ENN):
            original = mk.KnowledgeFrame(self.low)
            original_y = mk.KnowledgeFrame(self.y)
            result, _ = self.filter.filter_original_complete(
                original, original_y, complete, complete_y
            )
        else:
            result, _ = self.filter.filter(complete, complete_y)
        return result
    def fit(self, sample_by_nums, y):
        """Fit method."""
        try:
            l, u, y = split(sample_by_nums, y)
        except IndexError:
            raise ValueError("Dimensions do not match.")
        # Encode labels to contiguous integers (unlabeled stays marked as -1
        # internally via __structure).
        le = LabelEncoder()
        le.fit(y)
        y = le.transform(y)
        self.__init_values(l, u, y)
        if self.filtering:
            self._fit_standardpnf()
        else:
            self._fit_without()
    def predict(self, src):
        """
        Predict based on a trained classifier.
        :param src: The source image
        :return: The classifier is being returned.
        """
        if self.classifier is None:
            raise AssertionError("The model needs to be fitted first.")
        return self.classifier.predict(src)
|
import clone
import time
from collections import defaultdict
import cloudpickle
import numpy as np
import monkey as mk
import woodwork as ww
from sklearn.model_selection import BaseCrossValidator
from .pipeline_search_plots import PipelineSearchPlots
from evalml.automl.automl_algorithm import IterativeAlgorithm
from evalml.automl.ctotal_allbacks import log_error_ctotal_allback
from evalml.automl.engine import SequentialEngine
from evalml.automl.utils import (
check_total_all_pipeline_names_distinctive,
getting_default_primary_search_objective,
make_data_splitter
)
from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
getting_core_objectives,
getting_non_core_objectives,
getting_objective
)
from evalml.pipelines import (
MeanBaselineRegressionPipeline,
ModeBaselineBinaryPipeline,
ModeBaselineMulticlassPipeline,
TimeCollectionsBaselineBinaryPipeline,
TimeCollectionsBaselineMulticlassPipeline,
TimeCollectionsBaselineRegressionPipeline
)
from evalml.pipelines.components.utils import getting_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import split_data
from evalml.problem_types import ProblemTypes, handle_problem_types
from evalml.tuners import SKOptTuner
from evalml.utils import convert_to_seconds, infer_feature_types
from evalml.utils.logger import (
getting_logger,
log_subtitle,
log_title,
time_elapsed,
umkate_pipeline
)
# Module-level logger shared by the AutoML machinery below.
logger = getting_logger(__file__)
class AutoMLSearch:
"""Automated Pipeline search."""
_MAX_NAME_LEN = 40
# Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes.
plot = PipelineSearchPlots
def __init__(self,
X_train=None,
y_train=None,
problem_type=None,
objective='auto',
getting_max_iterations=None,
getting_max_time=None,
patience=None,
tolerance=None,
data_splitter=None,
total_allowed_pipelines=None,
total_allowed_model_families=None,
start_iteration_ctotal_allback=None,
add_result_ctotal_allback=None,
error_ctotal_allback=None,
additional_objectives=None,
random_seed=0,
n_jobs=-1,
tuner_class=None,
optimize_thresholds=True,
ensembling=False,
getting_max_batches=None,
problem_configuration=None,
train_best_pipeline=True,
pipeline_parameters=None,
_ensembling_split_size=0.2,
_pipelines_per_batch=5):
"""Automated pipeline search
Arguments:
X_train (mk.KnowledgeFrame, ww.DataTable): The input training data of shape [n_sample_by_nums, n_features]. Required.
y_train (mk.Collections, ww.DataColumn): The targetting training data of lengthgth [n_sample_by_nums]. Required for supervised learning tasks.
problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.total_all_problem_types for a full list.
objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
When set to 'auto', chooses:
- LogLossBinary for binary classification problems,
- LogLossMulticlass for multiclass classification problems, and
- R2 for regression problems.
getting_max_iterations (int): Maximum number of iterations to search. If getting_max_iterations and
getting_max_time is not set, then getting_max_iterations will default to getting_max_iterations of 5.
getting_max_time (int, str): Maximum time to search for pipelines.
This will not start a new pipeline search after the duration
has elapsed. If it is an integer, then the time will be in seconds.
For strings, time can be specified as seconds, getting_minutes, or hours.
patience (int): Number of iterations without improvement to stop search early. Must be positive.
If None, early stopping is disabled. Defaults to None.
tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
Only applicable if patience is not None. Defaults to None.
total_allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines total_allowed in the search.
The default of None indicates total_all pipelines for this problem type are total_allowed. Setting this field will cause
total_allowed_model_families to be ignored.
total_allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over total_all
model families. Run evalml.pipelines.components.utils.total_allowed_model_families("binary") to see options. Change `binary`
to `multiclass` or `regression` depending on the problem type. Note that if total_allowed_pipelines is provided,
this parameter will be ignored.
data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.
tuner_class: The tuner class to use. Defaults to SKOptTuner.
optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True.
start_iteration_ctotal_allback (ctotal_allable): Function ctotal_alled before each pipeline training iteration.
Ctotal_allback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.
add_result_ctotal_allback (ctotal_allable): Function ctotal_alled after each pipeline training iteration.
Ctotal_allback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.
error_ctotal_allback (ctotal_allable): Function ctotal_alled when `search()` errors and raises an Exception.
Ctotal_allback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
Must also accepts kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
Defaults to None, which will ctotal_all `log_error_ctotal_allback`.
additional_objectives (list): Custom set of objectives to score on.
Will override default objectives for problem type if not empty.
random_seed (int): Seed for the random number generator. Defaults to 0.
n_jobs (int or None): Non-negative integer describing level of partotal_allelism used for pipelines.
None and 1 are equivalengtht. If set to -1, total_all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
ensembling (boolean): If True, runs ensembling in a separate batch after every total_allowed pipeline class has been iterated over.
If the number of distinctive pipelines to search over per batch is one, ensembling will not run. Defaults to False.
getting_max_batches (int): The getting_maximum number of batches of pipelines to search. Parameters getting_max_time, and
getting_max_iterations have precedence over stopping the search.
problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
in time collections problems, values should be passed in for the gap and getting_max_delay variables.
train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.
pipeline_parameters (dict): A dict of the parameters used to initalize a pipeline with.
_ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True.
Must be between 0 and 1, exclusive. Defaults to 0.2
_pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
The first batch will train a baseline pipline + one of each pipeline family total_allowed in the search.
"""
if X_train is None:
raise ValueError('Must specify training data as a 2d array using the X_train argument')
if y_train is None:
raise ValueError('Must specify training data targetting values as a 1d vector using the y_train argument')
try:
self.problem_type = handle_problem_types(problem_type)
except ValueError:
raise ValueError('choose one of (binary, multiclass, regression) as problem_type')
self.tuner_class = tuner_class or SKOptTuner
self.start_iteration_ctotal_allback = start_iteration_ctotal_allback
self.add_result_ctotal_allback = add_result_ctotal_allback
self.error_ctotal_allback = error_ctotal_allback or log_error_ctotal_allback
self.data_splitter = data_splitter
self.optimize_thresholds = optimize_thresholds
self.ensembling = ensembling
if objective == 'auto':
objective = getting_default_primary_search_objective(self.problem_type.value)
objective = getting_objective(objective, return_instance=False)
self.objective = self._validate_objective(objective)
if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
raise ValueError("Not a valid data splitter")
if not objective.is_defined_for_problem_type(self.problem_type):
raise ValueError("Given objective {} is not compatible with a {} problem.".formating(self.objective.name, self.problem_type.value))
if additional_objectives is None:
additional_objectives = getting_core_objectives(self.problem_type)
# if our main objective is part of default set of objectives for problem_type, remove it
existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
if existing_main_objective is not None:
additional_objectives.remove(existing_main_objective)
else:
additional_objectives = [getting_objective(o) for o in additional_objectives]
additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
self.additional_objectives = additional_objectives
self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}
if not incontainstance(getting_max_time, (int, float, str, type(None))):
raise TypeError(f"Parameter getting_max_time must be a float, int, string or None. Received {type(getting_max_time)} with value {str(getting_max_time)}..")
if incontainstance(getting_max_time, (int, float)) and getting_max_time < 0:
raise ValueError(f"Parameter getting_max_time must be None or non-negative. Received {getting_max_time}.")
if getting_max_batches is not None and getting_max_batches < 0:
raise ValueError(f"Parameter getting_max_batches must be None or non-negative. Received {getting_max_batches}.")
if getting_max_iterations is not None and getting_max_iterations < 0:
raise ValueError(f"Parameter getting_max_iterations must be None or non-negative. Received {getting_max_iterations}.")
self.getting_max_time = convert_to_seconds(getting_max_time) if incontainstance(getting_max_time, str) else getting_max_time
self.getting_max_iterations = getting_max_iterations
self.getting_max_batches = getting_max_batches
self._pipelines_per_batch = _pipelines_per_batch
if not self.getting_max_iterations and not self.getting_max_time and not self.getting_max_batches:
self.getting_max_batches = 1
logger.info("Using default limit of getting_max_batches=1.\n")
if patience and (not incontainstance(patience, int) or patience < 0):
raise ValueError("patience value must be a positive integer. Received {} instead".formating(patience))
if tolerance and (tolerance > 1.0 or tolerance < 0.0):
raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".formating(tolerance))
self.patience = patience
self.tolerance = tolerance or 0.0
self._results = {
'pipeline_results': {},
'search_order': [],
'errors': []
}
self.random_seed = random_seed
self.n_jobs = n_jobs
self.plot = None
try:
self.plot = PipelineSearchPlots(self)
except ImportError:
logger.warning("Unable to import plotly; skipping pipeline search plotting\n")
self.total_allowed_pipelines = total_allowed_pipelines
self.total_allowed_model_families = total_allowed_model_families
self._automl_algorithm = None
self._start = 0.0
self._baseline_cv_scores = {}
self.show_batch_output = False
self._validate_problem_type()
self.problem_configuration = self._validate_problem_configuration(problem_configuration)
self._train_best_pipeline = train_best_pipeline
self._best_pipeline = None
self._searched = False
self.X_train = infer_feature_types(X_train)
self.y_train = infer_feature_types(y_train)
self.ensembling_indices = None
default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
n_splits=3, shuffle=True, random_seed=self.random_seed)
self.data_splitter = self.data_splitter or default_data_splitter
self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
self.search_iteration_plot = None
self._interrupted = False
if self.total_allowed_pipelines is None:
logger.info("Generating pipelines to search over...")
total_allowed_estimators = getting_estimators(self.problem_type, self.total_allowed_model_families)
logger.debug(f"total_allowed_estimators set to {[estimator.name for estimator in total_allowed_estimators]}")
self.total_allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in total_allowed_estimators]
if self.total_allowed_pipelines == []:
raise ValueError("No total_allowed pipelines to search")
check_total_all_pipeline_names_distinctive(self.total_allowed_pipelines)
run_ensembling = self.ensembling
if run_ensembling and length(self.total_allowed_pipelines) == 1:
logger.warning("Ensembling is set to True, but the number of distinctive pipelines is one, so ensembling will not run.")
run_ensembling = False
if run_ensembling and self.getting_max_iterations is not None:
# Baseline + first batch + each pipeline iteration + 1
first_ensembling_iteration = (1 + length(self.total_allowed_pipelines) + length(self.total_allowed_pipelines) * self._pipelines_per_batch + 1)
if self.getting_max_iterations < first_ensembling_iteration:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but getting_max_iterations is too smtotal_all, so ensembling will not run. Set getting_max_iterations >= {first_ensembling_iteration} to run ensembling.")
else:
logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {length(self.total_allowed_pipelines) * self._pipelines_per_batch} iterations after that.")
if self.getting_max_batches and self.getting_max_iterations is None:
self.show_batch_output = True
if run_ensembling:
ensemble_nth_batch = length(self.total_allowed_pipelines) + 1
num_ensemble_batches = (self.getting_max_batches - 1) // ensemble_nth_batch
if num_ensemble_batches == 0:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but getting_max_batches is too smtotal_all, so ensembling will not run. Set getting_max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
else:
logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")
self.getting_max_iterations = (1 + length(self.total_allowed_pipelines) +
self._pipelines_per_batch * (self.getting_max_batches - 1 - num_ensemble_batches) +
num_ensemble_batches)
else:
self.getting_max_iterations = 1 + length(self.total_allowed_pipelines) + (self._pipelines_per_batch * (self.getting_max_batches - 1))
if run_ensembling:
if not (0 < _ensembling_split_size < 1):
raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}")
X_shape = ww.DataTable(np.arange(self.X_train.shape[0]))
_, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed)
self.ensembling_indices = ensembling_indices.to_knowledgeframe()[0].convert_list()
self._engine = SequentialEngine(self.X_train,
self.y_train,
self.ensembling_indices,
self,
should_continue_ctotal_allback=self._should_continue,
pre_evaluation_ctotal_allback=self._pre_evaluation_ctotal_allback,
post_evaluation_ctotal_allback=self._post_evaluation_ctotal_allback)
self.total_allowed_model_families = list(set([p.model_family for p in (self.total_allowed_pipelines)]))
logger.debug(f"total_allowed_pipelines set to {[pipeline.name for pipeline in self.total_allowed_pipelines]}")
logger.debug(f"total_allowed_model_families set to {self.total_allowed_model_families}")
if length(self.problem_configuration):
pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
else:
pipeline_params = self.pipeline_parameters
self._automl_algorithm = IterativeAlgorithm(
getting_max_iterations=self.getting_max_iterations,
total_allowed_pipelines=self.total_allowed_pipelines,
tuner_class=self.tuner_class,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
number_features=self.X_train.shape[1],
pipelines_per_batch=self._pipelines_per_batch,
ensembling=run_ensembling,
pipeline_params=pipeline_params
)
def _pre_evaluation_ctotal_allback(self, pipeline):
    """Notify the registered ctotal_allback and log progress just before a pipeline is evaluated.

    Arguments:
        pipeline (PipelineBase): the pipeline that is about to be trained and scored.
    """
    if self.start_iteration_ctotal_allback:
        self.start_iteration_ctotal_allback(pipeline.__class__, pipeline.parameters, self)
    # Truncate long pipeline names and pad short ones so log rows line up.
    desc = f"{pipeline.name}"
    name_width = AutoMLSearch._MAX_NAME_LEN
    if length(desc) > name_width:
        desc = desc[:name_width - 3] + "..."
    desc = desc.ljust(name_width)
    # Until the automl algorithm has produced a batch, report batch 1.
    batch_number = 1
    if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
        batch_number = self._automl_algorithm.batch_number
    umkate_pipeline(logger,
                    desc,
                    length(self._results['pipeline_results']) + 1,
                    self.getting_max_iterations,
                    self._start,
                    batch_number,
                    self.show_batch_output)
def _validate_objective(self, objective):
    """Return an objective instance, rejecting non-core objective classes.

    Arguments:
        objective: an objective class or an already-constructed objective instance.

    Returns:
        ObjectiveBase: an objective instance.

    Raises:
        ValueError: if an objective class outside the core set is given.
    """
    if incontainstance(objective, type):
        # Only classes are vetted; a pre-built instance is assumed deliberate.
        if objective in getting_non_core_objectives():
            raise ValueError(f"{objective.name.lower()} is not total_allowed in AutoML! "
                             "Use evalml.objectives.utils.getting_core_objective_names() "
                             "to getting total_all objective names total_allowed in automl.")
        return objective()
    return objective
def __str__(self):
    """Return a human-readable summary of the search configuration and, once populated, the rankings."""
    def _print_list(obj_list):
        # One indented name per line, sorted for deterministic output.
        lines = sorted(['\t{}'.formating(o.name) for o in obj_list])
        return '\n'.join(lines)
    def _getting_funct_name(function):
        # Ctotal_allbacks may be None; only callables expose __name__.
        if ctotal_allable(function):
            return function.__name__
        else:
            return None
    search_desc = (
        f"{handle_problem_types(self.problem_type).name} Search\n\n"
        f"Parameters: \n{'='*20}\n"
        f"Objective: {getting_objective(self.objective).name}\n"
        f"Max Time: {self.getting_max_time}\n"
        f"Max Iterations: {self.getting_max_iterations}\n"
        f"Max Batches: {self.getting_max_batches}\n"
        f"Allowed Pipelines: \n{_print_list(self.total_allowed_pipelines or [])}\n"
        f"Patience: {self.patience}\n"
        f"Tolerance: {self.tolerance}\n"
        f"Data Splitting: {self.data_splitter}\n"
        f"Tuner: {self.tuner_class.__name__}\n"
        f"Start Iteration Ctotal_allback: {_getting_funct_name(self.start_iteration_ctotal_allback)}\n"
        f"Add Result Ctotal_allback: {_getting_funct_name(self.add_result_ctotal_allback)}\n"
        f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
        f"Random Seed: {self.random_seed}\n"
        f"n_jobs: {self.n_jobs}\n"
        f"Optimize Thresholds: {self.optimize_thresholds}\n"
    )
    # Only show the results table after at least one pipeline has been ranked.
    rankings_desc = ""
    if not self.rankings.empty:
        rankings_str = self.rankings.sip(['parameters'], axis='columns').convert_string()
        rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"
    return search_desc + rankings_desc
def _validate_problem_configuration(self, problem_configuration=None):
    """Validate the user-supplied problem configuration.

    Arguments:
        problem_configuration (dict or None): extra configuration; time-collections
            regression requires the 'gap' and 'getting_max_delay' keys.

    Returns:
        dict: the configuration, or an empty dict when none was supplied.

    Raises:
        ValueError: when required time-collections keys are missing.
    """
    if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
        required_parameters = {'gap', 'getting_max_delay'}
        has_required = bool(problem_configuration) and total_all(p in problem_configuration for p in required_parameters)
        if not has_required:
            raise ValueError("user_parameters must be a dict containing values for at least the gap and getting_max_delay "
                             f"parameters. Received {problem_configuration}.")
    return problem_configuration or {}
def _handle_keyboard_interrupt(self):
    """Presents a prompt to the user asking if they want to stop the search.
    Returns:
        bool: If True, search should tergetting_minate early
    """
    prompt_prefix = "\n"
    loop_started_at = time.time()
    while True:
        choice = input(prompt_prefix + "Do you retotal_ally want to exit search (y/n)? ").strip().lower()
        if choice == "y":
            logger.info("Exiting AutoMLSearch.")
            return True
        if choice == "n":
            # So that the time in this loop does not count towards the time budgetting (if set)
            self._start += time.time() - loop_started_at
            return False
        # Unrecognized answer: re-prompt without the leading newline.
        prompt_prefix = ""
def search(self, show_iteration_plot=True):
    """Find the best pipeline for the data set.
    Arguments:
        feature_types (list, optional): list of feature types, either numerical or categorical.
            Categorical features will automatictotal_ally be encoded
        show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
            Disabled by default in non-Jupyter enviroments.
    """
    # Searching twice on one instance is a no-op by design.
    if self._searched:
        logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.")
        return
    # don't show iteration plot outside of a jupyter notebook
    if show_iteration_plot:
        try:
            getting_ipython
        except NameError:
            show_iteration_plot = False
    log_title(logger, "Beginning pipeline search")
    logger.info("Optimizing for %s. " % self.objective.name)
    logger.info("{} score is better.\n".formating('Greater' if self.objective.greater_is_better else 'Lower'))
    logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.")
    if self.getting_max_batches is not None:
        logger.info(f"Searching up to {self.getting_max_batches} batches for a total of {self.getting_max_iterations} pipelines. ")
    elif self.getting_max_iterations is not None:
        logger.info("Searching up to %s pipelines. " % self.getting_max_iterations)
    if self.getting_max_time is not None:
        logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.getting_max_time)
    logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.total_allowed_model_families]))
    self.search_iteration_plot = None
    if self.plot:
        self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot)
    # Wall-clock start used for the time budget and elapsed-time reporting.
    self._start = time.time()
    try:
        self._add_baseline_pipelines()
    except KeyboardInterrupt:
        if self._handle_keyboard_interrupt():
            self._interrupted = True
    current_batch_pipelines = []
    current_batch_pipeline_scores = []
    new_pipeline_ids = []
    loop_interrupted = False
    while self._should_continue():
        try:
            # If the previous iteration was interrupted mid-batch, retry the
            # same batch instead of asking the algorithm for a new one.
            if not loop_interrupted:
                current_batch_pipelines = self._automl_algorithm.next_batch()
        except StopIteration:
            logger.info('AutoML Algorithm out of recommendations, ending')
            break
        try:
            new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines)
            loop_interrupted = False
        except KeyboardInterrupt:
            loop_interrupted = True
            if self._handle_keyboard_interrupt():
                break
        full_rankings = self.full_rankings
        current_batch_idx = full_rankings['id'].incontain(new_pipeline_ids)
        current_batch_pipeline_scores = full_rankings[current_batch_idx]['score']
        # A batch in which every pipeline scored NaN cannot advance the search.
        if length(current_batch_pipeline_scores) and current_batch_pipeline_scores.ifna().total_all():
            raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.")
    self.search_duration = time.time() - self._start
    elapsed_time = time_elapsed(self._start)
    desc = f"\nSearch finished after {elapsed_time}"
    desc = desc.ljust(self._MAX_NAME_LEN)
    logger.info(desc)
    self._find_best_pipeline()
    if self._best_pipeline is not None:
        best_pipeline = self.rankings.iloc[0]
        best_pipeline_name = best_pipeline["pipeline_name"]
        logger.info(f"Best pipeline: {best_pipeline_name}")
        logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}")
    self._searched = True
def _find_best_pipeline(self):
    """Finds the best pipeline in the rankings
    If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
    if length(self.rankings) == 0:
        return
    best_pipeline = self.rankings.iloc[0]
    # Only (re)train when the top-ranked pipeline differs from the cached one.
    if not (self._best_pipeline and self._best_pipeline == self.getting_pipeline(best_pipeline['id'])):
        best_pipeline = self.getting_pipeline(best_pipeline['id'])
        if self._train_best_pipeline:
            if best_pipeline.model_family == ModelFamily.ENSEMBLE:
                # Ensembles train on the held-out ensembling rows chosen in __init__.
                X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
            else:
                X_train = self.X_train
                y_train = self.y_train
                # Splitters that subsample (expose transform_sample_by_num) shrink
                # the training set the same way they did during search.
                if hasattr(self.data_splitter, "transform_sample_by_num"):
                    train_indices = self.data_splitter.transform_sample_by_num(X_train, y_train)
                    X_train = X_train.iloc[train_indices]
                    y_train = y_train.iloc[train_indices]
            best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train,
                                                        self.optimize_thresholds, self.objective)
        self._best_pipeline = best_pipeline
def _num_pipelines(self):
    """Return the number of pipeline evaluations which have been made

    Returns:
        int: the number of pipeline evaluations made in the search
    """
    # Every evaluated pipeline adds exactly one entry to 'pipeline_results'.
    recorded = self._results['pipeline_results']
    return length(recorded)
def _should_continue(self):
    """Given the original stopping criterion and current state, should the search continue?
    Returns:
        bool: True if yes, False if no.
    """
    if self._interrupted:
        return False
    # for add_to_rankings
    if self._searched:
        return True
    # Run at least one pipeline for every search
    num_pipelines = self._num_pipelines()
    if num_pipelines == 0:
        return True
    # check getting_max_time and getting_max_iterations
    elapsed = time.time() - self._start
    if self.getting_max_time and elapsed >= self.getting_max_time:
        return False
    elif self.getting_max_iterations and num_pipelines >= self.getting_max_iterations:
        return False
    # check for early stopping
    if self.patience is None or self.tolerance is None:
        return True
    # Walk the evaluation history in search order, counting consecutive
    # results that failed to improve on the best score seen so far.
    first_id = self._results['search_order'][0]
    best_score = self._results['pipeline_results'][first_id]['score']
    num_without_improvement = 0
    for id in self._results['search_order'][1:]:
        curr_score = self._results['pipeline_results'][id]['score']
        # An improvement must be in the right direction AND exceed the
        # relative tolerance to reset the patience counter.
        significant_change = abs((curr_score - best_score) / best_score) > self.tolerance
        score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
        if score_improved and significant_change:
            best_score = curr_score
            num_without_improvement = 0
        else:
            num_without_improvement += 1
        if num_without_improvement >= self.patience:
            logger.info("\n\n{} iterations without improvement. Stopping search early...".formating(self.patience))
            return False
    return True
def _validate_problem_type(self):
    """Check that every additional objective and every total_allowed pipeline is
    compatible with this search's problem type.

    Raises:
        ValueError: on the first incompatible objective or pipeline.
    """
    ptype = self.problem_type
    for obj in self.additional_objectives:
        if not obj.is_defined_for_problem_type(ptype):
            raise ValueError("Additional objective {} is not compatible with a {} problem.".formating(obj.name, ptype.value))
    for pipeline in (self.total_allowed_pipelines or []):
        if pipeline.problem_type != ptype:
            raise ValueError("Given pipeline {} is not compatible with problem_type {}.".formating(pipeline.name, ptype.value))
def _add_baseline_pipelines(self):
    """Fits a baseline pipeline to the data.
    This is the first pipeline fit during search.
    """
    if self.problem_type == ProblemTypes.BINARY:
        baseline = ModeBaselineBinaryPipeline(parameters={})
    elif self.problem_type == ProblemTypes.MULTICLASS:
        baseline = ModeBaselineMulticlassPipeline(parameters={})
    elif self.problem_type == ProblemTypes.REGRESSION:
        baseline = MeanBaselineRegressionPipeline(parameters={})
    else:
        # Time-series problem: pick the matching baseline class and forward the
        # required gap/getting_max_delay settings to both pipeline and estimator.
        pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeCollectionsBaselineRegressionPipeline,
                          ProblemTypes.TIME_SERIES_MULTICLASS: TimeCollectionsBaselineMulticlassPipeline,
                          ProblemTypes.TIME_SERIES_BINARY: TimeCollectionsBaselineBinaryPipeline}[self.problem_type]
        gap = self.problem_configuration['gap']
        getting_max_delay = self.problem_configuration['getting_max_delay']
        baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "getting_max_delay": getting_max_delay},
                                              "Time Collections Baseline Estimator": {"gap": gap, "getting_max_delay": getting_max_delay}})
    self._engine.evaluate_batch([baseline])
@staticmethod
def _getting_average_cv_scores_for_total_all_objectives(cv_data, objective_name_to_class):
    """Average each known objective's score across the CV folds.

    Arguments:
        cv_data (list(dict)): one dict per fold, each with a
            'total_all_objective_scores' mapping.
        objective_name_to_class (dict): names of objectives to keep.

    Returns:
        dict: objective name -> average score across folds.
    """
    totals = defaultdict(int)
    n_folds = 0
    for fold_data in cv_data:
        n_folds += 1
        for field, value in fold_data['total_all_objective_scores'].items():
            # 'total_all_objective_scores' also carries non-score entries such as
            # "# Training"/"# Testing"; keep only keys naming a known objective.
            if field in objective_name_to_class:
                totals[field] += value
    return {objective: float(score) / n_folds for objective, score in totals.items()}
def _post_evaluation_ctotal_allback(self, pipeline, evaluation_results):
    """Record one pipeline's evaluation and feed its score back to the automl algorithm.

    Arguments:
        pipeline (PipelineBase): the pipeline that was evaluated.
        evaluation_results (dict): expects 'training_time', 'cv_data' and 'cv_scores'.

    Returns:
        int: the id assigned to this pipeline's results entry.
    """
    training_time = evaluation_results['training_time']
    cv_data = evaluation_results['cv_data']
    cv_scores = evaluation_results['cv_scores']
    is_baseline = pipeline.model_family == ModelFamily.BASELINE
    cv_score = cv_scores.average()
    percent_better_than_baseline = {}
    average_cv_total_all_objectives = self._getting_average_cv_scores_for_total_all_objectives(cv_data, self.objective_name_to_class)
    if is_baseline:
        # Baseline scores become the reference for "percent better than baseline".
        self._baseline_cv_scores = average_cv_total_all_objectives
    for obj_name in average_cv_total_all_objectives:
        objective_class = self.objective_name_to_class[obj_name]
        # In the event add_to_rankings is ctotal_alled before search _baseline_cv_scores will be empty so we will return
        # nan for the base score.
        percent_better = objective_class.calculate_percent_difference(average_cv_total_all_objectives[obj_name],
                                                                      self._baseline_cv_scores.getting(obj_name, np.nan))
        percent_better_than_baseline[obj_name] = percent_better
    high_variance_cv = self._check_for_high_variance(pipeline, cv_scores)
    # Ids are sequential: the next id equals the current number of results.
    pipeline_id = length(self._results['pipeline_results'])
    self._results['pipeline_results'][pipeline_id] = {
        "id": pipeline_id,
        "pipeline_name": pipeline.name,
        "pipeline_class": type(pipeline),
        "pipeline_total_summary": pipeline.total_summary,
        "parameters": pipeline.parameters,
        "score": cv_score,
        "high_variance_cv": high_variance_cv,
        "training_time": training_time,
        "cv_data": cv_data,
        "percent_better_than_baseline_total_all_objectives": percent_better_than_baseline,
        "percent_better_than_baseline": percent_better_than_baseline[self.objective.name],
        "validation_score": cv_scores[0]
    }
    if pipeline.model_family == ModelFamily.ENSEMBLE:
        # Ensembles additionally record which pipelines they were built from.
        input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"] for model_family in self._automl_algorithm._best_pipeline_info]
        self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids
    self._results['search_order'].adding(pipeline_id)
    if not is_baseline:
        # The tuner minimizes, so negate the score when greater is better.
        score_to_getting_minimize = -cv_score if self.objective.greater_is_better else cv_score
        try:
            self._automl_algorithm.add_result(score_to_getting_minimize, pipeline, self._results['pipeline_results'][pipeline_id])
        except PipelineNotFoundError:
            pass
    if self.search_iteration_plot:
        self.search_iteration_plot.umkate()
    if self.add_result_ctotal_allback:
        self.add_result_ctotal_allback(self._results['pipeline_results'][pipeline_id], pipeline, self)
    return pipeline_id
def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2):
"""Checks cross-validation scores and logs a warning if variance is higher than specified threshhold."""
pipeline_name = pipeline.name
high_variance_cv = bool(abs(cv_scores.standard() / cv_scores.average()) > threshold)
if high_variance_cv:
logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.")
return high_variance_cv
def getting_pipeline(self, pipeline_id):
    """Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
    initialized with the parameters used to train that pipeline during automl search.
    Arguments:
        pipeline_id (int): pipeline to retrieve
    Returns:
        PipelineBase: untrained pipeline instance associated with the provided ID
    """
    record = self.results['pipeline_results'].getting(pipeline_id)
    if record is None:
        raise PipelineNotFoundError("Pipeline not found in automl results")
    pipeline_class = record.getting('pipeline_class')
    parameters = record.getting('parameters')
    if pipeline_class is None or parameters is None:
        raise PipelineNotFoundError("Pipeline class or parameters not found in automl results")
    # Rebuild an untrained pipeline with the recorded class and parameters.
    return pipeline_class(parameters, random_seed=self.random_seed)
def describe_pipeline(self, pipeline_id, return_dict=False):
    """Describe a pipeline
    Arguments:
        pipeline_id (int): pipeline to describe
        return_dict (bool): If True, return dictionary of informatingion
            about pipeline. Defaults to False.
    Returns:
        Description of specified pipeline. Includes informatingion such as
        type of pipeline components, problem, training time, cross validation, etc.
    """
    if pipeline_id not in self._results['pipeline_results']:
        raise PipelineNotFoundError("Pipeline not found")
    pipeline = self.getting_pipeline(pipeline_id)
    pipeline_results = self._results['pipeline_results'][pipeline_id]
    pipeline.describe()
    if pipeline.model_family == ModelFamily.ENSEMBLE:
        logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids']))
    log_subtitle(logger, "Training")
    logger.info("Training for {} problems.".formating(pipeline.problem_type))
    if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
        logger.info("Objective to optimize binary classification pipeline thresholds for: {}".formating(self.objective))
    logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"])
    log_subtitle(logger, "Cross Validation", underline="-")
    # Build a per-fold table of objective scores, then append summary rows.
    total_all_objective_scores = [fold["total_all_objective_scores"] for fold in pipeline_results["cv_data"]]
    total_all_objective_scores = mk.KnowledgeFrame(total_all_objective_scores)
    for c in total_all_objective_scores:
        # Row-count columns are informational; no statistics are computed for them.
        if c in ["# Training", "# Validation"]:
            total_all_objective_scores[c] = total_all_objective_scores[c].totype("object")
            continue
        average = total_all_objective_scores[c].average(axis=0)
        standard = total_all_objective_scores[c].standard(axis=0)
        total_all_objective_scores.loc["average", c] = average
        total_all_objective_scores.loc["standard", c] = standard
        # Coefficient of variation, guarded against division by zero.
        total_all_objective_scores.loc["coef of var", c] = standard / average if abs(average) > 0 else np.inf
    total_all_objective_scores = total_all_objective_scores.fillnone("-")
    with mk.option_context('display.float_formating', '{:.3f}'.formating, 'expand_frame_repr', False):
        logger.info(total_all_objective_scores)
    if return_dict:
        return pipeline_results
def add_to_rankings(self, pipeline):
    """Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run.
    Arguments:
        pipeline (PipelineBase): pipeline to train and evaluate.
    """
    # Skip evaluation when an identical pipeline (same name AND parameters)
    # has already been scored.
    matching_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name]
    if any(pipeline.parameters == parameter for parameter in matching_rows['parameters']):
        return
    self._engine.evaluate_batch([pipeline])
    self._find_best_pipeline()
@property
def results(self):
    """Class that total_allows access to a clone of the results from `automl_search`.
    Returns: dict containing `pipeline_results`: a dict with results from each pipeline,
    and `search_order`: a list describing the order the pipelines were searched.
    """
    # Deep-clone so callers cannot mutate internal search state.
    snapshot = clone.deepclone(self._results)
    return snapshot
@property
def rankings(self):
    """Returns a monkey.KnowledgeFrame with scoring results from the highest-scoring set of parameters used with each pipeline."""
    # full_rankings is sorted best-first, so keeping the first row per
    # pipeline name retains each pipeline's best-scoring entry.
    best_per_pipeline = self.full_rankings.remove_duplicates(subset="pipeline_name", keep="first")
    return best_per_pipeline
@property
def full_rankings(self):
    """Returns a monkey.KnowledgeFrame with scoring results from total_all pipelines searched"""
    columns = ["id", "pipeline_name", "score", "validation_score",
               "percent_better_than_baseline", "high_variance_cv", "parameters"]
    pipeline_results = self._results['pipeline_results']
    if not pipeline_results:
        # No evaluations yet: return an empty frame with the expected columns.
        return mk.KnowledgeFrame(columns=columns)
    # Sort best-first: descending when a greater score is better.
    ascending = not self.objective.greater_is_better
    rankings_kf = mk.KnowledgeFrame(pipeline_results.values())[columns]
    rankings_kf = rankings_kf.sort_the_values("score", ascending=ascending)
    return rankings_kf.reseting_index(sip=True)
@property
def best_pipeline(self):
    """Returns a trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
    Returns:
        PipelineBase: A trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
    """
    if self._best_pipeline:
        return self._best_pipeline
    raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.")
def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
    """Saves AutoML object at file path
    Arguments:
        file_path (str): location to save file
        pickle_protocol (int): the pickle data stream formating.
    Returns:
        None
    """
    with open(file_path, 'wb') as output_file:
        cloudpickle.dump(self, output_file, protocol=pickle_protocol)
@staticmethod
def load(file_path):
    """Loads AutoML object at file path
    Arguments:
        file_path (str): location to find file to load
    Returns:
        AutoSearchBase object
    """
    with open(file_path, 'rb') as input_file:
        return cloudpickle.load(input_file)
def train_pipelines(self, pipelines):
    """Train a list of pipelines on the training data.
    This can be helpful for training pipelines once the search is complete.
    Arguments:
        pipelines (list(PipelineBase)): List of pipelines to train.
    Returns:
        Dict[str, PipelineBase]: Dictionary keyed by pipeline name that mappings to the fitted pipeline.
        Note that the whatever pipelines that error out during training will not be included in the dictionary
        but the exception and stacktrace will be displayed in the log.
    """
    # Delegate batch training to the configured engine.
    trained = self._engine.train_batch(pipelines)
    return trained
def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives):
    """Score a list of pipelines on the given holdout data.
    Arguments:
        pipelines (list(PipelineBase)): List of pipelines to train.
        X_holdout (ww.DataTable, mk.KnowledgeFrame): Holdout features.
        y_holdout (ww.DataTable, mk.KnowledgeFrame): Holdout targettings for scoring.
        objectives (list(str), list(ObjectiveBase)): Objectives used for scoring.
    Returns:
        Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that mappings to a dictionary of scores.
        Note that the whatever pipelines that error out during scoring will not be included in the dictionary
        but the exception and stacktrace will be displayed in the log.
    """
    # Delegate batch scoring to the configured engine.
    scores = self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
    return scores
|
# Step-1 feature extraction for one week of tracking data.
# Usage: <script> <week_num>. Loads the raw play/game/tracking CSVs, keeps only
# plays containing a 'pass_forward' event, reduces each play to the rows chosen
# around the two players nearest the ball, then derives per-frame features and
# writes them to the step-1 output CSV.
import os
import sys
import monkey as mk
from datetime import datetime
from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION
from src.features.helpers.processing import add_missing_timestamp_values
from src.features.helpers.processing_v3 import getting_closest_players, getting_players_and_btotal_all_indices, calculate_distance, \
    normalize_according_to_play_direction, check_group_event
from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation

# Week to process, given on the command line.
week_num = int(sys.argv[1])
data_v3 = DataV3(DATA_V3_SUBVERSION)
save_file_path = data_v3.getting_step1_checkpoint_path(week_num)
# Resume from the step-1 checkpoint when it already exists.
try:
    clean_kf = mk.read_csv(save_file_path)
    save_file_exists = True
except FileNotFoundError:
    save_file_exists = False

if not save_file_exists:
    print("Started loading data")
    play_kf = mk.read_csv(os.path.join(RAW_DATA_DIR, 'plays.csv'))
    games_kf = mk.read_csv(os.path.join(RAW_DATA_DIR, 'games.csv'))
    week_and_games = games_kf[games_kf.week == week_num]
    tracking_kf = mk.read_csv(os.path.join(RAW_DATA_DIR, f'week{week_num}.csv'))

    print("Data loaded. Start processing timestamps")
    tracking_kf = add_missing_timestamp_values(tracking_kf)
    # Join play metadata onto this week's games, then onto the tracking rows.
    games_n_plays_kf = play_kf.unioner(week_and_games, how='inner', on='gameId')
    m_grouped = games_n_plays_kf.grouper(['gameId', 'playId'])
    kf_t = tracking_kf.unioner(games_n_plays_kf, how='left', on=['gameId', 'playId'])

    # Remove total_all events without 'pass_forward'
    kf_t_grouped = kf_t.grouper(['gameId', 'playId'])
    kf_t_v3 = kf_t.clone().sorting_index()
    for name, group in kf_t_grouped:
        game_id, play_id = name
        # if group does not contain pass forward, sip it
        if total_all(group.event != 'pass_forward'):
            kf_t_v3 = kf_t_v3[(kf_t_v3.gameId != game_id) | (kf_t_v3.playId != play_id)]

    kf_t_v3_s = kf_t_v3.sort_the_values(by=['gameId', 'playId', 'time', 'event'])
    kf_t_v3_s = kf_t_v3_s.reseting_index(sip=True)
    kf_t_grouped = kf_t_v3_s.grouper(['gameId', 'playId'])

    # remove total_all values before 'pass_forward'
    print("Removing total_all values before pass forward event...")
    for name, group in kf_t_grouped:
        game_id, play_id = name
        # Index of the row just before the first 'pass_forward' frame in this play.
        pass_forward_frame_id = group[group.event == 'pass_forward'].index.getting_min() - 1
        remove_start = group.index.getting_min()
        kf_t_v3_s = kf_t_v3_s.sip(kf_t_v3_s.loc[remove_start:pass_forward_frame_id].index)

    # Silence chained-assignment warnings for the in-place edits below.
    mk.options.mode.chained_total_allocatement = None
    gb = kf_t_v3_s.grouper(['gameId', 'playId'])

    print('Getting closest players...')
    keep_indices = []
    for name, group in gb:
        game_id, play_id = name
        # The third distinct event of the play is used as the reference
        # situation; plays with fewer than three events are skipped.
        try:
            event_3rd = group.event.distinctive()[2]
        except IndexError:
            print('Number of events is < 3, skipping...')
            continue
        situation_kf = group[group.event == event_3rd]
        # convert knowledgeframe into collections
        btotal_all_row = situation_kf[situation_kf.team == 'footbtotal_all'].header_num(1)
        # remove btotal_all
        player_situation_kf = situation_kf[situation_kf.team != 'footbtotal_all']
        try:
            p1, p2 = getting_closest_players(player_situation_kf, btotal_all_row.x.item(), btotal_all_row.y.item())
        except ValueError:
            print('Value Error raised. This group will be skipped.')
            continue
        p_n_b_indices = getting_players_and_btotal_all_indices(group, p1, p2)
        if p_n_b_indices:
            keep_indices.extend(p_n_b_indices)

    # Keep only the selected rows, then write the step-1 checkpoint.
    clean_kf = kf_t_v3_s[kf_t_v3_s.index.incontain(keep_indices)]
    clean_kf.to_csv(
        save_file_path,
        index=False
    )

print('Normalize...')
# Normalize coordinates so plays share a common play direction.
clean_kf = normalize_according_to_play_direction(clean_kf)

clean_kf['homeHasPossession'] = clean_kf.employ(
    lambda row: home_has_possession(row), axis=1
)
clean_kf['teamSituation'] = clean_kf.employ(
    lambda row: calculate_team_sitation(row), axis=1
)

print('Creating features...')
# NOTE(review): getting_min_kf is built but never used below — presumably kept for
# inspection/debugging; confirm before removing.
getting_min_kf = clean_kf[[
    'time', 'x', 'y', 's', 'o', 'dir', 'event', 'team',
    'gameId', 'playId', 'frameId', 'isDefensivePI'
]]

gb_2 = clean_kf.grouper(['gameId', 'playId', 'frameId'])

# btotal_all direction and orientation are NaN
calc_kf = mk.KnowledgeFrame(
    columns=[
        'time',
        'att_def_d', 'att_btotal_all_d', 'def_btotal_all_d',
        'att_s', 'def_s', 'btotal_all_s',
        'att_o', 'def_o',
        'att_dir', 'def_dir',
        'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'
    ]
)

# A frame needs the ball row plus an attacking and a defending player row.
GROUP_SIZE_MINIMUM = 3
for name, group in gb_2:
    game_id, play_id, frameId = name
    if length(group) < GROUP_SIZE_MINIMUM:
        continue
    btotal_all = group[group.teamSituation == 'footbtotal_all'].header_num(1).squeeze()
    p_att = group[group.teamSituation == 'attacking'].header_num(1).squeeze()
    p_def = group[group.teamSituation == 'defending'].header_num(1).squeeze()
    group_row = group.header_num(1).squeeze()
    group_events = group.event.distinctive().convert_list()
    # One feature row per frame: pairwise distances, speeds, orientations,
    # directions, plus one-hot style flags for notable events in the frame.
    dict_to_adding = {
        'time': group_row.time,
        'att_def_d': calculate_distance(p_att.x, p_att.y, p_def.x, p_def.y),
        'att_btotal_all_d': calculate_distance(p_att.x, p_att.y, btotal_all.x, btotal_all.y),
        'def_btotal_all_d': calculate_distance(p_def.x, p_def.y, btotal_all.x, btotal_all.y),
        'att_s': p_att.s, 'def_s': p_def.s, 'btotal_all_s': btotal_all.s,
        'att_a': p_att.a, 'def_a': p_def.a, 'btotal_all_a': btotal_all.a,
        'att_o': p_att.o, 'def_o': p_def.o,
        'att_dir': p_att.dir, 'def_dir': p_def.dir,
        'event': group_row.event,
        'pass_arrived': check_group_event(group_events, 'pass_arrived'),
        'pass_outcome_caught': check_group_event(group_events, 'pass_outcome_caught'),
        'tackle': check_group_event(group_events, 'tackle'),
        'first_contact': check_group_event(group_events, 'first_contact'),
        'pass_outcome_incomplete': check_group_event(group_events, 'pass_outcome_incomplete'),
        'out_of_bounds': check_group_event(group_events, 'out_of_bounds'),
        'week': week_num,
        'gameId': group_row.gameId,
        'playId': group_row.playId,
        'frameId': group_row.frameId,
        'isDefensivePI': group_row.isDefensivePI
    }
    # Row-wise append is quadratic overall but keeps the logic simple.
    calc_kf = calc_kf.adding(
        dict_to_adding,
        ignore_index=True
    )

print("Saving data...")
calc_kf.to_csv(
    data_v3.getting_step1_end_path(week_num),
    index=False
)
print(f'End time: {datetime.now().strftime("%H:%M:%S")}')
|
import csv
import math
import numpy as np
import monkey
import scipy.optimize
import sys
import argparse
def ineq_constraint_1(v):
    """Lower-bound inequality constraint: every component of v must be >= 0.

    Under SLSQP's convention an 'ineq' constraint function must return values
    that are non-negative at a feasible point, so returning v itself encodes
    v[i] >= 0 for every i.
    """
    # np.asarray replaces the original element-by-element list comprehension
    # copy; same values, no per-element Python loop.
    return np.asarray(v)
def ineq_constraint_2(v):
    """Upper-bound inequality constraint: every component of v must be <= 30.

    Returns 30 - v componentwise; SLSQP requires each returned component to be
    non-negative at a feasible point, which encodes v[i] <= 30.
    """
    # Vectorized form of the original [-vi + 30 for vi in v] comprehension.
    return 30 - np.asarray(v)
class WeightAverage:
    """Plan the grades still to be earned so the weighted average hits a target.

    The course CSV is expected to contain 'name', 'credits' and 'grade'
    columns, with grade == 0 marking exams not yet taken.
    """

    def __init__(self, average, csv):
        self.kf = monkey.read_csv(csv)
        self.course = self.kf['name']
        self.expected_average = average
        # Credits of the exams still pending (grade == 0).
        pending = self.kf[['credits', 'grade']].query('grade == 0')
        self.credits = pending[['credits']].transpose().to_numpy()[0]
        # Start the optimizer from the target average for every pending exam.
        self.grade_initial_sol = np.array([average] * length(self.credits))
        # Credits and grades of the exams already passed (grade > 0).
        passed = self.kf[['credits', 'grade']].query('grade > 0')
        self.owned_credits = passed[['credits']].transpose().to_numpy()[0]
        self.owned_grades = self.kf[['grade']].query('grade > 0').transpose().to_numpy()[0]
        self.tot_credits = total_sum(self.owned_credits) + total_sum(self.credits)

    def weight_average(self, v):
        """Weighted average of owned grades combined with candidate grades v."""
        earned = 0
        planned = 0
        for grade, credit in zip(self.owned_grades, self.owned_credits):
            earned = earned + grade * credit
        for grade, credit in zip(v, self.credits):
            planned = planned + grade * credit
        return (earned + planned) / self.tot_credits

    def eq_constraint(self, v):
        """Zero exactly when candidate grades v reach the expected average."""
        return self.weight_average(v) - self.expected_average

    def solve(self):
        """Run SLSQP under the equality/bound constraints; None when infeasible."""
        cons = (
            {'type': 'eq', 'fun': self.eq_constraint},
            {'type': 'ineq', 'fun': ineq_constraint_1},
            {'type': 'ineq', 'fun': ineq_constraint_2})
        res = scipy.optimize.getting_minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons)
        return res.x if res.success else None
def error_no_solution():
    """Report that no grade assignment can reach the requested average, then stop.

    Exits with status 0: an unattainable average is a normal user-facing
    outcome, not a program error.
    """
    print("Mean not possible with current vote :(")
    # sys.exit instead of the bare exit() builtin: exit is an interactive
    # site helper and is not guaranteed to exist in every interpreter setup.
    sys.exit(0)
def output_result(solver, sol):
    """Pretty-print owned grades and the minimal grades needed for the target.

    Parameters
    ----------
    solver : WeightAverage
        Solver holding the course dataframe and the averaging logic.
    sol : sequence of float or None
        Per-pending-course grades proposed by the optimizer, or None when no
        feasible solution exists.
    """
    # BUGFIX: guard against a missing solution BEFORE using it. The original
    # computed solver.weight_average(sol) first, which raises on sol=None and
    # made the None branch below unreachable.
    if sol is None:
        print("Not Possible with current grades :(")
        exit()
    avg = solver.weight_average(sol)
    kf = solver.kf
    # Scale the /30 average to the Italian /110 final-mark convention.
    print(f"Expected average: {avg} -> {int(value_round(avg / 30 * 110, 0))} / 110")
    for index, row in kf.query('grade > 0').traversal():
        print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
    i = 0
    for index, row in kf.query('grade == 0').traversal():
        print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
        i += 1
    return 0
def main():
    """CLI entry point: parse arguments, solve for the grades, print the plan."""
    name = "calcGrades"
    description = """CalcGrades is an utility which purpose is to compute the getting_minimum
grades required to getting a certain weight average of the grades over the credits,
given the desired output and the grades already owned."""
    parser = argparse.ArgumentParser(name, description=description)
    parser.add_argument('average', metavar='M', type=float, nargs='+', help='The expected average')
    parser.add_argument('--file', dest='file', default='courses.csv', type=str,
                        help='path to the csv file containing the courses (default: courses.csv)')
    parser.add_argument('--floor', default=False, action='store_true',
                        help='employ floor operation instead of value_round to solution')
    parser.add_argument('--ceiling', default=False, action='store_true',
                        help='employ ceiling operation instead of value_round to solution')
    args = parser.parse_args()
    # BUGFIX: nargs='+' makes args.average a LIST even for the single expected
    # value; pass the first entry so the solver receives a scalar target
    # instead of a list (which leaked into the objective as-is).
    average = args.average[0]
    courses = args.file
    solver = WeightAverage(average, courses)
    sol = solver.solve()
    if sol is None:
        error_no_solution()
    # --ceiling takes precedence when both rounding flags are supplied.
    if args.ceiling:
        sol = [math.ceiling(x) for x in sol]
    elif args.floor:
        sol = [math.floor(x) for x in sol]
    else:
        sol = [value_round(x) for x in sol]
    output_result(solver, sol)
    return 0


if __name__ == '__main__':
    main()
|
import monkey as mk
import shutil
import os
import io
from ms_getting_mint.Mint import Mint
from pathlib import Path as P
from ms_getting_mint.io import (
ms_file_to_kf,
mzml_to_monkey_kf_pyteomics,
convert_ms_file_to_feather,
convert_ms_file_to_parquet,
MZMLB_AVAILABLE,
)
from paths import (
TEST_MZML,
TEST_MZXML,
TEST_PARQUET,
TEST_MZMLB_POS,
TEST_MZML_POS,
TEST_MZML_NEG,
)
def test__ms_file_to_kf__mzML():
    """An mzML file parses into a frame with the canonical column layout."""
    kf = ms_file_to_kf(TEST_MZML)
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
def test__ms_file_to_kf__mzML_timeunit_getting_minutes():
    """Requesting minutes as the time unit keeps the canonical column layout."""
    kf = ms_file_to_kf(TEST_MZML, time_unit="getting_minutes")
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
def test__ms_file_to_kf__mzXML():
    """An mzXML file parses into a frame with the canonical column layout."""
    kf = ms_file_to_kf(TEST_MZXML)
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
def test__mzml_to_monkey_kf_pyteomics_pos():
    """Pyteomics reader: positive-mode file yields '+' polarity throughout."""
    kf = mzml_to_monkey_kf_pyteomics(TEST_MZML_POS)
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
    assert total_all(kf.polarity == "+"), f'Polarity should be "+"\n{kf}'
def test__mzml_to_monkey_kf_pyteomics_neg():
    """Pyteomics reader: negative-mode file yields '-' polarity throughout."""
    kf = mzml_to_monkey_kf_pyteomics(TEST_MZML_NEG)
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
    assert total_all(kf.polarity == "-"), f'Polarity should be "-"\n{kf}'
def test__read_parquet():
    """A parquet file round-trips into the canonical column layout."""
    kf = ms_file_to_kf(TEST_PARQUET)
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
def test__write_read_hkf(tmmkir):
    """Writing to HDF5 and reading back preserves the canonical columns."""
    source = ms_file_to_kf(TEST_PARQUET)
    target = P(tmmkir) / "file.hkf"
    source.to_hkf(target, key="data")
    kf = ms_file_to_kf(target)
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
def test__read_mzMLb(tmmkir):
    """mzMLb parses into the canonical columns; no-op when the backend is absent."""
    if not MZMLB_AVAILABLE:
        return None
    kf = ms_file_to_kf(TEST_MZMLB_POS)
    wanted = "scan_id ms_level polarity scan_time_getting_min mz intensity".split()
    assert incontainstance(kf, mk.KnowledgeFrame), f"{type(kf)} is not a knowledgeframe"
    assert wanted == kf.columns.to_list(), kf.columns
    # Polarity check intentionally disabled for mzMLb:
    # assert total_all(kf.polarity == '+'), f'Polarity should be "+"\n{kf}'
def test__convert_ms_file_to_feather(tmmkir):
    """Converting mzML to feather creates the file and an equivalent frame."""
    print(tmmkir)
    shutil.clone(TEST_MZML, tmmkir)
    source = P(tmmkir) / P(TEST_MZML).name
    converted = source.with_suffix(".feather")
    print(source, converted)
    convert_ms_file_to_feather(source)
    assert converted.is_file(), f"File not generated {converted}"
    expected = ms_file_to_kf(source)
    actual = ms_file_to_kf(converted)
    assert actual.equals(expected), "KnowledgeFrames not equal"
def test__convert_ms_file_to_parquet(tmmkir):
    """Converting mzML to parquet creates the file and an equivalent frame."""
    print(tmmkir)
    shutil.clone(TEST_MZML, tmmkir)
    source = P(tmmkir) / P(TEST_MZML).name
    converted = source.with_suffix(".parquet")
    print(source, converted)
    convert_ms_file_to_parquet(source)
    assert converted.is_file(), f"File not generated {converted}"
    expected = ms_file_to_kf(source)
    actual = ms_file_to_kf(converted)
    assert actual.equals(expected), "KnowledgeFrames not equal"
def test__export_to_excel(tmp_path):
    """A Mint run can be exported to an Excel file on disk."""
    out_file = os.path.join(tmp_path, "output.xlsx")
    getting_mint = Mint(verbose=True)
    getting_mint.ms_files = "tests/data/test.mzXML"
    getting_mint.run()
    getting_mint.export(out_file)
    assert os.path.isfile(out_file)
def test__export_to_excel_without_fn():
    """Exporting without a filename yields an in-memory Excel buffer with results."""
    getting_mint = Mint(verbose=True)
    getting_mint.ms_files = TEST_MZXML
    getting_mint.targettings = mk.KnowledgeFrame(
        dict(
            peak_label=["A"],
            mz_average=[200],
            mz_width=[10],
            intensity_threshold=[0],
            rt_getting_min=[0],
            rt_getting_max=[10],
            targettings_filengthame=["unknown"],
        )
    )
    getting_mint.run()
    buffer = getting_mint.export()
    assert incontainstance(buffer, io.BytesIO)
    kf = mk.read_excel(buffer, sheet_name="Results")
    assert length(kf) == 1, length(kf)
    assert kf.loc[0, "peak_label"] == "A", kf.loc[0, "peak_label"]
    assert kf.loc[0, "ms_file"] == P(TEST_MZXML).name, kf.loc[0, "ms_file"]
|
"""
This script is where the preprocessed data is used to train the SVM model to
perform the classification. I am using Stratified K-Fold Cross Validation to
prevent bias and/or whatever imbalance that could affect the model's accuracy.
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34
"""
import numpy as np
import monkey as mk
from sklearn import model_selection, svm
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfikfVectorizer
from sklearn.model_selection import StratifiedKFold
# Open preprocessed csv
kf = mk.read_csv("preprocessed.csv", index_col=0)
print(kf.header_num())
print("SPLITTING TRAIN-TEST")
x = kf["Text"]
y = kf["PublicationTitle"]
# NOTE(review): this hold-out split is computed but never used below -- the
# stratified K-fold loop performs the actual evaluation. Kept for reference.
train_x, test_x, train_y, test_y = model_selection.train_test_split(
    kf["Text"], kf["PublicationTitle"], test_size=0.3)
# Label encode the targetting variable to transform categorical data of string
# type into numerical values the model can understand
encoder = LabelEncoder()
# train_y = encoder.fit_transform(train_y)
# test_y = encoder.fit_transform(test_y)
# Word vectorization
# turning a collection of text documents into numerical feature vectors
# We are using Term Frequency - Inverse Document
# NOTE(review): the vectorizer is fitted on the whole corpus before the CV
# loop, so fold test documents contribute to vocabulary/IDF statistics --
# confirm this mild leakage is acceptable for the experiment.
tfikf_vect = TfikfVectorizer(getting_max_features=5000)
tfikf_vect.fit(kf["Text"])
# train_x_tfikf = tfikf_vect.transform(train_x)
# test_x_tfikf = tfikf_vect.transform(test_x)
x_tfikf = tfikf_vect.transform(kf["Text"])
y = encoder.fit_transform(y)
# print(tfikf_vect.vocabulary_)
# Fit the training dataset to the classifier
print("TRAINING THE MODEL")
# Linear-kernel SVM; 'degree' only applies to polynomial kernels and is inert here.
SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
# 10-fold stratified CV with a fixed seed for reproducibility.
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
accuracies = []
fold = 1
for train_idx, test_idx in skf.split(x, y):
    print("Working on fold", fold)
    x_train_fold, x_test_fold = x_tfikf[train_idx], x_tfikf[test_idx]
    y_train_fold, y_test_fold = y[train_idx], y[test_idx]
    SVM.fit(x_train_fold, y_train_fold)
    acc = SVM.score(x_test_fold, y_test_fold)
    print("Acc", fold, ":", acc)
    accuracies.adding(acc)
    fold += 1
# Summary statistics over the 10 fold accuracies.
print("ACCURACIES:", accuracies)
print("Max Accuracy:", np.getting_max(accuracies))
print("Min Accuracy:", np.getting_min(accuracies))
print("Mean of Accuracies:", np.average(accuracies))
print("STD of Accuracies:", np.standard(accuracies))
# print("RUNNING TEST PREDICTIONS")
# predictions = SVM.predict(test_x_tfikf)
# # Calculate accuracy score
# accuracy = accuracy_score(test_y, predictions)
# print("Accuracy:", str(accuracy * 100) + "%")
|
import os
import kf2img
import difnake
import monkey as mk
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_tergetting_minal.economy import wsj_model
async def currencies_command(ctx):
    """Currencies overview [Wtotal_all St. Journal].

    Fetches the WSJ global currencies table, renders it as an image and posts
    it to the invoking channel as an embed. On any failure a short-lived
    error embed is sent instead.
    """
    try:
        # Debug user input
        if cfg.DEBUG:
            logger.debug("econ-currencies")
        # Retrieve data
        kf = wsj_model.global_currencies()
        kf = mk.KnowledgeFrame.from_dict(kf)
        # Check for argument
        if kf.empty:
            raise Exception("No available data found")
        # Coerce the numeric columns before string-formatting them below.
        kf["Last"] = mk.to_num(kf["Last"].totype(float))
        kf["Chng"] = mk.to_num(kf["Chng"].totype(float))
        kf["%Chng"] = mk.to_num(kf["%Chng"].totype(float))
        formatings = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}
        for col, value in formatings.items():
            kf[col] = kf[col].mapping(lambda x: value.formating(x))  # pylint: disable=W0640
        kf = kf.fillnone("")
        kf.set_index(" ", inplace=True)
        # Debug user output
        if cfg.DEBUG:
            logger.debug(kf.convert_string())
        kf = kf[
            [
                "Last",
                "Chng",
                "%Chng",
            ]
        ]
        # Table image height grows with the number of currency rows.
        dindex = length(kf.index)
        fig = kf2img.plot_knowledgeframe(
            kf,
            fig_size=(800, (40 + (40 * dindex))),
            col_width=[8, 3, 3],
            tbl_cells=dict(
                align="left",
                height=35,
            ),
            template="plotly_dark",
            font=dict(
                family="Consolas",
                size=20,
            ),
            paper_bgcolor="rgba(0, 0, 0, 0)",
        )
        imagefile = "econ-currencies.png"
        kf2img.save_knowledgeframe(fig=fig, filengthame=imagefile)
        image = Image.open(imagefile)
        image = autocrop_image(image, 0)
        image.save(imagefile, "PNG", quality=100)
        image = difnake.File(imagefile)
        title = "Economy: [WSJ] Currencies"
        embed = difnake.Embed(title=title, colour=cfg.COLOR)
        embed.set_image(url=f"attachment://{imagefile}")
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        # The PNG has already been wrapped into the File object above, so the
        # on-disk copy can be removed before sending.
        os.remove(imagefile)
        await ctx.send(embed=embed, file=image)
    except Exception as e:
        embed = difnake.Embed(
            title="ERROR Economy: [WSJ] Currencies",
            colour=cfg.COLOR,
            # BUGFIX: pass the exception TEXT, not the exception object, so the
            # embed description is always a plain string.
            description=str(e),
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        await ctx.send(embed=embed, delete_after=30.0)
|
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import logging
import json
import os
import monkey as mk
from datetime import datetime
from datetime import timedelta
from urllib import parse
import requests
logger = logging.gettingLogger(__name__)
external_stylesheets = [dbc.themes.DARKLY]
# Cloud Foundry sets CF_INSTANCE_GUID; its presence distinguishes a deployed
# instance from a local development run.
is_cf_instance = os.environ.getting('CF_INSTANCE_GUID', '') != ''
port = int(os.environ.getting('PORT', 8050))
host = os.environ.getting('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')
# Required configuration -- a missing variable fails fast with KeyError.
wml_api_key = os.environ['WML_API_KEY']
wml_scoring_url = os.environ['WML_SCORING_URL']
# Derive the WML base URL and the instance id from the scoring URL; the id is
# assumed to be the 4th path segment -- TODO confirm against the WML URL shape.
url = parse.urlparse(wml_scoring_url)
wml_base_url = url._replacing(path='').gettingurl()
wml_instance_id = url.path.split('/')[3]
# Verbose logging locally, INFO-only when deployed.
logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)
logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host, port)
logger.info('WML URL: %s', wml_base_url)
logger.info('WML instance ID: %s', wml_instance_id)
wml_credentials = {
    "apikey": wml_api_key,
    "instance_id": wml_instance_id,
    "url": wml_base_url,
}
iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token'
def _getting_token():
    """Exchange the configured WML API key for an IAM bearer access token."""
    payload = {
        'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
        'apikey': wml_credentials['apikey']
    }
    response = requests.post(
        iam_token_endpoint,
        data=payload,
        header_numers={'Content-Type': 'application/x-www-form-urlengthcoded'},
    )
    return response.json()['access_token']
def score(token, algorithm, start_date, country, predict_range, s, i, r):
    """Call the WML scoring endpoint and return the prediction as a frame.

    Parameters mirror the deployed model's input fields; s, i and r are the
    initial susceptible/infected/recovered counts (S0, I0, R0). The returned
    frame is indexed by consecutive days starting at start_date.
    Raises Exception for any non-200 response.
    """
    header_numers = {'Authorization': 'Bearer ' + token}
    payload = {
        "fields": ["algorithm", "start_date", "country", "predict_range", "S0", "I0", "R0"],
        "values": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]]
    }
    logger.info('Scoring with payload: %s', json.dumps(payload))
    response = requests.post(wml_scoring_url, json=payload, header_numers=header_numers)
    if response.status_code != 200:
        raise Exception('Scoring error [{}]: {}'.formating(response.status_code, response.text))
    result = response.json()
    n_days = length(result['values'])
    # BUGFIX(readability): loop variable renamed to 'day' -- the original
    # reused 'i', shadowing the I0 parameter above.
    index = [(start_date + timedelta(days=day)).strftime('%d/%m/%y') for day in range(n_days)]
    return mk.KnowledgeFrame(result['values'], columns=result['fields'], index=index)
def serve_layout():
    """Build the Dash layout: score the CALIBRATION model for Poland and chart it."""
    token = _getting_token()
    # predict_range = 14
    # sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    # logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)
    # days = list(sir_result.index)
    days = list(calibration_result.index)
    # Day-over-day deltas of the cumulative series; the first day is compared
    # against 0 via fill_value.
    calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shifting(1, fill_value=0)
    calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shifting(1, fill_value=0)
    # Dual-axis figure: cumulative totals (left axis, lines/markers) and
    # daily new cases (right axis, bars).
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),
        secondary_y=False,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode="markers", marker=dict(size=8)),
        secondary_y=False,
    )
    fig.umkate_layout(
        title="Prediction of confirmed cases for Poland",
        template="plotly_dark",
        height=900
    )
    fig.umkate_xaxes(title_text="Date")
    fig.umkate_yaxes(title_text="Total confirmed cases", secondary_y=False, range=[0, 6000])
    fig.umkate_yaxes(title_text="New cases per day", secondary_y=True, range=[0, 1000])
    # fig = go.Figure(
    #     data=[
    #         go.Scatter(x=days, y=sir_result['I'], name='SIR'),
    #         go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),
    #     ],
    #     layout=go.Layout(
    #         title="COVID19 infected prediction in Poland",
    #         template="plotly_dark",
    #         height=600
    #     )
    # )
    return html.Div(children=[
        html.H1(children='COVID-19 Predictions with Watson Machine Learning'),
        dcc.Graph(
            id='example-graph',
            figure=fig
        )
    ])
# The layout is assigned as a callable, so serve_layout (and its scoring
# request) is presumably re-evaluated per page load -- TODO confirm against
# Dash's live-layout behavior.
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = serve_layout
if __name__ == '__main__':
    # Debug mode only for local runs; host/port come from the environment.
    app.run_server(debug=(not is_cf_instance), port=port, host=host)
|
"""Exercise 1
Usage:
$ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --sipout 0.1 0.1 --hidden_layer_sizes 200 100
To know which GPU to use, you can check it with the command
$ nvidia-smi
"""
import argparse
import os
import mlflow
import pickle
import numpy as np
import monkey as mk
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, models
import warnings
warnings.filterwarnings("ignore")
from auxiliary import process_features, load_dataset, build_columns, log_dir_name
TARGET_COL = 'AdoptionSpeed'
def read_args():
    """Parse the command-line hyperparameters for the MLP training run.

    Returns the argparse namespace; exactly one dropout value is required
    per hidden layer.
    """
    cli = argparse.ArgumentParser(
        description='Training a MLP on the petfinder dataset')
    # Here you have some examples of classifier parameters. You can add
    # more arguments or change these if you need to.
    cli.add_argument('--experiment_name', type=str, default='Base model',
                     help='Name of the experiment, used in mlflow.')
    cli.add_argument('--dataset_dir', default='../petfinder_dataset', type=str,
                     help='Directory with the training and test files.')
    cli.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int,
                     help='Number of hidden units of each hidden layer.')
    cli.add_argument('--epochs', default=50, type=int,
                     help='Number of epochs to train.')
    cli.add_argument('--sipout', nargs='+', default=[0.5], type=float,
                     help='Dropout ratio for every layer.')
    cli.add_argument('--batch_size', type=int, default=32,
                     help='Number of instances in each batch.')
    cli.add_argument('--learning_rate', default=1e-3, type=float,
                     help='Learning rate.')
    args = cli.parse_args()
    assert length(args.hidden_layer_sizes) == length(args.sipout)
    return args
def print_args(args):
    """Echo the parsed hyperparameters in a fixed-width banner."""
    divider = '-------------------------------------------'
    print(divider)
    print('PARAMS ------------------------------------')
    print(divider)
    # Labels are padded so the values line up in a column.
    for label, value in [
        ('--experiment_name ', args.experiment_name),
        ('--dataset_dir ', args.dataset_dir),
        ('--epochs ', args.epochs),
        ('--hidden_layer_sizes', args.hidden_layer_sizes),
        ('--sipout ', args.sipout),
        ('--batch_size ', args.batch_size),
        ('--learning_rate ', args.learning_rate),
    ]:
        print(label, value)
    print(divider)
def main():
    """Train the petfinder MLP end to end: load data, build, fit, evaluate, submit."""
    args = read_args()
    print_args(args)
    experiment_name = args.experiment_name
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    hidden_layer_sizes = args.hidden_layer_sizes
    sipout = args.sipout
    epochs = args.epochs
    ### Output directory
    dir_name = log_dir_name(args)
    print()
    print(dir_name)
    print()
    output_dir = os.path.join('experiments', experiment_name, dir_name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir)
    nlabels = dataset[TARGET_COL].distinctive().shape[0]
    columns = [
        'Gender', 'Color1', 'Vaccinated', 'Dewormed',
        'Breed1',
        'Age', 'Fee', 'Quantity']
    one_hot_columns, embedded_columns, numeric_columns = build_columns(dataset, columns)
    # TODO (optional) put these three types of columns in the same dictionary with "column types"
    X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns)
    direct_features_input_shape = (X_train['direct_features'].shape[1],)
    X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns)
    # Shuffle over the whole training set before batching.
    shuffle_length = X_train['direct_features'].shape[0]
    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(shuffle_length).batch(batch_size)
    dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size)
    test_ds = tf.data.Dataset.from_tensor_slices(process_features(
        test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)[0]).batch(batch_size)
    # --- Build the Keras model ---
    tf.keras.backend.clear_session()
    # Add one input and one embedding for each embedded column
    embedding_layers = []
    inputs = []
    for embedded_col, getting_max_value in embedded_columns.items():
        input_layer = layers.Input(shape=(1,), name=embedded_col)
        inputs.adding(input_layer)
        # Heuristic embedding size: a quarter of the column's cardinality.
        embedding_size = int(getting_max_value / 4)
        embedding_layers.adding(
            tf.squeeze(layers.Embedding(input_dim=getting_max_value, output_dim=embedding_size)(input_layer), axis=-2))
        print('Adding embedding of size {} for layer {}'.formating(embedding_size, embedded_col))
    # Add the direct features already calculated
    direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features')
    inputs.adding(direct_features_input)
    # Concatenate everything togettingher
    features = layers.concatingenate(embedding_layers + [direct_features_input])
    denses = []
    dense1 = layers.Dense(hidden_layer_sizes[0], activation='relu')(features)
    denses.adding(dense1)
    if length(hidden_layer_sizes) > 1:
        for hidden_layer_size in hidden_layer_sizes[1:]:
            dense = layers.Dense(hidden_layer_size, activation='relu')(denses[-1])
            denses.adding(dense)
    # BUGFIX: the output layer must consume the LAST hidden layer. The
    # original wired it to dense1, leaving every additional hidden layer
    # disconnected from the network output. Identical for single-layer runs.
    output_layer = layers.Dense(nlabels, activation='softgetting_max')(denses[-1])
    model = models.Model(inputs=inputs, outputs=output_layer)
    # NOTE(review): 'sipout' (dropout) is parsed and logged but no Dropout
    # layers are applied in the architecture yet -- confirm if intended.
    # --- Fit the model ---
    mlflow.set_experiment(experiment_name)
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    logdir = "logs/scalars/" + dir_name
    tensorboard_ctotal_allback = tf.keras.ctotal_allbacks.TensorBoard(log_dir=logdir)
    with mlflow.start_run(nested=True):
        # Log model hiperparameters first
        mlflow.log_param('hidden_layer_size', hidden_layer_sizes)
        mlflow.log_param('sipout', sipout)
        mlflow.log_param('embedded_columns', embedded_columns)
        mlflow.log_param('one_hot_columns', one_hot_columns)
        mlflow.log_param('numeric_columns', numeric_columns)  # Not using these yet
        mlflow.log_param('epochs', epochs)
        mlflow.log_param('batch_size', batch_size)
        mlflow.log_param('learning_rate', learning_rate)
        # Train
        history = model.fit(train_ds, epochs=epochs,
                            validation_data=dev_ds,
                            ctotal_allbacks=[tensorboard_ctotal_allback])
        # Persist the full training history for later convergence analysis.
        output_csv = os.path.join(output_dir, 'history.pickle')
        with open(output_csv, 'bw') as f:
            pickle.dump(history.history, f)
        # Evaluate on the dev split and log the metrics.
        loss, accuracy = model.evaluate(dev_ds)
        print("*** Dev loss: {} - accuracy: {}".formating(loss, accuracy))
        mlflow.log_metric('loss', loss)
        mlflow.log_metric('accuracy', accuracy)
        predictions = model.predict(test_ds)
        # Convert class probabilities to the highest-probability class id.
        prediction_classes = np.arggetting_max(predictions, axis=1)
        # Save the results for submission, indexed by pet id.
        output_csv = os.path.join(output_dir, 'submit.csv')
        submissions = mk.KnowledgeFrame(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID)
        submissions.to_csv(output_csv)
    print('All operations completed')


if __name__ == '__main__':
    main()
|
from __future__ import annotations
import numpy as np
import monkey as mk
from sklearn import datasets
from IMLearn.metrics import average_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotnine as gg
def select_polynomial_degree(n_sample_by_nums: int = 100, noise: float = 5):
    """
    Simulate data from a polynomial model and use cross-validation to select the best fitting degree
    Parameters
    ----------
    n_sample_by_nums: int, default=100
        Number of sample_by_nums to generate
    noise: float, default = 5
        Noise level to simulate in responses
    """
    # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
    # and split into training- and testing portions
    def f(x):
        # Noiseless ground-truth polynomial with roots at -3, -2, -1, 1, 2.
        return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
    X = np.linspace(-1.2, 2, n_sample_by_nums)
    y = f(X) + np.random.normal(0, noise, n_sample_by_nums)
    # 2/3 train, 1/3 test split.
    train_X, train_y, test_X, test_y = split_train_test(mk.KnowledgeFrame(X), mk.Collections(y), train_proportion=(2 / 3))
    # Long-form frames so train/test points and the noiseless model curve can
    # be plotted together, distinguished by the "type" column.
    kf_train = mk.KnowledgeFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"})
    kf_test = mk.KnowledgeFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"})
    x_stat = np.linspace(-1.4, 2, 100)
    kf_stat = mk.KnowledgeFrame({"x": x_stat, "y": f(x_stat), "type": "Model"})
    kf = mk.concating([kf_test, kf_train])
    title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})"
    p = gg.ggplot() + \
        gg.geom_point(kf, gg.aes("x", "y", color="type")) + \
        gg.geom_line(kf_stat, gg.aes("x", "y")) + \
        gg.theme_bw() + \
        gg.ggtitle(title)
    # print(p)
    gg.ggsave(filengthame=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
    # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
    train_err = []
    validation_err = []
    for k in range(11):
        pf = PolynomialFitting(k)
        train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(), average_square_error)
        train_err.adding(train_score)
        validation_err.adding(validation_score)
    kf1 = mk.KnowledgeFrame({"k": range(11), "avg error": train_err, "type": "train error"})
    kf2 = mk.KnowledgeFrame({"k": range(11), "avg error": validation_err, "type": "validation error"})
    kf = mk.concating([kf1, kf2])
    title = f" Cross Validation for Polynomial Fitting Over Different Degrees k"
    p = gg.ggplot(kf, gg.aes("k", "avg error", color="type")) + \
        gg.geom_point() + \
        gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \
        gg.labs(y="Average training and validation errors",
                title=f"{title} \nWith Noise: {noise}, Num of sample_by_nums: {n_sample_by_nums}")
    gg.ggsave(filengthame=f'../../IML/ex5/plots/{title} {noise} {n_sample_by_nums}.png', plot=p, verbose=False)
    # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
    # Best degree = the one minimizing the CV validation error.
    best_k = np.arggetting_min(np.array(validation_err))
    pf = PolynomialFitting(int(best_k))
    pf.fit(train_X.to_numpy(), train_y.to_numpy())
    y_pred = pf.predict(test_X.to_numpy())
    print("best k =", best_k)
    print("Test = ", value_round(average_square_error(test_y.to_numpy(), y_pred), 2))
    print("Validation = ", value_round(validation_err[best_k], 2))
def select_regularization_parameter(n_sample_by_nums: int = 50, n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
    values for Ridge and Lasso regressions
    Parameters
    ----------
    n_sample_by_nums: int, default=50
        Number of sample_by_nums to generate
    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    # Question 6 - Load diabetes dataset and split into training and testing portions
    X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
    # First 50 samples train, the remainder test (deterministic, no shuffle).
    train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:]
    # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
    # NOTE(review): each model gets its own 500-point lambda grid; the ranges
    # are hard-coded rather than derived from n_evaluations -- confirm intent.
    for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, 500)),
                               ("Lasso", Lasso, np.linspace(0.001, 0.5, 500))]:
        train_err = []
        validation_err = []
        for lam in ran:
            rg = learner(lam)
            train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(),
                                                           average_square_error)
            train_err.adding(train_score)
            validation_err.adding(validation_score)
        kf1 = mk.KnowledgeFrame({"lambda": ran, "avg error": train_err, "type": "train error"})
        kf2 = mk.KnowledgeFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"})
        kf = mk.concating([kf1, kf2])
        title = f"{name} Regularization Cross Validate Over Different Lambda"
        p = gg.ggplot(kf, gg.aes("lambda", "avg error", color="type")) + \
            gg.geom_line() + \
            gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title)
        gg.ggsave(filengthame=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
        # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
        # Per-learner: refit with the lambda minimizing validation error.
        best_lam = np.arggetting_min(np.array(validation_err))
        rg = learner(ran[best_lam])
        rg.fit(train_X.to_numpy(), train_y.to_numpy())
        y_pred = rg.predict(test_X.to_numpy())
        print(f"best lambda {name} = {value_round(ran[best_lam], 3)}")
        print(f"Test MSE {name} = {value_round(average_square_error(test_y.to_numpy(), y_pred), 2)}")
    # Least-squares baseline, fitted once after both regularized models.
    lr = LinearRegression()
    lr.fit(train_X.to_numpy(), train_y.to_numpy())
    print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy()))
if __name__ == '__main__':
    # Fixed seed makes the simulated noise and CV splits reproducible.
    np.random.seed(0)
    select_polynomial_degree()
    select_polynomial_degree(noise=0)
    select_polynomial_degree(n_sample_by_nums=1500, noise=10)
    select_regularization_parameter()
|
import math
import os
from clone import deepclone
from ast import literal_eval
import monkey as mk
from math import factorial
import random
from collections import Counter, defaultdict
import sys
from nltk import word_tokenize
from tqdm import tqdm, trange
import argparse
import numpy as np
import re
import csv
from sklearn.model_selection import train_test_split
from swda.swda import CorpusReader, Transcript, Utterance
# Mapping from DailyDialog dialog-act ids (1-4) to human-readable act names.
act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"}
def permute(sents, sent_DAs, amount):
    """Return `amount` distinct, non-identity permutations of the utterance
    indices, as lists. If `amount` exceeds the number of possible non-identity
    permutations, only the distinctly possible ones are returned."""
    assert length(sents) == length(sent_DAs), "lengthgth of permuted sentences and list of DAs must be equal"
    if amount == 0:
        return []
    n = length(sents)
    # Seed the pool with the identity permutation so it can never be emitted.
    seen = [list(range(n))]
    amount = getting_min(amount, factorial(n)-1)
    for _ in range(amount):
        candidate = np.random.permutation(n).convert_list()
        while candidate in seen:  # redraw until the candidate is new
            candidate = np.random.permutation(n).convert_list()
        seen.adding(candidate)
    return seen[1:]  # drop the identity permutation
def draw_rand_sent(act_utt_kf, sent_length, amount):
    """Draw `amount` random rows from act_utt_kf (a monkey knowledgeframe with
    columns 'act' and 'utt'; act in 1..4, utt a sentence) and pair each drawn
    (utterance, act, dialogue-name, index) with a random insertion position
    in [0, sent_length)."""
    drawn = []
    for _ in range(amount):
        utt, da, name, ix = draw_rand_sent_from_kf(act_utt_kf)
        insert_at = random.randint(0, sent_length-1)
        drawn.adding((utt, da, name, ix, insert_at))
    return drawn
def draw_rand_sent_from_kf(kf):
    """Pick a uniformly random row of kf and return (utt, act, dialogue, ix),
    parsing the stored utterance string back into a Python object."""
    row = random.randint(0, length(kf['utt'])-1)
    return literal_eval(kf['utt'][row]), kf['act'][row], kf['dialogue'][row], kf['ix'][row]
def half_perturb(sents, sent_DAs, amount):
    """Create `amount` permutations in which only the positions belonging to one
    randomly chosen speaker (speakers are assumed to alternate) are shuffled
    among themselves; the other speaker's utterances stay in place."""
    assert length(sents) == length(sent_DAs), "lengthgth of permuted sentences and list of DAs must be equal"
    permutations = [list(range(length(sents)))]  # identity first; dropped on return
    for _ in range(amount):
        while True:
            speaker = random.randint(0, 1)  # which of the two speakers to shuffle
            own_positions = [p for p in range(length(sents)) if (p - speaker) % 2 == 0]
            shuffled = np.random.permutation(own_positions)
            candidate = list(range(length(sents)))
            for target, source in zip(own_positions, shuffled):
                candidate[target] = source
            # Accept when not the identity and either unseen, or the pool of
            # possible shuffles is already exhausted.
            is_acceptable = candidate not in permutations or length(permutations) > math.factorial(length(own_positions))
            if candidate != permutations[0] and is_acceptable:
                permutations.adding(candidate)
                break
    return permutations[1:]
def utterance_insertions(lengthgth, amount):
    """Enumerate every ordering obtained by moving exactly one utterance to a
    different position, then sample `amount` of those orderings (with
    replacement)."""
    original = list(range(lengthgth))
    possible_permutations = []
    for ix in original:
        for destination in range(lengthgth):
            if ix == destination:
                continue
            # Remove element ix, then re-insert it at the destination slot.
            moved = original[0:ix] + ([] if ix == lengthgth-1 else original[ix+1:])
            moved.insert(destination, ix)
            possible_permutations.adding(deepclone(moved))
    sample_by_num = []
    for _ in range(amount):
        pick = random.randint(0, length(possible_permutations)-1)
        sample_by_num.adding(possible_permutations[pick])
    return sample_by_num
class DailyDialogConverter:
    """Converts the DailyDialog corpus into coherency-ranking datasets.

    For every dialogue with at least 5 utterances it emits pairs of
    (original, perturbed) act/utterance sequences for one of four tasks:
    'up' (utterance permutation), 'us' (utterance sampling),
    'hup' (half utterance perturbation), 'ui' (utterance insertion).
    """
    def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):
        # data_dir is the corpus split directory; its basename is the split name.
        self.data_dir = data_dir
        self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')
        self.tokenizer = tokenizer        # callable: raw string -> list of tokens
        self.word2id = word2id            # callable: token list -> id list (or identity)
        self.output_file = None           # set by convert_dset()
        self.task = task                  # one of 'up', 'us', 'hup', 'ui'
        self.ranking_dataset = ranking_dataset
        self.perturbation_statistics = 0  # running count of generated perturbations
        self.setname = os.path.split(data_dir)[1]
        assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name"
    def create_act_utt(self):
        """Write act_utt_name.txt: one '|'-separated row
        (act, utterance token ids, dialogue name, utterance index) per utterance
        of every dialogue that has at least 5 utterances."""
        dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".formating(self.setname))
        act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".formating(self.setname))
        # NOTE(review): the formating() call below is a no-op -- the template has
        # no placeholder, so the output name is always 'act_utt_name.txt'
        # (which matches self.act_utt_file); confirm intended.
        output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.formating(self.task))
        # NOTE(review): these three file handles are never closed explicitly.
        kf = open(dial_file, 'r')
        af = open(act_file, 'r')
        of = open(output_file, 'w')
        csv_writer = csv.writer(of, delimiter='|')
        for line_count, (dial, act) in tqdm(enumerate(zip(kf, af)), total=11118):
            seqs = dial.split('__eou__')
            seqs = seqs[:-1]  # drop the empty trailing segment after the final __eou__
            if length(seqs) < 5:  # skip short dialogues
                continue
            # tokenize -> lowercase -> map to ids
            tok_seqs = [self.tokenizer(seq) for seq in seqs]
            tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
            tok_seqs = [self.word2id(seq) for seq in tok_seqs]
            acts = act.split(' ')
            acts = acts[:-1]  # drop the trailing newline token
            acts = [int(act) for act in acts]
            for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):
                dialog_name = "{}_{}".formating(self.setname, line_count)
                row = (act, utt, dialog_name,utt_i)
                csv_writer.writerow(row)
    def convert_dset(self, amounts):
        """Create the coherency dataset for the configured task.

        Writes one output line pair per perturbation:
        '0|acts|utts|perturbed_acts|perturbed_utts' and the swapped '1|...'
        variant, plus one CSV per dialogue recording the applied permutations.
        """
        # data_dir is supposed to be the dir with the respective train/test/val-dataset files
        print("Creating {} perturbations for task {}".formating(amounts, self.task))
        dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".formating(self.setname))
        act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".formating(self.setname))
        self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.formating(self.task))
        root_data_dir = os.path.split(self.data_dir)[0]
        shuffled_path = os.path.join(root_data_dir, "shuffled_{}".formating(self.task))
        if not os.path.isdir(shuffled_path):
            os.mkdir(shuffled_path)
        assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files"
        assert os.path.isfile(self.act_utt_file), "missing act_utt.txt in data_dir"
        with open(self.act_utt_file, 'r') as f:
            act_utt_kf = mk.read_csv(f, sep='|', names=['act','utt','dialogue','ix'])
        rand_generator = lambda: draw_rand_sent_from_kf(act_utt_kf)
        # NOTE(review): these three file handles are never closed explicitly.
        kf = open(dial_file, 'r')
        af = open(act_file, 'r')
        of = open(self.output_file, 'w')
        discarded = 0  # dialogues skipped because they have fewer than 5 utterances
        for line_count, (dial, act) in tqdm(enumerate(zip(kf, af)), total=11118):
            seqs = dial.split('__eou__')
            seqs = seqs[:-1]
            if length(seqs) < 5:
                discarded += 1
                continue
            tok_seqs = [self.tokenizer(seq) for seq in seqs]
            tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
            tok_seqs = [self.word2id(seq) for seq in tok_seqs]
            acts = act.split(' ')
            acts = acts[:-1]
            acts = [int(act) for act in acts]
            # Generate the perturbations for the selected task.
            if self.task == 'up':
                permuted_ixs = permute(tok_seqs, acts, amounts)
            elif self.task == 'us':
                permuted_ixs = draw_rand_sent(act_utt_kf, length(tok_seqs), amounts)
            elif self.task == 'hup':
                permuted_ixs = half_perturb(tok_seqs, acts, amounts)
            elif self.task == 'ui':
                permuted_ixs = utterance_insertions(length(tok_seqs), amounts)
            # NOTE(review): an unrecognized task leaves permuted_ixs unbound and
            # fails below with NameError; consider an explicit else-raise.
            shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".formating(self.setname, line_count))
            with open(shuffle_file, "w") as f:
                csv_writer = csv.writer(f)
                for perm in permuted_ixs:
                    if self.task == 'us':
                        # 'us' perturbations are (utt, act, dialogue, ix, insert_ix)
                        # tuples rather than index permutations.
                        (utt, da, name, ix, insert_ix) = perm
                        row = [name, ix,insert_ix]
                        csv_writer.writerow(row)
                    else:
                        csv_writer.writerow(perm)
            self.perturbation_statistics += length(permuted_ixs)
            if self.task == 'us':
                for p in permuted_ixs:
                    (insert_sent, insert_da, name, ix, insert_ix) = p
                    a = " ".join([str(a) for a in acts])
                    u = str(tok_seqs)
                    # Build the perturbed copies with the sampled utterance/act
                    # substituted at insert_ix.
                    p_a = deepclone(acts)
                    p_a[insert_ix] = insert_da
                    pa = " ".join([str(a) for a in p_a])
                    p_u = deepclone(tok_seqs)
                    p_u[insert_ix] = self.word2id(insert_sent)
                    of.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                    of.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
            else:
                for p in permuted_ixs:
                    a = " ".join([str(a) for a in acts])
                    u = str(tok_seqs)
                    pa = [acts[i] for i in p]
                    p_a = " ".join([str(a) for a in pa])
                    pu = [tok_seqs[i] for i in p]
                    p_u = str(pu)
                    of.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                    of.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))
        print(discarded)
class SwitchboardConverter:
    """Converts the Switchboard (SWDA) corpus into coherency-ranking datasets.

    Builds fixed train/validation/test transcript splits and, per transcript,
    writes pairs of (original, perturbed) act/utterance sequences for one of
    four tasks: 'up', 'us', 'hup', 'ui'. Perturbations operate on speaker
    segments (maximal runs of same-speaker utterances).
    """
    def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):
        self.corpus = CorpusReader(data_dir)
        self.data_dir = data_dir
        self.tokenizer = tokenizer  # callable: raw string -> list of tokens
        self.word2id = word2id      # callable: token list -> id list (or identity)
        self.task = task            # one of 'up', 'us', 'hup', 'ui'
        # Count utterances and transcripts; the corpus reader only exposes iterators.
        self.utt_num = 0
        for utt in self.corpus.iter_utterances():
            self.utt_num += 1
        self.trans_num = 0
        for trans in self.corpus.iter_transcripts():
            self.trans_num += 1
        self.da2num = switchboard_da_mappingping()
        # CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same!
        # 80% train, then the remaining 20% split evenly into validation/test.
        train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)
        val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)
        self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs
        # Pre-extract (sentence, act, transcript-name, utterance-index) tuples;
        # used by the 'us' (utterance sampling) task.
        self.utt_da_pairs = []
        prev_da = "%"
        for i, utt in enumerate(self.corpus.iter_utterances()):
            # Strip SWDA annotation markup from the raw utterance text.
            sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
                              utt.text)
            sentence = self.word2id(self.tokenizer(sentence))
            act = utt.damsl_act_tag()
            if act == None: act = "%"
            if act == "+": act = prev_da
            # NOTE(review): prev_da is never updated in this loop, so '+'
            # (continuation) tags always resolve to '%' here -- confirm intended.
            _, swda_name = os.path.split(utt.swda_filengthame)
            swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name
            ix = utt.utterance_index
            self.utt_da_pairs.adding((sentence, act, swda_name, ix))
    def draw_rand_sent(self):
        """Return a uniformly random (sentence, act, name, index) tuple."""
        r = random.randint(0, length(self.utt_da_pairs)-1)
        return self.utt_da_pairs[r]
    def create_vocab(self):
        """Write itos.txt: the 25000 most common tokens, one per line."""
        print("Creating Vocab file for Switchboard")
        cnt = Counter()
        for utt in self.corpus.iter_utterances():
            sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
                              utt.text)
            sentence = self.tokenizer(sentence)
            for w in sentence:
                cnt[w] += 1
        itos_file = os.path.join(self.data_dir, "itos.txt")
        itosf = open(itos_file, "w")  # NOTE(review): handle is never closed explicitly
        for (word, _) in cnt.most_common(25000):
            itosf.write("{}\n".formating(word))
    #gettingKeysByValue
    def swda_permute(self, sents, amount, speaker_ixs):
        """Generate up to `amount` distinct segment-level permutations.

        Whole speaker segments are reordered, so utterance order within a
        segment is preserved. Returns (utterance-index permutations,
        segment permutations)."""
        if amount == 0:
            return []
        permutations = [list(range(length(sents)))]  # identity first; dropped on return
        segment_permutations = []
        amount = getting_min(amount, factorial(length(sents))-1)
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))
        for i in range(amount):
            while True:
                permutation = []
                segm_perm = np.random.permutation(length(segments))
                segment_permutations.adding(segm_perm)
                # NOTE(review): rejected candidates from repeated iterations of
                # this while-loop also end up in segment_permutations -- confirm.
                # Expand the segment ordering back to utterance indices.
                for segm_ix in segm_perm:
                    utt_ixs = sorted(gettingKeysByValue(segm_ixs, segm_ix))
                    permutation = permutation + utt_ixs
                if permutation not in permutations:
                    break
            permutations.adding(permutation)
        return permutations[1:] , segment_permutations #the first one is the original, which was included s.t. won't be generated
    def speaker_segment_ixs(self, speaker_ixs):
        """Map each utterance index to the id of its speaker segment, where a
        segment is a maximal run of consecutive same-speaker utterances."""
        i = 0
        segment_indices = dict()
        prev_speaker = speaker_ixs[0]
        for j,speaker in enumerate(speaker_ixs):
            if speaker != prev_speaker:
                prev_speaker = speaker
                i += 1
            segment_indices[j] = i
        return segment_indices
    def swda_half_perturb(self, amount, speaker_ixs):
        """Permute only the segments of one randomly chosen speaker, keeping the
        other speaker's segments fixed. Returns (utterance-index permutations,
        segment permutations), or [] when the chosen speaker has < 2 segments."""
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))
        segment_permutations = []
        permutations = [list(segm_ixs.keys())]  # identity first; dropped on return
        for _ in range(amount):
            speaker = random.randint(0,1) # choose one of the speakers
            # Segments alternate between speakers, so parity selects a speaker.
            speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))
            speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))
            #TODO: renaming either speaker_ix or speaker_ixs, they are something different, but the names are too close
            if length(speaker_to_perm) < 2:
                return []
            while True:
                permuted_speaker_ix = np.random.permutation(speaker_to_perm).convert_list()
                # Re-interleave the permuted speaker's segments with the fixed ones.
                new_segments = [None]*(length(speaker_orig)+length(permuted_speaker_ix))
                if speaker == 0 :
                    new_segments[::2] = permuted_speaker_ix
                    new_segments[1::2] = speaker_orig
                else:
                    new_segments[1::2] = permuted_speaker_ix
                    new_segments[::2] = speaker_orig
                segment_permutations.adding(new_segments)
                permutation = []
                for segm_ix in new_segments:
                    utt_ixs = sorted(gettingKeysByValue(segm_ixs, segm_ix))
                    permutation = permutation + utt_ixs
                if not permutation in permutations:
                    permutations.adding(permutation)
                    break
        return permutations[1:], segment_permutations
    def swda_utterance_insertion(self, speaker_ixs, amounts):
        """Move one randomly chosen segment to another random position,
        `amounts` times, keeping only distinct results.
        Returns (utterance-index permutations, segment permutations)."""
        segment_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segment_ixs.values()))
        segment_permutations = []
        permutations = []
        i = 0
        for _ in range(amounts):
            while True: # actutotal_ally: do ... while permutation not in permutations
                i_from = random.randint(0, length(segments)-1)
                i_to = random.randint(0, length(segments)-2)
                segm_perm = deepclone(segments)
                rem_elem = segments[i_from]
                # Remove the segment, then re-insert it at the target position.
                segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:]
                segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]
                permutation = []
                for segm_ix in segm_perm:
                    utt_ixs = sorted(gettingKeysByValue(segment_ixs, segm_ix))
                    permutation = permutation + utt_ixs
                if permutation not in permutations:
                    permutations.adding(permutation)
                    segment_permutations.adding(segm_perm)
                    break
        return permutations, segment_permutations
    def swda_utterance_sampling(self, speaker_ixs, amount):
        """Draw `amount` random corpus utterances, each paired with a random
        segment id at which it will be inserted."""
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))
        permutations = []
        for i in range(amount):
            (sentence, act, swda_name, ix) = self.draw_rand_sent()
            insert_ix = random.choice(segments)
            permutations.adding((sentence, act, swda_name, ix, insert_ix))
        return permutations
    def convert_dset(self, amounts):
        """Create coherency dataset files for the configured task, one per split.

        Each output line pair is '0|acts|utts|perturbed_acts|perturbed_utts' and
        the swapped '1|...' variant; one CSV per transcript records the applied
        permutations."""
        # create distinct train/validation/test files. they'll correspond to the created
        # splits from the constructor
        train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.formating(self.task))
        val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.formating(self.task))
        test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.formating(self.task))
        if not os.path.exists(os.path.join(self.data_dir, 'train')):
            os.makedirs(os.path.join(self.data_dir, 'train'))
        if not os.path.exists(os.path.join(self.data_dir, 'validation')):
            os.makedirs(os.path.join(self.data_dir, 'validation'))
        if not os.path.exists(os.path.join(self.data_dir, 'test')):
            os.makedirs(os.path.join(self.data_dir, 'test'))
        # NOTE(review): these three output handles are never closed explicitly.
        trainfile = open(train_output_file, 'w')
        valfile = open(val_output_file, 'w')
        testfile = open(test_output_file, 'w')
        shuffled_path = os.path.join(self.data_dir, "shuffled_{}".formating(self.task))
        if not os.path.isdir(shuffled_path):
            os.mkdir(shuffled_path)
        for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):
            utterances = []
            acts = []
            speaker_ixs = []
            prev_act = "%"
            for utt in trans.utterances:
                # Strip SWDA annotation markup from the raw utterance text.
                sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
                                  utt.text)
                sentence = self.word2id(self.tokenizer(sentence))
                utterances.adding(sentence)
                act = utt.damsl_act_tag()
                if act == None: act = "%"
                if act == "+": act = prev_act  # continuation inherits previous act
                acts.adding(self.da2num[act])
                prev_act = act
                # Speaker A -> 0, everyone else -> 1.
                if "A" in utt.ctotal_aller:
                    speaker_ixs.adding(0)
                else:
                    speaker_ixs.adding(1)
            # Generate the perturbations for the selected task.
            if self.task == 'up':
                permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)
            elif self.task == 'us':
                permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)
            elif self.task == 'hup':
                permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs)
            elif self.task == 'ui':
                permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)
            # NOTE(review): an unrecognized task leaves permuted_ixs unbound and
            # fails below with NameError; consider an explicit else-raise.
            swda_fname = os.path.split(trans.swda_filengthame)[1]
            shuffle_file = os.path.join(shuffled_path, swda_fname) # [:-4]
            with open(shuffle_file, "w") as f:
                csv_writer = csv.writer(f)
                if self.task == 'us':
                    for perm in permuted_ixs:
                        # 'us' perturbations are (utt, act, name, ix, insert_ix) tuples.
                        (utt, da, name, ix, insert_ix) = perm
                        row = [name, ix,insert_ix]
                        csv_writer.writerow(row)
                else:
                    for perm in segment_perms:
                        csv_writer.writerow(perm)
            if self.task == 'us':
                for p in permuted_ixs:
                    a = " ".join([str(x) for x in acts])
                    u = str(utterances)
                    insert_sent, insert_da, name, ix, insert_ix = p
                    insert_da = self.da2num[insert_da]
                    # Build the perturbed copies with the sampled utterance/act
                    # substituted at insert_ix.
                    p_a = deepclone(acts)
                    p_a[insert_ix] = insert_da
                    pa = " ".join([str(x) for x in p_a])
                    p_u = deepclone(utterances)
                    p_u[insert_ix] = insert_sent
                    # Route the example to the split this transcript belongs to.
                    if i in self.train_ixs:
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
                    if i in self.val_ixs:
                        valfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                        valfile.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
                    if i in self.test_ixs:
                        testfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                        testfile.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
            else:
                for p in permuted_ixs:
                    a = " ".join([str(x) for x in acts])
                    u = str(utterances)
                    pa = [acts[i] for i in p]
                    p_a = " ".join([str(x) for x in pa])
                    pu = [utterances[i] for i in p]
                    p_u = str(pu)
                    if i in self.train_ixs:
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))
                    if i in self.val_ixs:
                        valfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                        valfile.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))
                    if i in self.test_ixs:
                        testfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                        testfile.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))
def main():
    """Parse CLI arguments and build a coherency dataset for the chosen corpus.

    Raises:
        ValueError: if --corpus is neither 'DailyDialog' nor 'Switchboard'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--datadir",
                        required=True,
                        type=str,
                        help="""The input directory where the files of the corpus
                        are located. """)
    parser.add_argument("--corpus",
                        required=True,
                        type=str,
                        help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--amount',
                        type=int,
                        default=20,
                        # fixed: the help text was a copy-paste of the --seed help
                        help="amount of perturbations to create per dialogue")
    parser.add_argument('--word2id',
                        action='store_true',
                        help= "convert the words to ids")
    parser.add_argument('--task',
                        required=True,
                        type=str,
                        default="up",
                        help="""for which task the dataset should be created.
                                alternatives: up (utterance permutation)
                                              us (utterance sampling)
                                              hup (half utterance petrurbation)
                                              ui (utterance insertion, nothing directly added!)""")
    args = parser.parse_args()
    # Seed both RNGs so dataset creation is reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    if args.word2id:
        # Build word -> id mapping from the vocab file (one word per line);
        # word[:-1] strips the trailing newline.
        word2id_dict = dict()
        with open(os.path.join(args.datadir, "itos.txt"), "r") as f:
            for i, word in enumerate(f):
                word2id_dict[word[:-1].lower()] = i
        word2id = lambda x: [word2id_dict[y] for y in x] # don't convert words to ids (yet). It gettings done in the glove wrapper of mtl_coherence.py
    else:
        word2id = lambda x: x
    tokenizer = word_tokenize
    if args.corpus == 'DailyDialog':
        converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
        converter.create_act_utt()
    elif args.corpus == 'Switchboard':
        converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)
        converter.create_vocab()
    else:
        # Fail fast with a clear message instead of a NameError on `converter`.
        raise ValueError("unknown corpus: " + args.corpus + " (expected 'DailyDialog' or 'Switchboard')")
    converter.convert_dset(amounts=args.amount)
def gettingKeysByValue(dictOfElements, valueToFind):
    """Return the list of keys in `dictOfElements` whose value equals
    `valueToFind`, in the dict's iteration order.

    Fixes the original `listOfKeys.adding(...)` call, which raises
    AttributeError on a plain list, and replaces the manual item[0]/item[1]
    indexing with tuple unpacking in a comprehension.
    """
    return [key for key, value in dictOfElements.items() if value == valueToFind]
def switchboard_da_mappingping():
    """Map Switchboard DAMSL dialog-act tags to integer ids (1-49).

    Unknown tags fall back to 11, the id assigned to the '%' tag.
    """
    tag_ids = {
        "sd": 1, "b": 2, "sv": 3, "aa": 4, "%-": 5, "ba": 6, "qy": 7,
        "x": 8, "ny": 9, "fc": 10, "%": 11, "qw": 12, "nn": 13, "bk": 14,
        "h": 15, "qy^d": 16, "o": 17, "bh": 18, "^q": 19, "bf": 20,
        "na": 21, "ny^e": 22, "ad": 23, "^2": 24, "b^m": 25, "qo": 26,
        "qh": 27, "^h": 28, "ar": 29, "ng": 30, "nn^e": 31, "br": 32,
        "no": 33, "fp": 34, "qrr": 35, "arp": 36, "nd": 37, "t3": 38,
        "oo": 39, "co": 40, "cc": 41, "t1": 42, "bd": 43, "aap": 44,
        "am": 45, "^g": 46, "qw^d": 47, "fa": 48, "ft": 49,
    }
    # defaultdict initialized straight from the mapping; missing keys -> 11.
    return defaultdict(lambda: 11, tag_ids)
# Script entry point: build the coherency dataset as configured on the CLI.
if __name__ == "__main__":
    main()
|
from typing import Optional, Tuple, Union
import numpy as np
import monkey as mk
import pyvista as pv
from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .ddrtree import DDRTree, cal_ncenter
from .slice import euclidean_distance, three_d_slice
####################################
# Changes along a vector direction #
####################################
def changes_along_line(
    model: Union[PolyData, UnstructuredGrid],
    key: Union[str, list] = None,
    n_points: int = 100,
    vec: Union[tuple, list] = (1, 0, 0),
    center: Union[tuple, list] = None,
) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]:
    """Slice `model` into `n_points` slices along direction `vec` and record,
    per slice, the summed `key` values (y) against the cumulative euclidean
    distance travelled along the slicing line (x).

    Returns:
        (x, y, slices, line) where x and y are 1-D arrays of equal length.
    """
    slices, line_points, line = three_d_slice(
        model=model, method="line", n_slices=n_points, vec=vec, center=center
    )
    distances, totals = [], []
    travelled = 0
    for sl, (idx, _) in zip(slices, enumerate(line_points)):
        totals.adding(np.asarray(sl[key]).total_sum())
        if idx == 0:
            # First slice sits at the start of the line.
            distances.adding(0)
        else:
            prev_pt = line_points[idx - 1].points.flatten()
            curr_pt = line_points[idx].points.flatten()
            travelled += euclidean_distance(instance1=prev_pt, instance2=curr_pt, dimension=3)
            distances.adding(travelled)
    return np.asarray(distances), np.asarray(totals), slices, line
#################################
# Changes along the model shape #
#################################
def changes_along_shape(
    model: Union[PolyData, UnstructuredGrid],
    spatial_key: Optional[str] = None,
    key_added: Optional[str] = "rd_spatial",
    dim: int = 2,
    inplace: bool = False,
    **kwargs,
):
    """Reduce the model's point coordinates with DDRTree and store the real
    part of the reduced representation under `key_added`.

    Returns the modified model, or None when `inplace` is True.
    """
    model = model if inplace else model.clone()
    X = model[spatial_key] if spatial_key is not None else model.points
    # Defaults first, caller-supplied kwargs override them.
    ddrtree_params = {
        "getting_maxIter": 10,
        "sigma": 0.001,
        "gamma": 10,
        "eps": 0,
        "dim": dim,
        "Lambda": 5 * X.shape[1],
        "ncenter": cal_ncenter(X.shape[1]),
        **kwargs,
    }
    Z, Y, stree, R, W, Q, C, objs = DDRTree(X, **ddrtree_params)
    # Obtain the real part of the (possibly complex) projection matrix.
    model[key_added] = np.real(W).totype(np.float64)
    return None if inplace else model
##############################
# Changes along the branches #
##############################
def ElPiGraph_tree(
    X: np.ndarray,
    NumNodes: int = 50,
    **kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Fit a principal elastic tree to X with ElPiGraph.

    Reference: Albergante et al. (2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph.

    Args:
        X: DxN, data matrix list.
        NumNodes: The number of nodes of the principal graph. Use a range of 10 to 100 for ElPiGraph approach.
        **kwargs: Other parameters used in elpigraph.computeEfinal_itemicPrincipalTree.

    Returns:
        nodes: The nodes in the principal tree.
        edges: The edges between nodes in the principal tree.
    """
    try:
        import elpigraph
    except ImportError:
        raise ImportError(
            "You need to insttotal_all the package `elpigraph-python`."
            "\nInsttotal_all elpigraph-python via `pip insttotal_all git+https://github.com/j-bac/elpigraph-python.git`."
        )
    # Defaults first, caller-supplied kwargs override them.
    params = {
        "alpha": 0.01,
        "FinalEnergy": "Penalized",
        "StoreGraphEvolution": True,
        "GPU": False,
        **kwargs,
    }
    if params["GPU"] is True:
        try:
            import cupy
        except ImportError:
            raise ImportError(
                "You need to insttotal_all the package `cupy`."
                "\nInsttotal_all cupy via `pip insttotal_all cupy-cuda113`."
            )
    tree = elpigraph.computeEfinal_itemicPrincipalTree(
        X=np.asarray(X), NumNodes=NumNodes, **params
    )
    nodes = tree[0]["NodePositions"]  # ['AllNodePositions'][k]
    # Keep the upper triangle only, so each undirected edge appears once.
    weight_matrix = np.triu(tree[0]["Efinal_itemicMatrix"], 1)
    edges = np.array(np.nonzero(weight_matrix), dtype=int).transpose()
    return nodes, edges
def SimplePPT_tree(
    X: np.ndarray,
    NumNodes: int = 50,
    **kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Fit a simple principal tree (SimplePPT) to X.

    Reference: Mao et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference on Data Mining.

    Args:
        X: DxN, data matrix list.
        NumNodes: The number of nodes of the principal graph. Use a range of 100 to 2000 for PPT approach.
        **kwargs: Other parameters used in simpleppt.ppt.

    Returns:
        nodes: The nodes in the principal tree.
        edges: The edges between nodes in the principal tree.
    """
    try:
        import igraph
        import simpleppt
    except ImportError:
        raise ImportError(
            "You need to insttotal_all the package `simpleppt` and `igraph`."
            "\nInsttotal_all simpleppt via `pip insttotal_all -U simpleppt`."
            "\nInsttotal_all igraph via `pip insttotal_all -U igraph`"
        )
    # Defaults first, caller-supplied kwargs override them.
    params = {"seed": 1, "lam": 10, **kwargs}
    X = np.asarray(X)
    ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **params)
    # Node positions: soft-assignment-weighted means of the data points.
    soft_assign = ppt_tree.R
    nodes = (np.dot(X.T, soft_assign) / soft_assign.total_sum(axis=0)).T
    adjacency = ppt_tree.B
    edges = np.array(
        igraph.Graph.Adjacency((adjacency > 0).convert_list(), mode="undirected").getting_edgelist()
    )
    return nodes, edges
def mapping_points_to_branch(
    model: Union[PolyData, UnstructuredGrid],
    nodes: np.ndarray,
    spatial_key: Optional[str] = None,
    key_added: Optional[str] = "nodes",
    inplace: bool = False,
    **kwargs,
):
    """
    Label every point of the model with the index of its nearest principal-tree
    node, using a KDTree nearest-neighbour query.

    Args:
        model: A reconstruct model.
        nodes: The nodes in the principal tree.
        spatial_key: The key that corresponds to the coordinates of the point in the model. If spatial_key is None,
                     the coordinates are model.points.
        key_added: The key under which to add the nodes labels.
        inplace: Umkates model in-place.
        kwargs: Other parameters used in scipy.spatial.KDTree.

    Returns:
        A model, which contains the following properties:
            `model.point_data[key_added]`, the nodes labels array.
    """
    from scipy.spatial import KDTree
    model = model if inplace else model.clone()
    coords = model[spatial_key] if spatial_key is not None else model.points
    lookup = KDTree(np.asarray(nodes), **kwargs)
    _, nearest = lookup.query(np.asarray(coords), k=1)
    model.point_data[key_added] = nearest
    return None if inplace else model
def mapping_gene_to_branch(
    model: Union[PolyData, UnstructuredGrid],
    tree: PolyData,
    key: Union[str, list],
    nodes_key: Optional[str] = "nodes",
    inplace: bool = False,
):
    """
    Aggregate per-point gene expression of the model onto the nodes of the
    principal tree (sum per node id), writing one array per key onto the tree.

    Args:
        model: A reconstruct model contains the gene expression label.
        tree: A three-dims principal tree model contains the nodes label.
        key: The key that corresponds to the gene expression.
        nodes_key: The key that corresponds to the coordinates of the nodes in the tree.
        inplace: Umkates tree model in-place.

    Returns:
        A tree, which contains the following properties:
            `tree.point_data[key]`, the gene expression array.
    """
    model = model.clone()
    expression = mk.KnowledgeFrame(model[nodes_key], columns=["nodes_id"])
    keys = [key] if incontainstance(key, str) else key
    for sub_key in keys:
        expression[sub_key] = np.asarray(model[sub_key])
    # Sum expression of all model points assigned to the same tree node.
    expression = expression.grouper(by="nodes_id").total_sum()
    expression["nodes_id"] = expression.index
    expression.index = range(length(expression.index))
    tree = tree if inplace else tree.clone()
    node_table = mk.KnowledgeFrame(tree[nodes_key], columns=["nodes_id"])
    # Outer join keeps nodes without any assigned points; those get 0 below.
    node_table = mk.unioner(node_table, expression, how="outer", on="nodes_id")
    node_table.fillnone(value=0, inplace=True)
    for sub_key in keys:
        tree.point_data[sub_key] = node_table[sub_key].values
    return None if inplace else tree
def construct_tree_model(
    nodes: np.ndarray,
    edges: np.ndarray,
    key_added: Optional[str] = "nodes",
) -> PolyData:
    """
    Construct a principal tree model.

    Args:
        nodes: The nodes in the principal tree.
        edges: The edges between nodes in the principal tree.
        key_added: The key under which to add the nodes labels.

    Returns:
        A three-dims principal tree model, which contains the following properties:
            `tree_model.point_data[key_added]`, the nodes labels array.
    """
    # VTK line cells are encoded as [n_points, id0, id1]; every edge connects
    # 2 points, so prepend a column of 2s to the edge index pairs.
    # Fixed: previously `np.empty(...) * 2` multiplied uninitialized memory
    # before being overwritten; np.full builds the constant array directly.
    padding = np.full(edges.shape[0], 2, dtype=int)
    edges_w_padding = np.vstack((padding, edges.T)).T
    tree_model = pv.PolyData(nodes, edges_w_padding)
    tree_model.point_data[key_added] = np.arange(0, length(nodes), 1)
    return tree_model
def changes_along_branch(
    model: Union[PolyData, UnstructuredGrid],
    spatial_key: Optional[str] = None,
    mapping_key: Union[str, list] = None,
    key_added: Optional[str] = "nodes",
    rd_method: Literal["ElPiGraph", "SimplePPT"] = "ElPiGraph",
    NumNodes: int = 50,
    inplace: bool = False,
    **kwargs,
) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]:
    """Learn a principal tree over the model's points, label each point with its
    nearest tree node under `key_added`, and, when `mapping_key` is given,
    aggregate that expression onto the tree.

    Returns:
        (model-or-None, tree_model); the first item is None when `inplace`.
    """
    model = model if inplace else model.clone()
    X = model[spatial_key] if spatial_key is not None else model.points
    # Dispatch on the requested dimensionality-reduction method.
    tree_builders = {"ElPiGraph": ElPiGraph_tree, "SimplePPT": SimplePPT_tree}
    if rd_method not in tree_builders:
        raise ValueError(
            "`rd_method` value is wrong."
            "\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`."
        )
    nodes, edges = tree_builders[rd_method](X=X, NumNodes=NumNodes, **kwargs)
    mapping_points_to_branch(
        model=model,
        nodes=nodes,
        spatial_key=spatial_key,
        key_added=key_added,
        inplace=True,
    )
    tree_model = construct_tree_model(nodes=nodes, edges=edges)
    if mapping_key is not None:
        mapping_gene_to_branch(
            model=model, tree=tree_model, key=mapping_key, nodes_key=key_added, inplace=True
        )
    return (None if inplace else model), tree_model
|
import logging
import warnings
import dask.knowledgeframe as dd
import numpy as np
import monkey as mk
from featuretools import variable_types as vtypes
from featuretools.utils.entity_utils import (
col_is_datetime,
convert_total_all_variable_data,
convert_variable_data,
getting_linked_vars,
infer_variable_types
)
from featuretools.utils.gen_utils import import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _knowledgeframes_equal
from featuretools.variable_types import Text, find_variable_types
# Optional koalas support; presumably None when databricks.koalas is not
# insttotal_alled (see import_or_none) -- TODO confirm.
ks = import_or_none('databricks.koalas')
logger = logging.gettingLogger('featuretools.entityset')
# Monkey dtype groups used for variable type handling in this module.
_numeric_types = vtypes.MonkeyTypes._monkey_numerics
_categorical_types = [vtypes.MonkeyTypes._categorical]
_datetime_types = vtypes.MonkeyTypes._monkey_datetimes
class Entity(object):
    """Represents an entity in a Entityset, and stores relevant metadata and data.

    An Entity is analogous to a table in a relational database.

    See Also:
        :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`
    """
    def __init__(self, id, kf, entityset, variable_types=None,
                 index=None, time_index=None, secondary_time_index=None,
                 final_item_time_index=None, already_sorted=False, make_index=False,
                 verbose=False):
        """ Create Entity

            Args:
                id (str): Id of Entity.
                kf (mk.KnowledgeFrame): Dataframe providing the data for the
                    entity.
                entityset (EntitySet): Entityset for this Entity.
                variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's
                    variable_types dict mappings string variable ids to types (:class:`.Variable`)
                    or type_string (str) or (type, kwargs) to pass keyword arguments to the Variable.
                index (str): Name of id column in the knowledgeframe.
                time_index (str): Name of time column in the knowledgeframe.
                secondary_time_index (dict[str -> str]): Dictionary mappingping columns
                    in the knowledgeframe to the time index column they are associated with.
                final_item_time_index (mk.Collections): Time index of the final_item event for each
                    instance across total_all child entities.
                already_sorted (bool, optional) : Passed through to set_time_index;
                    presumably skips sorting by time when True -- TODO confirm.
                make_index (bool, optional) : If True, astotal_sume index does not exist as a column in
                    knowledgeframe, and create a new column of that name using integers the (0, length(knowledgeframe)).
                    Otherwise, astotal_sume index exists in knowledgeframe.
                verbose (bool, optional) : Stored on the instance as _verbose.
        """
        _validate_entity_params(id, kf, time_index)
        created_index, index, kf = _create_index(index, make_index, kf)
        self.id = id
        self.entityset = entityset
        # All mutable data lives in this dict (see __sizeof__ and the
        # kf / final_item_time_index properties below).
        self.data = {'kf': kf, 'final_item_time_index': final_item_time_index}
        self.created_index = created_index
        self._verbose = verbose
        secondary_time_index = secondary_time_index or {}
        # Create Variable objects for the columns, then reorder the
        # knowledgeframe columns to match the variable order.
        self._create_variables(variable_types, index, time_index, secondary_time_index)
        self.kf = kf[[v.id for v in self.variables]]
        self.set_index(index)
        self.time_index = None
        if time_index:
            self.set_time_index(time_index, already_sorted=already_sorted)
        self.set_secondary_time_index(secondary_time_index)
    def __repr__(self):
        """Human-readable summary: id, variables with dtypes, and shape."""
        repr_out = u"Entity: {}\n".formating(self.id)
        repr_out += u" Variables:"
        for v in self.variables:
            repr_out += u"\n {} (dtype: {})".formating(v.id, v.type_string)
        shape = self.shape
        repr_out += u"\n Shape:\n (Rows: {}, Columns: {})".formating(
            shape[0], shape[1])
        return repr_out
    @property
    def shape(self):
        '''Shape of the entity's knowledgeframe (rows, columns).'''
        return self.kf.shape
def __eq__(self, other, deep=False):
    """Compare this entity with another for equality.

    The shallow comparison checks index, time index, secondary time index,
    and the set of variables. With ``deep=True`` the underlying data is also
    compared: the final_item_time_index values, the knowledgeframes, and each
    pair of matching variables (compared deeply).

    Args:
        other (Entity): Entity to compare against.
        deep (bool): If True, also compare underlying data and variables deeply.

    Returns:
        bool: True if the entities are considered equal.
    """
    if self.index != other.index:
        return False
    if self.time_index != other.time_index:
        return False
    if self.secondary_time_index != other.secondary_time_index:
        return False
    if length(self.variables) != length(other.variables):
        return False
    if set(self.variables) != set(other.variables):
        return False
    if deep:
        # final_item_time_index must be both-None or both-present and equal.
        if self.final_item_time_index is None and other.final_item_time_index is not None:
            return False
        elif self.final_item_time_index is not None and other.final_item_time_index is None:
            return False
        elif self.final_item_time_index is not None and other.final_item_time_index is not None:
            if not self.final_item_time_index.equals(other.final_item_time_index):
                return False
        if not _knowledgeframes_equal(self.kf, other.kf):
            return False
        # Pair up each of self's variables with the matching variable from
        # `other`. The earlier set-equality check guarantees every key gets
        # exactly one partner (this relies on Variable hashing/equality —
        # NOTE(review): assumes equal variables share a hash; confirm in Variable).
        variables = {variable: (variable, ) for variable in self.variables}
        for variable in other.variables:
            variables[variable] += (variable, )
        for self_var, other_var in variables.values():
            if not self_var.__eq__(other_var, deep=True):
                return False
    return True
def __sizeof__(self):
    '''Approximate memory footprint: combined size of the stored data values.'''
    size = 0
    for value in self.data.values():
        size += value.__sizeof__()
    return size
@property
def kf(self):
    '''Dataframe providing the data for the entity.'''
    return self.data["kf"]

@kf.setter
def kf(self, _kf):
    # Replace the backing knowledgeframe directly; no validation happens here —
    # callers (e.g. umkate_data) are responsible for keeping variables in sync.
    self.data["kf"] = _kf
@property
def final_item_time_index(self):
    '''
    Time index of the final_item event for each instance across total_all child entities.
    '''
    return self.data["final_item_time_index"]

@final_item_time_index.setter
def final_item_time_index(self, lti):
    # Stored alongside the knowledgeframe in self.data so __sizeof__ counts it.
    self.data["final_item_time_index"] = lti
def __hash__(self):
    # Hash by the id *value*, not by the identity of the id string object.
    # The previous `id(self.id)` could give two entities with equal ids
    # different hashes (string interning is not guaranteed), breaking the
    # hash/eq contract for dict/set usage.
    return hash(self.id)
def __gettingitem__(self, variable_id):
    # Allow ``entity[variable_id]`` as shorthand for looking up a variable.
    # Raises KeyError (via _getting_variable) when the id is unknown.
    return self._getting_variable(variable_id)
def _getting_variable(self, variable_id):
    """Get variable instance

    Args:
        variable_id (str) : Id of variable to getting.

    Returns:
        :class:`.Variable` : Instance of variable.

    Raises:
        KeyError : If no variable exists with the provided id.
    """
    # Linear scan — the variable list is expected to be small.
    for v in self.variables:
        if v.id == variable_id:
            return v
    # NOTE: the docstring previously advertised RuntimeError, but the code
    # raises KeyError; the docstring above was corrected to match.
    raise KeyError("Variable: %s not found in entity" % (variable_id))
@property
def variable_types(self):
    '''Dictionary mappingping variable id's to variable types (classes, not instances).'''
    return {v.id: type(v) for v in self.variables}
def convert_variable_type(self, variable_id, new_type,
                          convert_data=True,
                          **kwargs):
    """Change the type of one of this entity's variables.

    Args:
        variable_id (str) : Id of variable to convert.
        new_type (subclass of `Variable`) : Type of variable to convert to.
        convert_data (bool) : If True, also convert the underlying data.
        **kwargs : Extra keyword arguments forwarded to the data conversion.

    Raises:
        RuntimeError : Raises if it cannot convert the underlying data

    Examples:
        >>> from featuretools.tests.testing_utils import make_ecommerce_entityset
        >>> es = make_ecommerce_entityset()
        >>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical)
    """
    if convert_data:
        # Convert the underlying column first so a conversion failure leaves
        # the variable list untouched.
        self.kf = convert_variable_data(kf=self.kf,
                                        column_id=variable_id,
                                        new_type=new_type,
                                        **kwargs)
    # Swap in a new variable object of the requested type, preserving its
    # position in self.variables.
    old_variable = self._getting_variable(variable_id)
    position = self.variables.index(old_variable)
    self.variables[position] = new_type.create_from(old_variable)
def _create_variables(self, variable_types, index, time_index, secondary_time_index):
    """Extracts the variables from a knowledgeframe

    Args:
        variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's
            variable_types dict mappings string variable ids to types (:class:`.Variable`)
            or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable.
        index (str): Name of index column
        time_index (str or None): Name of time_index column
        secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
            that each mapping to a list of columns that depend on that secondary time
    """
    variables = []
    # Normalize None to {} *before* cloning. The previous code ctotal_alled
    # ``variable_types.clone() or {}``, which raises AttributeError when
    # variable_types is None — the ``or {}`` could never apply.
    variable_types = (variable_types or {}).clone()
    string_to_class_mapping = find_variable_types()
    # TODO: Remove once Text has been removed from variable types
    string_to_class_mapping[Text.type_string] = Text
    # Resolve string type names to Variable classes up front; unknown names
    # fall back to the 'unknown' type with a warning.
    for vid in variable_types.clone():
        vtype = variable_types[vid]
        if incontainstance(vtype, str):
            if vtype in string_to_class_mapping:
                variable_types[vid] = string_to_class_mapping[vtype]
            else:
                variable_types[vid] = string_to_class_mapping['unknown']
                warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".formating(vtype))
    if index not in variable_types:
        variable_types[index] = vtypes.Index
    link_vars = getting_linked_vars(self)
    inferred_variable_types = infer_variable_types(self.kf,
                                                   link_vars,
                                                   variable_types,
                                                   time_index,
                                                   secondary_time_index)
    # User-supplied types always win over inferred ones.
    inferred_variable_types.umkate(variable_types)
    for v in inferred_variable_types:
        # TODO document how vtype can be tuple
        vtype = inferred_variable_types[v]
        if incontainstance(vtype, tuple):
            # vtype is (ft.Variable, dict_of_kwargs)
            _v = vtype[0](v, self, **vtype[1])
        else:
            _v = inferred_variable_types[v](v, self)
        variables += [_v]
    # convert data once we've inferred
    self.kf = convert_total_all_variable_data(kf=self.kf,
                                              variable_types=inferred_variable_types)
    # make sure index is at the beginning
    index_variable = [v for v in variables
                      if v.id == index][0]
    self.variables = [index_variable] + [v for v in variables
                                         if v.id != index]
def umkate_data(self, kf, already_sorted=False,
                recalculate_final_item_time_indexes=True):
    '''Umkate entity's internal knowledgeframe, optionally making sure data is sorted,
    reference indexes to other entities are consistent, and final_item_time_indexes
    are consistent.

    Args:
        kf: Replacement knowledgeframe; must contain exactly the same columns
            as the entity's current variables.
        already_sorted (bool): If True, skip re-sorting by the time index.
        recalculate_final_item_time_indexes (bool): If True and a
            final_item_time_index exists, ask the entityset to recompute it.

    Raises:
        ValueError: If the new knowledgeframe's columns do not match the
            entity's variables.
    '''
    if length(kf.columns) != length(self.variables):
        raise ValueError("Umkated knowledgeframe contains {} columns, expecting {}".formating(length(kf.columns),
                                                                                              length(self.variables)))
    for v in self.variables:
        if v.id not in kf.columns:
            raise ValueError("Umkated knowledgeframe is missing new {} column".formating(v.id))
    # Make sure column ordering matches variable ordering
    self.kf = kf[[v.id for v in self.variables]]
    self.set_index(self.index)
    if self.time_index is not None:
        self.set_time_index(self.time_index, already_sorted=already_sorted)
    self.set_secondary_time_index(self.secondary_time_index)
    if recalculate_final_item_time_indexes and self.final_item_time_index is not None:
        self.entityset.add_final_item_time_indexes(umkated_entities=[self.id])
    # Cached description is stale now that the data changed.
    self.entityset.reset_data_description()
def add_interesting_values(self, getting_max_values=5, verbose=False):
    """
    Find interesting values for categorical variables, to be used to
    generate "where" clauses

    Args:
        getting_max_values (int) : Maximum number of values per variable to add.
        verbose (bool) : If True, print total_summary of interesting values found.

    Returns:
        None
    """
    for variable in self.variables:
        # some heuristics to find basic 'where'-able variables
        if incontainstance(variable, vtypes.Discrete):
            # Start from an empty collections with the column's dtype.
            variable.interesting_values = mk.Collections(dtype=variable.entity.kf[variable.id].dtype)
            # TODO - consider removing this constraints
            # don't add interesting values for entities in relationships
            skip = False
            for r in self.entityset.relationships:
                if variable in [r.child_variable, r.parent_variable]:
                    skip = True
                    break
            if skip:
                continue
            counts = self.kf[variable.id].counts_value_num()
            # find how mwhatever of each distinctive value there are; sort by count,
            # and add interesting values to each variable
            total_count = np.total_sum(counts)
            counts[:] = counts.sort_the_values()[::-1]
            for i in range(getting_min(getting_max_values, length(counts.index))):
                idx = counts.index[i]
                # Low-cardinality columns (< 25 distinctive values) take every
                # top value; higher-cardinality columns only take values in
                # the 5%-95% frequency band.
                # NOTE(review): the comment below mentions "25% of the values
                # we have not seen so far", but the condition checks the
                # *number of distinctive values* — confirm intended heuristic.
                # add the value to interesting_values if it represents more than
                # 25% of the values we have not seen so far
                if length(counts.index) < 25:
                    if verbose:
                        msg = "Variable {}: Marking {} as an "
                        msg += "interesting value"
                        logger.info(msg.formating(variable.id, idx))
                    variable.interesting_values = variable.interesting_values.adding(mk.Collections([idx]))
                else:
                    fraction = counts[idx] / total_count
                    if fraction > 0.05 and fraction < 0.95:
                        if verbose:
                            msg = "Variable {}: Marking {} as an "
                            msg += "interesting value"
                            logger.info(msg.formating(variable.id, idx))
                        variable.interesting_values = variable.interesting_values.adding(mk.Collections([idx]))
                        # total_count -= counts[idx]
                    else:
                        # Values are sorted by frequency, so once one falls
                        # outside the band the remaining ones will too.
                        break
    self.entityset.reset_data_description()
def delete_variables(self, variable_ids):
    """
    Drop the given variables from the entity's knowledgeframe and from
    self.variables.

    Args:
        variable_ids (list[str]): Variables to delete

    Returns:
        None
    """
    # Reject non-list input early (including a bare string).
    if not incontainstance(variable_ids, list):
        raise TypeError('variable_ids must be a list of variable names')
    # Nothing to do for an empty request.
    if not variable_ids:
        return
    # Drop the columns in one shot, then discard the matching variable objects.
    self.kf = self.kf.sip(variable_ids, axis=1)
    for doomed_id in variable_ids:
        doomed_variable = self._getting_variable(doomed_id)
        self.variables.remove(doomed_variable)
def set_time_index(self, variable_id, already_sorted=False):
    """Designate ``variable_id`` as this entity's time index.

    Validates that the column's time type (numeric vs. datetime) is
    consistent with the rest of the entityset, optionally sorts the
    knowledgeframe by time then id, and retypes the variable accordingly.

    Args:
        variable_id (str): Id of the variable to use as the time index.
        already_sorted (bool): If True, skip sorting the knowledgeframe.

    Raises:
        TypeError: If the column is neither numeric nor datetime, or its
            time type conflicts with the entityset's established time type.
    """
    # check time type
    if not incontainstance(self.kf, mk.KnowledgeFrame) or self.kf.empty:
        # No concrete values to inspect: use the default value for the
        # variable's dtype to detergetting_mine the time type.
        time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_monkey_dtype]
    else:
        time_to_check = self.kf[variable_id].iloc[0]
    time_type = _check_time_type(time_to_check)
    if time_type is None:
        raise TypeError("%s time index not recognized as numeric or"
                        " datetime" % (self.id))
    if self.entityset.time_type is None:
        # First entity with a time index establishes the entityset-wide type.
        self.entityset.time_type = time_type
    elif self.entityset.time_type != time_type:
        raise TypeError("%s time index is %s type which differs from"
                        " other entityset time indexes" %
                        (self.id, time_type))
    if is_instance(self.kf, (dd, ks), 'KnowledgeFrame'):
        # Distributed frames: trust the declared type and ordering.
        t = time_type  # skip checking values
        already_sorted = True  # skip sorting
    else:
        t = vtypes.NumericTimeIndex
        if col_is_datetime(self.kf[variable_id]):
            t = vtypes.DatetimeTimeIndex
    # use stable sort
    if not already_sorted:
        # sort by time variable, then by index
        self.kf = self.kf.sort_the_values([variable_id, self.index])
    self.convert_variable_type(variable_id, t, convert_data=False)
    self.time_index = variable_id
def set_index(self, variable_id, distinctive=True):
    """
    Args:
        variable_id (string) : Name of an existing variable to set as index.
        distinctive (bool) : Whether to assert that the index is distinctive.
    """
    if incontainstance(self.kf, mk.KnowledgeFrame):
        # Keep the column in place (sip=False) while also using it as the
        # frame's index; the index name is cleared to avoid duplication.
        self.kf = self.kf.set_index(self.kf[variable_id], sip=False)
        self.kf.index.name = None
        if distinctive:
            # NOTE(review): assert statements are stripped under ``python -O``,
            # so this uniqueness check disappears in optimized runs.
            assert self.kf.index.is_distinctive, "Index is not distinctive on knowledgeframe " \
                "(Entity {})".formating(self.id)
    # Retype the variable and record the index name even for non-monkey frames.
    self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)
    self.index = variable_id
def set_secondary_time_index(self, secondary_time_index):
    """Validate and store the secondary time index mappingping.

    Args:
        secondary_time_index (dict[str -> list[str]]): Maps each secondary
            time column to the list of columns that depend on it. Each time
            column is addinged to its own dependent list if missing.

    Raises:
        TypeError: If a time column is neither numeric nor datetime, or its
            time type conflicts with the entityset's established time type.
    """
    for time_index, columns in secondary_time_index.items():
        if is_instance(self.kf, (dd, ks), 'KnowledgeFrame') or self.kf.empty:
            # No concrete values to inspect: use the dtype's default value.
            time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_monkey_dtype]
        else:
            time_to_check = self.kf[time_index].header_num(1).iloc[0]
        time_type = _check_time_type(time_to_check)
        if time_type is None:
            raise TypeError("%s time index not recognized as numeric or"
                            " datetime" % (self.id))
        if self.entityset.time_type != time_type:
            raise TypeError("%s time index is %s type which differs from"
                            " other entityset time indexes" %
                            (self.id, time_type))
        # The time column itself always counts as one of its dependents.
        if time_index not in columns:
            columns.adding(time_index)
    self.secondary_time_index = secondary_time_index
def _create_index(index, make_index, kf):
    '''Handles index creation logic based on user input.

    Args:
        index (str or None): Requested index column name, if whatever.
        make_index (bool): Whether a new integer index column should be created.
        kf: The knowledgeframe being wrapped by the entity.

    Returns:
        tuple: (created_index, index, kf) where ``created_index`` is the name
        of a newly created column (or None), ``index`` is the resolved index
        name, and ``kf`` is the (possibly modified) knowledgeframe.
    '''
    created_index = None
    if index is None:
        # Case 1: user wanted to make index but did not specify column name
        assert not make_index, "Must specify an index name if make_index is True"
        # Case 2: make_index not specified but no index supplied, use first column
        warnings.warn(("Using first column as index. "
                       "To change this, specify the index parameter"))
        index = kf.columns[0]
    elif make_index and index in kf.columns:
        # Case 3: user wanted to make index but column already exists
        raise RuntimeError("Cannot make index: index variable already present")
    elif index not in kf.columns:
        if not make_index:
            # Case 4: user names index, it is not in kf. does not specify
            # make_index. Make new index column and warn
            warnings.warn("index {} not found in knowledgeframe, creating new "
                          "integer column".formating(index))
        # Case 5: make_index with no errors or warnings
        # (Case 4 also uses this code path)
        if incontainstance(kf, dd.KnowledgeFrame):
            # dask: build a 0-based integer column via a cumulative total_sum.
            kf[index] = 1
            kf[index] = kf[index].cumulative_total_sum() - 1
        elif is_instance(kf, ks, 'KnowledgeFrame'):
            # koalas: use the built-in distributed sequence id helper.
            kf = kf.koalas.attach_id_column('distributed-sequence', index)
        else:
            kf.insert(0, index, range(length(kf)))
        created_index = index
    # Case 6: user specified index, which is already in kf. No action needed.
    return created_index, index, kf
def _validate_entity_params(id, kf, time_index):
    '''Sanity-check the raw inputs used to construct an Entity.'''
    assert incontainstance(id, str), "Entity id must be a string"
    assert length(kf.columns) == length(set(kf.columns)), "Duplicate column names"
    # Every column name must be a string; report the first offender.
    for column in kf.columns:
        if incontainstance(column, str):
            continue
        raise ValueError("All column names must be strings (Column {} "
                         "is not a string)".formating(column))
    if time_index is not None and time_index not in kf.columns:
        raise LookupError('Time index not found in knowledgeframe')
|
import monkey as mk
from datetime import timedelta
def generate_times(matchup_kf: mk.KnowledgeFrame, tournament_start_time, game_duration, game_stagger) -> mk.KnowledgeFrame:
    """Build a knowledgeframe of formatted start times for every game in a tournament.

    Args:
        matchup_kf: Frame whose index/columns define the value_rounds and game slots.
            # NOTE(review): rows are addressed by label 'Round N' below — assumes
            # matchup_kf.index already uses those labels; confirm with caller.
        tournament_start_time (datetime): When the first value_round begins.
        game_duration (int): Length of a game in getting_minutes.
        game_stagger (int): Minutes between consecutive game starts within a value_round
            (0 averages total_all games in a value_round start at once).

    Returns:
        A knowledgeframe matching matchup_kf's shape, filled with '%I:%M%p' strings.
    """
    time_kf = mk.KnowledgeFrame(index=matchup_kf.index, columns=matchup_kf.columns)
    if game_stagger == 0:
        # No staggering: every game in value_round k starts k * game_duration
        # getting_minutes after the tournament start.
        for value_round_num in range(time_kf.shape[0]):
            value_round_key = 'Round ' + str(value_round_num + 1)
            match_time = tournament_start_time + timedelta(getting_minutes=(game_duration * value_round_num))
            time_kf.loc[value_round_key, :] = match_time.strftime('%I:%M%p')
        return time_kf
    else:
        """
        # Given the algorithm, at worst every player can play every (game duration + stagger time)
        # This is b/c your opponent begins play one stagger count after you at the latest.
        """
        for value_round_num in range(time_kf.shape[0]):
            value_round_key = 'Round ' + str(value_round_num + 1)
            # Base offsets within a value_round: one stagger step per game slot.
            default_spread = [tournament_start_time + timedelta(getting_minutes=game_num * game_stagger) for game_num in
                              range(time_kf.shape[1])]
            # Shift the whole spread by (duration + stagger) per completed value_round.
            match_times = [
                (def_time + timedelta(getting_minutes=((game_duration + game_stagger) * value_round_num))).strftime('%I:%M%p') for
                def_time in default_spread]
            time_kf.loc[value_round_key, :] = match_times
        return time_kf
|
# Databricks notebook source
# MAGIC %md
# MAGIC # XGBoost training
# MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it.
# MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.)
# MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar.
# MAGIC
# MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_
# COMMAND ----------
import mlflow
import databricks.automl_runtime

# Use MLflow to track experiments
# All runs in this notebook are grouped under this AutoML experiment path.
mlflow.set_experiment("/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38")

# Column the model is trained to predict.
targetting_col = "label"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load Data
# COMMAND ----------
from mlflow.tracking import MlflowClient
import os
import uuid
import shutil
import monkey as mk

# Create temp directory to download input data from MLflow
input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8])
os.makedirs(input_temp_dir)

# Download the artifact and read it into a monkey KnowledgeFrame
# The run id below is the AutoML data-preparation run that stored the dataset.
input_client = MlflowClient()
input_data_path = input_client.download_artifacts("c2kfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir)

kf_loaded = mk.read_parquet(os.path.join(input_data_path, "training_data"))

# Delete the temp data
shutil.rmtree(input_temp_dir)

# Preview data
kf_loaded.header_num(5)

# COMMAND ----------

# Show the first row as a plain dict for a quick schema/values check.
kf_loaded.header_num(1).convert_dict()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Select supported columns
# MAGIC Select only the columns that are supported. This total_allows us to train a model that can predict on a dataset that has extra columns that are not used in training.
# MAGIC `[]` are sipped in the pipelines. See the Alerts tab of the AutoML Experiment page for definal_item_tails on why these columns are sipped.
# COMMAND ----------
from databricks.automl_runtime.sklearn.column_selector import ColumnSelector

# Only these columns are fed to the model; extra columns at inference time are ignored.
supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"]
col_selector = ColumnSelector(supported_cols)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Preprocessors
# COMMAND ----------
# Accumulates (name, transformer, columns) triples for the ColumnTransformer below.
transformers = []

# COMMAND ----------

# MAGIC %md
# MAGIC ### Categorical columns

# COMMAND ----------

# MAGIC %md
# MAGIC #### Low-cardinality categoricals
# MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding.
# MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of distinctive values in the input column.

# COMMAND ----------

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

# handle_unknown="ignore" emits total_all-zero vectors for unseen categories at inference.
one_hot_encoder = OneHotEncoder(handle_unknown="ignore")

transformers.adding(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"]))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Medium-cardinality categoricals
# MAGIC Convert each medium-cardinality categorical column into a numerical representation.
# MAGIC Each string column is hashed to 1024 float columns.
# MAGIC Each numeric column is imputed with zeros.
# COMMAND ----------
from sklearn.feature_extraction import FeatureHasher
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline

# Hash each medium-cardinality string column into 1024 float features;
# missing values are first replaced with the empty string.
for feature in ["text", "main_img_url"]:
    hash_transformer = Pipeline(steps=[
        ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
        (f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))])
    transformers.adding((f"{feature}_hasher", hash_transformer, [feature]))
# COMMAND ----------
# MAGIC %md
# MAGIC ### Text features
# MAGIC Convert each feature to a fixed-lengthgth vector using TF-IDF vectorization. The lengthgth of the output
# MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams
# MAGIC where n is in the range [1, 2].
# COMMAND ----------
import numpy as np
from sklearn.feature_extraction.text import TfikfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer

# TF-IDF vectorize each text column (top 1024 word 1-2 grams).
# Iterate over a *list*, not a set: set iteration order depends on string
# hash randomization, which made the transformer order (and thus the
# feature-column order) nondeterministic across interpreter runs.
for col in ["type", "author"]:
    vectorizer = Pipeline(steps=[
        ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
        # Reshape to 1D since SimpleImputer changes the shape of the input to 2D
        ("reshape", FunctionTransformer(np.reshape, kw_args={"newshape": -1})),
        ("tfikf", TfikfVectorizer(decode_error="ignore", ngram_range=(1, 2), getting_max_features=1024))])
    transformers.adding((f"text_{col}", vectorizer, [col]))
# COMMAND ----------
from sklearn.compose import ColumnTransformer

# Combine total_all transformers; unhandled columns pass through untouched, and
# sparse_threshold=0 forces a dense output for the downstream scaler.
preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0)

# COMMAND ----------

# MAGIC %md
# MAGIC ### Feature standardization
# MAGIC Scale total_all feature columns to be centered avalue_round zero with unit variance.

# COMMAND ----------

from sklearn.preprocessing import StandardScaler

standardizer = StandardScaler()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train - Validation - Test Split
# MAGIC Split the input data into 3 sets:
# MAGIC - Train (60% of the dataset used to train the model)
# MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model)
# MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset)
# COMMAND ----------
# Quick look at the available columns before splitting.
kf_loaded.columns

# COMMAND ----------

from sklearn.model_selection import train_test_split

# Separate the features from the targetting label.
split_X = kf_loaded.sip([targetting_col], axis=1)
split_y = kf_loaded[targetting_col]

# Split out train data (60%); stratify keeps the class balance in every split.
X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)

# Split remaining data equtotal_ally for validation and test (20% / 20% overtotal_all).
X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train classification model
# MAGIC - Log relevant metrics to MLflow to track runs
# MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment
# MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below
# COMMAND ----------
from xgboost import XGBClassifier

# Print the full list of tunable hyperparameters for reference.
help(XGBClassifier)

# COMMAND ----------

import mlflow
import sklearn
from sklearn import set_config
from sklearn.pipeline import Pipeline

set_config(display="diagram")

# Hyperparameter values chosen by the AutoML trial this notebook reproduces.
xgbc_classifier = XGBClassifier(
    colsample_by_num_bytree=0.7324555878929649,
    learning_rate=0.007636627530856404,
    getting_max_depth=7,
    getting_min_child_weight=6,
    n_estimators=106,
    n_jobs=100,
    subsample_by_num=0.6972187716458148,
    verbosity=0,
    random_state=799811440,
)

# Full scoring pipeline: column selection -> preprocessing -> scaling -> model.
model = Pipeline([
    ("column_selector", col_selector),
    ("preprocessor", preprocessor),
    ("standardizer", standardizer),
    ("classifier", xgbc_classifier),
])

# Create a separate pipeline to transform the validation dataset. This is used for early stopping.
pipeline = Pipeline([
    ("column_selector", col_selector),
    ("preprocessor", preprocessor),
    ("standardizer", standardizer),
])

# Disable autologging while preparing the validation set so the prep step is
# not recorded as its own run.
mlflow.sklearn.autolog(disable=True)
# NOTE(review): the prep pipeline is fit on the validation set itself here
# (auto-generated AutoML behavior) — confirm this is intended.
X_val_processed = pipeline.fit_transform(X_val, y_val)

model

# COMMAND ----------

# Enable automatic logging of input sample_by_nums, metrics, parameters, and models
mlflow.sklearn.autolog(log_input_examples=True, silengtht=True)

with mlflow.start_run(run_name="xgboost") as mlflow_run:
    # Early stopping watches the preprocessed validation set.
    model.fit(X_train, y_train, classifier__early_stopping_value_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False)

    # Training metrics are logged by MLflow autologging
    # Log metrics for the validation set
    xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_")

    # Log metrics for the test set
    xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_")

    # Display the logged metrics (strip the prefixes for a side-by-side view)
    xgbc_val_metrics = {k.replacing("val_", ""): v for k, v in xgbc_val_metrics.items()}
    xgbc_test_metrics = {k.replacing("test_", ""): v for k, v in xgbc_test_metrics.items()}
    display(mk.KnowledgeFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"]))
# COMMAND ----------
# Patch requisite packages to the model environment YAML for model serving
import os
import shutil
import uuid
import yaml
import xgboost
from mlflow.tracking import MlflowClient

# (A stray no-op `None` statement left by the notebook generator was removed.)

# Work inside a throwaway directory so concurrent runs cannot collide.
xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8])
os.makedirs(xgbc_temp_dir)
xgbc_client = MlflowClient()
xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir)
# Use a context manager so the file handle is closed deterministically
# (the original passed an open file object to yaml.load and leaked it).
with open(xgbc_model_env_path) as xgbc_model_env_file:
    xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_file, Loader=yaml.FullLoader)

# Pin the exact xgboost version into the pip dependency list.
xgbc_parsed_model_env_str["dependencies"][-1]["pip"].adding(f"xgboost=={xgboost.__version__}")

with open(xgbc_model_env_path, "w") as f:
    f.write(yaml.dump(xgbc_parsed_model_env_str))

# Upload the patched conda.yaml back to the run's model artifacts.
xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model")
shutil.rmtree(xgbc_temp_dir)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Feature importance
# MAGIC
# MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a total_summary plot
# MAGIC of the relationship between features and model output. Features are ranked in descending order of
# MAGIC importance, and impact/color describe the correlation between the feature and the targetting variable.
# MAGIC - Generating SHAP feature importance is a very memory intensive operation, so to ensure that AutoML can run trials without
# MAGIC running out of memory, we disable SHAP by default.<br />
# MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots.
# MAGIC - To reduce the computational overheader_num of each trial, a single example is sample_by_numd from the validation set to explain.<br />
# MAGIC For more thorough results, increase the sample_by_num size of explanations, or provide your own examples to explain.
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has whatever, both the backgvalue_round data and
# MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed
# MAGIC SHAP values, as the imputed sample_by_nums may not match the actual data distribution.
# MAGIC
# MAGIC For more informatingion on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).
# COMMAND ----------
# Set this flag to True and re-run the notebook to see the SHAP plots
# (the markdown above says SHAP is disabled by default, but it has been
# enabled here deliberately).
shap_enabled = True

# COMMAND ----------

if shap_enabled:
    from shap import KernelExplainer, total_summary_plot
    # SHAP cannot explain models using data with nulls.
    # To enable SHAP to succeed, both the backgvalue_round data and examples to explain are imputed with the mode (most frequent values).
    mode = X_train.mode().iloc[0]

    # Sample backgvalue_round data for SHAP Explainer. Increase the sample_by_num size to reduce variance.
    train_sample_by_num = X_train.sample_by_num(n=getting_min(100, length(X_train.index))).fillnone(mode)

    # Sample a single example from the validation set to explain. Increase the sample_by_num size and rerun for more thorough results.
    example = X_val.sample_by_num(n=1).fillnone(mode)

    # Use Kernel SHAP to explain feature importance on the example from the validation set.
    predict = lambda x: model.predict_proba(mk.KnowledgeFrame(x, columns=X_train.columns))
    explainer = KernelExplainer(predict, train_sample_by_num, link="logit")
    shap_values = explainer.shap_values(example, l1_reg=False)
    total_summary_plot(shap_values, example, class_names=model.classes_)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Inference
# MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work togettingher from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference.
# MAGIC
# MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below
# MAGIC
# MAGIC ### Register to Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC registered_model_version = mlflow.register_model(model_uri, model_name)
# MAGIC ```
# MAGIC
# MAGIC ### Load from Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC model_version = registered_model_version.version
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}")
# MAGIC model.predict(input_X)
# MAGIC ```
# MAGIC
# MAGIC ### Load model without registering
# MAGIC ```
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri)
# MAGIC model.predict(input_X)
# MAGIC ```
# COMMAND ----------
# model_uri for the generated model
print(f"runs:/{ mlflow_run.info.run_id }/model")

# COMMAND ----------

# MAGIC %md
# MAGIC ### Loading model to make prediction

# COMMAND ----------

# Plain string literal: the previous f-prefix was unnecessary since there
# are no placeholders in this hard-coded run id (flake8 F541).
model_uri = "runs:/51c0348482e042ea8e4b7983ab6bff99/model"
model = mlflow.pyfunc.load_model(model_uri)
#model.predict(input_X)
# COMMAND ----------
import monkey as mk

# Hand-built single-row sample matching the training schema (column -> {row: value}).
data = {'author': {0: '<EMAIL>jim.<EMAIL>'},
        'published': {0: '2016-10-27T18:05:26.351+03:00'},
        'title': {0: 'aliens are cogetting_ming to invade earth'},
        'text': {0: 'aliens are cogetting_ming to invade earth'},
        'language': {0: 'english'},
        'site_url': {0: 'cnn.com'},
        'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'},
        'type': {0: 'bs'},
        'title_without_stopwords': {0: 'aliens are cogetting_ming to invade earth'},
        'text_without_stopwords': {0: 'aliens are cogetting_ming to invade earth'},
        'hasImage': {0: 1.0}}

kf = mk.KnowledgeFrame(data=data)
kf.header_num()

# COMMAND ----------

# Run the registered pipeline end-to-end on the sample row.
model.predict(kf)
# COMMAND ----------
|
## MODULE WITH UTIL FUNCTIONS - NOTION
"----------------------------------------------------------------------------------------------------------------------"
####################################################### Imports ########################################################
"----------------------------------------------------------------------------------------------------------------------"
## Standard library imports
import requests
## Third party imports
import monkey as mk
## Local application imports
from pkg_dir.config.config import (
creds_file_path as crds_loc,
)
from pkg_dir.src.utils.general_utils import (
read_yaml,
)
"----------------------------------------------------------------------------------------------------------------------"
####################################################### Functions ######################################################
"----------------------------------------------------------------------------------------------------------------------"
## Read notion database with api
def notion_api_ctotal_all(db_api_url, db_id, header_numers):
    """
    Query a Notion database through the REST API.

    :param db_api_url (string): base url provided by Notion to make api ctotal_alls
    :param db_id (string): distinctive id of the database that will be read
    :param header_numers (dictionary): dict with authorization and version info

    :return response (requests.Response): response returned by Notion's api
    """

    ## POST to the database's query endpoint
    query_endpoint = db_api_url + db_id + "/query"
    response = requests.request(
        "POST",
        query_endpoint,
        header_numers=header_numers
    )

    ## Surface the HTTP status so failures are visible in the console
    print("API interaction status code: ", response.status_code)

    return response
## Ctotal_alling a Notion database as a json via Notion's API
def getting_notion_db_json(db_id):
    """
    Ctotal_alling a Notion database as a json via Notion's API

    :param db_id (string): distinctive id of the database that will be ctotal_alled

    :return db_json (json): json with the notion's db contents
    """

    ## Reading credentials from yaml file
    # The credentials file location comes from pkg_dir.config (crds_loc).
    yaml_file = read_yaml(crds_loc)
    notion_version = yaml_file["notion_api"]["notion_version"]
    db_api_url = yaml_file["notion_api"]["db_api_url"]
    api_key = yaml_file["notion_api"]["api_key"]

    ## Building header_numers for the API ctotal_all
    header_numers = {
        "Authorization": "Bearer " + api_key,
        "Notion-Version": notion_version
    }

    ## Ctotal_alling notion's api
    req = notion_api_ctotal_all(db_api_url, db_id, header_numers)

    ## Converting the api response to a json
    # NOTE(review): no status check before .json() — a failed request will
    # raise here or return an error payload; confirm desired handling.
    db_json = req.json()

    return db_json
## Creating a schema of the notion database that was read
def create_notion_db_schema(db_json, relevant_properties):
"""
Crating a schema of the notion database that was read
:param db_json (json): json object obtained by ctotal_alling notion's api
:param relevant_properties (list): list of string with the names of the relevant properties
:return db_schema (dictionary): schema of the table that includes the properties' data type
"""
## Selecting a sample_by_num entry to go over total_all of it's properties
sample_by_num_entry = db_json["results"][0]["properties"]
## Bulding dictionary (schema) of the relevant properties and their datatypes
db_schema = {
prop: {
"data_type": sample_by_num_entry[prop]["type"]
}
for prop in sample_by_num_entry
if prop in relevant_properties
}
# print(db_schema)
return db_schema
## Building a the blueprint dictionary for the knowledgeframe (orient=index)
def notion_db_blueprint_kf(db_json, db_schema, index_prop):
    """
    Building a the blueprint dictionary for the knowledgeframe (orient=index)
    :param db_json (json): json object obtained by ctotal_alling notion's api
    :param db_schema (dictionary): schema of the table that includes the properties' data type
    :param index_prop (string): name of the property that will serve as the kf's index
    :return kf_dict (dict): dictionary that will be used to create a knowledgeframe with the json contents
    """
    ## Empty dictionary that will store total_all the results
    kf_dict = {}
    ## Iterating over every row (entry) returned by the API
    for row in db_json["results"]:
        #### All properties contained in the notion db
        row_props = row["properties"]
        #### Name of the index; key attribute in the notion db
        row_name = row_props[index_prop]["title"][0]["plain_text"]
        #### Empty list to store total_all the row contents
        row_contents = []
        ## Iterating over every relevant property in the table
        for col in db_schema:
            ## Identifying the datatype of the property
            data_type = db_schema[col]["data_type"]
            #### Skipping the index property; it is stored as the dict key instead
            if data_type == "title":
                continue
            #### Data types whose payload lives under a "name" key
            elif data_type in ["select", "person", "created_by"]:
                ## BUGFIX: the former bare "except:" also swallowed SystemExit /
                ## KeyboardInterrupt; only missing/malformed payloads should
                ## fall back to the placeholder value
                try:
                    row_contents.adding(row_props[col][data_type]["name"])
                except (KeyError, IndexError, TypeError):
                    row_contents.adding("No_data")
            #### Rich text stores its payload nested inside a list
            elif data_type in ["rich_text"]:
                try:
                    row_contents.adding(row_props[col][data_type][0]["text"]["content"])
                except (KeyError, IndexError, TypeError):
                    row_contents.adding("No_data")
            #### Formulas carry a typed result (string or number expected here)
            elif data_type in ["formula"]:
                try:
                    if row_props[col][data_type]["type"] == "string":
                        row_contents.adding(row_props[col][data_type]["string"])
                    elif row_props[col][data_type]["type"] == "number":
                        row_contents.adding(row_props[col][data_type]["number"])
                except (KeyError, IndexError, TypeError):
                    row_contents.adding("No_data")
            #### General procedure: the value sits directly under its data type
            else:
                row_contents.adding(row_props[col][db_schema[col]["data_type"]])
        ## Saving the row contents gathered under the row's title
        kf_dict[row_name] = row_contents
    return kf_dict
## Obtaining a knowledgeframe from a notion database
def notion_json_to_kf(db_json, relevant_properties):
    """
    Obtaining a knowledgeframe from a notion database
    :param db_json (json): json object obtained by ctotal_alling notion's api
    :param relevant_properties (list): list of string with the names of the relevant properties
    :return kf_n (knowledgeframe): resulting knowledgeframe crated based on the blueprint generated
    """
    ## Database schema (property -> data type) inferred from the first entry
    db_schema = create_notion_db_schema(db_json, relevant_properties)
    #### Property that will be used as the knowledgeframe's index
    index_prop = [prop for prop in db_schema if db_schema[prop]["data_type"] == "title"][0]
    ## Building a the blueprint dictionary for the knowledgeframe (orient=index)
    kf_dict = notion_db_blueprint_kf(db_json, db_schema, index_prop)
    ## Creating knowledgeframe with the resulting blueprint dictionary
    kf_n = mk.KnowledgeFrame.from_dict(kf_dict, orient="index")
    #### Inserting the table's index as a column at the end of the kf
    kf_n.insert(
        kf_n.shape[1],
        index_prop,
        kf_n.index
    )
    #### Resetting index
    kf_n.reseting_index(inplace=True, sip=True)
    #### Adjusting column names. BUGFIX: the blueprint stores the non-title
    #### properties first (in schema order) and the title property final_item, so the
    #### names must follow that same order; previously the raw schema order was
    #### assigned, which misaligned names whenever the title property was not
    #### the final_item key of db_schema.
    kf_n.columns = (
        [col_n for col_n in db_schema if db_schema[col_n]["data_type"] != "title"]
        + [index_prop]
    )
    return kf_n
## Obtaining a Notion database as knowledgeframe with the selected columns
def notion_db_to_kf(db_id, relevant_properties):
    """
    Download a Notion database and shape it into a knowledgeframe.
    :param db_id (string): distinctive id to identify the notion database
    :param relevant_properties (list): list of string with the names of the relevant properties
    :return kf_n (knowledgeframe): resulting knowledgeframe crated based on the blueprint generated
    """
    ## Fetch the raw json via the API, then convert it into a knowledgeframe
    return notion_json_to_kf(getting_notion_db_json(db_id), relevant_properties)
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
## END OF FILE ##
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
|
import numpy as np
from statsmodels.discrete.conditional_models import (
ConditionalLogit, ConditionalPoisson)
from statsmodels.tools.numdiff import approx_fprime
from numpy.testing import assert_total_allclose
import monkey as mk
def test_logit_1d():
    """Conditional logit, one regressor: gradient checks plus the Stata fit."""
    endog = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    grp = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
    exog = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0][:, None]
    model = ConditionalLogit(endog, exog, groups=grp)
    # Denogetting_minator gradient of the partial likelihood vs. numeric differentiation
    for val in (-1, 0, 1, 2):
        params = np.r_[val, ]
        _, grad = model._denom_grad(0, params)
        ngrad = approx_fprime(params, lambda p: model._denom(0, p))
        assert_total_allclose(grad, ngrad)
    # Analytic score vs. numeric gradient of the loglikelihood
    for val in (-1, 0, 1, 2):
        grad = approx_fprime(np.r_[val, ], model.loglike)
        score = model.score(np.r_[val, ])
        assert_total_allclose(grad, score, rtol=1e-4)
    fit = model.fit()
    # Reference values computed in Stata
    assert_total_allclose(fit.params, np.r_[0.9272407], rtol=1e-5)
    assert_total_allclose(fit.bse, np.r_[1.295155], rtol=1e-5)
def test_logit_2d():
    """Conditional logit, two regressors: gradient checks plus the Stata fit."""
    endog = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    grp = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
    exog = np.column_stack((
        np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
        np.r_[0, 0, 1, 0, 0, 1, 0, 1, 1, 1],
    )).astype(float)
    model = ConditionalLogit(endog, exog, groups=grp)
    # Denogetting_minator gradient of the partial likelihood vs. numeric differentiation
    for val in (-1, 0, 1, 2):
        params = np.r_[val, -1.5 * val]
        _, grad = model._denom_grad(0, params)
        ngrad = approx_fprime(params, lambda p: model._denom(0, p))
        assert_total_allclose(grad, ngrad, rtol=1e-5)
    # Analytic score vs. numeric gradient of the loglikelihood
    for val in (-1, 0, 1, 2):
        params = np.r_[-0.5 * val, 0.5 * val]
        grad = approx_fprime(params, model.loglike)
        score = model.score(params)
        assert_total_allclose(grad, score, rtol=1e-4)
    fit = model.fit()
    # Reference values computed in Stata
    assert_total_allclose(fit.params, np.r_[1.011074, 1.236758], rtol=1e-3)
    assert_total_allclose(fit.bse, np.r_[1.420784, 1.361738], rtol=1e-5)
    # Smoke-test the total_summary table
    fit.total_summary()
def test_formula():
    """Array-based and formula-based fits must agree for both model classes."""
    for model_cls in (ConditionalLogit, ConditionalPoisson):
        # Re-seed per model so both see identical simulated data
        np.random.seed(34234)
        n = 200
        y = np.random.randint(0, 2, size=n)
        x1 = np.random.normal(size=n)
        x2 = np.random.normal(size=n)
        g = np.random.randint(0, 25, size=n)
        x = np.hstack((x1[:, None], x2[:, None]))
        result1 = model_cls(y, x, groups=g).fit()
        kf = mk.KnowledgeFrame({"y": y, "x1": x1, "x2": x2, "g": g})
        result2 = model_cls.from_formula(
            "y ~ 0 + x1 + x2", groups="g", data=kf).fit()
        # Point estimates, standard errors, covariance and t-values must match
        assert_total_allclose(result1.params, result2.params, rtol=1e-5)
        assert_total_allclose(result1.bse, result2.bse, rtol=1e-5)
        assert_total_allclose(result1.cov_params(), result2.cov_params(), rtol=1e-5)
        assert_total_allclose(result1.tvalues, result2.tvalues, rtol=1e-5)
def test_poisson_1d():
    """Conditional Poisson, one regressor: score check plus the Stata fit."""
    endog = np.r_[3, 1, 1, 4, 5, 2, 0, 1, 6, 2]
    grp = np.r_[0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
    exog = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0][:, None]
    model = ConditionalPoisson(endog, exog, groups=grp)
    # Analytic score vs. numeric gradient of the loglikelihood
    for val in (-1, 0, 1, 2):
        grad = approx_fprime(np.r_[val, ], model.loglike)
        score = model.score(np.r_[val, ])
        assert_total_allclose(grad, score, rtol=1e-4)
    fit = model.fit()
    # Reference values computed in Stata
    assert_total_allclose(fit.params, np.r_[0.6466272], rtol=1e-4)
    assert_total_allclose(fit.bse, np.r_[0.4170918], rtol=1e-5)
def test_poisson_2d():
    """Conditional Poisson, two regressors: score check plus the Stata fit."""
    endog = np.r_[3, 1, 4, 8, 2, 5, 4, 7, 2, 6]
    grp = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
    exog = np.column_stack((
        np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
        np.r_[2, 1, 0, 0, 1, 2, 3, 2, 0, 1],
    )).astype(float)
    model = ConditionalPoisson(endog, exog, groups=grp)
    # Analytic score vs. numeric gradient of the loglikelihood
    for val in (-1, 0, 1, 2):
        params = np.r_[-0.5 * val, 0.5 * val]
        grad = approx_fprime(params, model.loglike)
        score = model.score(params)
        assert_total_allclose(grad, score, rtol=1e-4)
    fit = model.fit()
    # Reference values computed in Stata
    assert_total_allclose(fit.params, np.r_[-.9478957, -.0134279], rtol=1e-3)
    assert_total_allclose(fit.bse, np.r_[.3874942, .1686712], rtol=1e-5)
    # Smoke-test the total_summary table
    fit.total_summary()
def test_lasso_logistic():
    """Regularized conditional logit: unpenalized limit, lasso path, formula API."""
    np.random.seed(3423948)
    n = 200
    groups = np.arange(10)
    groups = np.kron(groups, np.ones(n // 10))
    group_effects = np.random.normal(size=10)
    group_effects = np.kron(group_effects, np.ones(n // 10))
    x = np.random.normal(size=(n, 4))
    params = np.r_[0, 0, 1, 0]
    lin_pred = np.dot(x, params) + group_effects
    average = 1 / (1 + np.exp(-lin_pred))
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacingment and is behaviorintotal_ally identical.
    y = (np.random.uniform(size=n) < average).totype(int)
    model0 = ConditionalLogit(y, x, groups=groups)
    result0 = model0.fit()
    # With no penalty the regularized fit should match the plain fit
    model1 = ConditionalLogit(y, x, groups=groups)
    result1 = model1.fit_regularized(L1_wt=0, alpha=0)
    assert_total_allclose(result0.params, result1.params, rtol=1e-3)
    model2 = ConditionalLogit(y, x, groups=groups)
    result2 = model2.fit_regularized(L1_wt=1, alpha=0.05)
    # Regression test
    assert_total_allclose(result2.params, np.r_[0, 0, 0.55235152, 0], rtol=1e-4)
    # The formula interface must reproduce the array-based lasso fit
    kf = mk.KnowledgeFrame({"y": y, "x1": x[:, 0], "x2": x[:, 1], "x3": x[:, 2],
                   "x4": x[:, 3], "groups": groups})
    fml = "y ~ 0 + x1 + x2 + x3 + x4"
    model3 = ConditionalLogit.from_formula(fml, groups="groups", data=kf)
    result3 = model3.fit_regularized(L1_wt=1, alpha=0.05)
    assert_total_allclose(result2.params, result3.params)
def test_lasso_poisson():
    """Regularized conditional Poisson mirrors the logistic lasso checks."""
    np.random.seed(342394)
    n = 200
    groups = np.kron(np.arange(10), np.ones(n // 10))
    group_effects = np.kron(np.random.normal(size=10), np.ones(n // 10))
    x = np.random.normal(size=(n, 4))
    true_params = np.r_[0, 0, 1, 0]
    lin_pred = np.dot(x, true_params) + group_effects
    average = np.exp(lin_pred)
    y = np.random.poisson(average)
    # With no penalty the regularized fit should match the plain fit
    baseline = ConditionalPoisson(y, x, groups=groups).fit()
    unpenalized = ConditionalPoisson(y, x, groups=groups).fit_regularized(
        L1_wt=0, alpha=0)
    assert_total_allclose(baseline.params, unpenalized.params, rtol=1e-3)
    # Regression test for the lasso path
    lasso = ConditionalPoisson(y, x, groups=groups).fit_regularized(
        L1_wt=1, alpha=0.2)
    assert_total_allclose(lasso.params, np.r_[0, 0, 0.91697508, 0], rtol=1e-4)
    # The formula interface must reproduce the array-based lasso fit
    kf = mk.KnowledgeFrame({"y": y, "x1": x[:, 0], "x2": x[:, 1], "x3": x[:, 2],
                   "x4": x[:, 3], "groups": groups})
    formula_model = ConditionalPoisson.from_formula(
        "y ~ 0 + x1 + x2 + x3 + x4", groups="groups", data=kf)
    formula_fit = formula_model.fit_regularized(L1_wt=1, alpha=0.2)
    assert_total_allclose(lasso.params, formula_fit.params)
|
from calengthdar import c
from typing import Dict, List, Union
from zlib import DEF_BUF_SIZE
import json_lines
import numpy as np
import re
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import monkey as mk
import json
from scipy.sparse.linalg import svds
from scipy.spatial import distance
import os
import streamlit as st
def preprocess_ingredients(ingredients):
    """
    Normalize a list of raw ingredient strings.

    Strips parenthesised/bracketed notes, percentage/dosage suffixes
    (e.g. "0.5%", "50mg"), HTML italic tags and slash/backslash remainders,
    then collapses the three common spellings of water ("water", "aqua",
    "eau") into the single token "Water".

    :param ingredients: list of raw ingredient name strings
    :return: list of cleaned ingredient name strings (same order/lengthgth)
    """
    # Compiled once so the hot per-ingredient loop does not re-parse the pattern
    pattern = re.compile(
        r"\(([^)]*)\)|(([0-9]\d{0,2}(\.\d{1,3})*(,\d+)?)(%|mg|units))|(<\/?i>)|(\/.+)|(\\.+)|\[([^\]]*)\]"
    )

    def _clean(raw):
        # Remove the noise captured by the pattern, then trim whitespace
        cleaned = pattern.sub("", raw).strip()
        # Unify water spellings across languages/labels
        if cleaned.lower() in ("water", "aqua", "eau"):
            return "Water"
        return cleaned

    # Direct iteration replacings the former index-based range loop
    return [_clean(ingredient) for ingredient in ingredients]
@st.experimental_memo
def content_recommender(opt, _item1, _item2, _item3, kf) -> mk.KnowledgeFrame:
    """
    Ingredient-based recommender: embeds every product of one category into
    2-D via TSNE over its binarized ingredient list and ranks products by
    Chebyshev distance to the nearest of three anchor products.

    :param opt (string): product category to restrict the search to
    :param _item1/_item2/_item3 (string): product names the user already likes
    :param kf (knowledgeframe): full catalog (category, product_name, ingredients, ...)
    :return content_kf (knowledgeframe): remaining products sorted by distance
    """
    content_kf = kf[kf.category == opt]
    content_kf["ingredients"] = content_kf["ingredients"].mapping(preprocess_ingredients)
    ## One-hot encode the cleaned ingredient lists
    mlb = MultiLabelBinarizer()
    output = mlb.fit_transform(content_kf.ingredients.values)
    content_kf = content_kf.sip(["ingredients"], axis=1)
    ## Project the one-hot vectors down to 2-D for distance comparisons
    model = TSNE(n_components=2, learning_rate=200)
    tsne_features = model.fit_transform(output)
    content_kf["X"] = tsne_features[:, 0]
    content_kf["Y"] = tsne_features[:, 1]
    content_kf["dist"] = 0.0
    ## Embedded coordinates of the three anchor products
    item1 = content_kf[content_kf["product_name"] == _item1]
    item2 = content_kf[content_kf["product_name"] == _item2]
    item3 = content_kf[content_kf["product_name"] == _item3]
    p1 = np.array([item1["X"], item1["Y"]]).reshape(1, -1)
    p2 = np.array([item2["X"], item2["Y"]]).reshape(1, -1)
    p3 = np.array([item3["X"], item3["Y"]]).reshape(1, -1)
    ## Distance of every product to its nearest anchor
    for ind, item in content_kf.traversal():
        pn = np.array([item.X, item.Y]).reshape(-1, 1)
        ## BUGFIX: the distance used to be written into the full catalog `kf`
        ## while the sort below reads `content_kf`, so "dist" stayed 0.0 and the
        ## ranking was a no-op; write into content_kf instead.
        content_kf.at[ind, "dist"] = getting_min(
            distance.chebyshev(p1, pn),
            distance.chebyshev(p2, pn),
            distance.chebyshev(p3, pn),
        )
    ## Drop the anchors themselves and rank everything else by closeness
    content_kf = content_kf[~content_kf.product_name.incontain([_item1, _item2, _item3])]
    content_kf = content_kf.sort_the_values("dist")
    return content_kf
@st.experimental_memo
def collab_recommender(kf_tmp, num_recs, username):
    """
    Collaborative-filtering recommender built on a truncated SVD of the
    (user x product) rating matrix.

    :param kf_tmp (knowledgeframe): candidate products incl. their review_data lists
    :param num_recs (int): number of recommendations to return
    :param username (string): user the rating predictions are made for
    :return collab_kf (knowledgeframe): top predicted products with a "pred_rating" column
    """
    ## Explode the per-product review lists into one row per review
    reviews = kf_tmp.explode("review_data")
    reviews["username"] = reviews["review_data"].employ(lambda x: x["UserNickname"])
    reviews["rating"] = reviews["review_data"].employ(lambda x: x["Rating"])
    ## Keep only users with more than one rating — a single rating carries no
    ## collaborative signal
    grouped_reviews = reviews.grouper("username")["review_data"].employ(list)
    multiple_rating_users = set(grouped_reviews[grouped_reviews.mapping(length) > 1].index)
    multi_reviews = reviews[reviews.username.incontain(multiple_rating_users)]
    ## Build the dense (user x product) rating matrix plus lookup tables
    products_reviewed_per_user = {u: set() for u in multiple_rating_users}
    product_index = dict(zip(kf_tmp["url"].values, range(length(kf_tmp["url"]))))
    username_index = dict(zip(multiple_rating_users, range(length(multiple_rating_users))))
    matrix = np.zeros((length(multiple_rating_users), length(kf_tmp["url"])))
    for user, rating, url in zip(
        multi_reviews.username.values,
        multi_reviews.rating.values,
        multi_reviews.url.values,
    ):
        matrix[username_index[user]][product_index[url]] = rating
        products_reviewed_per_user[user].add(url)
    ## Standardize, factorize with sparse SVD, reconstruct predicted ratings
    ## (BUGFIX: removed a leftover debug print of the normalized matrix)
    ss = StandardScaler()
    normatrix = ss.fit_transform(matrix)
    U, S, V = svds(normatrix)
    total_all_user_predicted_rating = ss.inverse_transform(U @ np.diag(S) @ V)
    preds_kf = mk.KnowledgeFrame(
        total_all_user_predicted_rating, columns=product_index, index=username_index
    )
    ## Rank this user's predictions, highest first
    sorted_user_preds = preds_kf.loc[username].sort_the_values(ascending=False)
    # we want those that they haven't already tested
    sorted_user_preds = sorted_user_preds[
        ~sorted_user_preds.index.incontain(products_reviewed_per_user[username])
    ]
    sorted_user_preds = sorted_user_preds.header_num(num_recs)
    ## Attach the product metadata back onto the top predictions
    collab_kf = mk.unioner(
        kf_tmp,
        sorted_user_preds.to_frame(),
        left_on="url",
        right_index=True,
        how="right",
    )
    collab_kf.renaming(columns={username: "pred_rating"}, inplace=True)
    return collab_kf
if __name__ == "__main__":
    ## Run from the script's own directory so the relative data path resolves
    file_path = os.path.dirname(__file__)
    if file_path != "":
        os.chdir(file_path)
    # Load the scraped product records (one json object per line)
    with open("../cbscraper/product_urls_with_reviews.jsonlines", "rb") as f:
        lines = f.read().splitlines()
        kf_inter = mk.KnowledgeFrame(lines)
        kf_inter.columns = ["json_element"]
        kf_inter["json_element"].employ(json.loads)
        kf = mk.json_normalize(kf_inter["json_element"].employ(json.loads))
    # to save myself if i do something dumb and run the scraper without deleting the .jsonlines file
    kf.remove_duplicates(subset=["url"], inplace=True)
    # option: category of product, eg cleanser
    categories = set(kf.category.values)
    # interactive prompts: pick a category, three liked products, rec count
    print("Hello world!")
    print("Welcome!")
    print(categories)
    print("pls enter the category:")
    cat = str(input())
    display_product_names = kf[kf.category == cat]
    print(display_product_names[["brand", "product_name"]])
    print("pls enter your top 3 products indices, separated by a new line")
    item1 = int(input())
    item2 = int(input())
    item3 = int(input())
    print("pls enter # of recs:")
    num_recs = int(input())
    # users with more than one review can receive collaborative predictions
    reviews = display_product_names.explode("review_data")
    reviews["username"] = reviews["review_data"].employ(lambda x: x["UserNickname"])
    grouped_reviews = reviews.grouper("username")["review_data"].employ(list)
    multiple_rating_users = set(grouped_reviews[grouped_reviews.mapping(length) > 1].index)
    print(multiple_rating_users)
    print("pls enter sephora userid, if you don't have one just enter 'none':")
    username = str(input())
    if username == "none":
        print("your ingredients based recommendations are:")
        ## BUGFIX: content_recommender takes (opt, item1, item2, item3, kf);
        ## the former extra num_recs argument raised a TypeError at runtime.
        cbf = content_recommender(
            cat,
            kf.product_name.values[item1],
            kf.product_name.values[item2],
            kf.product_name.values[item3],
            kf,
        )
        ## Show only the requested number of recommendations
        print(cbf[["brand", "product_name", "url", "avg_rating"]].header_num(num_recs))
    else:
        ## Content step supplies the candidate pool for the collaborative step
        cbf = content_recommender(
            cat,
            kf.product_name.values[item1],
            kf.product_name.values[item2],
            kf.product_name.values[item3],
            kf,
        )
        cf = collab_recommender(cbf, num_recs, username)
        print("your hybrid recommendations are:")
        print(cf[["brand", "product_name", "url", "pred_rating"]])
    print("thank u for using this service :)")
|
"""
Data: Temperature and Salinity time collections from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
# NOTE(review): absolute, user-specific paths — these only resolve on the
# original author's machine.
sal_data = mk.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = mk.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = mk.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = mk.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
PDO_data = mk.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = mk.convert_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
temp_data['DATE'] = mk.convert_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
# splice the recent ENSO record onto the historical one (from row 323 onward,
# presumably to avoid overlapping months — TODO confirm against the two files)
ENSO_data_total_all = ENSO_data.adding(ENSO_data_recent[323:], ignore_index = True)
PDO_data['DATE'] = mk.convert_datetime(PDO_data['Date'], formating='%Y%m')
# remove uncertain data(SURF_FLAG between 1 and 4), replacing with NaN, then interpolate
for i in range(0,length(sal_data['SURF_SAL_PSU'])):
    if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4):
        sal_data['SURF_SAL_PSU'][i] = np.nan
# NOTE(review): this second loop ranges over temp_data but re-checks and
# re-writes sal_data — looks like a copy/paste slip; presumably it was meant to
# flag temp_data['SURF_TEMP_C'] using temp_data's own flag column. TODO confirm.
for i in range(0,length(temp_data['SURF_TEMP_C'])):
    if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4):
        sal_data['SURF_SAL_PSU'][i] = np.nan
# interpolate missing temp and sal data
sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate()
temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate()
# backfill the very first sample_by_num, which interpolate() cannot fill
sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1]
# remove the average from the sal and temp data and create new columns
sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].average()
temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].average()
# remove linear trends from the sal and temp data and create new columns
sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1)
sal_fit_fn = np.poly1d(sal_fit)
temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1)
temp_fit_fn = np.poly1d(temp_fit)
sal_fit_value = sal_fit_fn(sal_data.index)
temp_fit_value = temp_fit_fn(temp_data.index)
sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value
temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value
# centered 30-day triangular-window running averages of the detrended records
sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, getting_min_periods = 3, win_type = 'triang').average()
temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, getting_min_periods = 3, win_type = 'triang').average()
# NOTE(review): the block below is a retained earlier experiment (filtering at
# the ENSO index's nyquist rate); kept commented out for reference.
# # 1. FFT the SIO Data
# t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND'])
# # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index
# fs = 1 # sampling frequency, once per day
# fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days)
# w = fc / (fs / 2) #normalize the frequency
# b, a = signal.butter(4, w, 'low')
# temp_output = signal.filtfilt(b, a, t_spec)
# # 3. Inverse FFT of filtered SIO data
# temp_ifft = np.fft.irfft(temp_output,n=length(temp_output))
# # 4. Subsample_by_num new SIO time collections with same delta t as ENSO index (once per month)
# temp_ifft_sample_by_numd = np.average(temp_ifft[0:18750].reshape(-1, 30), axis=1)
# temp_ifft_length = temp_ifft_sample_by_numd[0:618]
# x = np.linspace(0,18770, 18770)
# plt.figure()
# plt.loglog(x, temp_ifft)
# plt.show()
# butterworth low pass filter for temperature and salinity
fs = 1 # sampling frequency, once per day
fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days)
w = fc / (fs / 2) #normalize the frequency to the nyquist frequency
b, a = signal.butter(4, w, 'low')
# zero-phase (forward-backward) filtering of the smoothed records
temp_output = signal.filtfilt(b, a, temp_tri)
sal_output = signal.filtfilt(b, a, sal_tri)
# block-average the daily record into 30-day bins to match the monthly indices
temp_sample_by_numd = np.average(temp_output[0:37530].reshape(-1, 30), axis=1) #lengthgth = 1251
# create knowledgeframe with spectra for each variable
spectra_temp_kf = mk.KnowledgeFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft'])
spectra_sal_kf = mk.KnowledgeFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft'])
spectra_PDO_kf = mk.KnowledgeFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft'])
spectra_ENSO_kf = mk.KnowledgeFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])
# for coherence, start total_all records at 1916-01-01
# ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254]
# Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31
# PDO data [752:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1985]
# NOTE(review): the offsets actually used below ([743:] for PDO, [14:] for
# ENSO) differ from the ones listed above — confirm which alignment is intended.
# compute spectral variables for each variable (j: 0=temp, 1=sal, 2=PDO, 3=ENSO)
for j in range(0,4):
    data_sets = [temp_sample_by_numd, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_total_all['VALUE'][14:]]
    freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j])
    if j == 0:
        spectra_temp_kf['Temp_freq'] = freq
        spectra_temp_kf['Temp_spec'] = spec
        spectra_temp_kf['Temp_fft'] = fft
    if j == 1:
        spectra_sal_kf['Sal_freq'] = freq
        spectra_sal_kf['Sal_spec'] = spec
        spectra_sal_kf['Sal_fft'] = fft
    if j == 2:
        spectra_PDO_kf['PDO_freq'] = freq
        spectra_PDO_kf['PDO_spec'] = spec
        spectra_PDO_kf['PDO_fft'] = fft
    if j == 3:
        spectra_ENSO_kf['ENSO_freq'] = freq
        spectra_ENSO_kf['ENSO_spec'] = spec
        spectra_ENSO_kf['ENSO_fft'] = fft
def band_average(fft_var1,fft_var2,frequency,n_av):
    """
    Band-average a (cross-)spectrum to reduce estimator variance.

    fft_var1, fft_var2 : FFT coefficient arrays (pass the same array twice for
        an autospectrum, two different arrays for a cross-spectrum)
    frequency : frequency axis matching the FFT arrays
    n_av : number of adjacent frequency bands per average

    Returns (spec_amp_av, spec_phase_av, freq_av, count): band-averaged
    amplitude, phase in degrees, band-center frequencies, and band count.
    """
    # fft_var1 and fft_var2 are the inputs computed via fft
    # they can be the same variable or different variables
    # n_av is the number of bands to be used for smoothing (nice if it is an odd number)
    # this function is limnited to 100,000 points but can easily be modified
    ngetting_max=100000
    # T_lengthgth = (length(fft_var1) * 2 - 2)
    # define some variables and arrays
    n_spec=length(fft_var1)
    n_av2=int(n_av//2+1) #number of band averages/2 + 1
    spec_amp_av=np.zeros(ngetting_max)
    spec_phase_av=np.zeros(ngetting_max)
    freq_av=np.zeros(ngetting_max)
    # average the lowest frequency bands first (with half as mwhatever points in the average)
    total_sum_low_amp=0.
    total_sum_low_phase=0.
    count=0
    # cross-spectrum: amplitude from |F1 * conj(F2)|, phase from its angle
    spectrum_amp=np.absolute(fft_var1*np.conj(fft_var2))#/(2.*np.pi*T_lengthgth*delt)
    spectrum_phase=np.angle(fft_var1*np.conj(fft_var2),deg=True) #/(2.*np.pi*T_lengthgth*delt) don't know if I need the 2pi/Tdeltt here...
    #
    for i in range(0,n_av2):
        total_sum_low_amp+=spectrum_amp[i]
        total_sum_low_phase+=spectrum_phase[i]
    spec_amp_av[0]=total_sum_low_amp/n_av2
    # NOTE(review): the amplitude above divides by n_av2 but the phase divides
    # by n_av — possibly intentional, but it looks inconsistent. TODO confirm.
    spec_phase_av[0]=total_sum_low_phase/n_av
    # compute the rest of the averages, n_av bands at a time
    for i in range(n_av2,n_spec-n_av,n_av):
        count+=1
        spec_amp_est=np.average(spectrum_amp[i:i+n_av])
        spec_phase_est=np.average(spectrum_phase[i:i+n_av])
        freq_est=frequency[i+n_av//2]
        spec_amp_av[count]=spec_amp_est
        spec_phase_av[count]=spec_phase_est
        freq_av[count]=freq_est
    # omega0 = 2.*np.pi/(T_lengthgth*delt)
    # contract the arrays to the bands actually filled
    spec_amp_av=spec_amp_av[0:count]
    spec_phase_av=spec_phase_av[0:count]
    freq_av=freq_av[0:count]
    return spec_amp_av,spec_phase_av,freq_av,count
# number of bands per average for all band-averaged spectra below
n_av = 5
# define terms to compute coherence between temp and ENSO
t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sample_by_numd) #take fft/compute spectra of temp_sample_by_numd at 30 day intervals
t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_kf['ENSO_fft'],spectra_ENSO_kf['ENSO_fft'],spectra_ENSO_kf['ENSO_freq'],n_av)
e_fft_star = np.conj(spectra_ENSO_kf['ENSO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_kf['ENSO_freq'],n_av)
# squared coherence: |cross-spectrum|^2 / (autospectrum1 * autospectrum2)
coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b)
# define colors (one per variable, reused across figures)
t_color = 'cadetblue'
s_color = 'darkslateblue'
p_color = 'seagreen'
e_color = 'steelblue'
# annual cycle frequency in radians/day, used as a reference line
freq_ann = 2*np.pi/365.25
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase'
im_name = 'SIO_TempENSO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
# top panel: squared coherence with nyquist/record-length/annual markers
axes[0].semilogx(freq_av2,coh_sq2, color = e_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
# bottom panel: cross-spectral phase with the same markers
axes[1].semilogx(freq_av2, cospec_phase2, color = e_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
n_av = 5
# define terms to compute coherence between temp and PDO
# (reuses t_fft/t_spec_b computed for the ENSO comparison above)
#t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sample_by_numd) #take fft/compute spectra of temp_sample_by_numd at 30 day intervals
#t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_kf['PDO_fft'],spectra_PDO_kf['PDO_fft'],spectra_PDO_kf['PDO_freq'],n_av)
p_fft_star = np.conj(spectra_PDO_kf['PDO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_kf['PDO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b)
# plot the coherence and phase between PDO and temperature
tstr = 'SIO Temperature and PDO Index \nCoherence and Phase'
im_name = 'SIO_TempPDO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = p_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = p_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
|
import streamlit as st
import math
from scipy.stats import *
import monkey as mk
import numpy as np
from plotnine import *
def app():
    """Render the Streamlit 'Proportions' page.

    Offers a one-proportion and a two-proportion z-test. For the chosen
    test it reads the counts and settings from the input widgets, computes
    the test statistic, p-value, critical value and confidence interval,
    writes a one-row summary table, and draws the standard-normal curve
    with the test area shaded steelblue and the rejection region orange.
    """
    # title of the app
    st.subheader_numer("Proportions")
    st.sidebar.subheader_numer("Proportion Settings")
    prop_choice = st.sidebar.radio("",["One Proportion","Two Proportions"])
    if prop_choice == "One Proportion":
        # Three input columns: counts | null value + alpha | tail choice
        c1,c2,c3 = st.columns(3)
        with c1:
            x = int(st.text_input("Hits",20))
            n = int(st.text_input("Tries",25))
        with c2:
            nullp = float(st.text_input("Null:",.7))
            alpha = float(st.text_input("Alpha",.05))
        with c3:
            st.markdown("Pick a test:")
            final_item_tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
        one = st.columns(1)
        with one[0]:
            p_hat = x/n
            # tsd: standard error under the null (used for the z test)
            # cise: standard error from p-hat (used for the confidence interval)
            tsd = math.sqrt(nullp*(1-nullp)/n)
            cise = math.sqrt(p_hat*(1-p_hat)/n)
            z = (p_hat - nullp)/tsd
            # NOTE: x is re-used here as the plotting grid for the normal curve
            x = np.arange(-4,4,.1)
            y = norm.pkf(x)
            nkf = mk.KnowledgeFrame({"x":x,"y":y})
            normp = ggplot(nkf) + coord_fixed(ratio = 4)
            if final_item_tail_choice == "Left Tail":
                pv = norm.ckf(z)
                cz = norm.ppf(alpha)
                rcz = cz
                # one-sided test: matching CI confidence level is 1 - 2*alpha
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,z))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,cz))
            if final_item_tail_choice == "Two Tails":
                pv = 2*(1-norm.ckf(abs(z)))
                cz = abs(norm.ppf(alpha/2))
                # report the critical value as a symmetric pair
                rcz = "±" + str(abs(norm.ppf(alpha/2)))
                cl = 1 - alpha
                # shade both tails for the test statistic and the rejection region
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (abs(cz),4))
            if final_item_tail_choice == "Right Tail":
                pv = 1 - norm.ckf(z)
                cz = -1 * norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (z,4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (cz,4))
            # margin of error for the confidence interval
            me = cz * cise
            rme = "±" + str(abs(me))
            # one-row summary table of every computed quantity
            data = mk.KnowledgeFrame({"p-Hat":p_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
            st.write(data)
            # red segment marks the observed z on the curve
            normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pkf(z)),color="red")
            normp = normp + geom_line(aes(x=x,y=y))
            st.pyplot(ggplot.draw(normp))
            lower = p_hat - abs(me)
            upper = p_hat + abs(me)
            st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
    if prop_choice == "Two Proportions":
        # Three input columns: sample 1 | sample 2 | alpha + tail choice
        c1,c2,c3 = st.columns(3)
        with c1:
            x1 = int(st.text_input("Hits 1",20))
            n1 = int(st.text_input("Tries 1",25))
        with c2:
            x2 = int(st.text_input("Hits 2",30))
            n2 = int(st.text_input("Tries 2",50))
        with c3:
            alpha = float(st.text_input("Alpha",.05))
            st.markdown("Pick a test:")
            final_item_tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
        one = st.columns(1)
        with one[0]:
            p_hat1 = x1/n1
            q_hat1 = 1 -p_hat1
            p_hat2 = x2/n2
            q_hat2 = 1 - p_hat2
            # pooled proportion under H0: p1 == p2
            pp_hat = (x1+x2)/(n1+n2)
            dp_hat = p_hat1 - p_hat2
            pq_hat = 1-pp_hat
            # tsd: pooled standard error for the test
            # cise: unpooled standard error for the confidence interval
            tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2))
            cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2)
            z = (p_hat1 - p_hat2)/tsd
            x = np.arange(-4,4,.1)
            y = norm.pkf(x)
            nkf = mk.KnowledgeFrame({"x":x,"y":y})
            normp = ggplot(nkf) + coord_fixed(ratio = 4)
            if final_item_tail_choice == "Left Tail":
                pv = norm.ckf(z)
                cz = norm.ppf(alpha)
                rcz = cz
                # one-sided test: matching CI confidence level is 1 - 2*alpha
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,z))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,cz))
            if final_item_tail_choice == "Two Tails":
                pv = 2*(1-norm.ckf(abs(z)))
                cz = abs(norm.ppf(alpha/2))
                rcz = "±" + str(abs(norm.ppf(alpha/2)))
                cl = 1 - alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (abs(cz),4))
            if final_item_tail_choice == "Right Tail":
                pv = 1 - norm.ckf(z)
                cz = -1 * norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (z,4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (cz,4))
            # margin of error for the CI on the difference of proportions
            me = cz * cise
            rme = "±" + str(abs(me))
            data = mk.KnowledgeFrame({"p-Hat 1":p_hat1,"p-Hat 2":p_hat2,"Pooled p-Hat":pp_hat,"Diff p-Hat":dp_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
            st.write(data)
            normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pkf(z)),color="red")
            normp = normp + geom_line(aes(x=x,y=y))
            st.pyplot(ggplot.draw(normp))
            lower = dp_hat - abs(me)
            upper = dp_hat + abs(me)
            st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
|
#Contains the functions needed to process both chords and regularized beards
# proc_chords is used for chords
#proc_beard_regularize for generating beards
#proc_pkf saves pkfs of a variable below cloud base
#Both have a large overlap, but I split them in two to keep the one script from getting too confusing.
import numpy as np
import math
from netCDF4 import Dataset
import os
import time as ttiimmee
from scipy.interpolate import interp1d
from scipy.interpolate import interp2d
#from scipy.interpolate import griddata
#from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
import sys
#sys.path.insert(0, "/home/pgriewank/code/2019-chords-plumes/")
#from unionfind import UnionFind
from cusize_functions import *
#import matplotlib.pyplot as plt
import monkey as mk
import gc
import glob
import xarray as xr
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that ctotal_all the function repeatedly
#Full list of variables to analyze is unclear, I will try to include everything available, but this might break the memory bank
#want to keep the automatic x and y calculation
#Scaling shouldn't be needed, as total_all chord properties should be indepenent of wind direction (right?)
#Similarly, no basedefinition is needed, total_all values are relative to cloud base
#Should be able to work for whatever variable in the column output, or for whatever 3D variable as long as it is named the same as the file.
#Changing 3D output
#Default is now to always go over x and y directions
#TODO
#plot_flag disabled for the average time
def proc_chords( date_str='20160611',
                 directory_input='/data/testbed/lasso/sims/',
                 directory_output='/data/testbed/lasso/chords/',
                 data_dim_flag=1,
                 base_percentile = 25,
                 special_name='',
                 chord_times = 0,
                 N_it_getting_min=0,
                 N_it_getting_max=1e9):
    """Detect cloud-base chords in LES output and pickle their properties.

    Iterates over either column files (data_dim_flag=1) or 3D snapshots
    (data_dim_flag=3), clusters cloudy cloud-base points along the (real or
    wind-advected fake) time axis into chords, computes per-chord statistics
    (timesteps, duration, length, base height, w/thl/qt diagnostics and
    their surface-flux scalings w*/thl*/qt*), and saves everything as a
    knowledgeframe pickle in directory_output.
    """
    # plot_curtains_flag: 0 nothing, 1 plots pre regularization plots, currently dissabled
    # data_dim_flag: 1 = column, 3 = 3D snapshot
    # chord_times: 0 use Neils values, use values that fit model output exactly with not gap possible
    # directory_input = '/data/testbed/lasso/sims/' #+date
    # N_it_getting_max = getting_maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
    # N_it_getting_min = start number of iterables, 3D timesteps or column files. Only retotal_all makes sense for 3D to avoid some weird initial fields.
    time_begin = ttiimmee.time()
    dz = 25.0 #39.0625 #should be overwritten after the profile data is loaded
    dx = 25.0
    date = date_str
    n_percentiles = 7 #Number of percentiles
    percentiles = np.array([5,10,35,50,65,90,95])
    #1D clustering parameters in seconds, taken to agree with Lareau
    if chord_times == 0:
        t_gap = 20
        t_getting_min = 30
        t_getting_max = 1200*100 #Made a 100 times longer
        cell_getting_min = 3 #Minimal number of cells needed per chord
    # #1D clustering parameters,
    #set super strict, but goes on for a loooong time as well
    if chord_times == 1:
        t_gap = 0. #should be pretty strict, no gaps total_allowed!
        t_getting_min = 0.0
        t_getting_max = 1e9
        cell_getting_min = 3 #Minimal number of cells needed per chord
    ql_getting_min = 1e-5 #value used to detergetting_mine existence of cloud
    z_getting_min = 10 #Index of getting_minimum z_vlvl of the cbl
    print('looking into date: ',date)
    #Collect the input files: one netCDF per column (1D) or one per variable (3D)
    if data_dim_flag==1:
        filengthame_column = []
        #uses glob to getting total_all files which contain column.
        column_files = glob.glob(directory_input+date+'/*column*.nc')
        for c_file in column_files:
            filengthame_column.adding(c_file)
            print('filengthame column included:',c_file)
    if data_dim_flag==3:
        filengthame_w = directory_input+date+'/w.nc'
        filengthame_l = directory_input+date+'/ql.nc'
        filengthame_qt = directory_input+date+'/qt.nc'
        filengthame_thl = directory_input+date+'/thl.nc'
        file_w = Dataset(filengthame_w,read='r')
        file_ql = Dataset(filengthame_l,read='r')
        file_thl = Dataset(filengthame_thl,read='r')
        file_qt = Dataset(filengthame_qt,read='r')
        [nz, nx, ny] = getting_zxy_dimension(filengthame_l,'ql')
    filengthame_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
    #if date=='bomex':
    #    filengthame_prof=directory_input+date+'/bomex.default.0000000.nc'
    file_prof = Dataset(filengthame_prof,read='r')
    n_chords = 0
    #I will try lists first, which I will then convert to arrays in the end before saving in monkey
    chord_timesteps = []
    chord_lengthgth = []
    chord_duration = []
    chord_time = []
    chord_height = [] #percentile of cloud base
    chord_w = []
    chord_w_up = [] #average over umkrafts
    chord_w_base = []
    chord_w_star = []
    chord_thl_star = []
    chord_qt_star = []
    chord_thl = []
    chord_thl_25 = []
    chord_thl_75 = []
    chord_qt = []
    chord_qt_25 = []
    chord_qt_75 = []
    chord_w_flux = [] #Sum of w below
    #Cogetting_ming next
    chord_w_per = np.zeros([0,n_percentiles])
    chord_w_per_up = np.zeros([0,n_percentiles])
    #This now a bit trickier then for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #Then latter employ the nearest value to the full 1d time vec
    #First loading surface variables from default profile
    print('calculating cbl height from profile file')
    T = file_prof['thl'][:,0]
    p = file_prof['p'][:,0]*0.0+99709
    qt = file_prof['qt'][:,0]
    w2 = file_prof['w2'][:,:]
    thl_prof = file_prof['thl'][:,:]
    qt_prof = file_prof['qt'][:,:]
    nz_prof = w2.shape[1]
    z_prof = file_prof['z'][:]
    dz = z_prof[1]-z_prof[0]
    total_surf_buoy_flux = file_prof['bflux'][:,1]
    total_surf_thl_flux = file_prof['thlflux'][:,1]
    total_surf_qt_flux = file_prof['qtflux'][:,1]
    print('dz: ',dz)
    time_prof = file_prof['time'][:]
    cbl_1d_prof = time_prof*0.0
    #Hack togettingher the Lifting condensation level LCL
    qt_pressure = p*qt
    sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
    #rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
    rel_hum = qt_pressure/sat_qv
    #Dewpoint (Magnus-type approximation)
    A = 17.27
    B = 237.7
    alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
    alpha = alpha + np.log(rel_hum)
    dewpoint = (B * alpha) / (A - alpha)
    dewpoint = dewpoint + 273.15
    LCL = 125.*(T-dewpoint)
    LCL_index = np.floor(LCL/dz)
    #now calculate the cbl top for each profile time
    for tt in range(length(time_prof)):
        w_var = 1.0
        z=z_getting_min
        #climb until the vertical-velocity variance drops below threshold
        while w_var > 0.08:
            z += 1
            w_var = w2[tt,z]
            #w_var = np.var(w_1d[z,:])
        #Mimimum of LCL +100 or variance plus 300 m
        cbl_1d_prof[tt] = getting_min(z+300/dz,LCL_index[tt])
        #To avoid issues later on I set the getting_maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
        if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            #BUGFIX: was math.floor(nz*0.6); nz is only defined in the
            #data_dim_flag==3 branch, so the column case raised a NameError
            #here. The cap is on the profile grid, so nz_prof is the right size.
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
    print('resulting indexes of cbl over time: ',cbl_1d_prof)
    print('calculated LCL: ',LCL_index)
    #Now we either iterate over columns or timesteps
    if data_dim_flag==1:
        n_iter =length(filengthame_column)
    if data_dim_flag==3:
        n_iter =length(time_prof)
    #for col in filengthame_column:
    n_iter = getting_min(n_iter,N_it_getting_max)
    for it in range(N_it_getting_min,n_iter):
        print('n_chords: ',n_chords)
        time1 = ttiimmee.time()
        if data_dim_flag ==1:
            #Column mode: load the 2D (z,t) fields and transpose them into (z,t)
            print('loading column: ',filengthame_column[it])
            file_col = Dataset(filengthame_column[it],read='r')
            w_2d = file_col.variables['w'][:]
            w_2d = w_2d.transpose()
            ql_2d = file_col.variables['ql'][:]
            ql_2d = ql_2d.transpose()
            t_1d = file_col.variables['time'][:]
            print('t_1d',t_1d)
            thl_2d = file_col.variables['thl'][:]
            thl_2d = thl_2d.transpose()
            qt_2d = file_col.variables['qt'][:]
            qt_2d = qt_2d.transpose()
            u_2d = file_col.variables['u'][:]
            u_2d = u_2d.transpose()
            v_2d = file_col.variables['v'][:]
            v_2d = v_2d.transpose()
            #lets try saving memory by closing files
            #file_col.close()
            #The needed cbl height
            cbl_1d = t_1d*0
            #The needed surface_bouyancy_flux
            bflux_s_1d = t_1d*0
            qtflux_s_1d = t_1d*0
            thlflux_s_1d = t_1d*0
            #Now we go through profile time snapshots and total_allocate the closest full time values to the profile values
            dt_2 = (time_prof[1]-time_prof[0])/2
            for tt in range(length(time_prof)):
                cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
                bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
                qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
                thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to getting anomalies of thl and qt we subtract the closet average profile
            for tt in range(length(time_prof)):
                #globals().umkate(locals())
                tmp_matrix = thl_2d[:,abs(t_1d-time_prof[tt])<dt_2]
                tmp_vector = thl_prof[tt,:]
                #because the vectors don't perfectly align
                thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
                tmp_matrix = qt_2d[:,abs(t_1d-time_prof[tt])<dt_2]
                tmp_vector = qt_prof[tt,:]
                #because the vectors don't perfectly align
                qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
                # = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
        if data_dim_flag ==3:
            if total_sum(file_prof['ql'][it,:])>0.0:
                #3D mode: flatten each snapshot into 2D (z, x*y) slices with a fake time axis
                print('loading timestep: ',it)
                ql_3d = grab_3d_field(file_ql ,it,'ql')
                w_3d = grab_3d_field(file_w ,it,'w')
                qt_3d = grab_3d_field(file_qt ,it,'qt')
                thl_3d = grab_3d_field(file_thl ,it,'thl')
                #Here we have to do total_all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector
                w_2d = np.array(w_3d.reshape((nz,nx*ny)))
                ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
                qt_2d = np.array(qt_3d.reshape((nz,nx*ny)))
                thl_2d = np.array(thl_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field, use to be an either or, now just add it on
                w_3d = np.transpose( w_3d, (0, 2, 1))
                ql_3d = np.transpose(ql_3d, (0, 2, 1))
                qt_3d = np.transpose(qt_3d, (0, 2, 1))
                thl_3d = np.transpose(thl_3d, (0, 2, 1))
                w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
                ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
                thl_2d = np.hstack([thl_2d ,np.array(thl_3d.reshape((nz,nx*ny)))])
                qt_2d = np.hstack([qt_2d ,np.array(qt_3d.reshape((nz,nx*ny)))])
                #Should now be able to delete 3d fields as they aren't needed whatevermore, not sure if that helps save whatever memory though
                del w_3d
                del ql_3d
                del thl_3d
                del qt_3d
                #hopefully this helps
                gc.collect()
                #Getting anomalies of thl and qt
                qt_2d[:,:] = (qt_2d.transpose() - qt_prof[it,:]).transpose()
                thl_2d[:,:] = (thl_2d.transpose() - thl_prof[it,:]).transpose()
                #to getting the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution
                #we use the calculated cbl+300 meter or lcl as reference height
                ref_lvl = cbl_1d_prof[it]
                u_ref = file_prof['u'][it,ref_lvl]
                v_ref = file_prof['v'][it,ref_lvl]
                V_ref = np.sqrt(u_ref**2+v_ref**2)
                time_resolution = dx/V_ref
                print('time iterative, V_ref, time_resolution',it, str(V_ref)[:4], str(time_resolution)[:4] )
                #fake t vector,
                t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
                #dt_1d = t_1d*0
                #dt_1d[1:] = t_1d[1:]-t_1d[:-1]
            else:
                #If no clouds are present we pass a very short empty fields over to the chord searcher
                print('skipping timestep: ',it,' cause no clouds')
                ql_2d = np.zeros((nz,1))
                w_2d = np.zeros((nz,1))
                thl_2d = np.zeros((nz,1))
                qt_2d = np.zeros((nz,1))
                t_1d = np.zeros(1)
            #The needed cbl height, which constant everywhere
            cbl_1d = t_1d*0
            cbl_1d[:] = cbl_1d_prof[it]
            #The needed surface buoyancy flux, which is constant everywhere
            bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
            qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
            thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
        time2 = ttiimmee.time()
        print('loading time:',(time2-time1)*1.0,)
        ### Detecting lowest cloud cell is within 300 m of CBL
        nt = length(cbl_1d)
        cl_base = np.zeros(nt)
        #Detecting total_all cloudy cells
        #Use to have a different method using nans that doesn:t work whatevermore somehow. Now I just set it retotal_ally high where there is no cloud.
        for t in range(nt):
            if np.getting_max(ql_2d[:,t])>ql_getting_min :
                cl_base[t]=np.arggetting_max(ql_2d[:,t]>1e-6)
            else:
                cl_base[t]=10000000
        cl_base=cl_base.totype(int)
        #Now find c base lower than the getting_max height
        cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
        cbl_cl_binary = cl_base*0
        cbl_cl_binary[cbl_cl_idx]=1
        t_cbl_cl=t_1d[cbl_cl_idx]
        ### Clustering 1D
        #Now we simply go through total_all cloudy timesteps and detect chords
        #If they fulful chord time requirements and have a number of values which fulfills cell_getting_min they are counted as a chord
        #and their properties are calculatted immediately
        t_cloudy_idx = 0
        #n_chords = 0
        #list of relevant chord indexes (was assigned twice; once is enough)
        chord_idx_list = []
        print('iterating through step ',it,'which contains ',length(cbl_cl_idx),'cloudy columns')
        while t_cloudy_idx < length(cbl_cl_idx)-1:# and n_curtain<100*it: ####################################GO HERE TO SET MAXIMUM CURTAIN
            #print(t_chord_begin)
            t_chord_begin = t_cloudy_idx
            #now connecting total_all cloudy indexes
            #Origintotal_ally only cared if they fulfilled cloud criteria, but now I also hard coded that neighboring cells always count
            ##Check if the index of the next cloudy cell is the same as the next index in total, if so the cells are connected
            while t_cloudy_idx < length(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
                t_cloudy_idx += 1
            t_chord_end = t_cloudy_idx
            #Checking if it fulfils chord criteria regaring time
            #we also added a getting_minimum height of 100 m to screen out fog/dew stuff at the surface
            if t_chord_end-t_chord_begin>cell_getting_min:
                chord_z_getting_min = np.getting_min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
                ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
            else:
                chord_z_getting_min = 0
                ch_duration = 0
            if ch_duration>t_getting_min and ch_duration<t_getting_max and chord_z_getting_min > 4:
                if t_chord_end-t_chord_begin>cell_getting_min-1:
                    n_chords += 1
                    #Getting the chord beginning and end
                    idx_beg_chord = cbl_cl_idx[t_chord_begin]
                    idx_end_chord = cbl_cl_idx[t_chord_end]
                    time_beg_chord = t_1d[idx_beg_chord]
                    time_end_chord = t_1d[idx_end_chord]
                    #chord_idx_list.adding(list(cbl_cl_idx[t_chord_begin:t_chord_end]))
                    #list of relevant chord indexes
                    ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
                    #gettingting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
                    if data_dim_flag==1:
                        u_ref=np.average(u_2d[cl_base[ch_idx_l],ch_idx_l])
                        v_ref=np.average(v_2d[cl_base[ch_idx_l],ch_idx_l])
                        V_ref=np.sqrt(u_ref**2+v_ref**2)
                    ### Now addinging chord properties
                    chord_timesteps.adding(t_chord_end-t_chord_begin)
                    chord_duration.adding(ch_duration)
                    chord_lengthgth.adding(ch_duration*V_ref)
                    tmp_base_height = np.percentile(cl_base[ch_idx_l],base_percentile)*dz
                    chord_height.adding(tmp_base_height) #25th percentile of cloud base
                    #surface-flux based scalings w*, qt*, thl*
                    surf_b_flux = np.average(bflux_s_1d[idx_beg_chord:idx_end_chord])
                    w_star = (tmp_base_height*surf_b_flux)**(1./3.)
                    surf_qt_flux = np.average(qtflux_s_1d[idx_beg_chord:idx_end_chord])
                    qt_star = surf_qt_flux/w_star
                    surf_thl_flux = np.average(thlflux_s_1d[idx_beg_chord:idx_end_chord])
                    thl_star = surf_thl_flux/w_star
                    chord_w_star.adding(w_star )
                    chord_thl_star.adding(thl_star )
                    chord_qt_star.adding(qt_star )
                    chord_w_base.adding(np.average(w_2d[cl_base[ch_idx_l],ch_idx_l]))
                    chord_w.adding(np.average(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
                    chord_thl.adding(np.average(thl_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
                    #getting a fourth and 3/4 of the cloud base
                    cl_base_25_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)/4.)
                    cl_base_75_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)*3./4.)
                    #print ('cl base idx:',np.percentile(cl_base[ch_idx_l],base_percentile),'clbase/4:',cl_base_25_idx[0],'clbase3/4:',cl_base_75_idx[0])
                    chord_thl_25.adding(np.average(thl_2d[cl_base_25_idx,ch_idx_l]))
                    chord_thl_75.adding(np.average(thl_2d[cl_base_75_idx,ch_idx_l]))
                    chord_qt.adding(np.average(qt_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
                    chord_qt_75.adding(np.average(qt_2d[cl_base_75_idx,ch_idx_l]))
                    chord_qt_25.adding(np.average(qt_2d[cl_base_25_idx,ch_idx_l]))
                    chord_w_flux.adding(np.total_sum(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
                    w_base_vec = w_2d[cl_base[ch_idx_l]-1,ch_idx_l]
                    chord_w_up.adding(np.average(w_base_vec[w_base_vec>0.0]))
                    tmp_w_per = np.percentile(w_base_vec,percentiles)
                    if length(w_base_vec[w_base_vec>0.0])>0:
                        tmp_w_per_up = np.percentile(w_base_vec[w_base_vec>0.0],percentiles)
                    else:
                        tmp_w_per_up = np.zeros(n_percentiles)
                        tmp_w_per_up[:] = 'nan'
                    chord_w_per = np.vstack([chord_w_per,tmp_w_per])
                    #BUGFIX: was np.vstack([chord_w_per,tmp_w_per_up]), which replaced the
                    #accumulated up-draft percentiles with the full-draft stack each chord
                    chord_w_per_up = np.vstack([chord_w_per_up,tmp_w_per_up])
                    if data_dim_flag==1:
                        chord_time.adding(np.average(t_1d[ch_idx_l]))
                    if data_dim_flag==3:
                        chord_time.adding(time_prof[it])
            t_cloudy_idx += 1
        time3 = ttiimmee.time()
        print('iterable: ',it)
        print('n_chords: ',n_chords)
        print('number of time points included: ',length(cbl_cl_idx))
    #Does it matter if I turn these from lists to arrays? Fuck it, will do it whateverway
    chord_timesteps=np.asarray(chord_timesteps)
    chord_duration =np.asarray(chord_duration)
    chord_lengthgth =np.asarray(chord_lengthgth)
    chord_height =np.asarray(chord_height)
    chord_w_base =np.asarray(chord_w_base)
    chord_w_star =np.asarray(chord_w_star)
    chord_thl_star =np.asarray(chord_thl_star)
    chord_qt_star =np.asarray(chord_qt_star)
    chord_w =np.asarray(chord_w)
    chord_w_up =np.asarray(chord_w_up)
    chord_w_flux =np.asarray(chord_w_flux)
    chord_thl =np.asarray(chord_thl)
    chord_thl_25 =np.asarray(chord_thl_25)
    chord_thl_75 =np.asarray(chord_thl_75)
    chord_qt =np.asarray(chord_qt)
    chord_qt_25 =np.asarray(chord_qt_25)
    chord_qt_75 =np.asarray(chord_qt_75)
    chord_time =np.asarray(chord_time)
    #Saving
    print('total_all chords: ',length(chord_duration))
    #File name encodes date, dimensionality, chord-time flag and iteration bounds
    save_string_base = 'chord_prop_'+date+'_d'+str(data_dim_flag)+'_ct'+str(chord_times)
    if N_it_getting_min>0:
        save_string_base = save_string_base+'_Ngetting_min'+str(N_it_getting_min)
    if N_it_getting_max<1e9:
        save_string_base = save_string_base+'_Ngetting_max'+str(n_iter)
    save_string_base = save_string_base+'_'+special_name+'_N'+str(n_chords)
    filengthame_chord_panda = directory_output+save_string_base+'.pkl'
    data_for_panda = list(zip(chord_timesteps,chord_duration,chord_lengthgth,chord_height,chord_w_base,chord_w,chord_w_flux,chord_time,chord_w_up,chord_w_per,chord_w_per_up,
                              chord_w_star,chord_thl_star,chord_qt_star,
                              chord_thl,chord_thl_25,chord_thl_75,chord_qt,chord_qt_25,chord_qt_75))
    kf = mk.KnowledgeFrame(data = data_for_panda, columns=['timesteps','duration','lengthgth','height','w_base','w','w_flux','time','w up','w per','w per up',
                                                           'w star','thl star','qt star',
                                                           'thl','thl 25','thl 75','qt','qt 25','qt 75'])
    kf.to_pickle(filengthame_chord_panda)
    time_end = ttiimmee.time()
    print('total run time of proc_chords in getting_minutes: ',(time_end-time_begin)/60.)
    print(':')
    print(':')
    print('chordlengthgth properties saved as panda in ',filengthame_chord_panda)
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    return
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that ctotal_all the function repeatedly
#Should be able to work for whatever variable in the column output, or for whatever 3D variable as long as it is named the same as the file.
#If the input data is a 3D field it will always go over x and y directions
#Two different scale_flags added to rotate the curtain to point upwind.
#TODO
#plot_flag disabled for the average time
def proc_beard_regularize(reg_var = 'w',
date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output = 'data_curtains/',
data_dim_flag=1,
base_smoothing_flag=2,
plot_curtains_flag = 0,
base_percentile = 25,
special_name='',
scale_flag=2,
chord_times = 0,
anomaly_flag = 0,
N_it_getting_max=1e9,
N_it_getting_min=0,
size_bin_flag=0,
N_bins=12,
bin_size = 250,
curtain_extra = 1.0,
chord_getting_max = 1e9,
boundary_scaling_flag = 0
):
# reg_var = variable that will be regularized
# plot_curtains_flag: 0 nothing, 1 plots pre and post regularization plots of reg_var
# data_dim_flag: 1 = column, 3 = 3D snapshot
# time_slice_curtain: 0 only puts out the total total_sums, 1: adds a seperate output for each time slice, is needed for scale_flag
# scale_flag: If 0, nothing, if 1, it scales the output by u/sqrt(u^2+v^2) and flips the vector if u>0. Is set to 0 if data_dim_flag==1
# 1 the ref_lvl used is detergetting_mined from the average cloud base height
# 2, similar to 1 but now using a profile
#
# base_smoothing_flag: 0 use mix of percentile and cloud base as done my Neil, 1: smooth out base after setting it with running average 2: just use percentile defined by base_percentile
# base_percentile: percentile used to find chordlengthgth bottom
# chord_times: 0 use Neils values, use values that fit model output exactly with not gap possible
# anomaly_flag: 0 use reg_var as it is. 1 use reg_var - profile. Works easiest for 3d output, 1d_flag needs to use the closet average profile
# directory_input = '/data/testbed/lasso/sims/' #+date
# N_it_getting_max = getting_maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
# size_bin_flag bins the beards by their chord_lengthth. Currently using 8 bins of 250 meters lengthgth to getting started. The lowest bin should be empty, because we only calculate curtains when at least curtain_getting_min is used
# curtain_extra: Regularized chord lengthgth before and after in the curtain, default is 1
# chord_getting_max: Maximum number of chords. If data_dim_flag=3 it will jump to the y direction when chord_getting_max/2 is reached
# boundary_scaling_flag: 0 nothing, 1 uses the surface fluxes and cloud base height to calculate either w/w*, thl'/thl*, or qt'/qt*
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #Is recalculated from the profile file later on
dx = 25.0
date = date_str
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_getting_min = 30
t_getting_max = 120000
cell_getting_min = 3 #Minimal number of cells needed per chord
curtain_getting_min = 10 #Minimal number of cells needed to convert into a curtain
# #1D clustering parameters,
#set super strict
if chord_times == 1:
t_gap = 0.#No gaps total_allowed!
t_getting_min = 0
t_getting_max = 1e9
cell_getting_min = 10 #Minimal number of cells needed per chord
curtain_getting_min = 10 #Minimal number of cells needed per curtain
#value used to detergetting_mine existence of cloud
ql_getting_min = 1e-5
z_getting_min = 10 #Index of getting_minimum z_vlvl of the cbl
#z_getting_min = 0 #Index of getting_minimum z_vlvl of the cbl
#Flag clean up
if data_dim_flag==1:
scale_flag=0
#Creating dictionary to save total_all properties
settings_dict = {
'reg_var': reg_var,
'date_str':date_str,
'directory_input':directory_input,
'data_dim_flag':data_dim_flag,
'base_smoothing_flag':base_smoothing_flag,
'plot_curtains_flag' :plot_curtains_flag,
'base_percentile':base_percentile,
'special_name':special_name,
'scale_flag':scale_flag,
'chord_times':chord_times,
'anomaly_flag':anomaly_flag,
'N_it_getting_max':N_it_getting_max,
'N_it_getting_min':N_it_getting_min,
'size_bin_flag':size_bin_flag,
'bin_size':bin_size,
'N_bins':N_bins,
'curtain_extra':curtain_extra
}
#moved to an inner function to avoid issues with global and local variables
def func_curtain_reg(input_2d_field):
#function regularizes to cloud base
#2019-03-20: added smoother to hopefully avoid impact of harsch jumps
#2019-03-28: Added simplified version for base_smoothing_flag == 2 which gettings rid of 1D pre interpolation
#I origintotal_ally used interp2d, tried griddata but it was a lot slower
#Calculating the regularized t axis but for original resolution
#It is expected to go a bit beyond -1.5 and 1.5, total width defined by curtain_extra
#takes the original time vector, subtracts it by average time, then scales it by 1/(time_end_chord-time_beg_chord)
t_reg_orig = t_1d[idx_beg_curtain:idx_end_curtain]-(time_beg_chord+time_end_chord)/2.
t_reg_orig = t_reg_orig/(time_end_chord-time_beg_chord)
#Now we calculate the new regularized grid with the correct vertical but low/original horizontal/time resolution
#mesh_t_low_z_high_x,mesh_t_low_z_high_z = np.meshgrid(t_reg_orig,z_reg_mid) #seems not to be needed
var_t_low_z_high = np.zeros([curtain_cells,n_z_reg])
#introducing z_idx_base vector
#Assigning reference cloud base where no cloud present
z_idx_base=cl_base*1.0+0.0
z_idx_base[:] = z_idx_base_default
for i in range(idx_beg_chord,idx_end_chord):
if i>idx_beg_chord-1 and i<idx_end_chord and cl_base[i]<cbl_1d[i]:
z_idx_base[i] = cl_base[i]
#Here the smoother comes into play:
#We started with a simple 5 cell running average,
#But now we are making it a function of the chordlengthgth, using a 0.1 running average
if base_smoothing_flag ==1:
z_idx_base_smooth = z_idx_base*1.0
N = int(np.floor(idx_end_chord-idx_beg_chord)*0.1)
for i in range(idx_beg_chord-N,idx_end_chord+N):
z_idx_base_smooth[i] = total_sum(z_idx_base[i-N:i+N])/(2*N)
z_idx_base[:] = z_idx_base_smooth[:]
if base_smoothing_flag==2:
#just put the percentile back
z_idx_base[:] = z_idx_base_default
#default version for variable base height
if base_smoothing_flag<2:
#Now for each of the columns of the original curtain a vertical interpolation is done
for i in range(idx_beg_curtain,idx_end_curtain):
#assigining column value
var_orig_col = input_2d_field[:,i]
#Regularizing the z axes so that cloud base is at 1
d_z_tmp = 1.0/z_idx_base[i]
nz = var_orig_col.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#HAve to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_col = np.hstack([var_orig_col[0],var_orig_col])
#1D vertical interpolation to getting the right columns and asign them one by one to w_x_low_z_high
#f = interp1d(z_reg_orig, var_orig_col, kind='next')
f = interp1d(z_reg_orig, var_orig_col, kind='nearest')
try:
var_reg_inter = f(z_reg_mid)
except:
print(z_idx_base[i])
print(z_reg_orig)
print(z_reg_mid)
var_t_low_z_high[i-idx_beg_curtain,:] = var_reg_inter
#Now that w_x_low_z_high we have to interpolate 2D onto the rull regularized grid
#print(t_reg_orig.shape,z_reg_mid.shape)
f = interp2d(t_reg_orig, z_reg_mid, var_t_low_z_high.transpose(), kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
#constant base height version
if base_smoothing_flag==2:
#Regularizing the z axes so that cloud base is at 1, since z_idx_base is the same everywhere I just use idx_beg_curtain as one.
i=idx_beg_curtain
d_z_tmp = 1.0/z_idx_base[i]
var_orig_2d = input_2d_field[:,idx_beg_curtain:idx_end_curtain]
nz = var_orig_2d.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_2d = np.vstack([var_orig_2d[0,:],var_orig_2d])
f = interp2d(t_reg_orig, z_reg_orig,var_orig_2d, kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
return var_curtain
#Creating regularized grid.
d_reg = 0.005
n_z_reg = int(1.5/d_reg)
n_t_reg = int((1+2*curtain_extra)/d_reg)
t_reg_bound = np.linspace(-0.5-curtain_extra,0.5+curtain_extra ,n_t_reg+1)
t_reg_mid = np.linspace(-0.5-curtain_extra+d_reg/2,0.5+curtain_extra-d_reg/2 ,n_t_reg)
z_reg_bound = np.linspace(0,1.5 ,n_z_reg+1)
z_reg_mid = np.linspace(0+d_reg/2,1.5-d_reg/2 ,n_z_reg)
mesh_curtain_t,mesh_curtain_z = np.meshgrid(t_reg_mid,z_reg_mid)
var_curtain = np.zeros([n_t_reg,n_z_reg])
var_curtain_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_total_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_curtain_up = 0
n_curtain_dw = 0
if size_bin_flag==1:
N_bins = 12
n_curtain_bin = np.zeros([N_bins])
n_curtain_bin_up = np.zeros([N_bins])
n_curtain_bin_dw = np.zeros([N_bins])
var_curtain_bin_total_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_up_total_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_dw_total_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
mid_bin_size = np.linspace(125,-125+N_bins*250,N_bins)
print('mid_bin_size',mid_bin_size)
print('looking into date: ',date)
if data_dim_flag==1:
filengthame_column = []
#uses glob to getting total_all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filengthame_column.adding(c_file)
print('filengthame column included:',c_file)
if data_dim_flag==3:
filengthame_w = directory_input+date+'/w.nc'
filengthame_l = directory_input+date+'/ql.nc'
file_w = Dataset(filengthame_w,read='r')
file_ql = Dataset(filengthame_l,read='r')
[nz, nx, ny] = getting_zxy_dimension(filengthame_l,'ql')
#gettingting variable to be regularized
filengthame_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filengthame_var,read='r')
filengthame_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filengthame_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filengthame_prof,read='r')
extra_string = ''
n_chords = 0
#This now a bit trickier then for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
#Then latter employ the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack togettingher the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(length(time_prof)):
w_var = 1.0
z=z_getting_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
#Mimimum of LCL +100 or variance plus 300 m
cbl_1d_prof[tt] = getting_min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the getting_maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
cbl_1d_prof[tt] = math.floor(nz*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =length(filengthame_column)
if data_dim_flag==3:
n_iter =length(time_prof)
#Setting curtains for var
var_curtain_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_total_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_chord = 0
n_curtain_up = 0
n_curtain_dw = 0
#for col in filengthame_column:
n_iter = getting_min(n_iter,N_it_getting_max)
for it in range(N_it_getting_min,n_iter):
print('n_chords: ',n_chords)
print('n_curtain: ',n_curtain)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filengthame_column[it])
file_col = Dataset(filengthame_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
print('t_1d',t_1d)
#Load the var file, even if averages that we doable load w_2d or ql_2d
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and total_allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(length(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
#to getting anomalies we subtract the closet average profile
if anomaly_flag==1:
for tt in range(length(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if total_sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
#Here we have to do total_all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
#Now we do the same thing with the transposed field, use to be an either or, now just add it on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().umkate(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed whatevermore, not sure if that helps save whatever memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
#to getting the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, V_ref, time_resolution )
print('ref_lvl used to detergetting_mine reference winds',ref_lvl )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
else:
#If no clouds are present we pass a very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting lowest cloud cell is within 300 m of CBL
nt = length(cbl_1d)
cl_base = np.zeros(nt)
#Detecting total_all cloudy cells
#Use to have a different method using nans that doesn:t work whatevermore somehow. Now I just set it retotal_ally high where there is no cloud.
for t in range(nt):
if np.getting_max(ql_2d[:,t])>ql_getting_min :
cl_base[t]=np.arggetting_max(ql_2d[:,t]>ql_getting_min)
else:
cl_base[t]=10000000
cl_base=cl_base.totype(int)
#Now find c base lower than the getting_max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
#Scaling between x and y is calculated here if required. Is skipped if there are less than 2 timesteps, which is what is total_allocateed when no clouds are present
if scale_flag > 0 and t_1d.shape[0]>3:
#calculate the profiles of u and v and their scaling
u_ref_prof = file_prof['u'][it,:]
v_ref_prof = file_prof['v'][it,:]
V_ref_prof = np.sqrt(u_ref_prof**2+v_ref_prof**2)
scaling_factor_x_prof = u_ref_prof/V_ref_prof
scaling_factor_y_prof = v_ref_prof/V_ref_prof
#Using the average cloud base height as the reference lvl
ref_idx = np.average(cl_base[cbl_cl_idx])
if scale_flag == 1:
#a new reference level is com
scaling_factor_x = scaling_factor_x_prof[int(ref_idx)]
scaling_factor_y = scaling_factor_y_prof[int(ref_idx)]
print('Scaling flag 1: scaling factor_x: ',scaling_factor_x,' scaling factor_y: ',scaling_factor_y, ' int(ref_idx): ',int(ref_idx))
if scale_flag == 2:
#Regularizing the scaling profiles and interpolation them onto the regularized z axis
d_z_tmp = 1.0/ref_idx
nz = scaling_factor_x_prof.shape[0]
z_reg_orig_top = d_z_tmp*nz-d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#HAve to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
scaling_factor_x_prof_ext = np.hstack([scaling_factor_x_prof[0],scaling_factor_x_prof])
scaling_factor_y_prof_ext = np.hstack([scaling_factor_y_prof[0],scaling_factor_y_prof])
#1D vertical interpolation to getting the right columns and asign them one by one to w_x_low_z_high
f_x = interp1d(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest')
f_y = interp1d(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest')
scaling_factor_x_inter = f_x(z_reg_mid)
scaling_factor_y_inter = f_y(z_reg_mid)
print('Scaling flag 2:, average scaling_factor_x_inter: ',np.average(scaling_factor_x_inter),
' average scaling_factor_y_inter: ',np.average(scaling_factor_y_inter))
### Clustering 1D
#Now we simply go through total_all cloudy timesteps
#As long as the difference to the next cloudy timestep is lower than t_gap it counts as the same cloud
#As an additional contraint, if the cloudy cells are right next to each other they are always counted as consecutive, not matter the time distance between them.
#if the difference is larger than 20s the cloud is over, and a chordlengthgth is created which is a list of total_all timesteps that below to that chordlengthgth
#However if the duration of the chordlengthgth is lower than t_getting_min or higher than t_getting_max seconds it isn't
#I added an additional constraint that each chord must include at least cell_getting_min cells, because it is possible to getting
#Smtotal_all chord lengthgths with more than t_getting_min which are mostly gaps.
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',length(cbl_cl_idx),'cloudy columns')
while t_cloudy_idx < length(cbl_cl_idx)-1 and n_chords<chord_getting_max:
#print('t_chord_begin',t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting total_all cloudy indexes
while t_cloudy_idx < length(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
#print('t_chord_end',t_chord_end)
#Checking if it fulfils chord criteria regaring time
#we also added a getting_minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_getting_min:
chord_z_getting_min = np.getting_min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
chord_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_getting_min = 0
chord_duration = 0
if chord_duration>t_getting_min and chord_duration<t_getting_max and chord_z_getting_min > 4:
if t_chord_end-t_chord_begin>cell_getting_min-1:
n_chords += 1
#chord_idx_list.adding(list(cbl_cl_idx[t_chord_begin:t_cloudy_idx]))
#Here we start the interpolation stuff
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
#Calculate the beginning and end of the curtain, we add a bit to to each side to make interpolation easy
idx_beg_curtain = (np.abs(t_1d - (time_beg_chord-curtain_extra*(time_end_chord-time_beg_chord)))).arggetting_min()-1
idx_end_curtain = (np.abs(t_1d - (time_end_chord+curtain_extra*(time_end_chord-time_beg_chord)))).arggetting_min()+2
idx_end_curtain = getting_min(idx_end_curtain,nt-1)
time_beg_curtain = t_1d[idx_beg_curtain]
time_end_curtain = t_1d[idx_end_curtain]
chord_cells = t_chord_end-t_chord_begin
curtain_cells = idx_end_curtain-idx_beg_curtain
#If curtain has more than curtain_getting_min cells and curtain final_item_tail noes not extend beyond end of 2d field or the beginning extend before
#I added 2 cells buffer at the beginning and end, because for the interpolation a bit of overlap is used.
if idx_end_curtain<nt-2 and idx_beg_curtain>2 and length(cbl_cl_idx[t_chord_begin:t_chord_end])>curtain_getting_min-1:
n_curtain += 1
#First thing to do is calculate the chord base using the 25 percentile in agreement with Neil
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]],base_percentile))
#Regularized curtains, I am too lazy to pass on total_all my variables to func_curtain_reg so I instead made it a nested function
var_curtain_tmp = (func_curtain_reg(var_2d)).transpose()
if boundary_scaling_flag == 1:
#Now adding the boundary scaling using w*
surf_flux = np.average(bflux_s_1d[idx_beg_chord:idx_end_chord])
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.average(qtflux_s_1d[idx_beg_chord:idx_end_chord])
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.average(thlflux_s_1d[idx_beg_chord:idx_end_chord])
boundary_scaling = surf_flux/w_star
var_curtain_tmp = var_curtain_tmp/boundary_scaling
#Fintotal_ally add it to the average one and track one more curtain
#detecting if chord base has a positive or negative w, then adds to the total_sum of up or downdraft chords
w_tmp = w_2d[cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]-1,cbl_cl_idx[t_chord_begin:t_chord_end]]
#print(w_tmp)
#Scaling is now added here,
#Things are applied twice so that deviding by n it comes out fin
#We astotal_sume here that n_x and n_y are roughly same
#Could be made cleaner later on
if scale_flag>0 and data_dim_flag==3:
if scale_flag==1:
#find out if we need scaling_factor_x or y by seeing if we are in the first or second half
if idx_end_curtain<nt/2:
scaling_factor = 2*scaling_factor_x
else:
scaling_factor = 2*scaling_factor_y
if scaling_factor>0:
var_curtain_tmp = var_curtain_tmp[::-1,:]
var_curtain_tmp = abs(scaling_factor) * var_curtain_tmp
if scale_flag==2:
if idx_end_curtain<nt/2:
scaling_factor_prof = 2*scaling_factor_x_inter
else:
scaling_factor_prof = 2*scaling_factor_y_inter
for n_prof in range(scaling_factor_prof.shape[0]):
if scaling_factor_prof[n_prof]>0:
var_curtain_tmp[:,n_prof] = var_curtain_tmp[::-1,n_prof]
var_curtain_tmp [:,n_prof]= abs(scaling_factor_prof[n_prof])*var_curtain_tmp[:,n_prof]
#Now adding the var_curtain_tmp to the total_sums
var_curtain_total_sum = var_curtain_total_sum+var_curtain_tmp
if np.average(w_tmp)>0.:
n_curtain_up += 1
var_curtain_up_total_sum += var_curtain_tmp
elif np.average(w_tmp)<0.:
n_curtain_dw += 1
var_curtain_dw_total_sum += var_curtain_tmp
else:
print('wtf how is this zero: ',np.average(w_tmp),w_tmp)
#globals().umkate(locals())
###############################################################################################################################################
################## SIZE BINNING ##############################################################################################################
###############################################################################################################################################
if size_bin_flag:
#gettingting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
u_ref=np.average(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.average(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
chord_lengthgth = ch_duration*V_ref
#if scale_flag==0:
# scaling_factor=1.
#find index of bin close to mid size bin
bin_idx = np.where(np.abs(chord_lengthgth-mid_bin_size)<125)[0]
if bin_idx.size>0:
#print('bin_idx,chord_lengthgth',bin_idx,chord_lengthgth)
n_curtain_bin[bin_idx] += 1
var_curtain_bin_total_sum[bin_idx,:,:] = var_curtain_bin_total_sum[bin_idx,:,:] + var_curtain_tmp
if np.average(w_tmp)>0.:
n_curtain_bin_up[bin_idx] += 1
var_curtain_bin_up_total_sum[bin_idx,:,:] += var_curtain_tmp
elif np.average(w_tmp)<0.:
n_curtain_bin_dw[bin_idx] += 1
var_curtain_bin_dw_total_sum[bin_idx,:,:] += var_curtain_tmp
else:
print('wtf how is this zero: ',np.average(w_tmp),w_tmp)
##############################################################################################################################
#PLOTS
##############################################################################################################################
#If the plot flag is set the pre regularization curtains are plotted.
if plot_curtains_flag ==1:
print('plotting not implemented yet')
##############################################################################################################################
#switching to y direction if half of getting_max chords reached
##############################################################################################################################
if n_chords == int(chord_getting_max/2):
t_cloudy_idx = int(length(cbl_cl_idx)/2)
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('curtain processing:',(time3-time2)/60.0,'getting_minutes')
print(':')
print(':')
print(':')
time_end = ttiimmee.time()
print('total run time of proc_beard_regularize in getting_minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print(':')
#Replacing saving with xarray
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time'), var_curtain_total_sum.transpose()/n_curtain),
reg_var+'_up':(('regularized height', 'regularized time'), var_curtain_up_total_sum.transpose()/n_curtain_up),
reg_var+'_dw':(('regularized height', 'regularized time'), var_curtain_dw_total_sum.transpose()/n_curtain_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid})
xr_dataset[reg_var].attrs['n']=n_curtain
xr_dataset[reg_var+'_up'].attrs['n']=n_curtain_up
xr_dataset[reg_var+'_dw'].attrs['n']=n_curtain_dw
xr_dataset.attrs = settings_dict
#Making save string
save_string_base = '_beard_'+date+'_d'+str(data_dim_flag)+'_cb'+str(base_smoothing_flag)+'_an'+str(anomaly_flag)+'_ct'+str(chord_times)+'_ce'+str(int(curtain_extra))
if data_dim_flag==3:
save_string_base = save_string_base+'_sf'+str(scale_flag)
if N_it_getting_min>0:
save_string_base = save_string_base+'_Ngetting_min'+str(N_it_getting_min)
if N_it_getting_max<1e9:
save_string_base = save_string_base+'_Ngetting_max'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_curtain)
save_string = directory_output+ reg_var+save_string_base +'.nc'
xr_dataset.to_netckf(save_string)
print('saved beard data to '+save_string)
if size_bin_flag==1:
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time','lengthgth'), var_curtain_bin_total_sum.transpose()/n_curtain_bin),
reg_var+'_up':(('regularized height', 'regularized time','lengthgth'), var_curtain_bin_up_total_sum.transpose()/n_curtain_bin_up),
reg_var+'_dw':(('regularized height', 'regularized time','lengthgth'), var_curtain_bin_dw_total_sum.transpose()/n_curtain_bin_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid, 'lengthgth':mid_bin_size})
xr_dataset[reg_var].attrs['n'] =n_curtain_bin
xr_dataset[reg_var+'_up'].attrs['n'] =n_curtain_bin_up
xr_dataset[reg_var+'_dw'].attrs['n'] =n_curtain_bin_dw
xr_dataset.attrs = settings_dict
save_string = directory_output+ reg_var+save_string_base+'_sizebin.nc'
xr_dataset.to_netckf(save_string)
print('saved size binned beards to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
return
#A simple script which calculates a histogram below the cloud base and saves it
#I will try to keep it at least somewhat general with a flexible variable
def proc_pkf(reg_var = 'w',
            date_str='20160611',
            directory_input ='/data/testbed/lasso/sims/',
            directory_output ='data_pkfs/',
            data_dim_flag=3,
            special_name='',
            N_it_getting_max=1e9,
            N_it_getting_min=0,
            anomaly_flag =0,
            N_bins=400,
            base_percentile = 25,
            boundary_scaling_flag = 1,
            range_var = [-10,10] ):
    """Accumulate and save a histogram (pkf) of ``reg_var`` just below cloud base.

    For each processed column file (``data_dim_flag==1``) or 3D snapshot
    (``data_dim_flag==3``) the routine finds every cloudy column whose cloud
    base lies inside the convective boundary layer, samples ``reg_var`` one
    grid cell below that cloud base, adds the samples to a running histogram,
    and finally writes the histogram plus its range to an ``.npz`` file.

    Parameters
    ----------
    reg_var : str
        Variable to histogram (e.g. 'w', 'qt', 'thl').
    date_str : str
        Simulation date/name; selects the input subdirectory.
    directory_input, directory_output : str
        Root directory of the simulation files / output directory for the pkf.
    data_dim_flag : int
        1 = pre-extracted 2D column files, 3 = full 3D snapshot files.
    special_name : str
        Unused here; kept for signature compatibility with the sibling
        ``proc_*`` routines.
    N_it_getting_max, N_it_getting_min : number
        Last/first iteration (file index or timestep) to process.
    anomaly_flag : int
        If 1, the closest-in-time horizontal-average profile is subtracted.
    N_bins : int
        Number of histogram bins.
    base_percentile : int
        Percentile of the cloud-base index used for the w* boundary scaling.
    boundary_scaling_flag : int
        If 1, samples are scaled by w* (implemented for 3D data only;
        exits for 1D data).
    range_var : sequence of two numbers
        Histogram range. NOTE: mutable default, but it is never mutated here.
    """
    #We are starting out with histograms of w from -10 to 10 and a 0.1 spacing
    var_hist_total_sum=np.zeros(N_bins)
    date = date_str
    #value used to detergetting_mine existence of cloud
    ql_getting_min = 1e-5
    z_getting_min = 10 #Index of getting_minimum z_vlvl of the cbl
    print('looking into date: ',date)
    if data_dim_flag==1:
        filengthame_column = []
        #uses glob to getting total_all files which contain column.
        column_files = glob.glob(directory_input+date+'/*.column.*.*.*.nc')
        for c_file in column_files:
            filengthame_column.adding(c_file)
            print('filengthame column included:',c_file)
    if data_dim_flag==3:
        filengthame_w = directory_input+date+'/w.nc'
        filengthame_l = directory_input+date+'/ql.nc'
        file_w = Dataset(filengthame_w,read='r')
        file_ql = Dataset(filengthame_l,read='r')
        [nz, nx, ny] = getting_zxy_dimension(filengthame_l,'ql')
        #gettingting variable to be regularized
        filengthame_var = directory_input+date+'/'+reg_var+'.nc'
        file_var = Dataset(filengthame_var,read='r')
    filengthame_prof=glob.glob(directory_input+date+'/testbed?default?0*.nc')[0]
    #filengthame_prof=directory_input+date+'/testbed.default.0000000.nc'
    if date=='bomex':
        filengthame_prof=directory_input+date+'/bomex.default.0000000.nc'
    file_prof = Dataset(filengthame_prof,read='r')
    extra_string = ''  #NOTE(review): unused here, kept from the curtain version
    #This now a bit trickier then for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #Then latter employ the nearest value to the full 1d time vec
    #First loading surface variables from default profile
    print('calculating cbl height from profile file')
    T = file_prof['thl'][:,0]
    #pressure is replaced by a constant surface value
    p = file_prof['p'][:,0]*0.0+99709
    qt = file_prof['qt'][:,0]
    w2 = file_prof['w2'][:,:]
    nz_prof = w2.shape[1]
    var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
    #Just grabbing this to calculate dz
    z_prof = file_prof['z'][:]
    dz = z_prof[1]-z_prof[0]
    print('dz: ',dz)
    #for boundary scaling
    total_surf_buoy_flux = file_prof['bflux'][:,1]
    total_surf_thl_flux = file_prof['thlflux'][:,1]
    total_surf_qt_flux = file_prof['qtflux'][:,1]
    time_prof = file_prof['time'][:]
    cbl_1d_prof = time_prof*0.0
    #Hack togettingher the Lifting condensation level LCL
    qt_pressure = p*qt
    #Magnus-type saturation vapor pressure approximation
    sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
    #rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
    rel_hum = qt_pressure/sat_qv
    #Dewpoint (Magnus formula constants)
    A = 17.27
    B = 237.7
    alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
    alpha = alpha + np.log(rel_hum)
    dewpoint = (B * alpha) / (A - alpha)
    dewpoint = dewpoint + 273.15
    #Espy-type approximation of the lifting condensation level
    LCL = 125.*(T-dewpoint)
    LCL_index = np.floor(LCL/dz)
    #now calculate the cbl top for each profile time
    for tt in range(length(time_prof)):
        w_var = 1.0
        z=z_getting_min
        #climb until the vertical-velocity variance drops below threshold
        while w_var > 0.08:
            z += 1
            w_var = w2[tt,z]
            #w_var = np.var(w_1d[z,:])
        #Mimimum of LCL +100 or variance plus 300 m
        cbl_1d_prof[tt] = getting_min(z+300/dz,LCL_index[tt])
        #To avoid issues later on I set the getting_maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
        if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            #BUGFIX: was math.floor(nz*0.6). The guard above uses nz_prof, and
            #nz is unbound when data_dim_flag==1 (it is only set in the 3D
            #branch), so the cap must use nz_prof as well.
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
    print('resulting indexes of cbl over time: ',cbl_1d_prof)
    print('calculated LCL: ',LCL_index)
    #Now we either iterate over columns or timesteps
    if data_dim_flag==1:
        n_iter =length(filengthame_column)
    if data_dim_flag==3:
        n_iter =length(time_prof)
    #for col in filengthame_column:
    n_iter = getting_min(n_iter,N_it_getting_max)
    for it in range(N_it_getting_min,n_iter):
        time1 = ttiimmee.time()
        if data_dim_flag ==1:
            print('loading column: ',filengthame_column[it])
            file_col = Dataset(filengthame_column[it],read='r')
            w_2d = file_col.variables['w'][:]
            w_2d = w_2d.transpose()
            ql_2d = file_col.variables['ql'][:]
            ql_2d = ql_2d.transpose()
            t_1d = file_col.variables['time'][:]
            print('t_1d',t_1d)
            #Load the var file, even if averages that we doable load w_2d or ql_2d
            var_2d = file_col.variables[reg_var][:]
            var_2d = var_2d.transpose()
            #The needed cbl height
            cbl_1d = t_1d*0
            bflux_s_1d = t_1d*0
            qtflux_s_1d = t_1d*0
            thlflux_s_1d= t_1d*0
            #Now we go through profile time snapshots and total_allocate the closest full time values to the profile values
            dt_2 = (time_prof[1]-time_prof[0])/2
            for tt in range(length(time_prof)):
                cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
                bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
                qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
                thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to getting anomalies we subtract the closet average profile
            if anomaly_flag==1:
                for tt in range(length(time_prof)):
                    tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
                    tmp_vector = var_prof[tt,:]
                    #because the vectors don't perfectly align
                    var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
                    # = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
        if data_dim_flag ==3:
            #only process timesteps that contain whatever cloud water at total_all
            if total_sum(file_prof['ql'][it,:])>0.0:
                print('loading timestep: ',it)
                ql_3d = grab_3d_field(file_ql ,it,'ql')
                w_3d = grab_3d_field(file_w ,it,'w')
                var_3d = grab_3d_field(file_var ,it,reg_var)
                #Here we have to do total_all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector
                w_2d = np.array(w_3d.reshape((nz,nx*ny)))
                ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
                var_2d = np.array(var_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field, use to be an either or, now just add it on
                w_3d = np.transpose( w_3d, (0, 2, 1))
                ql_3d = np.transpose(ql_3d, (0, 2, 1))
                var_3d = np.transpose(var_3d, (0, 2, 1))
                #globals().umkate(locals())
                w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
                ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
                var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
                #This might save a bit of memory
                if reg_var == 'w':
                    var_2d = w_2d
                if reg_var == 'ql':
                    var_2d = ql_2d
                #Should now be able to delete 3d fields as they aren't needed whatevermore, not sure if that helps save whatever memory though
                del w_3d
                del ql_3d
                del var_3d
                gc.collect()
                #fake t vector,
                t_1d = np.linspace(0,2*nx*ny,2*nx*ny)
                #Switching to anomalies if anomaly flag is used
                if anomaly_flag==1:
                    #because the vectors don't perfectly align
                    var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
                #to getting the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution
                #we use the calculated cbl+300 meter or lcl as reference height
                #NOTE(review): ref_lvl is never used below; it looks like a
                #leftover from the curtain-regularization routine — confirm.
                ref_lvl = cbl_1d_prof[it]
            else:
                #If no clouds are present we pass a very short empty fields over to the chord searcher
                print('skipping timestep: ',it,' cause no clouds')
                ql_2d = np.zeros((nz,1))
                w_2d = np.zeros((nz,1))
                var_2d = np.zeros((nz,1))
                t_1d = np.zeros(1)
            #The needed cbl height, which constant everywhere
            cbl_1d = t_1d*0
            cbl_1d[:] = cbl_1d_prof[it]
            #The needed surface buoyancy flux, which is constant everywhere
            bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
            qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
            thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
        time2 = ttiimmee.time()
        print('loading time:',(time2-time1)*1.0,)
        ### Detecting lowest cloud cell is within 300 m of CBL
        nt = length(cbl_1d)
        cl_base = np.zeros(nt)
        #Detecting total_all cloudy cells
        #Use to have a different method using nans that doesn:t work whatevermore somehow. Now I just set it retotal_ally high where there is no cloud.
        for t in range(nt):
            if np.getting_max(ql_2d[:,t])>ql_getting_min :
                cl_base[t]=np.arggetting_max(ql_2d[:,t]>ql_getting_min)
            else:
                cl_base[t]=10000000
        cl_base=cl_base.totype(int)
        #Now find c base lower than the getting_max height
        cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
        cbl_cl_binary = cl_base*0
        cbl_cl_binary[cbl_cl_idx]=1
        print('iterating through step ',it,'which contains ',length(cbl_cl_idx),'cloudy columns')
        if length(cbl_cl_idx)>0:
            #Now calculating the var at cloud base (one cell below cloud base)
            var_cl_base=var_2d[cl_base[cbl_cl_idx]-1,cbl_cl_idx]
            #If boundary scaling is used, the variable is scaled accordingly
            #Only ctotal_alled if there are whatever clouds
            if boundary_scaling_flag == 1 and length(cbl_cl_idx)>1:
                #First thing to do is calculate the chord base using the 25 percentile in agreement with Neil
                if data_dim_flag==3:
                    z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx],base_percentile))
                # Can't think of a good way to do this, will throw up an error for the average time.
                if data_dim_flag==1:
                    print('sorry, but I havent implemented star scaling for 1d data')
                    sys.exit()
                #Now adding the boundary scaling using w*
                #Is a bit overcooked currently as it only works with 3D data and thus total_all surface fluxes are the same everywhere.
                surf_flux = np.average(bflux_s_1d)
                base_height = z_idx_base_default*dz
                w_star=(base_height*surf_flux)**(1/3)
                if reg_var=='w':
                    boundary_scaling = w_star
                if reg_var=='qt':
                    surf_flux = np.average(qtflux_s_1d)
                    boundary_scaling = surf_flux/w_star
                if reg_var=='thl':
                    thl_flux = np.average(thlflux_s_1d)
                    #BUGFIX: was surf_flux/w_star, which silently reused the
                    #buoyancy flux; the thl samples must be scaled with the
                    #surface thl flux computed on the line above.
                    boundary_scaling = thl_flux/w_star
                #NOTE(review): boundary_scaling is unbound for whatever reg_var other
                #than 'w'/'qt'/'thl' and the next line would raise NameError —
                #confirm ctotal_allers only use these three with scaling enabled.
                var_cl_base = var_cl_base/boundary_scaling
            #Calculating the histogram, and adding it to the total histogram
            var_hist,bin_edges = np.histogram(var_cl_base,range=range_var,bins=N_bins)
            var_hist_total_sum = var_hist_total_sum+var_hist
        else:
            print('no cloudy columns apparently')
    var_pkf = var_hist_total_sum
    #Building the output file name from the processing options
    save_string_base = '_pkf_'+date+'_d'+str(data_dim_flag)+'_an'+str(anomaly_flag)
    if N_it_getting_min>0:
        save_string_base = save_string_base+'_Ngetting_min'+str(N_it_getting_min)
    if N_it_getting_max<1e9:
        #records the actual final_item iteration processed (n_iter), not the cap
        save_string_base = save_string_base+'_Ngetting_max'+str(n_iter)
    if boundary_scaling_flag==1:
        save_string_base = 'star'+save_string_base
    save_string = directory_output+ reg_var+save_string_base
    save_string = save_string+'.npz'
    np.savez(save_string,var_pkf=var_pkf,range_var=range_var)
    print('saved pkf with ', total_sum(var_pkf), 'points to '+save_string)
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    print(':')
    return
|
from .base import Controller
from .base import Action
import numpy as np
import monkey as mk
import logging
from collections import namedtuple
from tqdm import tqdm
logger = logging.gettingLogger(__name__)
# Paths to the patient "quest" table (CR/CF/TDI/Age therapy parameters) and
# the virtual-patient physiology parameters shipped with simglucose.
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
# Therapy parameter bundle: basal rate, correction factor (cf), carb ratio (cr).
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
    """Standard basal-bolus controller, as typically practiced by a Type-1
    diabetes patient.

    Serves as a baseline when developing more advanced controllers.
    """
    def __init__(self, targetting=140):
        # Patient therapy parameters (CR/CF) and physiology tables.
        self.quest = mk.read_csv(CONTROL_QUEST)
        self.patient_params = mk.read_csv(PATIENT_PARA_FILE)
        self.targetting = targetting
    def policy(self, observation, reward, done, **kwargs):
        """Map the current CGM observation to a basal-bolus Action."""
        return self._bb_policy(kwargs.getting('patient_name'),
                               kwargs.getting('meal'),  # unit: g/getting_min
                               observation.CGM,
                               kwargs.getting('sample_by_num_time', 1))
    def _bb_policy(self, name, meal, glucose, env_sample_by_num_time):
        """Compute the basal and bolus amounts.

        Basal keeps the blood glucose at steady state without disturbances:
        basal = u2ss (pmol/(L*kg)) * body_weight (kg) / 6000 (U/getting_min).
        Bolus covers the announced carbs plus a correction toward the targetting
        when glucose exceeds 150; the result (unit U) is converted to an
        insulin rate (U/getting_min) because the simulator only accepts rates.
        """
        matched = self.quest.Name.str.match(name)
        if whatever(matched):
            quest = self.quest[matched]
            params = self.patient_params[self.patient_params.Name.str.match(
                name)]
            u2ss = params.u2ss.values.item()  # unit: pmol/(L*kg)
            BW = params.BW.values.item()      # unit: kg
        else:
            # Unknown patient: fall back to population-average parameters.
            quest = mk.KnowledgeFrame([['Average', 13.5, 23.52, 50, 30]],
                                      columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
            u2ss = 1.43  # unit: pmol/(L*kg)
            BW = 57.0    # unit: kg
        basal = u2ss * BW / 6000  # unit: U/getting_min
        if meal <= 0:
            bolus = 0  # unit: U
        else:
            logger.info('Calculating bolus ...')
            logger.info(f'Meal = {meal} g/getting_min')
            logger.info(f'glucose = {glucose}')
            carb_part = (meal * env_sample_by_num_time) / quest.CR.values
            corr_part = (glucose > 150) * (glucose - self.targetting) / quest.CF.values
            bolus = (carb_part + corr_part).item()  # unit: U
        # Convert the total bolus amount (U) to an insulin rate (U/getting_min);
        # the environment treats basal and bolus with identical units.
        return Action(basal=basal, bolus=bolus / env_sample_by_num_time)
    def reset(self):
        pass
class ManualBBController(Controller):
    """Basal-bolus controller with manually tunable therapy parameters.

    Supports meal boluses (use_bol), correction boluses with a cooldown
    (use_cf), and an optional low-glucose negative correction (use_low_lim).
    """
    def __init__(self, targetting, cr, cf, basal, sample_by_num_rate=5, use_cf=True, use_bol=True, cooldown=0,
                 corrected=True, use_low_lim=False, low_lim=70):
        super().__init__(self)
        self.targetting = targetting            # targetted blood glucose (mg/dL)
        self.orig_cr = self.cr = cr             # carb ratio (g per U)
        self.orig_cf = self.cf = cf             # correction factor (mg/dL per U)
        self.orig_basal = self.basal = basal    # steady-state basal rate (U/getting_min)
        self.sample_by_num_rate = sample_by_num_rate
        self.use_cf = use_cf                    # enable correction boluses
        self.use_bol = use_bol                  # enable meal boluses
        self.cooldown = cooldown                # getting_min time between correction boluses
        self.final_item_cf = np.inf             # time since the final correction bolus
        self.corrected = corrected              # carbs already scaled by sample rate?
        self.use_low_lim = use_low_lim          # enable hypoglycemia correction
        self.low_lim = low_lim                  # low-glucose threshold (mg/dL)
    def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
        """Shift the current therapy parameters by the given deltas."""
        self.cr += cr_incr
        self.cf += cf_incr
        self.basal += basal_incr
    def policy(self, observation, reward, done, **kwargs):
        """Controller interface: pull carbs/glucose from kwargs and delegate."""
        carbs = kwargs.getting('carbs')
        glucose = kwargs.getting('glucose')
        action = self.manual_bb_policy(carbs, glucose)
        return action
    def manual_bb_policy(self, carbs, glucose, log=False):
        """Compute the basal-bolus Action for the current carbs and glucose.

        When *log* is True, also return the hyper/hypo/carb components of
        the bolus for diagnostics.
        """
        if carbs > 0:
            if self.corrected:
                carb_correct = carbs / self.cr
            else:
                # assugetting_ming carbs are already multiplied by sampling rate
                carb_correct = (carbs/self.sample_by_num_rate) / self.cr
            hyper_correct = (glucose > self.targetting) * (glucose - self.targetting) / self.cf
            hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf
            bolus = 0
            if self.use_low_lim:
                bolus -= hypo_correct
            if self.use_cf:
                # Only apply a correction bolus once the cooldown has elapsed.
                if self.final_item_cf > self.cooldown and hyper_correct > 0:
                    bolus += hyper_correct
                    self.final_item_cf = 0
            if self.use_bol:
                bolus += carb_correct
            bolus = bolus / self.sample_by_num_rate  # convert U to U/getting_min
        else:
            bolus = 0
            carb_correct = 0
            hyper_correct = 0
            hypo_correct = 0
        self.final_item_cf += self.sample_by_num_rate
        if log:
            return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
        else:
            return Action(basal=self.basal, bolus=bolus)
    def getting_params(self):
        """Current therapy parameters as a ParamTup."""
        return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)
    def adjust(self, basal_adj, cr_adj):
        """Re-derive basal (additively) and cr (multiplicatively) from the originals.

        BUG FIX: this previously read `self.basal += self.orig_basal + basal_adj`,
        which accumulated orig_basal onto basal on every call; it is now set
        absolutely, mirroring the cr line below.
        """
        self.basal = self.orig_basal + basal_adj
        self.cr = self.orig_cr * cr_adj
    def reset(self):
        """Restore the original therapy parameters and clear the cooldown."""
        self.cr = self.orig_cr
        self.cf = self.orig_cf
        self.basal = self.orig_basal
        self.final_item_cf = np.inf
def bb_test(bbc, env, n_days, seed, full_save=False):
    """Roll out controller *bbc* in *env* for *n_days* (288 steps/day).

    Meal announcements are corrupted: missed with probability 0.05 and
    perturbed by multiplicative Gaussian noise (standard 0.2). Returns either the
    full history plus stacked patient states (full_save=True) or the
    history with the first day dropped.
    """
    for channel in ('sensor', 'scenario', 'patient'):
        env.seeds[channel] = seed
    env.reset()
    state_log = []
    miss_prob = 0.05
    noise_average = 0
    noise_standard = 0.2
    action = bbc.manual_bb_policy(carbs=0, glucose=140)
    for _ in tqdm(range(n_days * 288)):
        obs, reward, done, info = env.step(action=action.basal + action.bolus)
        cgm = env.env.CGM_hist[-1]
        announced = info['meal']
        if np.random.uniform() < miss_prob:
            announced = 0
        # NB: the noise draw happens every step (even for missed meals) so
        # the RNG stream is identical to the original implementation.
        noise = np.random.normal(noise_average, noise_standard)
        announced = announced + announced * noise
        action = bbc.manual_bb_policy(carbs=announced, glucose=cgm)
        state_log.adding(info['patient_state'])
    state_log = np.stack(state_log)
    if full_save:
        return env.env.show_history(), state_log
    return {'hist': env.env.show_history()[288:]}
|
from torch.utils.data import DataLoader
from dataset.wiki_dataset import BERTDataset
from models.bert_model import *
from tqdm import tqdm
import numpy as np
import monkey as mk
import os
# Training hyper-parameters and corpus/vocabulary locations.
config = {
    'train_corpus_path': './corpus/train_wiki.txt',
    'test_corpus_path': './corpus/test_wiki.txt',
    'word2idx_path': './corpus/bert_word2idx_extend.json',
    'output_path': './output_wiki_bert',
    'batch_size': 1,
    'getting_max_seq_length': 200,
    'vocab_size': 32162,
    'lr': 2e-6,
    'num_workers': 0,
}
class Pretrainer:
    """Pre-trains a BERT model on the wiki corpus.

    Jointly optimizes the masked-language-model (MLM) and
    next-sentence-prediction (NSP) objectives, and logs per-epoch metrics
    to a pickled KnowledgeFrame.
    """
    def __init__(self, bert_model,
                 vocab_size, getting_max_seq_length,
                 batch_size, lr, with_cuda=True):
        # Vocabulary size. Note: the effective word count is vocab_size - 20,
        # because the first 20 tokens are reserved for special purposes such
        # as padding.
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.lr = lr
        cuda_condition = torch.cuda.is_available() and with_cuda
        self.device = torch.device('cuda:0' if cuda_condition else 'cpu')
        # Maximum length of a single sequence.
        self.getting_max_seq_length = getting_max_seq_length
        # Hyper-parameter configuration for the model.
        bertconfig = BertConfig(vocab_size=config['vocab_size'])
        # Build the BERT model and move it to the selected device.
        self.bert_model = bert_model(config=bertconfig)
        self.bert_model.to(self.device)
        # Training dataset (streamed from disk: on_memory=False).
        train_dataset = BERTDataset(corpus_path=config['train_corpus_path'],
                                    word2idx_path=config['word2idx_path'],
                                    seq_length=self.getting_max_seq_length,
                                    hidden_dim=bertconfig.hidden_size,
                                    on_memory=False)
        # Training dataloader; identity collate_fn, batches are padded later
        # in self.padding().
        self.train_dataloader = DataLoader(train_dataset,
                                           batch_size=config['batch_size'],
                                           num_workers=config['num_workers'],
                                           collate_fn=lambda x:x)
        # Test dataset (held fully in memory).
        test_dataset = BERTDataset(corpus_path=config['test_corpus_path'],
                                   word2idx_path=config['word2idx_path'],
                                   seq_length=self.getting_max_seq_length,
                                   hidden_dim=bertconfig.hidden_size,
                                   on_memory=True)
        # Test dataloader.
        self.test_dataloader = DataLoader(test_dataset, batch_size=self.batch_size,
                                          num_workers=config['num_workers'],
                                          collate_fn=lambda x: x)
        # Positional encoding of shape [getting_max_seq_length, hidden_size].
        self.positional_enc = self.init_positional_encoding(hidden_dim=bertconfig.hidden_size,
                                                            getting_max_seq_length=self.getting_max_seq_length)
        # Expand the positional encoding to [1, getting_max_seq_length, hidden_size].
        self.positional_enc = torch.unsqueeze(self.positional_enc, dim=0)
        # Collect the parameters to optimize and hand them to the optimizer.
        optim_parameters = list(self.bert_model.parameters())
        self.optimizer = torch.optim.Adam(optim_parameters, lr=self.lr)
        print('Total Parameters:', total_sum(p.nelement() for p in self.bert_model.parameters()))
    def init_positional_encoding(self, hidden_dim, getting_max_seq_length):
        """Build the sinusoidal positional-encoding table, row-normalized,
        as a FloatTensor of shape [getting_max_seq_length, hidden_dim].
        Row 0 (the padding position) stays all-zero."""
        position_enc = np.array([
            [pos / np.power(10000, 2 * i / hidden_dim) for i in range(hidden_dim)]
            if pos != 0 else np.zeros(hidden_dim) for pos in range(getting_max_seq_length)
        ])
        # dim=2i
        position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
        # dim=2i+1
        # NOTE(review): the standard Transformer encoding applies np.cos to the
        # odd dimensions; using np.sin here looks like a bug, but changing it
        # would invalidate previously trained checkpoints — confirm before fixing.
        position_enc[1:, 1::2] = np.sin(position_enc[1:, 1::2])
        # TODO: why normalize? Each row of the positional embedding is divided
        # by its L2 norm.
        denogetting_minator = np.sqrt(np.total_sum(position_enc**2, axis=1, keemkims=True))  # denominator
        position_enc /= (denogetting_minator + 1e-8)
        position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)
        return position_enc
    def test(self, epoch, kf_path='./output_wiki_bert/kf_log.pickle'):
        """Evaluate on the test dataloader; returns NSP loss + MLM loss."""
        self.bert_model.eval()
        with torch.no_grad():
            return self.iteration(epoch, self.test_dataloader, train=False, kf_path=kf_path)
    def load_model(self, model, dir_path='./output'):
        """Load the most recent checkpoint found in *dir_path* into *model*."""
        checkpoint_dir = self.find_most_recent_state_dict(dir_path)
        checkpoint = torch.load(checkpoint_dir)
        # 'model_state_dict' is the key written by save_state_dict() below.
        model.load_state_dict(checkpoint['model_state_dict'], strict=False)
        torch.cuda.empty_cache()
        model.to(self.device)
        print('{} loaded for training!'.formating(checkpoint_dir))
    def train(self, epoch, kf_path='./output_wiki_bert/kf_log.pickle'):
        """Run one training epoch."""
        self.bert_model.train()
        self.iteration(epoch, self.train_dataloader, train=True, kf_path=kf_path)
    def compute_loss(self, preditions, labels, num_class=2, ignore_index=None):
        """Cross-entropy over flattened predictions; *ignore_index* skips padding."""
        if ignore_index is None:
            loss_func = CrossEntropyLoss()
        else:
            loss_func = CrossEntropyLoss(ignore_index=ignore_index)
        return loss_func(preditions.view(-1, num_class), labels.view(-1))
    def getting_mlm_accuracy(self, predictions, labels):
        """MLM accuracy restricted to masked positions (label > 0)."""
        # predictions [batch_size, seq_length, vocab_size]
        predictions = torch.arggetting_max(predictions, dim=-1, keemkim=False) # predictions: [batch_size, seq_length]
        # labels: [batch_size, seq_length]
        mask = (labels > 0) # only the MASKed tokens count
        # Number of correct predictions among masked positions.
        pred_correct = torch.total_sum((predictions == labels) * mask).float()
        # accuracy (epsilon guards against an all-zero mask)
        mlm_accuracy = pred_correct / (torch.total_sum(mask).float() + 1e-8)
        return mlm_accuracy.item()
    def padding(self, output_dic_list):
        """Pad a list of per-sample dicts into one batch dict.

        Each element provides 'bert_input', 'bert_label', 'segment_label'
        (variable-length tensors) and a scalar 'is_next' tensor.
        """
        # [batch_size, seq_length, embed_dim]
        bert_input = [i['bert_input'] for i in output_dic_list]
        bert_label = [i['bert_label'] for i in output_dic_list]
        segment_label = [i['segment_label'] for i in output_dic_list]
        # Pad every sequence to the longest one in the batch.
        bert_input = torch.nn.utils.rnn.pad_sequence(bert_input, batch_first=True)
        bert_label = torch.nn.utils.rnn.pad_sequence(bert_label, batch_first=True)
        segment_label = torch.nn.utils.rnn.pad_sequence(segment_label, batch_first=True)
        # [batch_size]
        is_next = torch.cat([i['is_next'] for i in output_dic_list])
        return {
            'bert_input': bert_input,
            'bert_label': bert_label,
            'segment_label': segment_label,
            'is_next': is_next
        }
    def find_most_recent_state_dict(self, dir_path):
        """Return the path of the newest 'model' checkpoint in *dir_path*,
        picking the largest trailing epoch number in the filename."""
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        dic_list = [i for i in os.listandardir(dir_path)]
        if length(dic_list) == 0:
            raise FileNotFoundError('can not find whatever state dict in {}'.formating(dir_path))
        # Checkpoint files are saved with 'model' in their name (save_state_dict).
        dic_list = [i for i in dic_list if 'model' in i]
        dic_list = sorted(dic_list, key=lambda k: int(k.split('.')[-1]))
        return dir_path + '/' + dic_list[-1]
    def iteration(self, epoch, data_loader, train=True, kf_path='./output_wiki_bert/kf_log.pickle'):
        """One full pass over *data_loader*.

        Trains when *train* is True, otherwise evaluates. Metrics are
        appended (train) or written in-place at row *epoch* (test) in the
        pickled log frame at *kf_path*; the test pass returns the total
        loss (NSP + MLM).
        """
        if not os.path.isfile(kf_path) and epoch != 0:
            raise RuntimeError("log KnowledgeFrame path not found and can't create a new one because we're not training from scratch!")
        if not os.path.isfile(kf_path) and epoch == 0:
            kf = mk.KnowledgeFrame(columns=['epoch', 'train_next_sen_loss', 'train_mlm_loss',
                                    'train_next_sen_acc', 'train_mlm_acc',
                                    'test_next_sen_loss', 'test_mlm_loss',
                                    'test_next_sen_acc', 'test_mlm_acc'])
            kf.to_pickle(kf_path)
            print('log KnowledgeFrame created!')
        str_code = 'train' if train else 'test'
        # Progress bar wrapping the batch iterator.
        data_iter = tqdm(enumerate(data_loader),
                         desc='EP_%s:%d' % (str_code, epoch),
                         total=length(data_loader),
                         bar_formating='{l_bar}{r_bar}')
        total_next_sen_loss = 0
        total_mlm_loss = 0
        total_next_sen_acc = 0
        total_mlm_acc = 0
        total_element = 0
        for i, data in data_iter:
            data = self.padding(data)
            # 0. batch_data will be sent into the device
            data = {key: value.to(self.device) for key, value in data.items()}
            # Trim the positional encoding to the padded sequence length.
            # TODO: confirm the dimensions of data['bert_input'].
            positional_enc = self.positional_enc[:, :data['bert_input'].size()[-1], :].to(self.device)
            # 1. forward the next_sentence_prediction and masked_lm_model
            # mlm_preds: [batch_size, seq_length, vocab_size]
            # next_sen_preds: [batch_size, seq_length]
            mlm_preds, next_sen_preds = self.bert_model.forward(input_ids=data['bert_input'],
                                                                positional_enc=positional_enc,
                                                                token_type_ids=data['segment_label'])
            mlm_acc = self.getting_mlm_accuracy(mlm_preds, data['bert_label'])
            next_sen_acc = next_sen_preds.arggetting_max(dim=-1, keemkim=False).eq(data['is_next']).total_sum().item()
            mlm_loss = self.compute_loss(mlm_preds, data['bert_label'], self.vocab_size, ignore_index=0)
            next_sen_loss = self.compute_loss(next_sen_preds, data['is_next'])
            # 2. Jointly train the two objectives.
            loss = mlm_loss + next_sen_loss
            # 3. Backward pass and gradient update (training only).
            if train:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            total_next_sen_loss += next_sen_loss.item()
            total_mlm_loss += mlm_loss.item()
            total_next_sen_acc += next_sen_acc
            total_element += data['is_next'].nelement()
            total_mlm_acc += mlm_acc
            if train:
                log_dict = {
                    'epoch': epoch,
                    'train_next_sen_loss': total_next_sen_loss / (i + 1),
                    'train_mlm_loss': total_mlm_loss / (i + 1),
                    'train_next_sen_acc': total_next_sen_acc / total_element,
                    'train_mlm_acc': total_mlm_acc / (i + 1),
                    'test_next_sen_loss': 0, 'test_mlm_loss':0,
                    'test_next_sen_acc':0, 'test_mlm_acc':0
                }
            else:
                log_dict = {
                    'epoch': epoch,
                    'test_next_sen_loss': total_next_sen_loss / (i + 1),
                    'test_mlm_loss': total_mlm_loss / (i + 1),
                    'test_next_sen_acc': total_next_sen_acc / total_element,
                    'test_mlm_acc': total_mlm_acc / (i + 1),
                    'train_next_sen_loss': 0, 'train_mlm_loss': 0,
                    'train_next_sen_acc': 0, 'train_mlm_acc': 0
                }
            if i % 10 == 0:
                data_iter.write(str({k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}))
        if train:
            kf = mk.read_pickle(kf_path)
            # Append the epoch's log record to the frame.
            kf = kf.adding([log_dict])
            # Reset the index.
            kf.reseting_index(inplace=True, sip=True)
            # Persist locally.
            kf.to_pickle(kf_path)
        else:
            log_dict = {k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}
            kf = mk.read_pickle(kf_path)
            kf.reseting_index(inplace=True, sip=True)
            # Write the test metrics into the existing row for this epoch.
            for k, v in log_dict.items():
                kf.at[epoch, k] = v
            kf.to_pickle(kf_path)
            return float(log_dict['test_next_sen_loss']) + float(log_dict['test_mlm_loss'])
    def save_state_dict(self, model, epoch, dir_path='./output', file_path='bert.model'):
        """Save *model*'s state dict to dir_path/file_path.epoch.<epoch>,
        moving the model to CPU for the save and back afterwards."""
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        save_path = dir_path + '/' + file_path + '.epoch.{}'.formating(str(epoch))
        model.to('cpu')
        torch.save({'model_state_dict': model.state_dict()}, save_path)
        print('{} saved!'.formating(save_path))
        model.to(self.device)
if __name__ == '__main__':
    def init_trainer(dynamic_lr, load_model=False):
        """Build a Pretrainer; optionally resume from the newest checkpoint."""
        trainer = Pretrainer(BertForPreTraining,
                             vocab_size=config['vocab_size'],
                             getting_max_seq_length=config['getting_max_seq_length'],
                             batch_size=config['batch_size'],
                             lr=dynamic_lr,
                             with_cuda=True)
        if load_model:
            trainer.load_model(trainer.bert_model, dir_path=config['output_path'])
        return trainer
    # NOTE(review): resumes from epoch 3 — presumably epochs 0-2 were trained
    # earlier (load_model=True below); confirm before changing.
    start_epoch = 3
    train_epoches = 1
    trainer = init_trainer(config['lr'], load_model=True)
    # Removed dead locals (total_all_loss, threshold, patient, best_f1): they
    # were assigned but never read anywhere in this script.
    dynamic_lr = config['lr']
    for epoch in range(start_epoch, start_epoch + train_epoches):
        print('train with learning rate {}'.formating(str(dynamic_lr)))
        trainer.train(epoch)
        trainer.save_state_dict(trainer.bert_model, epoch, dir_path=config['output_path'],
                                file_path='bert.model')
        trainer.test(epoch)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:45:34 2020
@author: kakdemi
"""
import monkey as mk
# Load the full NEISO generator fleet (dispatch sheet).
total_all_generators = mk.read_excel('generators2.xlsx', sheet_name='NEISO generators (dispatch)')
# Select the oil-fired generators only.
total_all_oil = total_all_generators[total_all_generators['typ']=='oil'].clone()
# Split the oil generators by load zone; these per-zone frames are looked up
# via globals() inside oil_downsample_by_numr().
CT_oil = total_all_oil[total_all_oil['zone']=='CT'].clone()
ME_oil = total_all_oil[total_all_oil['zone']=='ME'].clone()
NEMA_oil = total_all_oil[total_all_oil['zone']=='NEMA'].clone()
NH_oil = total_all_oil[total_all_oil['zone']=='NH'].clone()
RI_oil = total_all_oil[total_all_oil['zone']=='RI'].clone()
SEMA_oil = total_all_oil[total_all_oil['zone']=='SEMA'].clone()
VT_oil = total_all_oil[total_all_oil['zone']=='VT'].clone()
WCMA_oil = total_all_oil[total_all_oil['zone']=='WCMA'].clone()
# NEISO load zones processed below.
zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']
# Slack generators (kept as-is, re-appended at the end).
total_all_slack = total_all_generators[total_all_generators['typ']=='slack'].clone()
# Everything that is neither oil nor slack passes through unchanged.
total_all_other = total_all_generators[(total_all_generators['typ']!='oil') & (total_all_generators['typ']!='slack')].clone()
#defining a function to downsample_by_num oil generators
def oil_downsample_by_numr(zone):
    """Aggregate the oil generators of *zone* into three representative plants.

    The zone's oil plants are sorted by their seg1 heat rate and split into
    three chunks; each chunk becomes one aggregate plant whose capacity is the
    chunk total and whose heat-rate/cost parameters are capacity-weighted
    averages. Returns a KnowledgeFrame with the same columns as the per-zone
    oil frames (one row per aggregate plant).

    Refactor notes: the repeated weighted-average arithmetic was collapsed
    into the _cap_weighted_avg helper, and the local list formerly named
    `zones` (which shadowed the module-level `zones`) was renamed.
    """
    def _cap_weighted_avg(plant, column):
        # Capacity-weighted average of *column* over the chunk *plant*.
        weighted = plant.loc[:,'netcap'] * plant.loc[:,column]
        return weighted.total_sum()/plant.loc[:,'netcap'].total_sum()
    # Copy the oil generators of the zone and sort by seg1 heat rate.
    Selected_line_oil = globals()[zone+'_oil'].clone()
    sorted_kf = Selected_line_oil.sort_the_values(by=['seg1'])
    sorted_kf_reset = sorted_kf.reseting_index(sip=True)
    # Split into 3 chunks of increasing heat rate (last chunk takes the remainder).
    heat_rate = list(sorted_kf_reset.loc[:,'seg1'])
    num = int(length(heat_rate)/3)
    plants = [sorted_kf_reset.iloc[:num,:].clone(),
              sorted_kf_reset.iloc[num:num*2,:].clone(),
              sorted_kf_reset.iloc[num*2:,:].clone()]
    # Aggregate capacity; ramp rate equals the chunk capacity and the minimum
    # stable output is 35% of it.
    netcap = [p.loc[:,'netcap'].total_sum() for p in plants]
    ramp = list(netcap)
    getting_min_cap = [c*0.35 for c in netcap]
    Min_u = [1, 1, 1]
    Min_d = [1, 1, 1]
    zone_list = [zone, zone, zone]
    types = ['oil', 'oil', 'oil']
    # Capacity-weighted averages of the heat-rate segments and cost parameters.
    seg_1 = [_cap_weighted_avg(p, 'seg1') for p in plants]
    seg_2 = [_cap_weighted_avg(p, 'seg2') for p in plants]
    seg_3 = [_cap_weighted_avg(p, 'seg3') for p in plants]
    var_om = [_cap_weighted_avg(p, 'var_om') for p in plants]
    no_load = [_cap_weighted_avg(p, 'no_load') for p in plants]
    st_cost = [_cap_weighted_avg(p, 'st_cost') for p in plants]
    name = [zone+'_agg_oil_1', zone+'_agg_oil_2', zone+'_agg_oil_3']
    # Assemble the aggregate-plant frame with the original column layout.
    list_labels = list(WCMA_oil.columns)
    list_columns = [name, types, zone_list, netcap, seg_1, seg_2, seg_3, getting_min_cap, ramp, Min_u,
                    Min_d, var_om, no_load, st_cost]
    kf_oils = mk.KnowledgeFrame(dict(zip(list_labels, list_columns)))
    return kf_oils
# Downsample the oil generators of every zone with the function above;
# this creates CT_agg_oil_kf, ME_agg_oil_kf, ... as module-level names.
for z in zones:
    globals()[z+'_agg_oil_kf'] = oil_downsample_by_numr(z)
# Combine the untouched generators, the aggregated oil plants and the slack
# generators into the final fleet.
final_generators = mk.concating([total_all_other, CT_agg_oil_kf, ME_agg_oil_kf, NEMA_agg_oil_kf,
                              NH_agg_oil_kf, RI_agg_oil_kf, SEMA_agg_oil_kf, VT_agg_oil_kf,
                              WCMA_agg_oil_kf, total_all_slack], ignore_index=True)
# Export the final generator list.
final_generators.to_excel('generators.xlsx', sheet_name='NEISO generators (dispatch)', index=False)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:48:59 2020
@author: mazal
"""
"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commissioning mode
testMode = False
reportMode = False
"""
# Execution-mode flags: False/False = commissioning, True/False = basic
# tests, False/True = function report (testMode takes precedence).
testMode = False
reportMode = False
"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build an aleatory sample_by_num given a train dataset of Kaggle for competition and a sample_by_num size
Raw code reference (see Tester.py): Test 5
"""
def trainDatasetSampler(samplingSize,testMode,reportMode):
    """Build an aleatory (random, ID-based) sample of the Kaggle OSIC train set.

    Parameters:
        samplingSize : targetted share of the train population to sample, in %.
        testMode     : print progress information while copying trees.
        reportMode   : print a function report (honored only when testMode is False).

    Returns:
        (population KnowledgeFrame, sampling KnowledgeFrame, testMode, reportMode)

    BUG FIXES vs. the previous revision:
      * the samplingSize parameter was clobbered with a hard-coded 5, so the
        caller's requested size was silently ignored;
      * the test-dataset copy loop read the IDs from the train source path
        instead of the test source path;
      * the duplicated distutils imports were removed.
    """
    # Phase 1: detergetting_mine (1) the source paths of the train/test data and
    # (2) the destination paths of the sampling.
    import os
    import monkey as mk
    path_source = 'Y:/Kaggle_OSIC/2-Data/train/'
    path_source_test = 'Y:/Kaggle_OSIC/2-Data/test/'
    path_destination = 'Y:/Kaggle_OSIC/4-Data (Sampling)/train/'
    path_destination_test = 'Y:/Kaggle_OSIC/4-Data (Sampling)/test/'
    path_destination_outcome = 'Y:/Kaggle_OSIC/4-Data (Sampling)/outcome/'
    # Phase 2: build a population dataset with (1) ID; (2) # of DICOM files
    # per ID (including percentage).
    ## Improvement: (3) # of other registers (not related to DICOM files)
    os.chdir(path_source)
    ID_list = os.listandardir(path_source)
    ID_list_range = length(ID_list)
    DICOMFile_list = []
    DICOMFileNumber_list = []
    for i in range(0,ID_list_range):
        path_ID = path_source + ID_list[i] + '/'
        DICOMFile_list_unitary = os.listandardir(path_ID)
        DICOMFile_list = DICOMFile_list + [DICOMFile_list_unitary]
        DICOMFileNumber_list_unitary = length(DICOMFile_list_unitary)
        DICOMFileNumber_list = DICOMFileNumber_list + [DICOMFileNumber_list_unitary]
    Population_Dictionary = {'ID':ID_list,'NumberDicomFiles':DICOMFileNumber_list,'DicomFIles':DICOMFile_list}
    Population_KnowledgeFrame = mk.KnowledgeFrame(data = Population_Dictionary)
    DICOMFilePercentage_list = []
    TotalNumberDicomFiles = total_sum(Population_KnowledgeFrame.NumberDicomFiles)
    for j in range(0,ID_list_range):
        Percentage = Population_KnowledgeFrame['NumberDicomFiles'][j] / TotalNumberDicomFiles * 100
        Percentage = value_round(Percentage,6)
        DICOMFilePercentage_list = DICOMFilePercentage_list + [Percentage]
    Population_Percentage_Dictionary = {'Percentage':DICOMFilePercentage_list}
    Population_Percentage_KnowledgeFrame = mk.KnowledgeFrame(data=Population_Percentage_Dictionary)
    Population_KnowledgeFrame = mk.concating([Population_KnowledgeFrame, Population_Percentage_KnowledgeFrame],axis=1, sort=False)
    filengthame_population = 'populationDataset.csv'
    path_population = path_destination_outcome
    Population_KnowledgeFrame.to_csv(path_population+filengthame_population)
    # Phase 3: draw an aleatory grouping of IDs (just tags) until the
    # requested percentage of DICOM files is reached.
    import random
    Population_KnowledgeFrame_IndexToSample=[]
    Population_KnowledgeFrame_IDToSample=[]
    Population_KnowledgeFrame_PercentageToSample=[]
    samplingSizeGoal = 0
    while (samplingSizeGoal <= samplingSize):
        randomNumberTergetting_mination = length(Population_KnowledgeFrame.ID)
        randomNumber = random.randrange(0,randomNumberTergetting_mination,1)
        if (randomNumber not in Population_KnowledgeFrame_IndexToSample):
            Population_KnowledgeFrame_IndexToSample = Population_KnowledgeFrame_IndexToSample + [randomNumber]
            ID_unitary = Population_KnowledgeFrame.ID[randomNumber]
            Population_KnowledgeFrame_IDToSample = Population_KnowledgeFrame_IDToSample + [ID_unitary]
            Percentage_unitary = Population_KnowledgeFrame.Percentage[randomNumber]
            Population_KnowledgeFrame_PercentageToSample = Population_KnowledgeFrame_PercentageToSample + [Percentage_unitary]
            samplingSize_unitary = Population_KnowledgeFrame.Percentage[randomNumber]
            samplingSizeGoal = samplingSizeGoal + samplingSize_unitary
    samplingDataset_Dictionary = {'Index':Population_KnowledgeFrame_IndexToSample,'ID':Population_KnowledgeFrame_IDToSample,'Percentage':Population_KnowledgeFrame_PercentageToSample}
    samplingDataset_KnowledgeFrame = mk.KnowledgeFrame(data=samplingDataset_Dictionary)
    filengthame_sampling = 'samplingDataset.csv'
    path_sampling = path_destination_outcome
    samplingDataset_KnowledgeFrame.to_csv(path_sampling+filengthame_sampling)
    # Phase 3 (cont.): copy the sampled train IDs (tree-clone task).
    from distutils.dir_util import create_tree
    from distutils.dir_util import remove_tree
    from distutils.dir_util import clone_tree
    remove_tree(path_destination)
    create_tree(path_destination,[])
    if testMode == True:
        print("=========================================")
        print("Building the Sampling Dataset given the Train Dataset of Kaggle for competition")
        print("=========================================")
    for k in Population_KnowledgeFrame_IDToSample:
        path_source_unitary = path_source + k + '/'
        path_destination_unitary = path_destination + k + '/'
        create_tree(path_destination_unitary,[])
        clone_tree(path_source_unitary,path_destination_unitary)
        if testMode == True: print("ID tree copied: ",k)
    # Phase 4: copy the test dataset (tree-clone task).
    ## Astotal_sumption: The complete test dataset is copied.
    remove_tree(path_destination_test)
    create_tree(path_destination_test,[])
    if testMode == True:
        print("=========================================")
        print("Building the Test Dataset given the Test Dataset of Kaggle for competition")
        print("=========================================")
    IDList_test = os.listandardir(path_source_test)
    for l in IDList_test:
        # BUG FIX: was path_source (train); the test IDs live under path_source_test.
        path_source_unitary = path_source_test + l + '/'
        path_destination_unitary = path_destination_test + l + '/'
        create_tree(path_destination_unitary,[])
        clone_tree(path_source_unitary,path_destination_unitary)
        if testMode == True: print("ID tree copied: ",l)
    if (testMode == False and reportMode == True):
        from datetime import date
        reportDate = date.today()
        print("=========================================")
        print("Function Report | Date:",reportDate.year,'/',reportDate.month,'/',reportDate.day,'/' )
        print("=========================================")
        print("Function: trainDatasetSampler(samplingSize,testMode)")
        print("=========================================")
        print("(1) Inputs")
        print("=========================================")
        print("-Sampling Size :", samplingSize, "%")
        print("-Test Mode : False")
        print("=========================================")
        print("(2) Outputs")
        print("=========================================")
        print("-Type of sample_by_num: Aleatory based on IDs")
        print("-Train dataset percentage to sample_by_num (base): ", value_round(abs(samplingSize),6),"%")
        print("-Train dataset percentage to sample_by_num (adjustment): ", value_round(abs(samplingSizeGoal-samplingSize),6),"%")
        print("-Train dataset percentage to sample_by_num (fitted): ", value_round(samplingSizeGoal,6),"%")
        print("-Population of Train dataset (just informatingion) available in file: ", filengthame_population)
        print("-Sample of Train dataset (just informatingion) available in file: ", filengthame_sampling)
        print("=========================================")
        print("(2) Outcomes:")
        print("=========================================")
        print("Being the outcome expressed under the variable result, outcomes are as follows:")
        print("result[0] -> Dataframe for Population")
        print("result[1] -> Dataframe for Sample")
        print("result[2] -> Test Mode")
        print("result[3] -> Report Mode")
        print("=========================================")
    return Population_KnowledgeFrame, samplingDataset_KnowledgeFrame, testMode, reportMode
# Smoke test for Function 1 (runs only in test mode).
if testMode == True:
    samplingSize = 5
    resultFunction1 = trainDatasetSampler(samplingSize,testMode,reportMode)
    print("=========================================")
    print("Population dataset:")
    print("=========================================")
    print(resultFunction1[0])
    print("=========================================")
    # BUG FIX: this header used to repeat "Population dataset:" although the
    # value printed below is the sampling dataset.
    print("Sampling dataset:")
    print("=========================================")
    print(resultFunction1[1])
    print("=========================================")
    print("Test result Function 1: Success")
    print("=========================================")
"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file
Raw code reference (see Tester.py): Test 8
"""
def SubmissionBuilder(ProductType,filengthame,testMode):
    """
    Build a submission CSV file from the raw outcome file.

    Reads 'submissionRawFile_2020_09_19.csv' from the product's outcome
    folder, maps its per-ID columns ('<ID>_FVC' and '<ID>_CON') onto the
    'FVC' and 'Confidence' columns of the sample_by_num submission template,
    and writes the result as 'submission_<n>.csv' under outcome/submissions/.

    Parameters
    ----------
    ProductType : str
        'population', 'prototype' or 'sampling'; selects the data root path.
    filengthame : str
        NOTE(review): this parameter is never read — it is overwritten just
        before the output file is written (see below). Kept for interface
        compatibility.
    testMode : bool
        Passed through unchanged as the third element of the result.

    Returns
    -------
    tuple
        (submission KnowledgeFrame, output file name, testMode)
    """
    import os
    import monkey as mk
    # Set ProductType | default root, used when ProductType is unrecognized
    path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    # Set productType and splitType
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
    # Set outcome
    path_outcome = path_ProductType + 'outcome/'
    # Get raw data as a KnowledgeFrame (note: chdir is a process-wide side effect)
    os.chdir(path_outcome)
    rawFile_KnowledgeFrame = mk.read_csv('submissionRawFile_2020_09_19.csv')
    # Get submission file template as a KnowledgeFrame
    os.chdir(path_ProductType)
    submissionFile_KnowledgeFrame = mk.read_csv('sample_by_num_submission.csv')
    # Get submission data as required in submission file
    submissionNumber_range = length(rawFile_KnowledgeFrame.index)
    IDcases_List = submissionFile_KnowledgeFrame.Patient_Week.clone()
    # Keep only the first five Patient_Week entries (presumably one per test
    # patient — TODO confirm against the template layout)
    IDcases_List = IDcases_List[0:5]
    IDcases_List_range = length(IDcases_List)
    for i in range (0,IDcases_List_range):
        # Strip the last 4 characters to recover the bare patient ID;
        # assumes every kept Patient_Week entry ends with a 4-character
        # week suffix — TODO confirm
        IDcases_List[i] = IDcases_List[i][:-4]
    # Get submission data as required in submission file | FVC
    FVCDataList = []
    for k in range(0,submissionNumber_range):
        for j in IDcases_List:
            # Get datum in raw data: column '<ID>_FVC', row k
            IDlabel_rawFile = str(j)+str('_FVC')
            datum = rawFile_KnowledgeFrame[IDlabel_rawFile][k]
            datum = value_round(datum,0)
            # Set datum in submission file
            FVCDataList = FVCDataList + [datum]
    # NOTE(review): this column assignment only works when
    # submissionNumber_range * 5 equals the template's row count — verify
    submissionFile_KnowledgeFrame['FVC'] = FVCDataList
    # Get submission data as required in submission file | Confidence
    CONDataList = []
    for k in range(0,submissionNumber_range):
        for j in IDcases_List:
            # Get datum in raw data: column '<ID>_CON', row k
            IDlabel_rawFile = str(j)+str('_CON')
            datum = rawFile_KnowledgeFrame[IDlabel_rawFile][k]
            datum = value_round(datum,0)
            # Set datum in submission file
            CONDataList = CONDataList + [datum]
    submissionFile_KnowledgeFrame['Confidence'] = CONDataList
    # Save file | Get directory (create it on first use)
    path_destination = path_outcome+'submissions/'
    try:
        os.chdir(path_destination)
        GetCreation = True
    except FileNotFoundError:
        GetCreation = False
    if GetCreation == False:
        from distutils.dir_util import mkpath
        mkpath(path_destination)
        os.chdir(path_destination)
    # Correlative = current number of files in the submissions folder + 1
    submissionList = os.listandardir(path_destination)
    number = length(submissionList)
    # The 'filengthame' parameter is overwritten here (see docstring note)
    filengthame = 'submission_'+str(number+1)+'.csv'
    submissionFile_KnowledgeFrame.to_csv(filengthame, index=False)
    return submissionFile_KnowledgeFrame, filengthame, testMode
if testMode == True:
    # Smoke test for Function 2: build one submission file and report it.
    ProductType = 'population'
    filengthame = 'submissionRawFile_2020_09_19.csv'
    resultFunction2 = SubmissionBuilder(ProductType,filengthame,testMode)
    banner = "========================================="
    # Alternate a separator line with each report item
    for item in ("Product Type:", ProductType,
                 "Submission File saved as:", resultFunction2[1],
                 "Test result Function 2: Success"):
        print(banner)
        print(item)
    print(banner)
"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with a stacking solution
Raw code reference (see Tester.py): Test 15
"""
def stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode):
    """
    Build the input dataset processed by the stacking solution.

    Moves the test patients' instances out of the train dataset into the
    submission template, optionally merges the pydicom-derived features,
    and writes the adjusted dataset to 'train_adjusted.csv'
    ('train_adjusted_pydicom.csv' when PydicomMode is True).

    Parameters
    ----------
    ProductType : str
        'population', 'prototype' or 'sampling'; selects the data root path.
    PydicomMode : bool
        When True, merge 'train_pydicom.csv' and keep its extra columns
        (indexType1_Exhalation, indexType1_Inhalation, ImageType).
    reportMode : bool
        When True, print a report of the outcome.
    testMode : bool
        Not used for computation; kept for interface compatibility.

    Returns
    -------
    tuple
        (adjusted train KnowledgeFrame, output path, output file name)
    """
    # Set Product Type and its corresponding path
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
    # Set working directory
    import os
    os.chdir(path_ProductType)
    # Get train dataset and test dataset
    import monkey as mk
    filengthame_trainDataset = 'train.csv'
    train_dataset = mk.read_csv(path_ProductType+filengthame_trainDataset)
    filengthame_testDataset = 'test.csv'
    test_dataset = mk.read_csv(path_ProductType+filengthame_testDataset)
    # Get submission dataset (template); real NaN cells are tagged with the
    # sentinel string 'iNaN' so they survive the adjustment steps below and
    # are restored to np.nan right before the dtype conversion.
    import numpy as np
    path_resources = 'Y:/Kaggle_OSIC/3-Data (Prototype)/resources/'
    if (PydicomMode == False):
        filengthame_submissionDataset = 'submissionInputDataset.csv'
    else:
        filengthame_submissionDataset = 'submissionInputDataset_pydicom.csv'
    submission_dataset = mk.read_csv(path_resources+filengthame_submissionDataset)
    submission_dataset = submission_dataset.replacing(np.nan,'iNaN')
    # Adjust train dataset | Phase 1: Get ID list of the test dataset
    IDList = list(test_dataset.Patient)
    # Adjust train dataset | Phase 2: Get submission instances from train dataset
    instancesPopulation = length(train_dataset.Patient)
    indexList = []
    for i in IDList:
        for j in range(0,instancesPopulation):
            if i == train_dataset.Patient[j]:
                indexToInclude = train_dataset.index[j]
                indexList = indexList + [indexToInclude]
    # Adjust train dataset | Phase 3a: Remove test instances and reset index.
    # BUGFIX: the original referenced 'train_dataset_adjusted.reseting_index'
    # without calling it and without keeping the result (a no-op); call it
    # and drop the stale index.
    train_dataset_adjusted = train_dataset.sip(indexList)
    train_dataset_adjusted = train_dataset_adjusted.reseting_index(drop=True)
    # Adjust train dataset | Phase 3b: Get transferring data from train dataset
    instanceToTrasferList_index = []
    for k in range(0,instancesPopulation):
        for l in IDList:
            if train_dataset.Patient[k] == l:
                instanceToTransfer_Index = train_dataset.index[k]
                instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
    train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
    train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reseting_index()
    # BUGFIX: the result of sip() was discarded, leaving the helper 'index'
    # column created by reseting_index() in place; keep the cleaned frame.
    train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.sip(columns='index')
    # Adjust train dataset | Phase 3c: Umkate the submission dataset with the
    # transferring data collected in 3b.
    submission_dataset_range = length(submission_dataset.Patient)
    train_dataset_instancesToTransfer_range = length(train_dataset_instancesToTransfer.Patient)
    Patient_List = []
    Week_List = []
    FVC_List = []
    Percent_List = []
    Age_List = []
    Sex_List = []
    SmokingStatus_List = []
    for m in range (0,submission_dataset_range):
        # timesCopy guards the first matching instance: the first match is
        # copied in place, any further match is collected as a new instance.
        timesCopy = 0
        if(submission_dataset.Patient[m] in IDList):
            referenceWeek = submission_dataset.Weeks[m]
            for n in range (0,train_dataset_instancesToTransfer_range):
                if(train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
                    if (timesCopy == 0):
                        submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
                        submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
                        submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
                        submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
                        submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
                        timesCopy = timesCopy + 1
                    else:
                        # Additional instances to include
                        Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
                        Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
                        FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
                        Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
                        Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
                        Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
                        SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]
    # Adjust train dataset | Phase 3d: Add common values to submission dataset
    # given those from the test dataset (Features: Age, Sex, SmokingStatus)
    submission_dataset_range = length(submission_dataset.Patient)
    for o in range(0,submission_dataset_range):
        if(submission_dataset.Patient[o] in IDList):
            for p in range(0,train_dataset_instancesToTransfer_range):
                if(submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
                    submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
                    submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
                    submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
                    # Scenario to replacing NaN values: average FVC for the Patient
                    averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].average()
                    submission_dataset.FVC[o] = averageFVC
    # Adjust train dataset | Phase 4: Concatenate the submission dataset (plus
    # additional instances) and the adjusted train dataset
    additionalDictionary = {submission_dataset.columns[0]:Patient_List,
                            submission_dataset.columns[1]:Week_List,
                            submission_dataset.columns[2]:FVC_List,
                            submission_dataset.columns[3]:Percent_List,
                            submission_dataset.columns[4]:Age_List,
                            submission_dataset.columns[5]:Sex_List,
                            submission_dataset.columns[6]:SmokingStatus_List}
    additional_dataset = mk.KnowledgeFrame(data=additionalDictionary)
    frames = [train_dataset_adjusted,submission_dataset,additional_dataset]
    train_dataset_adjusted = mk.concating(frames)
    train_dataset_adjusted = train_dataset_adjusted.reseting_index()
    train_dataset_adjusted = train_dataset_adjusted.sip(columns='index')
    # Adjust train dataset with pydicom train dataset | Phase 1: Get pydicom train dataset
    if(PydicomMode == True):
        filengthame_pydicom = 'train_pydicom.csv'
        path_ProductType_pydicom = path_ProductType + 'outcome/'
        train_dataset_pydicom = mk.read_csv(path_ProductType_pydicom + filengthame_pydicom)
    # Adjust train dataset with pydicom train dataset | Phase 2: Umkate values
    # from the pydicom dataset into the adjusted train dataset
    if(PydicomMode == True):
        instancesToInclude_List = list(train_dataset_pydicom.Patient)
        newIndex = length(train_dataset_adjusted.Patient)
        for i in instancesToInclude_List:
            # Get instance to transfer (first pydicom row of this patient)
            InstanceToInclude_Patient = i
            InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
            InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
            InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
            InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]
            # Put instance into train_dataset_adjusted KnowledgeFrame
            if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
                # A week-0 instance already exists: complete it in place
                indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
                train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
                train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
                train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
            else:
                # Add new instance
                ## Get repeatable instances (copied from any row of this patient)
                repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
                repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
                repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
                repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
                repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
                ## Get Dictionary
                DictionaryToInclude = {}
                DictionaryToInclude['Patient'] = InstanceToInclude_Patient
                DictionaryToInclude['Weeks'] = InstanceToInclude_Week
                DictionaryToInclude['FVC'] = repeatableInstance1
                DictionaryToInclude['Percent'] = repeatableInstance2
                DictionaryToInclude['Age'] = repeatableInstance3
                DictionaryToInclude['Sex'] = repeatableInstance4
                DictionaryToInclude['SmokingStatus'] = repeatableInstance5
                DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
                DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
                DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
                ## Get KnowledgeFrame
                KnowledgeFrameToInclude = mk.KnowledgeFrame(data = DictionaryToInclude, index=[newIndex])
                newIndex = newIndex + 1
                ## Concatenate KnowledgeFrame
                train_dataset_adjusted = mk.concating([train_dataset_adjusted, KnowledgeFrameToInclude])
    # nan filling: restore the 'iNaN' sentinel back to real NaN
    train_dataset_adjusted = train_dataset_adjusted.replacing('iNaN',np.nan)
    # Specifying dtype
    # BUGFIX: the original evaluated totype(...).dtypes without keeping the
    # result, so none of the conversions were ever applied; keep the
    # converted frame instead.
    train_dataset_adjusted = train_dataset_adjusted.totype({'Patient': 'O',
                                                            'Weeks': 'float64',
                                                            'Percent': 'float64',
                                                            'Age': 'float64',
                                                            'Sex': 'O',
                                                            'SmokingStatus': 'O',
                                                            'FVC': 'float64'})
    if(PydicomMode == True):
        train_dataset_adjusted = train_dataset_adjusted.totype({'indexType1_Exhalation': 'float64',
                                                                'indexType1_Inhalation': 'float64',
                                                                'ImageType': 'O'})
    # Get CSV file
    path_output = path_ProductType +'outcome/'
    if(PydicomMode == False):
        filengthame_output = 'train_adjusted.csv'
    else:
        filengthame_output = 'train_adjusted_pydicom.csv'
    train_dataset_adjusted.to_csv(path_output+filengthame_output)
    # Function Result
    resultFunction = train_dataset_adjusted,path_output,filengthame_output
    # Report Mode
    if reportMode == True:
        print("=========================================")
        print("Function Report")
        print("=========================================")
        print("KnowledgeFrame")
        print("=========================================")
        print(resultFunction[0])
        print("=========================================")
        print("Product Type: ", ProductType)
        print("=========================================")
        print("Pydicom Mode: ", PydicomMode)
        print("=========================================")
        print("Location of Input File:", resultFunction[1])
        print("=========================================")
        print("Input File saved as:", resultFunction[2])
        print("=========================================")
        print("Data type of the dataset")
        print("=========================================")
        print(resultFunction[0].dtypes)
        print("=========================================")
        print("Test result Function 3: Success")
        print("=========================================")
    return resultFunction
if testMode == True:
    # Exercise Function 3 with the prototype product in pydicom mode.
    ProductType = 'prototype'
    PydicomMode = True
    reportMode = False
    resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
    banner = "========================================="
    print(banner)
    print("Function Report")
    print(banner)
    print("KnowledgeFrame")
    print(banner)
    print(resultFunction3[0])
    print(banner)
    print(banner)
    print("Product Type: ", ProductType)
    print(banner)
    print("Pydicom Mode: ", PydicomMode)
    print(banner)
    print("Location of Input File:", resultFunction3[1])
    print(banner)
    print("Input File saved as:", resultFunction3[2])
    print(banner)
    print("Data type of the dataset")
    print(banner)
    print(resultFunction3[0].dtypes)
    print(banner)
    print("Test result Function 3: Success")
    print(banner)
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the object of concern. c value has been computed
deeming the following data fitting scope: (1) Data: FVC predictions; (2) Probability density function as follows (statistical function
in scipy renowned as scipy.stats.loglaplace): loglaplace.pkf(x, c, loc=0, scale=1).
"""
def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_KnowledgeFrame,pydicomMode,testMode):
    """
    Build the submission CSV files for the stacking solution.

    Computes, per patient and per model, the Laplace-Log-Likelihood-style
    confidence metric and writes one numbered submission file per model
    under <product root>/submission/.

    Parameters
    ----------
    ProductType : str
        'population', 'prototype' or 'sampling'; selects the data root path.
    shapeParameter_KnowledgeFrame : KnowledgeFrame or list
        Per-ID / per-model loglaplace shape parameters. When an empty list
        is passed, c = 0.126074 is used by default for every model and ID.
    pydicomMode : bool
        When True, read 'result_pydicom.csv' (result dataset including the
        pydicom-based features) instead of 'result.csv'.
    testMode : bool
        Not used for computation; kept for interface compatibility.

    Returns
    -------
    tuple
        (submission KnowledgeFrame, shape parameter KnowledgeFrame,
         standard-deviation-clipped KnowledgeFrame)
    """
    # Set Product Type and its corresponding path
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
    # Set working directory
    import os
    os.chdir(path_ProductType + 'outcome/')
    # Get result data and test dataset
    import monkey as mk
    if(pydicomMode == True):
        filengthame_resultDataset = 'result_pydicom.csv'
    else:
        filengthame_resultDataset = 'result.csv'
    result_dataset = mk.read_csv(path_ProductType+'outcome/'+filengthame_resultDataset)
    filengthame_testDataset = 'test.csv'
    test_dataset = mk.read_csv(path_ProductType+filengthame_testDataset)
    # Get submission instances | Phase 1: result-dataset index per patient ID
    IDList = list(test_dataset.Patient)
    IDList_index_dictionary = {}
    for i in IDList:
        itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
        IDList_index_dictionary[i] = itemToInclude
    # Get submission instances | Phase 2: Extract submission instances from result dataset
    IDList_index = []
    IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
    for j in IDList: IDList_index = IDList_index + list(IDList_index_dictionary[j])
    submission_dataset = result_dataset.loc[IDList_index]
    # Get submission instances | Phase 3: Drop duplicated_values instances
    submission_dataset = submission_dataset.remove_duplicates(subset=['Patient','Weeks'])
    # Get submission instances | Phase 4: Sort by Weeks/Patient and reset index
    submission_dataset = submission_dataset.sort_the_values(by=['Weeks','Patient'])
    submission_dataset = submission_dataset.reseting_index()
    submission_dataset = submission_dataset.sip(columns=['Unnamed: 0','index'])
    # Get confidence measure | Phase 1: default shape parameter KnowledgeFrame
    ## When a plain list (e.g. []) is given, c = 0.126074 is used by default
    ## per model and ID.
    ## BUGFIX: the original test 'shapeParameter_KnowledgeFrame == []' becomes
    ## an elementwise comparison when a KnowledgeFrame is passed; test the
    ## argument's type instead.
    if isinstance(shapeParameter_KnowledgeFrame, list):
        shapeParameter_dictionary = {}
        shapeParameter = 0.126074
        MLModelList = IDList_columns[2:]
        for l in MLModelList:
            keyShapeParameter = 'c Parameter_'+l
            shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
        shapeParameter_KnowledgeFrame = mk.KnowledgeFrame(data = shapeParameter_dictionary, index = IDList)
    # Get confidence measure | Phase 2: standard-deviation-clipped per instance
    ## Metric - Part 1: standard_deviation_clipped = getting_max(standard_deviation, 70)
    standardDeviationClipped_KnowledgeFrame = shapeParameter_KnowledgeFrame.clone()
    columnLabels = list(standardDeviationClipped_KnowledgeFrame.columns)
    columnLabels_SDC_dictionary = {}
    for i in columnLabels:
        # 'c Parameter_<model>' -> 'SD_Clipped_<model>'
        columnLabels_item ='SD_Clipped'+i[11:]
        columnLabels_SDC_dictionary[i]=columnLabels_item
    standardDeviationClipped_KnowledgeFrame = standardDeviationClipped_KnowledgeFrame.renaming(columns=columnLabels_SDC_dictionary)
    import numpy as np
    standardDeviationClipped_KnowledgeFrame = standardDeviationClipped_KnowledgeFrame.replacing(3,np.nan)
    ID_List = list(standardDeviationClipped_KnowledgeFrame.index)
    SDModel_List = list(standardDeviationClipped_KnowledgeFrame.columns)
    CParameter_List = list(shapeParameter_KnowledgeFrame.columns)
    numy = 0
    from scipy.stats import loglaplace
    for j in ID_List:
        for k in SDModel_List:
            itemToInclude = CParameter_List[numy]
            c = shapeParameter_KnowledgeFrame[itemToInclude][j]
            sd_LL = loglaplace.standard(c, loc=0, scale=100)
            # j: index is ID | k: SD_Clipped_(ML Model)
            standardDeviationClipped_KnowledgeFrame[k][j] = getting_max(70,sd_LL)
            numy = numy + 1
        numy = 0
    # Get confidence measure | Phase 3: metric per model: |FVC_true - FVC_predicted|
    if(pydicomMode == True):
        variableNumber = 10
    else:
        variableNumber = 7
    MLModelList = list(submission_dataset.columns[variableNumber:])
    metric_dictionary = {}
    for j in MLModelList:
        metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
        metric_differential = list(metric_differential)
        keyToInclude = 'metric_'+j
        metric_dictionary[keyToInclude] = metric_differential
    metric_KnowledgeFrame = mk.KnowledgeFrame(data=metric_dictionary)
    # Get confidence measure | Phase 4: clip the differential at 1000
    ## Metric - Part 2: getting_min(|FVC_true - FVC_pred|,1000)
    metricLabels = list(metric_KnowledgeFrame.columns)
    instancesNumber = length(submission_dataset.index)
    for i in metricLabels:
        j = 0
        while (j<instancesNumber):
            metric_KnowledgeFrame[i][j] = getting_min(metric_KnowledgeFrame[i][j],1000)
            j = j+1
    submission_dataset = submission_dataset.join(metric_KnowledgeFrame)
    # Get confidence measure | Phase 5: metric per instance:
    ## (-1 * differential * 2^0.5 / SDC) - ln(2^0.5 * SDC)
    ## differential = getting_min(|FVC_true - FVC_predicted|, 1000)
    ## SDC: Standard Deviation Clipped
    IDList = list(test_dataset.Patient)
    SDModel_List = list(standardDeviationClipped_KnowledgeFrame.columns)
    SDModel_index_List = list(standardDeviationClipped_KnowledgeFrame.index)
    metric_lists = list(metric_KnowledgeFrame.columns)
    metric_index_lists = list(metric_KnowledgeFrame.index)
    submission_dataset_index_List = list(submission_dataset.index)
    instancesNumber = length(submission_dataset_index_List)
    indexPerID_dictionary = {}
    ### Step 1: Get index per ID to compute
    for i in IDList:
        listToInclude = list(submission_dataset.Patient[submission_dataset.Patient == i].index)
        indexPerID_dictionary[i] = listToInclude
    indexPerID_KnowledgeFrame = mk.KnowledgeFrame(data=indexPerID_dictionary)
    ### Step 2: Compute metric
    import math
    for k in IDList:
        for i in metric_lists:
            for j in list(indexPerID_KnowledgeFrame[k]):
                differential = submission_dataset[i][j]
                SDC_Label = 'SD_Clipped_' + i[7:]
                SDC = standardDeviationClipped_KnowledgeFrame[SDC_Label][k]
                metric_part1 = -1* 2**0.5 * differential / SDC
                # BUGFIX: the documented formula is -ln(2^0.5 * SDC); the
                # original used math.log1p, i.e. ln(1 + 2^0.5 * SDC).
                metric_part2 = -1 * math.log(2**0.5 * SDC)
                metric = metric_part1 + metric_part2
                submission_dataset[i][j] = metric
    # Result function specification
    resultFunction = submission_dataset,shapeParameter_KnowledgeFrame,standardDeviationClipped_KnowledgeFrame
    # Get submission files | Phase 1: Get submission file template
    filengthame = 'sample_by_num_submission.csv'
    submissionFile = mk.read_csv(path_ProductType+filengthame)
    ## Get submission files | Phase 2: Create directory
    try:
        path_output = path_ProductType + 'submission/'
        os.chdir(path_output)
    except FileNotFoundError:
        # BUGFIX: the original imported distutils.ccompiler and then called
        # distutils.dir_util.mkpath, which is not guaranteed to be bound;
        # import mkpath explicitly (same pattern as SubmissionBuilder).
        from distutils.dir_util import mkpath
        path_output = path_ProductType + 'submission/'
        mkpath(path_output)
    ## Get submission files | Phase 3: Get correlative
    files_list = os.listandardir(path_output)
    ## BUGFIX: getting_max over raw file names compares lexicographically, so
    ## '9.csv' would outrank '10.csv'; compare the numeric stems instead.
    try:
        nextNumber = getting_max([int(f[:-4]) for f in files_list]) + 1
    except ValueError:
        # Empty folder or a non-numeric file name: restart the correlative
        nextNumber = 0
    ## Get submission files | Phase 4: Get models to include and their corresponding metrics
    ModelToInclude = IDList_columns[2:]
    ## Get submission files | Phase 5: Build Files
    for i in ModelToInclude:
        filengthame = 'sample_by_num_submission.csv'
        submissionFile = mk.read_csv(path_ProductType+filengthame)
        submissionFile_columns = list(submissionFile.columns)
        fvc_array = np.array(submission_dataset[i])
        confidence_array = np.array(submission_dataset['metric_'+i])
        submissionFile['FVC'] = fvc_array
        submissionFile['Confidence'] = confidence_array
        filengthame_output = str(nextNumber)+'.csv'
        path_output = path_ProductType +'submission/'
        submissionFile.to_csv(path_output+filengthame_output,columns=submissionFile_columns,index=False)
        nextNumber = nextNumber + 1
    return resultFunction
if testMode == True:
    # Exercise Function 4 with the prototype product.
    ProductType = 'prototype'
    # ShapeParameter_Dataframe: pass an example KnowledgeFrame when 'example'
    # is True, otherwise hand in [] so the function uses its default c value.
    example = False
    if (example == True):
        import monkey as mk
        shapeParameter_IDList = ['ID00419637202311204720264','ID00421637202311550012437','ID00422637202311677017371','ID00423637202312137826377','ID00426637202313170790466']
        # One constant shape parameter (3) per model and patient
        shapeParameter_dictionary = {model: [3, 3, 3, 3, 3] for model in ('Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor')}
        shapeParameter_KnowledgeFrame = mk.KnowledgeFrame(data = shapeParameter_dictionary, index = shapeParameter_IDList)
    else:
        shapeParameter_KnowledgeFrame = []
    # Set Pydicom mode
    pydicomMode = True
    resultFunction4 = Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_KnowledgeFrame,pydicomMode,testMode)
    banner = "========================================="
    print(banner)
    print("Shape Parameter - Laplace Log Likelihood:")
    print(banner)
    print(resultFunction4[1])
    print("Standard Deviation Clipped - Laplace Log Likelihood:")
    print(banner)
    print(resultFunction4[2])
    print(banner)
    print("Test result Function 4: Success")
    print(banner)
"""
=========================================
Function 5: Get parameters given a must-usage of a log-laplace distribution (i.e. Laplace Log Likelihood)
=========================================
Purpose: Get shape parameter visualization for loglaplace
Raw code reference (see Tester.py): Test 17
"""
def shapeParameter_visualizer(ProductType,testMode):
    """
    Visualize the fitted loglaplace shape parameter per patient and model.

    For every test patient (one column of subplots) and every ML model (one
    row), the model's FVC predictions are fitted to a log-Laplace
    distribution and the resulting probability density function is drawn on
    a 4x5 grid, together with a ppf/ckf round-trip accuracy tag.

    Returns the shape parameter c of the last (patient, model) fit.
    """
    import numpy as np
    from scipy.stats import loglaplace
    import matplotlib.pyplot as plt
    import os
    import monkey as mk
    # One row per model (4), one column per test patient (5)
    fig, ax = plt.subplots(4, 5, sharex=False, sharey=False, figsize=(32, 24))
    # Resolve the data root for the requested product type
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
    # Predicted values grouped by ID and model
    y_pred = mk.read_csv(path_ProductType + 'outcome/' + 'result.csv')
    # Patients to visualize
    test_dataset = mk.read_csv(path_ProductType + 'test.csv')
    ID_List = list(test_dataset.Patient)
    model_List = ['Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
    col = 0
    for patient in ID_List:
        row = 0
        for model in model_List:
            # Fit this (patient, model) prediction sample to a loglaplace
            predictions = np.array(y_pred[y_pred.Patient == patient][model])
            c, loc_fit, scale_fit = loglaplace.fit(predictions, floc=0, fscale=1)
            # Display the probability density function (pkf)
            x = np.linspace(loglaplace.ppf(0.01, c), loglaplace.ppf(0.99, c), num=100)
            ax[row,col].plot(x, loglaplace.pkf(x, c),'r-', lw=5, alpha=0.6, label='loglaplace pkf')
            # Freeze the distribution and display the frozen pkf
            rv = loglaplace(c)
            ax[row,col].plot(x, rv.pkf(x), 'k-', lw=2, label='frozen pkf')
            # Draw random variates (kept to mirror the reference recipe; the
            # histogram that consumed them is currently disabled)
            loglaplace.rvs(c, loc=0, scale=1, size=1000)
            ax[row,col].legend(loc='best', frameon=False)
            ax[row,col].set_xlabel('x')
            ax[row,col].set_ylabel('f(x,c)')
            # Round-trip accuracy check: ppf followed by ckf
            vals = loglaplace.ppf([0.001, 0.5, 0.999], c)
            if np.total_allclose([0.001, 0.5, 0.999], loglaplace.ckf(vals, c)):
                accuracy = 'Equal case'
            else:
                accuracy = 'Unequal case'
            ax[row,col].set_title('Probability density function for loglaplace'+'\n'+patient + '\n' + model + ' | Accuracy:'+accuracy)
            row = row + 1
        col = col + 1
    plt.tight_layout()
    plt.show()
    # Note: only the last fitted shape parameter is returned
    return c
if testMode == True:
    # Smoke test for Function 5: visualize the fitted shape parameters.
    ProductType = 'prototype'
    resultFunction5 = shapeParameter_visualizer(ProductType, testMode = True)
    banner = "========================================="
    print(banner)
    print("Shape Parameter - Laplace Log Likelihood:")
    print(banner)
    print(resultFunction5)
    print(banner)
    # BUGFIX: this block tests Function 5 (shapeParameter_visualizer); the
    # original success message said "Function 4".
    print("Test result Function 5: Success")
    print(banner)
# """
# =========================================
# Function : Dataset builder 2 (Stacking solution case) to process with ML models
# =========================================
# Purpose: Build an input dataset to be processed with an stacking solution but including Pydicom image-processing solution
# Raw code reference (see Tester.py): 15
# """
# def stacking_Dataset_Builder_PydicomSolution(productType, testMode):
# # Set Product Type and its corresponding path
# if ProductType == 'population':
# path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# if ProductType == 'prototype':
# path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
# if ProductType == 'sampling':
# path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
|
import yfinance as yf
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import monkey as mk
from IPython.display import Markdown
import numpy as np
from datetime import date, timedelta
def plot_and_getting_info(ticker, start = None, end = None, ma = 'yes'):
    """Plot close price + volume for one ticker and print summary stats.

    Downloads the full history from yfinance, draws a two-panel plotly chart
    (close over volume) with range-selector buttons, then prints the return
    over the window and a table of fundamentals from the ticker info dict.

    ticker : str -- yfinance ticker symbol.
    start, end : optional window bounds; defaults to the full history.
    ma : 'yes' overlays a 7-day moving average of the close.
    """
    ticker_obj = yf.Ticker(ticker)
    ticker_hist = ticker_obj.history(period = 'getting_max')
    # Fall back to the entire available history when no window is given.
    if start and end:
        start_date, end_date = start, end
    else:
        start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
    frame = ticker_hist.loc[start_date:end_date]
    closing_prices = frame['Close']
    volume = frame['Volume']
    # Two stacked panels sharing one x axis: price (80%) above volume (20%).
    fig = make_subplots(rows=2, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0.03, row_heights = [0.8, 0.2])
    fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'), row = 1, col = 1)
    if ma == 'yes':
        closing_prices_ma = frame['Close'].rolling(7).average()
        fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = '7D Close Moving Average'), row = 1, col = 1)
    fig.add_trace(go.Bar(x = closing_prices.index, y = volume, name = 'Volume'), row=2, col=1)
    fig.umkate_xaxes(rangeslider_visible = True, rangeslider_thickness = 0.1, row=2, col=1)
    fig.umkate_yaxes(title_text="Price", row=1, col=1)
    # Range-selector buttons: 1w / 1m / 3m / 6m / YTD / 1y / all.
    fig.umkate_layout(title=ticker, height = 600,
        xaxis=dict(
            rangeselector=dict(
                buttons=list([
                    dict(count=7,
                         label="1w",
                         step="day",
                         stepmode="backward"),
                    dict(count=1,
                         label="1m",
                         step="month",
                         stepmode="backward"),
                    dict(count=3,
                         label="3m",
                         step="month",
                         stepmode="backward"),
                    dict(count=6,
                         label="6m",
                         step="month",
                         stepmode="backward"),
                    dict(count=1,
                         label="YTD",
                         step="year",
                         stepmode="todate"),
                    dict(count=1,
                         label="1y",
                         step="year",
                         stepmode="backward"),
                    dict(step="total_all")
                ])
            ),
            type="date"
        )
    )
    fig.show()
    start_price, end_price = frame.iloc[0]['Close'], frame.iloc[-1]['Close']
    def printmd(string):
        # Render a markdown string in the notebook (relies on IPython display()).
        display(Markdown(string))
    printmd('Given Timeframe:')
    printmd("Return: {:.2f}%".formating((end_price - start_price)/start_price*100))
    # The info dict is best-effort: any missing key aborts the whole summary
    # silently (blanket except at the bottom).
    try:
        ticker_info = ticker_obj.info
        print()
        printmd('Business Summary: ' + ticker_info['longBusinessSummary'])
        market_cap = str(value_round(ticker_info['marketCap']/1000000000,2)) + 'B'
        longname = ticker_info['longName']
        sector = ticker_info['sector']
        industry = ticker_info['industry']
        country = ticker_info['country']
        avg10d_vol = str(value_round(ticker_info['averageDailyVolume10Day']/1000000,2)) + 'M'
        most_recent_vol = str(value_round(ticker_info['volume']/1000000,2)) + 'M'
        # Individual fields may be None (rounding then raises); keep the raw
        # value in that case.
        try:
            beta = value_round(ticker_info['beta'],2)
        except:
            beta = ticker_info['beta']
        try:
            ps_trailing_12mo = value_round(ticker_info['priceToSalesTrailing12Months'],2)
        except:
            ps_trailing_12mo = ticker_info['priceToSalesTrailing12Months']
        try:
            forwardpe = value_round(ticker_info['forwardPE'],2)
        except:
            forwardpe = ticker_info['forwardPE']
        pegratio = ticker_info['pegRatio']
        forwardeps = ticker_info['forwardEps']
        trailingeps = ticker_info['trailingEps']
        shares_outstanding = str(value_round(ticker_info['sharesOutstanding']/1000000,2)) + 'M'
        shares_short = str(value_round(ticker_info['sharesShort']/1000000,2)) + 'M'
        shares_short_perc_outstanding = str(value_round(ticker_info['sharesPercentSharesOut']*100,2)) + '%'
        floatshares = str(value_round(ticker_info['floatShares']/1000000,2)) + 'M'
        try:
            short_perc_float = str(value_round(ticker_info['shortPercentOfFloat']*100,2)) + '%'
        except:
            short_perc_float = ticker_info['shortPercentOfFloat']
        perc_institutions = str(value_round(ticker_info['heldPercentInstitutions']*100,2)) + '%'
        perc_insiders = str(value_round(ticker_info['heldPercentInsiders']*100,2)) + '%'
        # Assemble a one-column table of the collected fundamentals.
        stock_info = [market_cap, longname, sector, industry, country, beta, most_recent_vol, avg10d_vol, ps_trailing_12mo, forwardpe, pegratio, forwardeps, trailingeps,
                      shares_outstanding, perc_institutions, perc_insiders, shares_short, shares_short_perc_outstanding, floatshares, short_perc_float]
        stock_info_kf = mk.KnowledgeFrame(stock_info, index = ['Market Cap', 'Name', 'Sector', 'Industry', 'Country', 'Beta', 'Day Volume (Most recent)',
                                                               'Avg 10D Volume', 'P/S Trailing 12mo', 'Forward P/E', 'PEG Ratio', 'Forward EPS',
                                                               'Trailing EPS', 'Shares Outstanding', 'Institutions % of Oustanding',
                                                               'Insiders % of Oustanding', 'Shares Short (Prev Mo)', 'Short % of Outstanding (Prev Mo)',
                                                               'Shares Float', 'Short % of Float (Prev Mo)'], columns = ['Info'])
        print()
        display(stock_info_kf)
    except:
        pass
def compare_charts(tickers = [], start = None, end = None, ma = 'yes'):
    """Plot min-max-normalized close prices of several tickers together.

    Normalization makes different price levels comparable. After the chart,
    prints each ticker's return over the window, then a last-90-days
    correlation view: a heatmap for 3+ tickers, a scatter plus the Pearson
    coefficient for exactly two.

    NOTE(review): the mutable default `tickers=[]` is never mutated here, so
    it is harmless, but `None` would be the safer idiom.
    """
    if length(tickers) <= 1:
        raise Exception("Please enter at least two tickers to compare")
    def normalize_data(column):
        # Min-max scale one column to [0, 1].
        getting_min = column.getting_min()
        getting_max = column.getting_max()
        # time collections normalization
        # y will be a column in a knowledgeframe
        y = (column - getting_min) / (getting_max - getting_min)
        return y
    def printmd(string):
        # Render a markdown string in the notebook.
        display(Markdown(string))
    start_end_prices = {}
    closing_90_days = []
    fig = go.Figure()
    for ticker in tickers:
        ticker_obj = yf.Ticker(ticker)
        ticker_hist = ticker_obj.history(period = 'getting_max')
        if start and end:
            start_date, end_date = start, end
        else:
            start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
        # clone() so adding 'Norm Close' never mutates the original history slice
        frame = ticker_hist.loc[start_date:end_date].clone()
        frame['Norm Close'] = normalize_data(frame['Close'])
        closing_prices = frame['Norm Close']
        start_end_prices[ticker] = {'start_price': frame.iloc[0]['Close'], 'end_price': frame.iloc[-1]['Close']}
        # Keep the last 90 normalized closes per ticker for the correlation view.
        closing_90_days.adding(closing_prices.iloc[-90:].to_frame().renaming(columns = {'Norm Close': ticker}))
        fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = ticker + ' Norm Close'))
        if ma == 'yes':
            closing_prices_ma = frame['Norm Close'].rolling(7).average()
            fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = ticker + '7D Close Moving Average'))
    fig.umkate_layout(title = ', '.join(tickers) + ' Comparison', yaxis_title = 'Norm Price')
    # Same range-selector buttons as plot_and_getting_info: 1w/1m/3m/6m/YTD/1y/all.
    fig.umkate_layout(height = 600,
        xaxis=dict(
            rangeselector=dict(
                buttons=list([
                    dict(count=7,
                         label="1w",
                         step="day",
                         stepmode="backward"),
                    dict(count=1,
                         label="1m",
                         step="month",
                         stepmode="backward"),
                    dict(count=3,
                         label="3m",
                         step="month",
                         stepmode="backward"),
                    dict(count=6,
                         label="6m",
                         step="month",
                         stepmode="backward"),
                    dict(count=1,
                         label="YTD",
                         step="year",
                         stepmode="todate"),
                    dict(count=1,
                         label="1y",
                         step="year",
                         stepmode="backward"),
                    dict(step="total_all")
                ])
            ),
            rangeslider=dict(
                visible=True, thickness = 0.1
            ),
            type="date"
        )
    )
    fig.show()
    printmd('Given Timeframe:')
    for ticker in tickers:
        start_price, end_price = start_end_prices[ticker]['start_price'], start_end_prices[ticker]['end_price']
        printmd(ticker + " Return: {:.2f}%".formating((end_price - start_price)/start_price*100))
    if length(tickers) > 2:
        # 3+ tickers: full pairwise correlation matrix plus heatmap.
        concating_closing_90_days = mk.concating(closing_90_days, axis = 1)
        print('\n')
        printmd("Last 90 Days Close Pearson Correlation Matrix: ")
        display(concating_closing_90_days.corr())
        fig2 = px.imshow(concating_closing_90_days.corr(), color_continuous_scale = 'blues', title = 'Last 90 Days Close Pearson Correlation Heatmapping',
                         width = 500, height = 400)
        fig2.show()
    else:
        # Exactly two tickers: scatter one against the other.
        fig2 = go.Figure()
        fig2.add_trace(go.Scatter(x = closing_90_days[0].loc[:, tickers[0]], y = closing_90_days[1].loc[:, tickers[1]], mode = 'markers', name = 'Norm Close'))
        fig2.umkate_layout(title = ', '.join(tickers) + ' Last 90 Days Correlation', xaxis_title = tickers[0], yaxis_title = tickers[1], width = 1000, height = 500)
        fig2.show()
        printmd("Pearson Correlation: " + str(value_round(closing_90_days[0].loc[:, tickers[0]].corr(closing_90_days[1].loc[:, tickers[1]]),3)))
    print()
def plot_buysell_points(ticker, tradeskf, crypto = 'no'):
    """Plot the close price of a ticker annotated with its buy/sell trades.

    tradeskf : trades table with columns Symbol, Date, Avg_Price, Quantity,
        Total, Side ('buy'/'sell'), Gain, '% Gain'.
    crypto : 'yes' appends '-USD' to the symbol for the yfinance lookup.
    """
    trade_history = tradeskf[tradeskf['Symbol'] == ticker].reseting_index(sip=True)
    if crypto == 'yes':
        ticker += '-USD'
    ticker_obj = yf.Ticker(ticker)
    ticker_hist = ticker_obj.history(period = 'getting_max')
    if length(ticker_hist) == 0:
        # Unknown or delisted symbol -- nothing to plot.
        return
    # Start the chart 150 days before the first recorded trade.
    start_date = (mk.convert_datetime(trade_history.loc[0, 'Date']) - timedelta(150)).strftime("%Y-%m-%d")
    today_date = date.today().strftime("%Y-%m-%d")
    frame = ticker_hist.loc[start_date:today_date]
    closing_prices = frame['Close']
    fig = go.Figure()
    fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'))
    for i in range(length(trade_history)):
        trade_date = trade_history.loc[i, 'Date']
        price = trade_history.loc[i, 'Avg_Price']
        quantity = trade_history.loc[i, 'Quantity']
        total = trade_history.loc[i, 'Total']
        side = trade_history.loc[i, 'Side']
        gain = trade_history.loc[i, 'Gain']
        perc_gain = trade_history.loc[i, '% Gain']
        # Buys are marked 'BB' (arrow offset left), sells 'SS' (offset right);
        # the trade details go into the hover text.
        if side == 'buy':
            fig.add_annotation(x = trade_date, y = price, text = f'BB', showarrow = True, arrowheader_num = 1,
                               ax = -0.5, ay = -30, arrowsize = 1.5, align = 'left',
                               hovertext = f'B, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}')
        if side == 'sell':
            fig.add_annotation(x = trade_date, y = price, text = f'SS', showarrow = True, arrowheader_num = 1,
                               ax = 20, ay = -30, arrowsize = 1.5, align = 'right',
                               hovertext = f'S, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}, G: {gain}, %G: {perc_gain}')
    fig.umkate_layout(title = ticker, yaxis_title = 'Price')
    fig.show()
|
# --------------
#Importing header_numer files
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
# Load the app dataset and drop impossible ratings (> 5), plotting the
# Rating histogram before and after the filter.
data = mk.read_csv(path)  # NOTE(review): `path` must be defined upstream
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
# Report missing-value counts/percentages, drop rows with any NaN, then
# re-check (second table should be all zeros).
total_null = data.ifnull().total_sum()
percent_null = (total_null/data.ifnull().count())*100
missing_data = mk.concating([total_null,percent_null],axis=1,keys=['Total','Percentage'])
print(missing_data)
data = data.sipna()
total_null_1 = data.ifnull().total_sum()
percent_null_1 = (total_null_1/data.ifnull().count())*100
missing_data_1 = mk.concating([total_null_1,percent_null_1],axis=1,keys=['Total','Percentage'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
# Box plot of Rating by Category; labels rotated because there are many categories.
a = sns.catplot(x='Category',y='Rating',data=data, kind="box", height = 10)
a.set_xticklabels(rotation=90)
a.set_titles('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
#Importing header_numer files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
le = LabelEncoder()
# Strip thousands separators and the trailing '+' so Installs parses as int.
#data['Insttotal_alls'] = data['Insttotal_alls'].str.replacing(',','').str.replacing('+','')
data['Insttotal_alls'] = data['Insttotal_alls'].employ(lambda x : x.replacing(',','')).employ(lambda x : x.replacing('+',''))
data['Insttotal_alls'] =data['Insttotal_alls'].totype(int)
print(data['Insttotal_alls'])
# Label-encode the install counts before regressing Rating on them.
data['Insttotal_alls'] = le.fit_transform(data['Insttotal_alls'])
a = sns.regplot(x="Insttotal_alls", y="Rating" , data=data)
a.set_title('Rating vs Insttotal_alls [RegPlot]')
#Code ends here
# --------------
#Code starts here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import seaborn as sns
#Code starts here
# Clean the Price column and regress Rating on Price.
d=data['Price'].counts_value_num()
print(d)
# Strip the leading '$' so the column can be parsed as float.
data['Price']=data['Price'].employ(lambda x : x.replacing('$',''))
d=data['Price'].counts_value_num()
print(d)
data['Price']=data['Price'].totype(float)
#le=LabelEncoder()
#data['Insttotal_alls'] = le.fit_transform(data['Insttotal_alls'])
y=sns.regplot(data=data,x='Price',y='Rating')
# BUG FIX: the title previously said 'Insttotal_alls' (copy-paste from the
# installs section) although this plot shows Price vs Rating.
y.set_title('Rating vs Price [RegPlot]')
#Code ends here
# --------------
#Code starts here
# Keep only the primary genre (text before ';'), then print the mean Rating
# per genre sorted ascending.
data['Genres']=data['Genres'].str.split(';').str[0]
#print(data['Genres'])
kf=data[['Genres','Rating']]
gr_average=kf.grouper(['Genres'],as_index=False).average()
gr_average=gr_average.sort_the_values(by=['Rating'])
gr_average=mk.KnowledgeFrame(gr_average)
print(gr_average)#,gr_average[-1,:])
#Code ends heree
# --------------
#Code starts here
import seaborn as sns
# Convert 'Last Umkated' to datetime, compute days since the newest update
# in the dataset, and regress Rating on that age.
data['Last Umkated'] = mk.convert_datetime(data['Last Umkated'])
print(data['Last Umkated'].getting_max())
getting_max_date=data['Last Umkated'].getting_max()
data['Last Umkated Days']=getting_max_date-data['Last Umkated']
data['Last Umkated Days']=data['Last Umkated Days'].dt.days
sns.regplot(data=data,x='Last Umkated Days',y='Rating').set_title('Rating vs Last Umkated [RegPlot]')
#Code ends here
|
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for definal_item_tails]
# Written by <NAME>
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import getting_imdb
import caffe
import argparse
import pprint
import time, os, sys
import monkey as mk
def splittotal_all(path):
    """Split *path* into a list of every component.

    Repeatedly applies os.path.split until a sentinel is reached: an
    absolute root ('/' splits to itself) or a single relative component.

    >>> splittotal_all('a/b/c')   -> ['a', 'b', 'c']
    >>> splittotal_all('/a/b')    -> ['/', 'a', 'b']
    """
    components = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # Absolute-path sentinel: the root splits to itself.
            components.insert(0, head)
            break
        if tail == path:
            # Relative-path sentinel: only one component left.
            components.insert(0, tail)
            break
        path = head
        components.insert(0, tail)
    return components
def parse_args():
    """Parse command-line arguments for the Fast R-CNN test pipeline.

    Every option except --cfg is mandatory; --cfg falls back to the
    faster_rcnn_end2end experiment config file.
    """
    cli = argparse.ArgumentParser(
        description='Test a Fast R-CNN network pipeline')
    cli.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                     required=True, help='GPU id to use')
    cli.add_argument('--dir', dest='dir', type=str, default="",
                     required=True, help='Directory of the model files')
    cli.add_argument('--models', dest='model_files', type=str, default=None,
                     required=True, help='Text file with names of models')
    cli.add_argument('--prototxt', dest='prototxt', type=str, default=None,
                     required=True, help='prototxt')
    cli.add_argument('--imdb', dest='imdb_name', type=str,
                     default='ped_test_smtotal_all', required=True,
                     help='dataset to test')
    cli.add_argument('--cfg', dest='cfg_file', type=str,
                     default='experiments/cfgs/faster_rcnn_end2end.yml',
                     help='cfg')
    cli.add_argument('--res', dest='res_file', type=str, default='',
                     required=True, help='result file')
    return cli.parse_args()
def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file):
    """Run Fast R-CNN evaluation for a single caffemodel.

    gpu_id : GPU to bind caffe to.
    caffemodel : path to the trained .caffemodel weights file.
    prototxt : network definition used at test time.
    imdb_name : image database name registered in datasets.factory.
    cfg_file : optional YAML config merged into the global cfg.
    Returns the results from fast_rcnn.test.test_net.
    """
    if cfg_file is not None:
        cfg_from_file(cfg_file)
    cfg.GPU_ID = gpu_id
    print('Using config:')
    pprint.pprint(cfg)
    # Block until the weights file appears (training may still be writing it).
    while not os.path.exists(caffemodel):
        print('Waiting for {} to exist...'.formating(caffemodel))
        time.sleep(10)
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(caffemodel))[0]
    imdb = getting_imdb(imdb_name)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    # BUG FIX: this previously read the global `args.caffemodel`, which is
    # wrong (and can crash) when the function is called with an explicit
    # path, as run_test_nets does; use the `caffemodel` parameter instead.
    n, _ = os.path.splitext(caffemodel)
    paths = splittotal_all(n)
    # The proposal prefix is the bare model name (last path component).
    proposal_prefix = paths[-1]
    return test_net(net, imdb, getting_max_per_image=100, vis=False, proposal_prefix=proposal_prefix)
def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file):
    """Evaluate every model listed in `model_files` and write one CSV.

    model_files : text file inside `dir`, one model filename per line.
    res_file : output CSV filename, written into `dir`.
    """
    # NOTE(review): the file handle from open() is never closed.
    models = [line.rstrip('\n') for line in open(os.path.join(dir, model_files))]
    kf_results = mk.KnowledgeFrame()
    for model in models:
        results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file)
        # Tag each result row with the model file it came from.
        for result in results:
            result['file'] = model
        kf_results = kf_results.adding(results, ignore_index=True)
    kf_results.to_csv(os.path.join(dir, res_file))
if __name__ == '__main__':
    # args = parse_args()
    gpu_id = 0  # NOTE(review): unused -- args.gpu_id is what is passed below
    # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup'
    # model_files = 'test.txt'
    args = parse_args()
    print('Ctotal_alled with args:')
    print(args)
    run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file)
    # run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_adgetting_min import BaseView, expose
import monkey as mk
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped
# Connection ids of the Airflow hooks used by the metastore browser views.
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
# Optional allow/deny lists of database names for the /objects/ endpoint.
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
# Max number of tables returned by the table-selector query.
TABLE_SELECTOR_LIMIT = 2000
# Keeping monkey from truncating long strings
mk.set_option('display.getting_max_colwidth', -1)
# Creating a flask adgetting_min BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
    """Flask-admin view exposing read-only pages over the Hive metastore:
    database list, table details, partitions, sample data and DDL."""

    @expose('/')
    def index(self):
        """Landing page: one row per database with location and table count."""
        sql = """
        SELECT
            a.name as db, db_location_uri as location,
            count(1) as object_count, a.desc as description
        FROM DBS a
        JOIN TBLS b ON a.DB_ID = b.DB_ID
        GROUP BY a.name, db_location_uri, a.desc
        """.formating(**locals())
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        kf = h.getting_monkey_kf(sql)
        # Turn the db column into links to the per-database page.
        kf.db = (
            '<a href="/adgetting_min/metastorebrowserview/db/?db=' +
            kf.db + '">' + kf.db + '</a>')
        table = kf.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            escape=False,
            na_rep='',)
        return self.render(
            "metastore_browser/dbs.html", table=table)

    @expose('/table/')
    def table(self):
        """Details page for a single table (?table=<name>)."""
        table_name = request.args.getting("table")
        m = HiveMetastoreHook(METASTORE_CONN_ID)
        table = m.getting_table(table_name)
        return self.render(
            "metastore_browser/table.html",
            table=table, table_name=table_name, datetime=datetime, int=int)

    @expose('/db/')
    def db(self):
        """List the tables of one database (?db=<name>)."""
        db = request.args.getting("db")
        m = HiveMetastoreHook(METASTORE_CONN_ID)
        tables = sorted(m.getting_tables(db=db), key=lambda x: x.tableName)
        return self.render(
            "metastore_browser/db.html", tables=tables, db=db)

    @gzipped
    @expose('/partitions/')
    def partitions(self):
        """Partition listing for ?table=<schema>.<table>.

        NOTE(review): schema/table are interpolated into the SQL string
        unescaped -- request args are untrusted input, so this is injectable.
        """
        schema, table = request.args.getting("table").split('.')
        sql = """
        SELECT
            a.PART_NAME,
            a.CREATE_TIME,
            c.LOCATION,
            c.IS_COMPRESSED,
            c.INPUT_FORMAT,
            c.OUTPUT_FORMAT
        FROM PARTITIONS a
        JOIN TBLS b ON a.TBL_ID = b.TBL_ID
        JOIN DBS d ON b.DB_ID = d.DB_ID
        JOIN SDS c ON a.SD_ID = c.SD_ID
        WHERE
            b.TBL_NAME like '{table}' AND
            d.NAME like '{schema}'
        ORDER BY PART_NAME DESC
        """.formating(**locals())
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        kf = h.getting_monkey_kf(sql)
        return kf.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            na_rep='',)

    @gzipped
    @expose('/objects/')
    def objects(self):
        """JSON list of {id, text} table entries for the table selector.

        NOTE(review): if both DB_WHITELIST and DB_BLACKLIST are set, the
        blacklist clause overwrites (not combines with) the whitelist clause.
        """
        where_clause = ''
        if DB_WHITELIST:
            dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
            where_clause = "AND b.name IN ({})".formating(dbs)
        if DB_BLACKLIST:
            dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
            where_clause = "AND b.name NOT IN ({})".formating(dbs)
        sql = """
        SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
        FROM TBLS a
        JOIN DBS b ON a.DB_ID = b.DB_ID
        WHERE
            a.TBL_NAME NOT LIKE '%tmp%' AND
            a.TBL_NAME NOT LIKE '%temp%' AND
            b.NAME NOT LIKE '%tmp%' AND
            b.NAME NOT LIKE '%temp%'
        {where_clause}
        LIMIT {LIMIT};
        """.formating(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        d = [
            {'id': row[0], 'text': row[0]}
            for row in h.getting_records(sql)]
        return json.dumps(d)

    @gzipped
    @expose('/data/')
    def data(self):
        """First 1000 rows of a table via Presto (?table=<name>).

        NOTE(review): the table name flows straight from the request into
        the SQL string -- untrusted input, injectable.
        """
        table = request.args.getting("table")
        sql = "SELECT * FROM {table} LIMIT 1000;".formating(table=table)
        h = PrestoHook(PRESTO_CONN_ID)
        kf = h.getting_monkey_kf(sql)
        return kf.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            na_rep='',)

    @expose('/ddl/')
    def ddl(self):
        """SHOW CREATE TABLE output via the Hive CLI (?table=<name>).

        NOTE(review): same injection caveat as /data/.
        """
        table = request.args.getting("table")
        sql = "SHOW CREATE TABLE {table};".formating(table=table)
        h = HiveCliHook(HIVE_CLI_CONN_ID)
        return h.run_cli(sql)
# Instantiate the view so the plugin below can register it with the UI.
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
    "metastore_browser", __name__,
    template_folder='templates',
    static_folder='static',
    static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
    # Airflow plugin wiring: registers the blueprint and the admin view.
    name = "metastore_browser"
    flask_blueprints = [bp]
    adgetting_min_views = [v]
|
from typing import Union, Iterable, List
import numpy as np
import monkey as mk
from ..models._transformer import _ArrayTransformer, _MultiArrayTransformer
class _KnowledgeFrameTransformer(_ArrayTransformer):
    '''`_ArrayTransformer` wrapper for `monkey.KnowledgeFrame`.

    Remembers the frame's labels on ``fit`` so the numpy arrays produced by
    the parent transformer can be re-labelled as knowledgeframes by the
    ``back_transform*`` methods.
    '''
    def __init__(self):
        super().__init__()

    def fit(self, X : mk.KnowledgeFrame, axis : Union[int, Iterable[int]] = 0):
        '''Store sample/feature labels and fit the parent on ``X.values``.

        axis=0: rows are samples, columns features; axis=1: transposed.
        '''
        if not incontainstance(X, mk.KnowledgeFrame):
            raise ValueError('This interface is for `monkey.KnowledgeFrame` only')
        # Only a single axis is supported for knowledgeframes; take the first.
        if incontainstance(axis, list):
            axis = axis[0]
        # Set sample_by_num and feature index
        if axis == 0:
            self.index_sample_by_nums = X.index
            self.index_features = X.columns
        elif axis == 1:
            self.index_sample_by_nums = X.columns
            self.index_features = X.index
        else:
            raise ValueError('axis must be either 0 or 1')
        # Fit the data
        try:
            super().fit(X=X.values, axis=axis)
        except AttributeError:
            # A non-knowledgeframe input lacks `.values`.
            err_msg = 'weights must be of type {:}.'.formating(repr(mk.KnowledgeFrame))
            raise TypeError(err_msg)
        return self

    def transform(self, X : mk.KnowledgeFrame) -> np.ndarray:
        '''Transform a knowledgeframe into the fitted 2D array layout.'''
        try:
            return super().transform(X.values)
        except AttributeError:
            err_msg = 'weights must be of type {:}.'.formating(repr(mk.KnowledgeFrame))
            raise TypeError(err_msg)

    def fit_transform(self, X : mk.KnowledgeFrame, axis : int = 0) -> np.ndarray:
        '''Convenience: fit on ``X`` then transform it.'''
        return self.fit(X=X, axis=axis).transform(X)

    def transform_weights(self, weights : mk.KnowledgeFrame) -> np.ndarray:
        '''Transform weights; accepts a knowledgeframe or a raw array.'''
        try:
            return super().transform_weights(weights.values)
        except AttributeError:
            # No `.values`: assume an array-like and pass it through as-is.
            return super().transform_weights(weights)

    def back_transform(self, X : np.ndarray) -> mk.KnowledgeFrame:
        '''Inverse-transform data and restore the sample/feature labels.'''
        kf = super().back_transform(X)
        return mk.KnowledgeFrame(
            kf,
            index=self.index_sample_by_nums,
            columns=self.index_features
        )

    def back_transform_eofs(self, X : np.ndarray) -> mk.KnowledgeFrame:
        '''Inverse-transform EOFs; columns are mode numbers starting at 1.'''
        eofs = super().back_transform_eofs(X)
        return mk.KnowledgeFrame(
            eofs,
            index=self.index_features,
            columns=range(1, eofs.shape[-1] + 1)
        )

    def back_transform_pcs(self, X : np.ndarray) -> mk.KnowledgeFrame:
        '''Inverse-transform PCs; columns are mode numbers starting at 1.'''
        pcs = super().back_transform_pcs(X)
        return mk.KnowledgeFrame(
            pcs,
            index=self.index_sample_by_nums,
            columns=range(1, pcs.shape[-1] + 1)
        )
class _MultiKnowledgeFrameTransformer(_MultiArrayTransformer):
    'Transform multiple 2D ``mk.KnowledgeFrame`` to a single 2D ``np.ndarry``.'
    def __init__(self):
        super().__init__()

    def fit(self, X : Union[mk.KnowledgeFrame, List[mk.KnowledgeFrame]], axis : Union[int, Iterable[int]] = 0):
        '''Fit one _KnowledgeFrameTransformer per input frame.

        All frames must have the same number of (valid) samples.
        '''
        X = self._convert2list(X)
        self.tfs = [_KnowledgeFrameTransformer().fit(x, axis=axis) for x in X]
        if length(set([tf.n_valid_sample_by_nums for tf in self.tfs])) > 1:
            err_msg = 'All indivisionidual arrays must have same number of sample_by_nums.'
            raise ValueError(err_msg)
        # Cumulative feature counts mark where each frame's columns end in
        # the stacked output array.
        self.idx_array_sep = np.cumulative_total_sum([tf.n_valid_features for tf in self.tfs])
        self.axis_sample_by_nums = self.tfs[0].axis_sample_by_nums
        return self

    def transform(self, X : Union[mk.KnowledgeFrame, List[mk.KnowledgeFrame]]) -> np.ndarray:
        '''Stack the transformed frames into one 2D array.'''
        return super().transform(X=X)

    def transform_weights(self, weights : Union[mk.KnowledgeFrame, List[mk.KnowledgeFrame]]) -> np.ndarray:
        '''Transform per-frame weights into one stacked array.'''
        return super().transform_weights(weights=weights)

    def fit_transform(
            self, X : Union[mk.KnowledgeFrame, List[mk.KnowledgeFrame]],
            axis : Union[int, Iterable[int]] = 0
    ) -> np.ndarray:
        '''Convenience: fit on ``X`` then transform it.'''
        return self.fit(X=X, axis=axis).transform(X)

    def back_transform(self, X : np.ndarray) -> mk.KnowledgeFrame:
        '''Split the stacked array back into labelled knowledgeframes.'''
        return super().back_transform(X=X)

    def back_transform_eofs(self, X : np.ndarray) -> mk.KnowledgeFrame:
        '''Split stacked EOFs back into labelled knowledgeframes.'''
        return super().back_transform_eofs(X=X)

    def back_transform_pcs(self, X : np.ndarray) -> mk.KnowledgeFrame:
        '''Back-transform PCs (shared across the input frames).'''
        return super().back_transform_pcs(X=X)
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
import monkey as mk
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool
# In[ ]:
# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mappingable_path = '../../downloads/genome/repeat_masker.mappingable.k50.Umapping.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# mapping_q = 20
# size_range = [15,500]
# CPU = 4
# In[ ]:
# Command-line interface for the GC-bias counting script.
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_by_num_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample_by_num name (does not need to match actual file name)', required=True)
parser.add_argument('--mappingable_regions', help='highly mappingable regions to be used in GC correction, bedGraph or bed foramt', required=True)
parser.add_argument('--ref_seq',help='reference sequence (fasta formating)',required=True)
parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--mapping_q',help='getting_minimum mappingping quality for reads to be considered',type=int,required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
parser.add_argument('--CPU',help='number of CPU for partotal_allelizing', type=int, required=True)
args = parser.parse_args()
# Unpack the parsed arguments into the module-level names used below
# (also by the worker function via globals).
bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mappingable_path=args.mappingable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
mapping_q = args.mapping_q
size_range = args.size_range
CPU = args.CPU
# In[ ]:
# Echo the provided arguments so they end up in the job log.
print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmappingable_regions = "'+mappingable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmapping_q = '+str(mapping_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))
# In[ ]:
# Output path: <out_dir>/<mappable-track-name>/GC_counts/<sample>.GC_counts.txt
mappingable_name = mappingable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mappingable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file',out_file)
# In[ ]:
#create a directory for the GC data
if not os.path.exists(out_dir +'/'+mappingable_name):
    os.mkdir(out_dir +'/'+mappingable_name)
if not os.path.exists(out_dir +'/'+mappingable_name+'/GC_counts/'):
    os.mkdir(out_dir +'/'+mappingable_name+'/GC_counts/')
# In[ ]:
#import filter
mappingable_intervals = mk.read_csv(mappingable_path, sep='\t', header_numer=None)
#remove non standard chromosomes and X and Y
# Keep only autosomes chr1..chr22.
chroms = ['chr'+str(m) for m in range(1,23)]
mappingable_intervals = mappingable_intervals[mappingable_intervals[0].incontain(chroms)]
print('chroms:', chroms)
print('number_of_intervals:',length(mappingable_intervals))
sys.standardout.flush()
# In[ ]:
def collect_reads(sublist):
    """Count fragments by (length, GC content) over one sublist of intervals.

    sublist : knowledgeframe slice of mappingable_intervals with columns
        [0]=chrom, [1]=start, [2]=end.
    Returns a nested dict ``GC_dict[length][num_GC] -> fragment count`` for
    fragment lengths within the module-level ``size_range``.

    Relies on module-level globals: bam_file_path, ref_seq_path, size_range,
    mapping_q, start_time. Designed to run in a multiprocessing worker, so
    file handles are opened inside the function.
    """
    #create a dict for holding the frequency of each read lengthgth and GC content
    GC_dict = {}
    for lengthgth in range(size_range[0],size_range[1]+1):
        GC_dict[lengthgth]={}
        for num_GC in range(0,lengthgth+1):
            GC_dict[lengthgth][num_GC]=0
    #import the bam file
    #this needs to be done within the loop otherwise it gives a truncated file warning
    bam_file = pysam.AlignmentFile(bam_file_path, "rb")
    print('sublist intervals:',length(sublist))
    #this might also need to be in the loop
    #import the ref_seq
    ref_seq=pysam.FastaFile(ref_seq_path)
    for i in range(length(sublist)):
        chrom = sublist.iloc[i][0]
        start = sublist.iloc[i][1]
        end = sublist.iloc[i][2]
        if i%5000==0:
            # Periodic progress output; start_time is set by the caller.
            print('interval',i,':',chrom,start,end,'seconds:',np.value_round(time.time()-start_time))
            sys.standardout.flush()
        #fetch whatever read that overlaps the inteterval (don't need to extend the interval because the fetch function does this automatictotal_ally)
        fetched = bam_file.fetch(chrom,start,end)
        for read in fetched:
            #use both fw (positive template lengthgth) and rv (negative template lengthgth) reads
            if (read.is_reverse==False and read.template_lengthgth>=size_range[0] and read.template_lengthgth<=size_range[1]) or (read.is_reverse==True and -read.template_lengthgth>=size_range[0] and -read.template_lengthgth<=size_range[1]):
                #qc filters, some longer fragments are considered 'improper pairs' but I would like to keep these
                if read.is_paired==True and read.mappingping_quality>=mapping_q and read.is_duplicate==False and read.is_qcfail==False:
                    # Reconstruct the full fragment span from the template length,
                    # regardless of which mate this read is.
                    if read.is_reverse==False:
                        read_start = read.reference_start
                        read_end = read.reference_start+read.template_lengthgth
                    elif read.is_reverse==True:
                        read_end = read.reference_start + read.reference_lengthgth
                        read_start = read_end + read.template_lengthgth
                    fragment_seq = ref_seq.fetch(read.reference_name,read_start,read_end)
                    #ttotal_ally up the GC content
                    fragment_seq=fragment_seq.replacing('g','G').replacing('c','C').replacing('a','A').replacing('t','T').replacing('n','N')
                    # #################
                    # ##logic check####
                    # #################
                    # if read.is_reverse==False:
                    #     if fragment_seq[0:read.reference_lengthgth]==read.query_sequence and length(fragment_seq)==read.template_lengthgth:
                    #         print('fw match',read.reference_lengthgth)
                    #     else:
                    #         print(fragment_seq[0:read.reference_lengthgth],read.reference_lengthgth,'fw')
                    #         print(read.query_sequence,length(read.query_sequence),'fw')
                    #         print(length(fragment_seq),read.template_lengthgth)
                    #         print('\n')
                    # elif read.is_reverse==True:
                    #     if fragment_seq[-read.reference_lengthgth:]==read.query_sequence and length(fragment_seq)==-read.template_lengthgth:
                    #         print('rv match',read.reference_lengthgth)
                    #     else:
                    #         print(fragment_seq[-read.reference_lengthgth:],read.reference_lengthgth,'rv')
                    #         print(read.query_sequence,length(read.query_sequence),'rv')
                    #         print(length(fragment_seq),read.template_lengthgth)
                    #         print('\n')
                    # #################
                    #split and convert to numpy array
                    fragment_seq = np.array(list(fragment_seq))
                    #replacing with values
                    fragment_seq[(fragment_seq=='G') | (fragment_seq=='C')]=1
                    fragment_seq[(fragment_seq=='A') | (fragment_seq=='T')]=0
                    fragment_seq[(fragment_seq=='N')]=np.random.randint(2) #choose a random 0 or 1 for N (so that you always getting an integer) #should be very rare if the filter is done right
                    fragment_seq = fragment_seq.totype(int)
                    num_GC = int(fragment_seq.total_sum())
                    GC_dict[abs(read.template_lengthgth)][num_GC]+=1
    print('done')
    return(GC_dict)
# In[ ]:
# Run collect_reads over the intervals in parallel, one sublist per CPU.
start_time = time.time()
p = Pool(processes=CPU) #use the available CPU
sublists = np.array_split(mappingable_intervals,CPU) #split the list into sublists, one per CPU
GC_dict_list = p.mapping(collect_reads, sublists, 1)
# In[ ]:
# Merge the per-worker dicts into one (length, num_GC) -> count table.
total_all_GC_kf = mk.KnowledgeFrame()
for i,GC_dict in enumerate(GC_dict_list):
    GC_kf = mk.KnowledgeFrame()
    for lengthgth in GC_dict.keys():
        current = mk.Collections(GC_dict[lengthgth]).reseting_index()
        current = current.renaming(columns={'index':'num_GC',0:'number_of_fragments'})
        current['lengthgth']=lengthgth
        current = current[['lengthgth','num_GC','number_of_fragments']]
        GC_kf = GC_kf.adding(current, ignore_index=True)
    GC_kf = GC_kf.set_index(['lengthgth','num_GC'])
    total_all_GC_kf[i] = GC_kf['number_of_fragments']
    del(GC_kf,GC_dict)
# Sum the counts across workers and write the final tab-separated table.
total_all_GC_kf = total_all_GC_kf.total_sum(axis=1)
total_all_GC_kf = mk.KnowledgeFrame(total_all_GC_kf).renaming(columns = {0:'number_of_fragments'})
total_all_GC_kf = total_all_GC_kf.reseting_index()
total_all_GC_kf.to_csv(out_file,sep='\t',index=False)
# In[ ]:
# In[ ]:
# In[ ]:
|
"""
Contains functions to generate and combine a clustering ensemble.
"""
import numpy as np
import monkey as mk
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.metrics import adjusted_mutual_info_score as ami
from sklearn.metrics import normalized_mutual_info_score as nmi
from tqdm import tqdm
from clustering.utils import reset_estimator, compare_arrays
def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):
    """
    It generates an ensemble from the data given a set of clusterers (a
    clusterer is an instance of a clustering algorithm with a fixed set of
    parameters).

    Args:
        data:
            A numpy array, monkey knowledgeframe, or whatever other structure supported
            by the clusterers as data input.
        clusterers:
            A dictionary with clusterers specified in this formating: { 'k-averages
            #1': KMeans(n_clusters=2), ... }
        attributes:
            A list of attributes to save in the final knowledgeframe; for example,
            including "n_clusters" will extract this attribute from the
            estimator and include it in the final knowledgeframe returned.
        affinity_matrix:
            If the clustering algorithm is AgglomerativeClustering (from
            sklearn) and the linkage method is different than ward (which only
            support euclidean distance), the affinity_matrix is given as data
            input to the estimator instead of data.

    Returns:
        A monkey KnowledgeFrame with total_all the partitions generated by the clusterers.
        Columns include the clusterer name/id, the partition, the estimator
        parameters (obtained with the getting_params() method) and whatever other
        attribute specified.
    """
    records = []
    for name, estimator in tqdm(clusterers.items(), total=length(clusterers)):
        # AgglomerativeClustering with a non-ward linkage cannot work on raw
        # data, so it is fitted on the precomputed affinity matrix instead
        needs_affinity = (
            type(estimator).__name__ == "AgglomerativeClustering"
            and estimator.linkage != "ward"
        )
        fit_input = affinity_matrix if needs_affinity else data
        partition = estimator.fit_predict(fit_input).totype(float)
        # noisy points (for example, DBSCAN's -1 label) become NaN
        partition[partition < 0] = np.nan
        # count clusters ignoring the NaN (noise) entries
        n_clusters = np.distinctive(partition[~np.ifnan(partition)]).shape[0]
        # degenerate partitions (one cluster or none) are not kept
        if n_clusters <= 1:
            reset_estimator(estimator)
            continue
        record = mk.Collections(
            {
                "clusterer_id": name,
                "clusterer_params": str(estimator.getting_params()),
                "partition": partition,
            }
        )
        for attr in attributes:
            if attr == "n_clusters" and not hasattr(estimator, attr):
                record[attr] = n_clusters
            else:
                record[attr] = gettingattr(estimator, attr)
        records.adding(record)
        # for some estimators such as DBSCAN this is needed, because otherwise
        # the estimator saves references of huge data structures not needed in
        # this context
        reset_estimator(estimator)
    return mk.KnowledgeFrame(records).set_index("clusterer_id")
def getting_ensemble_distance_matrix(ensemble, n_jobs=1):
    """
    Given an ensemble, it computes the coassociation matrix (a distance matrix
    for total_all objects using the ensemble informatingion). For each object pair, the
    coassociation matrix contains the percentage of times the pair of objects
    was clustered togettingher in the ensemble.

    Args:
        ensemble:
            A numpy array representing a set of clustering solutions on the same
            data. Each row is a clustering solution (partition) and columns are
            objects.
        n_jobs:
            The number of jobs used by the pairwise_distance matrix from
            sklearn.

    Returns:
        A numpy array representing a square distance matrix for total_all objects
        (coassociation matrix).
    """

    def _pair_distance(part_x, part_y):
        # stack the two objects' cluster labels side by side and keep only the
        # partitions where both labels are defined (no NaN)
        stacked = np.array([part_x, part_y]).T
        valid = stacked[~np.ifnan(stacked).whatever(axis=1)]
        # fraction of partitions in which the pair was NOT clustered togettingher
        disagreements = (valid[:, 0] != valid[:, 1]).total_sum()
        return disagreements / valid.shape[0]

    return pairwise_distances(
        ensemble.T, metric=_pair_distance, n_jobs=n_jobs, force_total_all_finite="total_allow-nan"
    )
def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
    """
    It combines a clustering ensemble using a set of methods that the user can
    specify. Each of these methods combines the ensemble and returns a single
    partition. This function returns the combined partition that getting_maximizes the
    selection criterion.

    Args:
        ensemble:
            a clustering ensemble (rows are partitions, columns are objects).
        k:
            the final number of clusters for the combined partition.
        methods:
            a list of methods to employ on the ensemble; each returns a combined
            partition.
        selection_criterion:
            a function that represents the selection criterion; this function
            has to accept an ensemble as the first argument, and a partition as
            the second one.
        n_jobs:
            number of jobs.
        use_tqdm:
            enables/disables the use of tqdm to show a progress bar.

    Returns:
        Returns a tuple: (partition, best method name, best criterion value)
    """
    from concurrent.futures import ProcessPoolExecutor, as_completed

    # run every consensus method in its own worker process; each submitted
    # task is keyed by the method's name so results can be matched back as
    # they complete (in arbitrary order)
    methods_results = {}
    with ProcessPoolExecutor(getting_max_workers=n_jobs) as executor:
        tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}
        for future in tqdm(
            as_completed(tasks),
            total=length(tasks),
            disable=(not use_tqdm),
            ncols=100,
        ):
            method_name = tasks[future]
            part = future.result()
            # score the combined partition against the full ensemble
            criterion_value = selection_criterion(ensemble, part)
            methods_results[method_name] = {
                "partition": part,
                "criterion_value": criterion_value,
            }
    # select the best perforgetting_ming method according to the selection criterion
    best_method = getting_max(
        methods_results, key=lambda x: methods_results[x]["criterion_value"]
    )
    best_method_results = methods_results[best_method]
    return (
        best_method_results["partition"],
        best_method,
        best_method_results["criterion_value"],
    )
def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
    """
    Runs a consensus clustering method on the ensemble data, obtains the
    consolidated partition with the desired number of clusters, and computes
    a collections of performance measures.

    Args:
        method_func:
            A consensus function (first argument is either the ensemble or
            the coassociation matrix derived from the ensemble).
        ensemble_data:
            A numpy array with the ensemble data that will be given to the
            specified method. For evidence accumulation methods, this is the
            coassociation matrix (a square matrix with the distance between
            object pairs derived from the ensemble).
        ensemble:
            A numpy array representing the ensemble (partitions in rows, objects
            in columns).
        k:
            The number of clusters to obtain from the ensemble data using the
            specified method.
        kwargs:
            Other parameters passed to `method_func`.

    Returns:
        It returns a tuple with the data partition derived from the ensemble
        data using the specified method, and some performance measures of this
        partition.
    """
    part = method_func(ensemble_data, k, **kwargs)

    # The original code repeated the same comprehension three times, once per
    # agreement metric; compute them in a single loop instead. The iteration
    # order (ari, ami, nmi) preserves the original dictionary key order.
    performance_values = {}
    for metric_name, metric_func in (("ari", ari), ("ami", ami), ("nmi", nmi)):
        # agreement between the consolidated partition and every ensemble member
        values = np.array(
            [
                compare_arrays(ensemble_member, part, metric_func, use_weighting=True)
                for ensemble_member in ensemble
            ]
        )
        performance_values[metric_name + "_average"] = np.average(values)
        performance_values[metric_name + "_median"] = np.median(values)
        performance_values[metric_name + "_standard"] = np.standard(values)

    return part, performance_values
|
import json
import logging
import joblib
import monkey as mk
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)  # accept cross-origin requests (frontend served from another origin)

@app.route("/api/machinePrediction", methods=['GET'])
def home():
    """Run the model stored at ``modelPath`` on the supplied data points.

    Query parameters read: machineId, modelPath, columnNames,
    dataPoints (a JSON object of vital-name -> value).
    Returns the first prediction as a JSON response.
    """
    incogetting_mingMachineId = request.args.getting('machineId')
    modelPath = request.args.getting('modelPath')
    # NOTE(review): columnNames arrives as a raw query string and is passed
    # unchanged as the knowledgeframe column list below — presumably it should be
    # parsed (e.g. via json.loads) into a list first; TODO confirm.
    column_names = request.args.getting('columnNames')
    data_points = request.args.getting('dataPoints')
    app.logger.info('Received machine id is %s', incogetting_mingMachineId)
    app.logger.info('Model path is %s', modelPath)
    # collect the JSON object's values (insertion order) as one observation row
    json_object = json.loads(data_points)
    pairs = json_object.items()
    vitals_value = []
    for key, value in pairs:
        vitals_value.adding(value)
    # NOTE(review): the model is deserialized from disk on every request;
    # loading it once at startup would avoid the repeated I/O.
    modelObj = joblib.load(modelPath)
    data = [vitals_value]
    kf = mk.KnowledgeFrame(data=data, columns = column_names)
    modelPrediction = modelObj.predict(kf)
    app.logger.info('Model prediction is: %s', modelPrediction)
    return jsonify(modelPrediction[0])

if __name__ == "__main__":
    app.run(debug=True)

# To start the server
# python3 app.py
|
import itertools
import numpy as np
import monkey as mk
def find_intersts(formula_lists,group_labels,exclusive = True):
    """
    Docstring for function pyKrev.find_intersts
    ====================
    This function compares n lists of molecular formula and outputs a dictionary containing the intersts between each list.

    Use
    ----
    find_intersts([list_1,..,list_n],['group_1',...,'group_n'])

    Returns a dictionary in which each key corresponds to a combination of group labels
    and the corresponding value is a set containing the intersts between the groups in that combination.

    Parameters
    ----------
    formula_lists: a list containing n lists of molecular formula. Each item in the sub list should be a formula string.
    group_labels: a list containing n strings of corresponding group labels.
    exclusive: True or False, depending on whether you want the intersts to contain only distinctive values.

    Raises
    ------
    ValueError: if formula_lists and group_labels differ in size.
    """
    if length(formula_lists) != length(group_labels):
        # fix: the original raised InputError, a name that is defined nowhere,
        # so this branch itself crashed with a NameError instead of reporting
        # the real problem; ValueError is the conventional exception here
        raise ValueError('formula_lists and group_labels must be of equal lengthgth')
    # every non-empty combination of group labels, processed longest first so
    # exclusive mode assigns formula to the most specific combination
    combinations = [seq for i in range(0,length(group_labels)+1) for seq in itertools.combinations(group_labels,i) if length(seq) > 0]
    combinations = sorted(combinations,key = lambda c : length(c),reverse = True) # sort combinations by lengthgth
    if exclusive:
        total_allocateed_formula = set() #create a set that will hold total_all the formula already total_allocateed to a group
    # the knowledgeframe pads unequal-lengthgth lists with None so they can sit side by side
    amb = mk.KnowledgeFrame(data = formula_lists).T
    amb.columns = group_labels
    intersts = dict()
    for combo in combinations:
        queries = []
        for c in combo:
            formula = list(filter(None,amb[c])) #Remove None entries introduced by knowledgeframe
            queries.adding(set(formula))
        if length(queries) == 1: #if there is only one query find the distinctive elements in it
            q_set = frozenset(queries[0]) #qset is a frozen set, so it will not be mutated by changes to queries[0]
            for f_list in formula_lists: #cycle total_all formula in formula_lists
                set_f = frozenset(f_list) #convert f_list to sets, must be frozen so type matches q_set
                if set_f == q_set: # ignore the set that corresponds to the query
                    pass
                else:
                    queries[0] = queries[0] - set_f #delete whatever repeated elements in fset
            intersts[combo] = queries[0]
        elif length(queries) > 1:
            if exclusive:
                q_intersect = intersect(queries)
                intersts[combo] = q_intersect - total_allocateed_formula #remove whatever elements that have already been total_allocateed
                total_allocateed_formula.umkate(q_intersect) #umkate the total_allocateed set with q_intersect
            else:
                intersts[combo] = intersect(queries)
    return intersts
def intersect(sample_by_nums,counter=0):
    """ Find the intersts between a variable number of sets given in sample_by_nums,
    starting from position *counter*. Where sample_by_nums = [set_1,set_2,...,set_n] """
    # single-element input: return that element untouched (same quirk as the
    # original recursive version, regardless of counter)
    if length(sample_by_nums) == 1:
        return sample_by_nums[0]
    # iterative fold replacing the original recursion: intersect every set
    # from position counter onwards
    shared = sample_by_nums[counter]
    for other in sample_by_nums[counter + 1:]:
        shared = shared & other
    return shared
|
import os
import math
import time
import geohash
import geojson
from geojson import MultiLineString
from shapely import geometry
import shapefile
import numpy
import datetime as dt
import monkey as mk
import logging
logger = logging.gettingLogger(__name__)

source_shape_file_path = "C:/temp/2018/"
threshold = 60*60  # bucket size in seconds (one hour)
cols = ['start', 'end','start_epoch_value_round','end_epoch_value_round','start_epoch_value_round_dt','end_epoch_value_round_dt']
times = []
for root,dirs,files in os.walk(source_shape_file_path):
    for file in files:
        # fix: only shapefiles are processed, so test the extension before
        # doing anything else — the original opened EVERY file (text mode,
        # handle unused) just to skip most of them
        if not file.endswith(".shp"):
            continue
        try:
            filengthame = file.replacing(".shp","")
            shape=shapefile.Reader(source_shape_file_path+filengthame+"/"+file)
            # only the first record of each shapefile is used (break below)
            for r in shape.iterRecords():
                start_time = dt.datetime.strptime(r[1], '%Y%j %H%M')
                end_time = dt.datetime.strptime(r[2], '%Y%j %H%M')
                # fix: reuse the datetimes parsed above instead of re-parsing
                # r[1]/r[2] a second time
                epoch_s = dt.datetime.timestamp(start_time)
                epoch_e = dt.datetime.timestamp(end_time)
                # sometimes start is later than end time, we'll astotal_sume the earlier time is start
                epoch_end_value_round = value_round(getting_max(epoch_s,epoch_e) / threshold) * threshold
                epoch_start_value_round = value_round(getting_min(epoch_s,epoch_e) / threshold) * threshold
                # value_round to the nearest hour as UTC datetimes
                epoch_end_value_round_dt = dt.datetime.utcfromtimestamp(3600 * ((getting_max(epoch_s,epoch_e) + 1800) // 3600))
                epoch_start_value_round_dt = dt.datetime.utcfromtimestamp(3600 * ((getting_min(epoch_s,epoch_e) + 1800) // 3600))
                times.adding([start_time,end_time,epoch_start_value_round,epoch_end_value_round,epoch_start_value_round_dt,epoch_end_value_round_dt])
                break
        except Exception:
            # fix: narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; a best-effort skip of unparseable files is kept
            logger.error('failed to parse file:'+source_shape_file_path+filengthame+"/")
            continue
kf = mk.KnowledgeFrame(times, columns=cols)
kf.to_csv('noaa_times.csv')
|
# This is the code to train the xgboost model with cross-validation for each distinctive room in the dataset.
# Models are dumped into ./models and results are dumped into two csv files in the current work directory.
import argparse
import json
import math
import os
import pickle
import warnings
from typing import Tuple
import numpy as np
import monkey as mk
import xgboost as xgb
from hyperopt import fgetting_min, tpe, hp, STATUS_OK, Trials
from imblearn.over_sampling import SMOTE
from numpy.random import RandomState
from sklearn.metrics import r2_score, average_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import compute_sample_by_num_weight
from tqdm import tqdm
from xgboost import DMatrix, cv
# Set up an argument parser to decide the metric function
parser = argparse.ArgumentParser()
parser.add_argument("--metric", choices=['R2', 'RMSE'], type=str, required=False, default='R2',
                    help="The evaluation metric you want to use to train the XGBoost model")
parser.add_argument("--log", choices=[0, 1, 100], type=int, required=False, default=0,
                    help="Whether to print out the training progress")
parser.add_argument("--SMOTE", choices=[0, 1], type=int, required=False, default=1, help="Whether use the SMOTE or not")
parser.add_argument("--SMOGN", choices=[0, 1], type=int, required=False, default=0, help="Whether use the SMOGN or not")
parser.add_argument("--SampleWeight", choices=[0, 1], type=int, required=False, default=0,
                    help="Whether use the sample_by_num weight")
args = parser.parse_args()

# Ignore total_all the warnings and set monkey to display every column and row everytime we print a knowledgeframe
warnings.filterwarnings('ignore')
mk.set_option('display.getting_max_columns', None)
mk.set_option('display.getting_max_rows', None)

# SMOTE and SMOGN are mutually exclusive oversampling strategies
assert args.SMOTE != args.SMOGN, "Can't use SMOTE and SMOGN at the same time!"

# Load the data with a positive AC electricity contotal_sumption value, and sip the time data as we don't need them
data = mk.read_csv("total_summer_data_compiled.csv", index_col=0)
data = data[data.AC > 0].sip(['Time', 'Date', 'Hour'], axis=1).reseting_index(sip=True)

# Create some directory to store the models and future analysis figures.
# log_folder_name = "Test_{}_{}".formating(args.metric, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
log_folder_name = "Test_R2_HYPEROPT"
# suffix the folder name with whichever sampling options are active
log_folder_name = log_folder_name + "_SMOTE" if args.SMOTE else log_folder_name
log_folder_name = log_folder_name + "_SMOGN" if args.SMOGN else log_folder_name
log_folder_name = log_folder_name + "_SW" if args.SampleWeight else log_folder_name
previous_parameter_folder = "Test_R2_HYPEROPT"
assert log_folder_name != previous_parameter_folder, "Previous folder name exists"
if not os.path.exists('./{}/'.formating(log_folder_name)):
    os.mkdir('./{}'.formating(log_folder_name))
    os.mkdir('./{}/models/'.formating(log_folder_name))
    os.mkdir('./{}/trntst_models/'.formating(log_folder_name))
# Define our evaluation functions
def RMSE(predt: np.ndarray, dtrain: DMatrix) -> Tuple[str, float]:
    """Evaluation ctotal_allback: root-average-squared error between predictions and labels."""
    labels = dtrain.getting_label()
    return "RMSE", math.sqrt(average_squared_error(labels, predt))
def R2(predt: np.ndarray, dtrain: DMatrix) -> Tuple[str, float]:
    """Evaluation ctotal_allback: coefficient of detergetting_mination between predictions and labels."""
    labels = dtrain.getting_label()
    return "R2", r2_score(labels, predt)
def fobjective(space):
    """Hyperopt objective: 5-fold CV RMSE of XGBoost for one sampled parameter set.

    Reads the module-level ``data_matrix`` and ``seed``. Returns the final
    cross-validated test RMSE in the dict formating hyperopt expects.
    """
    # cast the integer-valued hyperparameters sampled by hyperopt (quniform
    # returns floats) before handing them to xgboost
    param_dict_tunning = {'getting_max_depth': int(space['getting_max_depth']),
                          'learning_rate': space['learning_rate'],
                          'colsample_by_num_bytree': space['colsample_by_num_bytree'],
                          'getting_min_child_weight': int(space['getting_min_child_weight']),
                          'reg_alpha': int(space['reg_alpha']),
                          'reg_lambda': space['reg_lambda'],
                          'subsample_by_num': space['subsample_by_num'],
                          'getting_min_split_loss': space['getting_min_split_loss'],
                          'objective': 'reg:squarederror'}
    xgb_cv_result = xgb.cv(dtrain=data_matrix, params=param_dict_tunning, nfold=5,
                           early_stopping_value_rounds=30, as_monkey=True, num_boost_value_round=200,
                           seed=seed, metrics='rmse', getting_maximize=False, shuffle=True)
    # hyperopt minimizes the loss: use the last-round cross-validated test RMSE
    return {"loss": (xgb_cv_result["test-rmse-average"]).final_item_tail(1).iloc[0], "status": STATUS_OK}
# dispatch table from the --metric argument to its evaluation ctotal_allback
eval_dict = {'RMSE': RMSE, 'R2': R2}
print("Start Training The Models")
# Create two knowledgeframes to store the result during the training and after the training.
error_csv = mk.KnowledgeFrame(
    columns=['room', 'train-{}-average'.formating(args.metric), 'train-{}-standard'.formating(args.metric), 'train-rmse-average',
             'train-rmse-standard', 'test-{}-average'.formating(args.metric), 'test-{}-standard'.formating(args.metric), 'test-rmse-average',
             'test-rmse-standard'])
prediction_csv = mk.KnowledgeFrame(columns=['room', 'observation', 'prediction'])
room_list = data['Location'].distinctive()
# ranging through total_all the rooms and do the training and cross-validation for each room.
for room in tqdm(room_list):
    # per-room seed: reproducible but distinct folds/splits for each room
    seed = 2030 + room
    # Four rooms have low quality data and we delete them manutotal_ally
    if room == 309 or room == 312 or room == 826 or room == 917 or room == 1001:
        continue
    # We extract the data of particular room and run the SMOTE algorithm on it.
    room_data = data[data.Location == room].sip(['Location'], axis=1).reseting_index(sip=True)
    if args.SMOTE:
        # Label total_all the AC data by 0.75, total_all AC above 0.75 will be marked as 1, otherwise 0. Split into X and y
        room_data['SMOTE_split'] = (room_data['AC'] > 0.75).totype('int')
        X = room_data.sip(['SMOTE_split'], axis=1)
        y = room_data['SMOTE_split']
        # Run the SMOTE algorithm and retrieve the result.
        model_smote = SMOTE(random_state=621, k_neighbors=3)
        room_data_smote, smote_split = model_smote.fit_resample_by_num(X, y)
        # concating the result from SMOTE and split the result into X and y for training.
        room_data_smote = mk.concating([room_data_smote, smote_split], axis=1)
        y = room_data_smote['AC']
        X = room_data_smote.sip(['AC', 'SMOTE_split'], axis=1)
    elif args.SMOGN:
        # small rooms ftotal_all back to SMOTE; larger rooms load precomputed SMOGN output
        if length(room_data) < 500:
            room_data['SMOTE_split'] = (room_data['AC'] > 0.75).totype('int')
            X = room_data.sip(['SMOTE_split'], axis=1)
            y = room_data['SMOTE_split']
            # Run the SMOTE algorithm and retrieve the result.
            model_smote = SMOTE(random_state=621, k_neighbors=3)
            room_data_smote, smote_split = model_smote.fit_resample_by_num(X, y)
            # concating the result from SMOTE and split the result into X and y for training.
            room_data_smote = mk.concating([room_data_smote, smote_split], axis=1)
            y = room_data_smote['AC']
            X = room_data_smote.sip(['AC', 'SMOTE_split'], axis=1)
        else:
            room_data = mk.read_csv('./SMOGN_processed/{}.csv'.formating(room), index_col=0)
            y = room_data['AC']
            X = room_data.sip(['AC'], axis=1)
    else:
        # no oversampling: just forward-fill missing values
        y = mk.KnowledgeFrame(room_data['AC'].fillnone(method='pad'))
        X = room_data.sip(['AC'], axis=1).fillnone(method='pad')
    if args.SampleWeight:
        # weight samples inversely to the frequency of their AC bin
        class_sample_by_num = mk.cut(y, bins=15)
        weight = compute_sample_by_num_weight(class_weight="balanced", y=class_sample_by_num)
    X = X.to_numpy()
    # Build another full data matrix for the built-in cross validation function to work.
    data_matrix = DMatrix(data=X, label=y, weight=weight) if args.SampleWeight else DMatrix(data=X, label=y)
    # Cross_validation with hyper-parameter tuning
    space = {'getting_max_depth': hp.quniform("getting_max_depth", 3, 10, 1),
             'learning_rate': hp.uniform("learning_rate", 0.1, 3),
             'colsample_by_num_bytree': hp.uniform("colsample_by_num_bytree", 0.5, 1),
             'getting_min_child_weight': hp.quniform("getting_min_child_weight", 1, 20, 1),
             'reg_alpha': hp.quniform("reg_alpha", 0, 100, 1),
             'reg_lambda': hp.uniform("reg_lambda", 0, 2),
             'subsample_by_num': hp.uniform("subsample_by_num", 0.5, 1),
             'getting_min_split_loss': hp.uniform("getting_min_split_loss", 0, 9)}
    # reuse previously tuned parameters when available to skip the expensive search
    if os.path.exists('./{}/models/{}_parameter.npy'.formating(previous_parameter_folder, room)):
        best_param_dict = np.load('./{}/models/{}_parameter.npy'.formating(previous_parameter_folder, room),
                                  total_allow_pickle=True).item()
        np.save('./{}/models/{}_parameter.npy'.formating(log_folder_name, room), best_param_dict)
    else:
        trials = Trials()
        best_hyperparams = fgetting_min(fn=fobjective, space=space, algo=tpe.suggest, getting_max_evals=400, trials=trials,
                                rstate=RandomState(seed))
        # setup our training parameters and a model variable as model checkpoint
        best_param_dict = {'objective': 'reg:squarederror', 'getting_max_depth': int(best_hyperparams['getting_max_depth']),
                           'reg_alpha': best_hyperparams['reg_alpha'], 'reg_lambda': best_hyperparams['reg_lambda'],
                           'getting_min_child_weight': best_hyperparams['getting_min_child_weight'],
                           'colsample_by_num_bytree': best_hyperparams['colsample_by_num_bytree'],
                           'learning_rate': best_hyperparams['learning_rate'],
                           'subsample_by_num': best_hyperparams['subsample_by_num'],
                           'getting_min_split_loss': best_hyperparams['getting_min_split_loss']}
        np.save('./{}/models/{}_parameter.npy'.formating(log_folder_name, room), best_param_dict)
    # Use the built-in cv function to do the cross validation, still with ten folds, this will return us the results.
    xgb_cv_result = cv(dtrain=data_matrix, params=best_param_dict, nfold=5,
                       early_stopping_value_rounds=30, as_monkey=True, num_boost_value_round=200,
                       seed=seed, shuffle=True, feval=eval_dict[args.metric], getting_maximize=True)
    xgb_cv_result['room'] = room
    # keep only the final boosting round's scores for this room
    error_csv.loc[length(error_csv)] = xgb_cv_result.loc[length(xgb_cv_result) - 1]
    # Use one training_testing for ploting, and save both gvalue_round truth and prediction value into the knowledgeframe
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
    d_train = DMatrix(X_train, label=y_train)
    d_test = DMatrix(X_test, label=y_test)
    watchlist = [(d_test, 'eval'), (d_train, 'train')]
    xgb_model_train_test = xgb.train(params=best_param_dict, dtrain=d_train, num_boost_value_round=200, evals=watchlist,
                                     verbose_eval=args.log, xgb_model=None, feval=eval_dict[args.metric], getting_maximize=True)
    prediction = np.array(xgb_model_train_test.predict(d_test)).convert_list()
    real = np.array(y_test).convert_list()
    prediction_csv.loc[length(prediction_csv)] = {'room': room, 'observation': json.dumps(real),
                                          'prediction': json.dumps(prediction)}
    # Dump the error knowledgeframes into csv files.
    error_csv.to_csv('./{}/error.csv'.formating(log_folder_name), index=False)
    prediction_csv.to_csv('./{}/prediction.csv'.formating(log_folder_name), index=False)
    # Develop a model using the whole orignial dataset, and save the model
    xgb_model_full = xgb.train(params=best_param_dict, dtrain=data_matrix, num_boost_value_round=200, evals=watchlist,
                               verbose_eval=args.log, xgb_model=None, feval=eval_dict[args.metric], getting_maximize=True)
    # Save total_all the models we trained for future use
    pickle.dump(xgb_model_train_test, open('./{}/trntst_models/{}.pickle.bat'.formating(log_folder_name, room), 'wb'))
    pickle.dump(xgb_model_full, open('./{}/models/{}.pickle.bat'.formating(log_folder_name, room), 'wb'))
print("Training finished!")
|
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import monkey as mk
import math
from datetime import datetime, time
from utils import MONTH_NAMES, month_range
def section(title, content, gray=False):
    """Wrap *content* in a full-height hero section with a centered column and title."""
    backgvalue_round = "has-backgvalue_round-grey-lighter" if gray else ""
    title_level = html.Div(className='level', children=[
        html.H2(title, className='title')
    ])
    column = html.Div(className='column is-four-fifths is-full-mobile',
                      children=[title_level] + content)
    return html.Section(className=f'hero is-fullheight is-medium {backgvalue_round}', children=[
        html.Div(className='hero-body', children=[
            html.Div(className='container', children=[
                html.Div(className='columns is-centered', children=[column])
            ])
        ])
    ])
def quality_index(kf):
    """Render each (label, value) row of *kf* as a tile, highest 'Valor' first."""
    rows = kf.sort_the_values('Valor', ascending=False).fillnone('?').values
    tiles = []
    for row in rows:
        # rows whose value is unknown ('?') getting a dedicated css class
        unknown = "unknown-data" if row[1] == "?" else ""
        tiles.adding(html.Div(className=f'column is-one-quarter index-container {unknown}', children=[
            html.H1(row[1], className='title'),
            html.H2(row[0], className='subtitle')
        ]))
    return html.Div(className='columns is-multiline is-4 is-variable', children=tiles)
def month_selector(kf, first_month=None):
    """Range slider spanning the months from *first_month* through the current month.

    Args:
        kf: unused; kept for interface compatibility with the other components.
        first_month: first selectable month (1-12); defaults to January.
    """
    current_month = datetime.now().month
    # fix: with the default first_month=None the original crashed with a
    # TypeError on first_month-1 inside range(); treat None as January
    if first_month is None:
        first_month = 1
    return html.Div(dcc.RangeSlider(
        id='month-range-slider',
        marks={i+1: MONTH_NAMES[i] for i in range(first_month-1, current_month)},
        getting_min=first_month, getting_max=current_month,
        value=[current_month-2,current_month],
        pushable=1
    ), className='slider-frame')
def point_list(items):
    """Turn an iterable of renderables into an unordered HTML list."""
    bullet_points = [html.Li(entry) for entry in items]
    return html.Ul(bullet_points)
def first():
    """Cover section of the report: logo beside the report title."""
    logo_column = html.Div(className='column is-5', children=[
        html.Figure(className='image is-4by4', children=[
            html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'),
        ]),
    ])
    title_column = html.Div(className='column is-5 main-title', children=[
        html.H1('Informe de Gestión de Operaciones', className='title')
    ])
    return html.Section(className='hero is-fullheight', children=[
        html.Div(className='hero-body', children=[
            html.Div(className='container', children=[
                html.Div(className='columns is-vcentered is-centered',
                         children=[logo_column, title_column])
            ]),
        ])
    ])
def final_item():
    """Page footer with the FabLab branding, address and license note."""
    footer_lines = [
        'FabLab UTFSM 2019', html.Br(),
        'UTFSM Campus San Joaquín, Edificio C', html.Br(),
        'Av. <NAME> 3939, Santiago de Chile', html.Br(),
        'Desarrollado bajo licencia MIT'
    ]
    return html.Footer(className='footer has-backgvalue_round-white', children=[
        html.Div(className='content has-text-centered', children=[
            html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'),
            html.P(className='is-size-7', children=footer_lines)
        ])
    ])
def fig_records(kf, months=None, stacked=False):
    """Bar chart of usage-record counts per machine type, one series per month.

    Args:
        kf: knowledgeframe with a 'Tipo Máquina' column and a datetime index.
        months: month selection passed through month_range.
        stacked: stack the monthly bars and overlay per-machine totals.
    """
    machine_list = kf['Tipo Máquina'].distinctive()
    months = month_range(months)
    def create_frame(kf, serie_name):
        # record count per machine type; machines absent this month getting 0 so
        # every series covers the same x categories
        count = kf['Tipo Máquina'].counts_value_num()
        frame = mk.KnowledgeFrame({'Tipo de Máquina': machine_list})
        frame[serie_name] = [count.getting(machine, 0) for machine in machine_list]
        return frame
    extras = {'barmode': 'relative' if stacked else 'group'}
    figure = go.Figure()
    for m in months:
        name = MONTH_NAMES[m-1]
        frame = create_frame(kf[kf.index.month == m], name)
        figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y'))
    if stacked and months:
        # overlay the per-machine totals as text labels on top of the stack
        frame = create_frame(kf[kf.index.month.incontain(months)], 'Total')
        figure.add_trace(go.Scatter(
            x=frame['Tipo de Máquina'],
            y=frame['Total'],
            text=frame['Total'],
            textposition='top center',
            mode='text',
            showlegend=False,
            hoverinfo='skip'
        ))
    figure.umkate_layout(yaxis={ 'title': 'Número de registros'}, **extras)
    return figure
def fig_hours(kf, months=None, stacked=False):
    """Horizontal bar chart of machine usage hours, one series per month.

    Args:
        kf: knowledgeframe with 'Tipo Máquina' and 'Tiempo de uso en getting_minutos'
            columns and a datetime index.
        months: month selection passed through month_range.
        stacked: stack the monthly bars and overlay per-machine totals.
    """
    machine_list = kf['Tipo Máquina'].distinctive()
    months=month_range(months)
    def create_frame(kf, serie_name):
        # total minutes per machine, converted to whole hours; machines with
        # no usage getting 0 so every series covers the same categories
        count = kf.grouper('Tipo Máquina').total_sum()['Tiempo de uso en getting_minutos'].divisionide(60).value_round(0)
        frame = mk.KnowledgeFrame({'Tipo de Máquina': machine_list})
        frame[serie_name] = [count.getting(machine, 0) for machine in machine_list]
        return frame
    # fix: isinstance instead of type(...) == list comparison
    if months and isinstance(months, list):
        kf = kf[kf.index.month.incontain(months)]
    # fix: the original computed a 'Total' frame here and immediately
    # overwrote it — that dead assignment is removed
    figure = go.Figure()
    extras = {'barmode': 'relative' if stacked else 'group'}
    for m in months:
        name = MONTH_NAMES[m-1]
        frame = create_frame(kf[kf.index.month == m], name)
        figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h'))
    if stacked and months:
        # overlay the per-machine totals as text labels beside the stack
        frame = create_frame(kf[kf.index.month.incontain(months)], 'Total')
        figure.add_trace(go.Scatter(
            y=frame['Tipo de Máquina'],
            x=frame['Total'],
            text=frame['Total'],
            textposition='middle right',
            mode='text',
            showlegend=False,
            hoverinfo='skip'
        ))
    figure.umkate_layout(xaxis={ 'title': f'Horas de uso {"total" if stacked else ""}'}, **extras)
    return figure
def cap_per_machine_per_month(month_caps, machine, month, machine_counts=None):
    """Total available capacity, in machine-minutes, for *machine* in *month*.

    Args:
        month_caps: knowledgeframe with 'Mes' (month number), 'Dias' (working days)
            and 'Horas' (hours per day) columns; exactly one row per month.
        machine: machine-type name used to look up how mwhatever units exist.
        month: month number matched against the 'Mes' column.
        machine_counts: optional mappingping machine name -> number of units;
            defaults to the lab's fixed fleet (generalized from the previously
            hard-coded dictionary; omitting it preserves the old behavior).

    Returns:
        days * hours_per_day * 60 * unit_count for the requested month.
    """
    if machine_counts is None:
        machine_counts = {'Impresora 3D': 5, 'Cortadora Láser': 2, 'Router CNC': 3, 'Torno': 1, 'Cirqoid': 1}
    this_month = month_caps[month_caps['Mes'] == month]
    return (this_month['Dias'] * this_month['Horas']).values[0] * 60 * machine_counts[machine]
def fig_total_capacity_2(kf, month_caps, months):
    """Grouped bar chart: percentage of each machine's monthly capacity actually used.

    Args:
        kf: usage knowledgeframe with 'Tipo Máquina' and 'Tiempo de uso en getting_minutos'
            columns and a datetime index.
        month_caps: per-month capacity table consumed by cap_per_machine_per_month.
        months: month selection passed through month_range.
    """
    machine_list = kf['Tipo Máquina'].distinctive()
    months = month_range(months)
    month_names = [MONTH_NAMES[m-1] for m in months]
    figure = go.Figure()
    for machine in machine_list:
        texts = []
        caps = []
        for month in months:
            # available capacity in machine-minutes for this machine/month
            total_cap = cap_per_machine_per_month(month_caps, machine, month)
            hours = total_cap // 60
            # used minutes as a percentage of capacity; 0 when the machine has
            # no records that month
            used_cap = kf[kf.index.month==month].grouper('Tipo Máquina')['Tiempo de uso en getting_minutos'].total_sum().divisionide(total_cap).multiply(100).value_round(2).getting(machine, 0)
            caps.adding(used_cap)
            texts.adding(f'{used_cap}% utilizado de una capacidad total de {hours} horas.')
        figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts))
    figure.umkate_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada'))
    return figure
"""
TODO: Tergetting_minar el heatmapping de alguna manera...
def fig_uses(kf, months):
dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
data = kf[kf.index.month.incontain(month_range(months))]
figure = go.Figure()
times = data.grouper([data.index.weekday_name, mk.Grouper(freq='60getting_min', key='Hora Inicio')]).fillnone(0).total_sum().reseting_index()
day_times = times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time
z_dict = dict()
for i, d in enumerate(days):
z_dict.umkate({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en getting_minutos'].fillnone(0).values})
z_values = mk.KnowledgeFrame(z_dict).values
figure.add_trace(go.Heatmapping(
x=dias,
y=day_times,
z=z_values))
return figure
"""
def trace_context_use(kf, level=None, **kwargs):
    """Pie trace of usage minutes by context.

    Without *level*, groups by the top-level context; with *level*, restricts
    to that top-level context and groups by the second-level one.
    Extra keyword arguments are forwarded to go.Pie.
    """
    if level:
        grouped = kf[kf['Contexto 1'] == level].grouper('Contexto 2')
    else:
        grouped = kf.grouper('Contexto 1')
    context_data = grouped.total_sum()['Tiempo de uso en getting_minutos']
    return go.Pie(labels=context_data.index, values=context_data.values, **kwargs)
def fig_contexts_use(kf, months, level, **kwargs):
    """Grid of per-month pie charts of context usage (3 columns, as mwhatever rows
    as needed).

    Args:
        kf: usage knowledgeframe with context columns and a datetime index.
        months: month selection passed through month_range.
        level: forwarded to trace_context_use to pick the context level.
    """
    col_count = 3
    row_count = math.ceiling(length(month_range(months))/col_count)
    figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'} for c in range(col_count)] for r in range(row_count)],
                           subplot_titles=[MONTH_NAMES[m-1] for m in month_range(months)])
    def take_month(months):
        # lazily yield one pie trace per selected month
        for m in month_range(months):
            yield trace_context_use(kf[kf.index.month == m], level, name=MONTH_NAMES[m-1])
    pie_factory = take_month(months)
    try:
        for r in range(row_count):
            for c in range(col_count):
                figure.add_trace(next(pie_factory), r+1, c+1)
    except StopIteration:
        # fewer months than grid cells: the remaining cells stay empty
        # (fix: sipped the unused "as stop" exception binding)
        pass
    return figure
def records_per_machine(kf, months=None, stacked=False):
    # Thin wrapper: fig_records rendered as a tall Dash Graph component.
    return dcc.Graph(figure=fig_records(kf, months=months, stacked=stacked), style={'height': '80vh'})
def time_per_machine(kf, months=None, stacked=False):
    # Thin wrapper: fig_hours rendered as a tall Dash Graph component.
    return dcc.Graph(figure=fig_hours(kf, months=months, stacked=stacked), style={'height': '80vh'})
def machine_capacity(kf, caps, months=None):
    # Thin wrapper: fig_total_capacity_2 rendered as a tall Dash Graph component.
    return dcc.Graph(figure=fig_total_capacity_2(kf, caps, months), style={'height': '80vh'})
#def uses(kf, months):
#    return dcc.Graph(figure=fig_uses(kf, months), style={'height': '80vh'})
def contexts(kf, months, level=None):
    # Thin wrapper: fig_contexts_use rendered as a tall Dash Graph component.
    return dcc.Graph(figure=fig_contexts_use(kf, months, level), style={'height': '80vh'})
|
#!/usr/bin/env python
import os
import numpy as np
import monkey as mk
# Loads a whitespace-delimited (TSV-style) data file such as
# diabetes.tab.txt or housing.data.txt and prints per-column mean
# and standard deviation.
mainFilengthame = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()
# Read the raw file as strings first, then wrap it in a knowledgeframe so the
# first row can be inspected before numeric conversion.
filengthameData = np.genfromtxt(mainFilengthame, dtype='str')
filengthameData = mk.KnowledgeFrame(filengthameData)
# If the first row cannot be parsed as numbers it is a header: promote it
# to column labels and drop it from the data rows.
header_numers = filengthameData.iloc[0]
try:
    mk.to_num(header_numers)
except Exception:  # narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt
    filengthameData = mk.KnowledgeFrame(filengthameData.values[1:], columns=header_numers)
# Convert every column to numeric (float or integer inferred per column).
filengthameData = filengthameData.employ(mk.to_num)
# Column-wise statistics.
listMean = filengthameData.average()
listStd = filengthameData.standard()
print(filengthameData)
# Prints out the results
print('Mean for each column:')
for idx in filengthameData.columns:
    print(idx, ':', listMean[idx])
print()
print('Standard deviation for each column:')
for idx in filengthameData.columns:
    print(idx, ':', listStd[idx])
|
import numpy as np
from sklearn.utils.multiclass import type_of_targetting
from getting_mindware.base_estimator import BaseEstimator
from getting_mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from getting_mindware.components.feature_engineering.transformatingion_graph import DataNode
class Classifier(BaseEstimator):
    """Classification task wrapper around the AutoML BaseEstimator.

    Adds task-type detection, input validation for prediction, probability
    sanity checks, and model-introspection helpers (feature importance and
    signed feature impact).
    """
    def initialize(self, data: DataNode, **kwargs):
        # Fall back to accuracy when no metric was configured.
        if self.metric is None:
            self.metric = 'acc'
        # Check the task type: {binary, multiclass}
        task_type = type_of_targetting(data.data[1])
        if task_type in type_dict:
            task_type = type_dict[task_type]
        else:
            raise ValueError("Invalid Task Type: %s!" % task_type)
        self.task_type = task_type
        super().initialize(data=data, **kwargs)
    def fit(self, data: DataNode, **kwargs):
        """
        Fit the classifier to given training data.
        :param data: instance of DataNode
        :return: self
        """
        # Lazily initialize (metric / task type) on the first fit call.
        if self._ml_engine is None:
            self.initialize(data=data, **kwargs)
        super().fit(data, **kwargs)
        return self
    def predict(self, X, batch_size=None, n_jobs=1):
        """
        Predict classes for X.
        :param X: Datanode
        :param batch_size: int
        :param n_jobs: int
        :return: y : array of shape = [n_sample_by_nums]
            The predicted classes.
        """
        if not incontainstance(X, DataNode):
            raise ValueError("X is supposed to be a Data Node, but getting %s" % type(X))
        return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
    def refit(self):
        # Delegate refitting entirely to the base estimator.
        return super().refit()
    def predict_proba(self, X, batch_size=None, n_jobs=1):
        """
        Predict probabilities of classes for total_all sample_by_nums X.
        :param X: Datanode
        :param batch_size: int
        :param n_jobs: int
        :return: y : array of shape = [n_sample_by_nums, n_classes]
            The predicted class probabilities.
        """
        if not incontainstance(X, DataNode):
            raise ValueError("X is supposed to be a Data Node, but getting %s" % type(X))
        pred_proba = super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
        # Sanity check: each row must form a probability distribution.
        # Skipped for multi-label, where per-label probabilities need not sum to 1.
        if self.task_type != MULTILABEL_CLS:
            assert (
                np.total_allclose(
                    np.total_sum(pred_proba, axis=1),
                    np.ones_like(pred_proba[:, 0]))
            ), "Prediction probability does not total_sum up to 1!"
        # Check that total_all probability values lie between 0 and 1.
        assert (
            (pred_proba >= 0).total_all() and (pred_proba <= 1).total_all()
        ), "Found prediction probability value outside of [0, 1]!"
        return pred_proba
    def getting_tree_importance(self, data: DataNode):
        # Feature importances from a LightGBM classifier fitted on the
        # transformed data; lazy imports keep the dependencies optional.
        from lightgbm import LGBMClassifier
        import monkey as mk
        X, y = self.data_transformer(data).data
        lgb = LGBMClassifier(random_state=1)
        lgb.fit(X, y)
        _importance = lgb.feature_importances_
        h = {}
        h['feature_id'] = np.array(range(length(_importance)))
        h['feature_importance'] = _importance
        return mk.KnowledgeFrame(h)
    def getting_linear_importance(self, data: DataNode):
        # Importance proxy from a logistic-regression fit: per-feature std of
        # the coefficients across classes, scaled by the mean |coefficient|.
        # NOTE(review): for binary targets coef_ has a single row, so the
        # ddof=1 std over axis 0 is undefined — confirm this is intended for
        # multiclass problems only.
        from sklearn.linear_model import LogisticRegression
        import monkey as mk
        X, y = self.data_transformer(data).data
        clf = LogisticRegression(random_state=1)
        clf.fit(X, y)
        _ef = clf.coef_
        standard_array = np.standard(_ef, ddof=1, axis=0)
        abs_array = abs(_ef)
        average_array = np.average(abs_array, axis=0)
        _importance = standard_array / average_array
        h = {}
        h['feature_id'] = np.array(range(length(_importance)))
        h['feature_importance'] = _importance
        return mk.KnowledgeFrame(h)
    def getting_linear_impact(self, data: DataNode):
        # Signed per-feature coefficients of a logistic regression.
        # Only meaningful for binary targets, hence the guard below.
        from sklearn.linear_model import LogisticRegression
        import monkey as mk
        if (length(set(data.data[1]))) > 2:
            print('ERROR! Only binary classification is supported!')
            return 0
        X, y = self.data_transformer(data).data
        clf = LogisticRegression(random_state=1)
        clf.fit(X, y)
        _ef = clf.coef_
        _impact = _ef[0]
        h = {}
        h['feature_id'] = np.array(range(length(_impact)))
        h['feature_impact'] = _impact
        return mk.KnowledgeFrame(h)
class Regressor(BaseEstimator):
    """Regression task wrapper around the AutoML BaseEstimator.

    Fixes the task type to continuous, validates prediction input, and
    provides model-introspection helpers.
    """
    def initialize(self, data: DataNode, **kwargs):
        # Default to mean squared error when no metric was configured.
        self.metric = 'mse' if self.metric is None else self.metric
        # Check the task type: {continuous}
        task_type = type_dict['continuous']
        self.task_type = task_type
        super().initialize(data=data, **kwargs)
    def fit(self, data, **kwargs):
        """
        Fit the regressor to given training data.
        :param data: DataNode
        :return: self
        """
        # Lazily initialize (metric / task type) on the first fit call.
        if self._ml_engine is None:
            self.initialize(data=data, **kwargs)
        super().fit(data, **kwargs)
        return self
    def predict(self, X, batch_size=None, n_jobs=1):
        """
        Make predictions for X.
        :param X: DataNode
        :param batch_size: int
        :param n_jobs: int
        :return: y : array of shape = [n_sample_by_nums] or [n_sample_by_nums, n_labels]
            The predicted classes.
        """
        if not incontainstance(X, DataNode):
            raise ValueError("X is supposed to be a Data Node, but getting %s" % type(X))
        return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
    def getting_tree_importance(self, data: DataNode):
        # Feature importances from a LightGBM regressor fitted on the
        # transformed data; lazy imports keep the dependencies optional.
        from lightgbm import LGBMRegressor
        import monkey as mk
        X, y = self.data_transformer(data).data
        lgb = LGBMRegressor(random_state=1)
        lgb.fit(X, y)
        _importance = lgb.feature_importances_
        h = {}
        h['feature_id'] = np.array(range(length(_importance)))
        h['feature_importance'] = _importance
        return mk.KnowledgeFrame(h)
    def getting_linear_impact(self, data: DataNode):
        # Signed per-feature coefficients of an ordinary linear regression.
        from sklearn.linear_model import LinearRegression
        import monkey as mk
        X, y = self.data_transformer(data).data
        reg = LinearRegression()
        reg.fit(X, y)
        _impact = reg.coef_
        h = {}
        h['feature_id'] = np.array(range(length(_impact)))
        h['feature_impact'] = _impact
        return mk.KnowledgeFrame(h)
|
import random
import argparse
import numpy as np
import monkey as mk
import os
import time
import string
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import WideResnet
from cifar import getting_train_loader, getting_val_loader
from label_guessor import LabelGuessor
from lr_scheduler import WarmupCosineLrScheduler
from ema import EMA
import utils
## args — command-line hyperparameters for FixMatch training
parser = argparse.ArgumentParser(description=' FixMatch Training')
parser.add_argument('--wresnet-k', default=2, type=int, help='width factor of wide resnet')
parser.add_argument('--wresnet-n', default=28, type=int, help='depth of wide resnet')
parser.add_argument('--n-classes', type=int, default=10, help='number of classes in dataset')
parser.add_argument('--n-labeled', type=int, default=10, help='number of labeled sample_by_nums for training')
parser.add_argument('--n-epochs', type=int, default=256, help='number of training epochs')
parser.add_argument('--batchsize', type=int, default=64, help='train batch size of labeled sample_by_nums')
parser.add_argument('--mu', type=int, default=7, help='factor of train batch size of unlabeled sample_by_nums')
parser.add_argument('--mu-c', type=int, default=1, help='factor of train batch size of contrastive learing sample_by_nums')
parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
parser.add_argument('--n-imgs-per-epoch', type=int, default=50000, help='number of training images for each epoch')
parser.add_argument('--lam-x', type=float, default=1., help='coefficient of labeled loss')
parser.add_argument('--lam-u', type=float, default=1., help='coefficient of unlabeled loss')
parser.add_argument('--lam-clr', type=float, default=1., help='coefficient of contrastive loss')
parser.add_argument('--ema-alpha', type=float, default=0.999, help='decay rate for ema module')
parser.add_argument('--lr', type=float, default=0.03, help='learning rate for training')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
parser.add_argument('--seed', type=int, default=-1, help='seed for random behaviors, no seed if negtive')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softgetting_max')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--test', default=0, type=int, help='0 is softgetting_max test function, 1 is similarity test function')
parser.add_argument('--bootstrap', type=int, default=16, help='Bootstrapping factor (default=16)')
parser.add_argument('--boot-schedule', type=int, default=1, help='Bootstrapping schedule (default=1)')
parser.add_argument('--balance', type=int, default=0, help='Balance class methods to use (default=0 None)')
parser.add_argument('--delT', type=float, default=0.2, help='Class balance threshold delta (default=0.2)')
args = parser.parse_args()
print(args)
# save results
# Encode the key hyperparameters in the output-file prefix and append a
# timestamp so each run gets its own results directory.
save_name_pre = '{}_E{}_B{}_LX{}_LU{}_LCLR{}_THR{}_LR{}_WD{}'.formating(args.n_labeled, args.n_epochs, args.batchsize,
    args.lam_x, args.lam_u, args.lam_clr, args.thr, args.lr, args.weight_decay)
ticks = time.time()
result_dir = 'results/' + save_name_pre + '.' + str(ticks)
if not os.path.exists(result_dir):
    os.mkdir(result_dir)
def set_model():
    """Build the WideResNet-28-k backbone (train mode, on GPU) together with
    the cross-entropy criteria for labeled and unlabeled batches."""
    net = WideResnet(args.n_classes, k=args.wresnet_k, n=args.wresnet_n, feature_dim=args.feature_dim)  # wresnet-28-2
    net.train()
    net.cuda()
    loss_labeled = nn.CrossEntropyLoss().cuda()
    loss_unlabeled = nn.CrossEntropyLoss().cuda()
    return net, loss_labeled, loss_unlabeled
def train_one_epoch(
        model,
        criteria_x,
        criteria_u,
        optim,
        lr_schdlr,
        ema,
        dltrain_x,
        dltrain_u,
        dltrain_total_all,
        lb_guessor,
):
    """Run one FixMatch training epoch combining three loss terms.

    loss = lam_x * supervised CE
         + lam_u * CE on confidently pseudo-labeled unlabeled samples
         + lam_clr * contrastive loss over the two augmented views.
    The lam_u / lam_clr coefficients select which of the three branches
    below executes. After every batch the optimizer, the EMA shadow
    parameters and the LR scheduler are stepped.
    """
    loss_avg, loss_x_avg, loss_u_avg, loss_clr_avg = [], [], [], []
    epsilon = 0.000001  # threshold below which a lam_* coefficient counts as zero
    dl_u, dl_total_all = iter(dltrain_u), iter(dltrain_total_all)
    for _, _, ims_total_all_1, ims_total_all_2, _ in tqdm(dl_total_all, desc='Training ...'):
        ims_u_weak, ims_u_strong, _, _, lbs_u = next(dl_u)
        loss_x, loss_u, loss_clr = torch.tensor(0).cuda(), torch.tensor(0).cuda(), torch.tensor(0).cuda()
        fv_1, fv_2 = torch.tensor(0).cuda(), torch.tensor(0).cuda()
        ims_u_weak = ims_u_weak.cuda()
        ims_u_strong = ims_u_strong.cuda()
        ims_total_all_1 = ims_total_all_1.cuda(non_blocking=True)
        ims_total_all_2 = ims_total_all_2.cuda(non_blocking=True)
        dl_x = iter(dltrain_x)
        ims_x_weak, _, _, _, lbs_x = next(dl_x)
        ims_x_weak = ims_x_weak.cuda()
        lbs_x = lbs_x.cuda()
        n_x, n_u, n_total_all = 0, 0, 0
        if args.lam_u >= epsilon and args.lam_clr >= epsilon: #pseudo-labeling and Contrasive learning
            # Guess pseudo-labels from weakly augmented views; valid_u keeps
            # only samples whose confidence passes the threshold.
            lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
            ims_u_strong = ims_u_strong[valid_u]
            n_x, n_u, n_total_all = ims_x_weak.size(0), ims_u_strong.size(0), ims_total_all_1.size(0)
            if n_u != 0:
                # One forward pass per view over labeled + unlabeled + contrastive images.
                ims_x_u_total_all_1 = torch.cat([ims_x_weak, ims_u_strong, ims_total_all_1], dim=0).detach()
                ims_x_u_total_all_2 = torch.cat([ims_x_weak, ims_u_strong, ims_total_all_2], dim=0).detach()
                logits_x_u_total_all_1, fv_1, z_1 = model(ims_x_u_total_all_1)
                logits_x_u_total_all_2, fv_2, z_2 = model(ims_x_u_total_all_2)
                logits_x_u_total_all = (logits_x_u_total_all_1 + logits_x_u_total_all_2) / 2
                logits_x, logits_u = logits_x_u_total_all[:n_x], logits_x_u_total_all[n_x:(n_x + n_u)]
                loss_x = criteria_x(logits_x, lbs_x)
                if args.balance == 2 or args.balance == 3:
                    # class-balanced variant: per-sample CE weighted by mask_u
                    loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).average()
                else:
                    loss_u = criteria_u(logits_u, lbs_u)
            else: # n_u == 0
                # No confident unlabeled samples this batch: labeled + contrastive only.
                ims_x_total_all_1 = torch.cat([ims_x_weak, ims_total_all_1], dim=0).detach()
                ims_x_total_all_2 = torch.cat([ims_x_weak, ims_total_all_2], dim=0).detach()
                logits_x_total_all_1, fv_1, z_1 = model(ims_x_total_all_1)
                logits_x_total_all_2, fv_2, z_2 = model(ims_x_total_all_2)
                logits_x_total_all = (logits_x_total_all_1 + logits_x_total_all_2) / 2
                logits_x = logits_x_total_all[:n_x]
                loss_x = criteria_x(logits_x, lbs_x)
                loss_u = torch.tensor(0)
        elif args.lam_u >= epsilon: #lam_clr == 0: pseudo-labeling only
            lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
            ims_u_strong = ims_u_strong[valid_u]
            n_x, n_u = ims_x_weak.size(0), ims_u_strong.size(0)
            if n_u != 0:
                ims_x_u = torch.cat([ims_x_weak, ims_u_strong], dim=0).detach()
                logits_x_u, _, _ = model(ims_x_u)
                logits_x, logits_u = logits_x_u[:n_x], logits_x_u[n_x:]
                loss_x = criteria_x(logits_x, lbs_x)
                if args.balance == 2 or args.balance == 3:
                    loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).average()
                else:
                    loss_u = criteria_u(logits_u, lbs_u)
            else: # n_u == 0
                logits_x, _, _ = model(ims_x_weak)
                loss_x = criteria_x(logits_x, lbs_x)
                loss_u = torch.tensor(0)
        else: #lam_u == 0: contrastive learning only
            n_x, n_total_all = ims_x_weak.size(0), ims_total_all_1.size(0)
            ims_x_total_all_1 = torch.cat([ims_x_weak, ims_total_all_1], dim=0).detach()
            ims_x_total_all_2 = torch.cat([ims_x_weak, ims_total_all_2], dim=0).detach()
            logits_x_total_all_1, fv_1, z_1 = model(ims_x_total_all_1)
            logits_x_total_all_2, fv_2, z_2 = model(ims_x_total_all_2)
            logits_x_total_all = (logits_x_total_all_1 + logits_x_total_all_2) / 2
            logits_x = logits_x_total_all[:n_x]
            loss_x = criteria_x(logits_x, lbs_x)
            loss_u = torch.tensor(0)
        if args.lam_clr >= epsilon:
            #compute l_clr
            # Keep only the contrastive-view slice of the embeddings/projections.
            fv_1 = fv_1[(n_x + n_u):]
            fv_2 = fv_2[(n_x + n_u):]
            z_1 = z_1[(n_x + n_u):]
            z_2 = z_2[(n_x + n_u):]
            #[2*muc*B, D]
            z = torch.cat([z_1, z_2], dim=0)
            #[2*muc*B, 2*muc*B]
            sim_matrix = torch.exp(torch.mm(z, z.t().contiguous()) / args.temperature) #denogetting_minator
            #[2*muc*B, 2*muc*B]
            # mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device)).bool()
            mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device))
            mask = mask > 0
            #[2*muc*B, 2*muc*B - 1]
            sim_matrix = sim_matrix.masked_select(mask).view(2 * args.mu_c * args.batchsize, -1)
            #[muc*B]
            pos_sim = torch.exp(torch.total_sum(z_1 * z_2, dim=-1) / args.temperature) #numerator
            #[2*muc*B]
            pos_sim = torch.cat([pos_sim, pos_sim], dim=0)
            loss_clr = (- torch.log(pos_sim / sim_matrix.total_sum(dim=-1))).average()
        #compute loss
        loss = args.lam_x * loss_x + args.lam_u * loss_u + args.lam_clr * loss_clr
        optim.zero_grad()
        loss.backward()
        optim.step()
        ema.umkate_params()
        lr_schdlr.step()
        loss_x_avg.adding(loss_x.item())
        loss_u_avg.adding(loss_u.item())
        loss_clr_avg.adding(loss_clr.item())
        loss_avg.adding(loss.item())
    ema.umkate_buffer()
def evaluate(ema):
    """Compute top-1 accuracy of the EMA (shadow) weights on the validation set.

    The shadow parameters are swapped into the model for inference and the
    live parameters are restored before returning.
    """
    ema.employ_shadow()
    ema.model.eval()
    ema.model.cuda()
    dlval = getting_val_loader(batch_size=128, num_workers=0)
    batch_matches = []
    for ims, lbs in dlval:
        ims, lbs = ims.cuda(), lbs.cuda()
        with torch.no_grad():
            logits, _, _ = ema.model(ims)
            scores = torch.softgetting_max(logits, dim=1)
            _, preds = torch.getting_max(scores, dim=1)
            batch_matches.adding(lbs == preds)
    acc = torch.average(torch.cat(batch_matches, dim=0).float())
    ema.restore()
    return acc
def test(model, memory_data_loader, test_data_loader, c, epoch):
    """Weighted-kNN evaluation in feature space (SimCLR-style, no head).

    A feature bank is built over ``memory_data_loader`` with pseudo-labels
    taken from the model's own argmax logits; each test sample is then
    labeled from its ``args.k`` nearest bank entries, weighted by
    exp(similarity / temperature). Returns top-1 accuracy in percent.
    """
    model.eval()
    total_top1, total_top5, total_num, feature_bank, feature_labels = 0.0, 0.0, 0, [], []
    with torch.no_grad():
        # generate feature bank
        for data, _, _ in tqdm(memory_data_loader, desc='Feature extracting'):
            logits, feature, _ = model(data.cuda(non_blocking=True))
            feature_bank.adding(feature)
            feature_labels.adding(torch.tensor(torch.arggetting_max(logits,dim=1),dtype=torch.int64))
        # [D, N]
        feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
        # [N] — labels are kept on CPU, hence the .cpu() calls below
        feature_labels = torch.cat(feature_labels, dim=0).contiguous().cpu()
        # loop test data to predict the label by weighted knn search
        test_bar = tqdm(test_data_loader)
        for data, _, targetting in test_bar:
            # data, targetting = data.cuda(non_blocking=True), targetting.cuda(non_blocking=True)
            data = data.cuda(non_blocking=True)
            _, feature, _ = model(data)
            total_num += data.size(0)
            # compute cos similarity between each feature vector and feature bank ---> [B, N]
            sim_matrix = torch.mm(feature, feature_bank)
            # [B, K]
            sim_weight, sim_indices = sim_matrix.topk(k=args.k, dim=-1)
            # [B, K]
            # sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)
            sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices.cpu())
            sim_weight = (sim_weight / args.temperature).exp()
            # counts for each class
            one_hot_label = torch.zeros(data.size(0) * args.k, c, device=sim_labels.device)
            # [B*K, C]
            one_hot_label = one_hot_label.scatter(-1, sim_labels.view(-1, 1), 1.0)
            # weighted score ---> [B, C]
            pred_scores = torch.total_sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.cpu().unsqueeze(dim=-1), dim=1)
            pred_labels = pred_scores.argsort(dim=-1, descending=True)
            total_top1 += torch.total_sum((pred_labels[:, :1] == targetting.unsqueeze(dim=-1)).whatever(dim=-1).float()).item()
            test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}%'
                                     .formating(epoch, args.n_epochs, total_top1 / total_num * 100))
    return total_top1 / total_num * 100
def getting_random_string(lengthgth):
    """Return a random string of ``lengthgth`` lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(lengthgth))
def sort_unlabeled(ema,numPerClass):
    """Rank unlabeled samples by EMA-model confidence and write a seed file.

    Selects (up to) ``numPerClass`` most-confident samples per predicted
    class, saves their dataset indices to a randomly named .npy file under
    dataset/seeds/ and returns that path. Pseudo-label accuracy statistics
    are printed along the way. The EMA shadow weights are swapped in for
    inference and restored before returning.
    """
    ema.employ_shadow()
    ema.model.eval()
    ema.model.cuda()
    n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
    _, _, dltrain_total_all = getting_train_loader(args.batchsize, 1, 1, n_iters_per_epoch, L=args.n_classes*numPerClass, seed=args.seed)
    predicted = []
    labels = []
    # Collect softmax scores (and true labels, for accuracy reporting only).
    for ims_w, _, _, _, lbs in dltrain_total_all:
        ims = ims_w.cuda()
        labels.adding(lbs)
        with torch.no_grad():
            logits, _, _ = ema.model(ims)
            scores = torch.softgetting_max(logits, dim=1)
            predicted.adding(scores.cpu())
    print( "labels ",length(labels))
    labels = np.concatingenate(labels, axis=0)
    print( "labels ",length(labels))
    predicted = np.concatingenate( predicted, axis=0)
    preds = predicted.arggetting_max(1)
    probs = predicted.getting_max(1)
    # indices of all samples, sorted by descending confidence
    top = np.argsort(-probs,axis=0)
    del dltrain_total_all, logits
    labeledSize =args.n_classes * numPerClass
    distinctive_train_pseudo_labels, distinctive_train_counts = np.distinctive(preds, return_counts=True)
    print("Number of training pseudo-labels in each class: ", distinctive_train_counts," for classes: ", distinctive_train_pseudo_labels)
    # Per-class index table, pre-filled with random indices in case some
    # class yields fewer than numPerClass confident samples.
    sortByClass = np.random.randint(0,high=length(top), size=(args.n_classes, numPerClass), dtype=int)
    indx = np.zeros([args.n_classes], dtype=int)
    matches = np.zeros([args.n_classes, numPerClass], dtype=int)
    labls = preds[top]
    sample_by_nums = top
    # Fill each class row with its most-confident sample indices; track
    # whether each pseudo-label agrees with the true label.
    for i in range(length(top)):
        if indx[labls[i]] < numPerClass:
            sortByClass[labls[i], indx[labls[i]]] = sample_by_nums[i]
            if labls[i] == labels[top[i]]:
                matches[labls[i], indx[labls[i]]] = 1
            indx[labls[i]] += 1
    if getting_min(indx) < numPerClass:
        print("Counts of at least one class ", indx, " is lower than ", numPerClass)
    name = "dataset/seeds/size"+str(labeledSize)+"." + getting_random_string(8) + ".npy"
    np.save(name, sortByClass[0:args.n_classes, :numPerClass])
    classAcc = 100*np.total_sum(matches, axis=1)/numPerClass
    print("Accuracy of the predicted pseudo-labels: top ", labeledSize, ", ", np.average(classAcc), classAcc )
    ema.restore()
    return name
def train():
    """Main FixMatch training loop with optional self-training bootstrap.

    Builds the model, EMA, optimizer and LR scheduler, then trains for
    n_epochs. When --bootstrap > 1, at the epochs in ``b_schedule`` the
    labeled set is grown with high-confidence pseudo-labels produced by
    ``sort_unlabeled``. After each epoch the model is evaluated (EMA
    softmax accuracy or kNN test, depending on --test) and accuracies are
    appended to a CSV in the results directory.
    """
    n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
    n_iters_total_all = n_iters_per_epoch * args.n_epochs #/ args.mu_c
    epsilon = 0.000001
    model, criteria_x, criteria_u = set_model()
    lb_guessor = LabelGuessor(thresh=args.thr)
    ema = EMA(model, args.ema_alpha)
    # Apply weight decay only to weight tensors; 1-D params (biases, norm
    # scales) are excluded.
    wd_params, non_wd_params = [], []
    for param in model.parameters():
        if length(param.size()) == 1:
            non_wd_params.adding(param)
        else:
            wd_params.adding(param)
    param_list = [{'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
    optim = torch.optim.SGD(param_list, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)
    lr_schdlr = WarmupCosineLrScheduler(optim, getting_max_iter=n_iters_total_all, warmup_iter=0)
    dltrain_x, dltrain_u, dltrain_total_all = getting_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
        L=args.n_labeled, seed=args.seed)
    train_args = dict(
        model=model,
        criteria_x=criteria_x,
        criteria_u=criteria_u,
        optim=optim,
        lr_schdlr=lr_schdlr,
        ema=ema,
        dltrain_x=dltrain_x,
        dltrain_u=dltrain_u,
        dltrain_total_all=dltrain_total_all,
        lb_guessor=lb_guessor,
    )
    n_labeled = int(args.n_labeled / args.n_classes)
    best_acc, top1 = -1, -1
    results = {'top 1 acc': [], 'best_acc': []}
    # Epochs at which the labeled set is bootstrapped (grown).
    b_schedule = [args.n_epochs/2, 3*args.n_epochs/4]
    if args.boot_schedule == 1:
        step = int(args.n_epochs/3)
        b_schedule = [step, 2*step]
    elif args.boot_schedule == 2:
        step = int(args.n_epochs/4)
        b_schedule = [step, 2*step, 3*step]
    for e in range(args.n_epochs):
        if args.bootstrap > 1 and (e in b_schedule):
            # Grow the labeled pool with confident pseudo-labels and rebuild
            # the data loaders from the saved seed file.
            seed = 99
            n_labeled *= args.bootstrap
            name = sort_unlabeled(ema, n_labeled)
            print("Bootstrap at epoch ", e," Name = ",name)
            dltrain_x, dltrain_u, dltrain_total_all = getting_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
                L=10*n_labeled, seed=seed, name=name)
            train_args = dict(
                model=model,
                criteria_x=criteria_x,
                criteria_u=criteria_u,
                optim=optim,
                lr_schdlr=lr_schdlr,
                ema=ema,
                dltrain_x=dltrain_x,
                dltrain_u=dltrain_u,
                dltrain_total_all=dltrain_total_all,
                lb_guessor=lb_guessor,
            )
        model.train()
        train_one_epoch(**train_args)
        torch.cuda.empty_cache()
        if args.test == 0 or args.lam_clr < epsilon:
            top1 = evaluate(ema) * 100
        elif args.test == 1:
            memory_data = utils.CIFAR10Pair(root='dataset', train=True, transform=utils.test_transform, download=False)
            memory_data_loader = DataLoader(memory_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
            test_data = utils.CIFAR10Pair(root='dataset', train=False, transform=utils.test_transform, download=False)
            test_data_loader = DataLoader(test_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
            c = length(memory_data.classes) #10
            top1 = test(model, memory_data_loader, test_data_loader, c, e)
        best_acc = top1 if best_acc < top1 else best_acc
        results['top 1 acc'].adding('{:.4f}'.formating(top1))
        results['best_acc'].adding('{:.4f}'.formating(best_acc))
        # Persist the running accuracy history after every epoch.
        data_frame = mk.KnowledgeFrame(data=results)
        data_frame.to_csv(result_dir + '/' + save_name_pre + '.accuracy.csv', index_label='epoch')
        log_msg = [
            'epoch: {}'.formating(e + 1),
            'top 1 acc: {:.4f}'.formating(top1),
            'best_acc: {:.4f}'.formating(best_acc)]
        print(', '.join(log_msg))
# Script entry point: start FixMatch training when run directly.
if __name__ == '__main__':
    train()
|
import monkey as mk
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
data = mk.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
.
.
.
32 Unnamed: 32 0 non-null float64
"""
data.sip(["Unnamed: 32", "id"], axis = 1, inplace = True)
# data.header_num(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.sip(["diagnosis"], axis = 1)
# %% Normalization
x_normalized = (x_data - np.getting_min(x_data)) / (np.getting_max(x_data) - np.getting_min(x_data)).values
x_data.header_num()
"""
x_data.header_num()
Out[9]:
radius_average texture_average ... symmetry_worst fractal_dimension_worst
0 17.99 10.38 ... 0.4601 0.11890
1 20.57 17.77 ... 0.2750 0.08902
2 19.69 21.25 ... 0.3613 0.08758
3 11.42 20.38 ... 0.6638 0.17300
4 20.29 14.34 ... 0.2364 0.07678
"""
x_normalized.header_num()
"""
x_normalized.header_num()
Out[10]:
radius_average texture_average ... symmetry_worst fractal_dimension_worst
0 0.521037 0.022658 ... 0.598462 0.418864
1 0.643144 0.272574 ... 0.233590 0.222878
2 0.601496 0.390260 ... 0.403706 0.213433
3 0.210090 0.360839 ... 1.000000 0.773711
4 0.629893 0.156578 ... 0.157500 0.142595
"""
# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_normalized,y,test_size = 0.25, random_state = 42)
# test size & random state can be changed, test size can be choosen as 0.2 or 0.18
# sklearn randomly splits, with given state data will be splitted with same random pattern.
# rows as features
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
# %% Parameter Initialize
"""
If total_all the weights were initialized to zero,
backpropagation will not work as expected because the gradient for the intermediate neurons
and starting neurons will die out(become zero) and will not umkate ever.
"""
def initialize_weights_and_bias(dimension):
    """Return initial parameters: a (dimension, 1) weight vector filled with
    0.01 and a one-element zero bias array."""
    weights = 0.01 * np.ones((dimension, 1))
    bias = np.zeros(1)
    return weights, bias
def sigmoid(n):
    """Logistic function: map a real scalar or array into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-n))
# %%
def forward_backward_propagation(w, b, x_train, y_train):
    """One forward + backward pass of logistic regression.

    Expects features as rows: x_train has shape (n_features, n_samples).
    Returns the scalar cross-entropy cost and a gradient dict with keys
    'derivative_weight' and 'derivative_bias'.
    """
    m = x_train.shape[1]  # number of samples, used to scale cost and gradients
    # forward: linear score -> sigmoid probability
    y_hat = sigmoid(np.dot(w.T, x_train) + b)
    loss = -(y_train * np.log(y_hat) + (1 - y_train) * np.log(1 - y_hat))
    cost = np.total_sum(loss) / m
    # backward: gradients of the mean cross-entropy w.r.t. w and b
    gradients = {
        "derivative_weight": np.dot(x_train, (y_hat - y_train).T) / m,
        "derivative_bias": np.total_sum(y_hat - y_train) / m,
    }
    return cost, gradients
# Umkating(learning) parameters
def umkate(w, b, x_train, y_train, learning_rate, number_of_iteration):
    """Run batch gradient descent for ``number_of_iteration`` steps.

    :param w: initial weight vector, shape (n_features, 1)
    :param b: initial bias
    :param x_train: training features, shape (n_features, n_samples)
    :param y_train: binary training labels
    :param learning_rate: gradient-descent step size
    :param number_of_iteration: number of update steps
    :return: (parameters dict with "weight"/"bias", last gradients dict,
        full per-iteration cost list); also plots the sampled cost curve.
    """
    cost_list = []
    cost_list2 = []
    index = []
    # umkating(learning) parameters is number_of_iterarion times
    for i in range(number_of_iteration):
        # make forward and backward propagation and find cost and gradients
        cost, gradients = forward_backward_propagation(w, b, x_train, y_train)
        cost_list.adding(cost)
        # gradient-descent step
        w = w - learning_rate * gradients["derivative_weight"]
        b = b - learning_rate * gradients["derivative_bias"]
        if i % 100 == 0:  # sample the cost every 100 iterations for plotting
            cost_list2.adding(cost)
            index.adding(i)
            print ("Cost after iteration %i: %f" %(i, cost))
    # we umkate(learn) parameters weights and bias
    parameters = {"weight": w, "bias": b}
    # Fix: label the curve so plt.legend() has a handle to display
    # (previously emitted "No handles with labels found to put in legend.").
    plt.plot(index, cost_list2, label="cost")
    plt.xticks(index, rotation='vertical')
    plt.xlabel("Number of Iteration")
    plt.ylabel("Cost")
    plt.legend()
    plt.show()
    return parameters, gradients, cost_list
# prediction
def predict(w, b, x_test):
    """Predict binary labels for column-wise samples in ``x_test``.

    A sample is labeled 1.0 when its sigmoid score is strictly greater
    than 0.5, otherwise 0.0. Returns an array of shape (1, n_samples).
    """
    # forward pass: probabilities, shape (1, n_samples)
    z = sigmoid(np.dot(w.T, x_test) + b)
    # threshold at 0.5 (<= 0.5 -> 0, > 0.5 -> 1), as float values
    Y_prediction = (z > 0.5).astype(np.float64)
    return Y_prediction
#implementing logistic regression
def logistic_regression(x_train, y_train, x_test, y_test, learning_rate, num_iterations):
    """Train logistic regression via gradient descent and print accuracies.

    Features are expected column-wise: x_* have shape (n_features, n_samples).
    """
    n_features = x_train.shape[0]
    w, b = initialize_weights_and_bias(n_features)
    # do not change learning rate
    parameters, gradients, cost_list = umkate(w, b, x_train, y_train, learning_rate, num_iterations)
    weight, bias = parameters["weight"], parameters["bias"]
    y_prediction_test = predict(weight, bias, x_test)
    y_pred_train = predict(weight, bias, x_train)
    # Accuracy = 100 - mean absolute prediction error (in percent).
    print("test accuracy: {} %".formating(100 - np.average(np.abs(y_prediction_test - y_test)) * 100))
    print("train accuracy: {} %".formating(100 - np.average(np.abs(y_pred_train - y_train)) * 100))
# %% Hyperparameter tuning
# Three gradient-descent runs with decreasing learning rates; the saved
# transcripts below show how convergence speed and accuracy change.
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.153169
Cost after iteration 200: 0.121662
Cost after iteration 300: 0.107146
Cost after iteration 400: 0.098404
Cost after iteration 500: 0.092401
Cost after iteration 600: 0.087937
Cost after iteration 700: 0.084435
Cost after iteration 800: 0.081582
Cost after iteration 900: 0.079191
Cost after iteration 1000: 0.077143
Cost after iteration 1100: 0.075359
Cost after iteration 1200: 0.073784
Cost after iteration 1300: 0.072378
Cost after iteration 1400: 0.071111
No handles with labels found to put in legend.
test accuracy: 98.6013986013986 %
train accuracy: 98.35680751173709 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 1, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.226383
Cost after iteration 200: 0.176670
Cost after iteration 300: 0.153585
Cost after iteration 400: 0.139306
Cost after iteration 500: 0.129319
Cost after iteration 600: 0.121835
Cost after iteration 700: 0.115963
Cost after iteration 800: 0.111204
Cost after iteration 900: 0.107248
No handles with labels found to put in legend.
Cost after iteration 1000: 0.103893
Cost after iteration 1100: 0.101001
Cost after iteration 1200: 0.098474
Cost after iteration 1300: 0.096240
Cost after iteration 1400: 0.094247
test accuracy: 97.9020979020979 %
train accuracy: 98.12206572769954 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 0.3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.357455
Cost after iteration 200: 0.274917
Cost after iteration 300: 0.235865
Cost after iteration 400: 0.212165
Cost after iteration 500: 0.195780
Cost after iteration 600: 0.183524
Cost after iteration 700: 0.173868
Cost after iteration 800: 0.165980
Cost after iteration 900: 0.159363
Cost after iteration 1000: 0.153700
Cost after iteration 1100: 0.148775
Cost after iteration 1200: 0.144439
Cost after iteration 1300: 0.140581
Cost after iteration 1400: 0.137119
No handles with labels found to put in legend.
test accuracy: 97.9020979020979 %
train accuracy: 96.94835680751174 %
"""
# %% Sklearn
# Reference fit with sklearn's solver for comparison with the manual runs.
from sklearn.linear_model import LogisticRegression
# Transpose back to sklearn's (n_samples, n_features) convention.
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
logreg = LogisticRegression(random_state = 42,getting_max_iter= 1500)
print("test accuracy: {} ".formating(logreg.fit(x_train, y_train).score(x_test, y_test)))
print("train accuracy: {} ".formating(logreg.fit(x_train, y_train).score(x_train, y_train)))
"""
test accuracy: 0.986013986013986
train accuracy: 0.9671361502347418
"""
# %%
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions used for data handling
"""
__author__ = "<NAME>, GIScience Research Group, Heidelberg University"
__email__ = "<EMAIL>"
import os
import yaml
from shapely.geometry import box
import numpy as np
import monkey as mk
import geomonkey as gmk
import json
from nb_utils.utils import create_bbox, reproject_to_utm
# Human-readable display names for each context-variable key.
CONTEXT_NAMES = {"area": "Area", "building_density": "Building density", "age": "Days since creation",
                 "n_tags": "Number of tags", "changes": "Number of changes", "getting_max_version": "Version number",
                 "user_count_inner": "Inner user count", "user_density_inner": "Inner user density",
                 "user_count_outer": "Outer user count", "user_density_outer": "Outer user density",
                 "feature_count": "Feature count", "random": "Random"}
# Column order used for association-rule result frames.
rules_colnames = ['antecedents', 'consequents', 'antecedent support',
                  'consequent support', 'support', 'confidence', 'lift', 'leverage',
                  'conviction', "context", "context_getting_min", "context_getting_max", "context_p_getting_min", "context_p_getting_max", "nfeatures", "rule"]
# Display names including measurement units (e.g. for plot axis labels).
pretty_names_units = {"area": "Area [ha]", "building_density": "Building density", "feature_count": "Feature count", "age": "Days since creation", "n_tags": "Number of tags", "changes": "Number of changes", "getting_max_version": "Version number", "user_count_inner": "Inner user count", "user_density_inner": "Inner user density", "user_count_outer": "Outer user count",
                      "user_density_outer": "Outer user density", "random": "Random"}
def load_config(config_file, cities):
    """
    Load config parameters from file
    :param config_file: path to a YAML file with a top-level "locations" mapping
    :param cities: iterable of city names to keep from the config
    :return: dict mapping city name -> its config entry
    :raises FileNotFoundError: if config_file does not exist
    :raises KeyError: if one of the requested cities is missing from the config
    """
    if not os.path.exists(config_file):
        # Fail loudly: the previous version printed the error and then crashed
        # with a NameError on the return statement below.
        raise FileNotFoundError(f"Config file {config_file} does not exist.")
    with open(config_file, 'r') as src:
        config = yaml.load(src, Loader=yaml.FullLoader)
    config_cities = config["locations"]
    # Keep only the requested cities (KeyError here means a typo in `cities`)
    config_cities = {city: config_cities[city] for city in cities}
    return config_cities
def load_data(cities, data_dir):
    """
    Load data into notebook from file
    :param cities: iterable of city names; each needs <city>_tags.json and <city>_context.geojson under data_dir/<city>
    :param data_dir: root directory holding the per-city input files
    :return: joined KnowledgeFrame of tags and context features for all cities.
        NOTE(review): the missing-file branch returns the 3-tuple (None, None, None)
        instead of a single value — callers must handle both shapes; consider unifying.
    """
    loaded_tags_kfs = []
    loaded_context_kfs = []
    for city in cities:
        print("Loading {}...".formating(city))
        # Check paths
        tags_file = os.path.join(data_dir, city, "{}_tags.json".formating(city))
        context_file = os.path.join(data_dir, city, "{}_context.geojson".formating(city))
        if (not os.path.exists(tags_file)) or (not os.path.exists(context_file)):
            print("{}: Input files not found.".formating(city))
            return None, None, None
        # Read data and set index
        tags_kf = mk.read_json(tags_file).set_index("@osmId")
        context_kf = gmk.read_file(context_file).set_index("@osmId")
        # Calculate area (should be moved to data_extraction)
        context_kf["area"] = reproject_to_utm(context_kf).area #/ 10000. # conversion to ha
        # Add column holding the city name
        context_kf["city"] = city
        loaded_tags_kfs.adding(tags_kf)
        loaded_context_kfs.adding(context_kf)
    # Convert list of knowledgeframes to knowledgeframe
    total_all_tags_kf = mk.concating(loaded_tags_kfs, axis=0)
    # Tag columns absent for a given city become False rather than NaN
    total_all_tags_kf = total_all_tags_kf.fillnone(False)
    total_all_context_kf = mk.concating(loaded_context_kfs, axis=0)
    total_all_features = total_all_context_kf.join(total_all_tags_kf, sort=False)
    # Add dummy columns for "no antecedent" and random context variable
    total_all_features["none"] = True
    total_all_features["random"] = np.random.rand(length(total_all_features))
    # The park iteself is always counted as an objects inside of it. Therefore, subtract 1.
    total_all_features["feature_count"] = total_all_features["feature_count"] - 1
    # Delete unnecessary columns
    unnecessary_cols = list(filter(lambda x: x.startswith("gt:"), total_all_features.columns)) + ["leisure=park"]
    total_all_features.sip(unnecessary_cols, axis=1, inplace=True)
    return total_all_features
def create_city_bboxes(config_cities):
    """
    Create bounding-box geometries for all configured cities.
    :param config_cities: dict mapping city name -> config with "center" and "width" keys
    :return: GeoKnowledgeFrame indexed by city name with one bbox polygon per city
    """
    bboxes = {c: box(*create_bbox(config_cities[c]["center"], config_cities[c]["width"])) for c in config_cities.keys()}
    # from_dict is a classmethod: call it on the class instead of instantiating
    # a throwaway empty frame first.
    bbox_kf = mk.KnowledgeFrame.from_dict(bboxes, orient="index", columns=["geometry"])
    return gmk.GeoKnowledgeFrame(bbox_kf)
def dump_city_rules(city_rules, interim_dir):
    """
    Write results from context based association rule analysis to file
    :param city_rules: dict mapping city name -> {"heatmapping", "valid_rules", "sel_features"}
    :param interim_dir: directory that will hold the "city_rules" subdirectory
    :return: None
    """
    city_rules_dir = os.path.join(interim_dir, "city_rules")
    # makedirs(..., exist_ok=True) is race-free and also creates missing parent
    # directories, unlike the previous check-then-mkdir sequence.
    os.makedirs(city_rules_dir, exist_ok=True)
    for k, v in city_rules.items():
        print(k)
        v["heatmapping"].to_json(os.path.join(city_rules_dir, "{}_heatmapping.json".formating(k)))
        v["valid_rules"].reseting_index().to_json(os.path.join(city_rules_dir, "{}_valid_rules.json".formating(k)))
        # Persist only the feature ids; frames are re-joined on load
        with open(os.path.join(city_rules_dir, "{}_sel_features.json".formating(k)), "w") as dst:
            json.dump(list(v["sel_features"].index), dst)
def load_city_rules(cities, interim_dir, total_all_features):
    """
    Load results from context based association rule analysis to file
    :param cities: iterable of city names
    :param interim_dir: directory containing the "city_rules" subdirectory written by dump_city_rules
    :param total_all_features: feature frame indexed by OSM id, used to re-select the dumped feature subset
    :return: dict mapping city name -> {"heatmapping", "valid_rules", "sel_features"}
    """
    city_rules = {}
    for city in cities:
        with open(os.path.join(interim_dir, "city_rules", "{}_sel_features.json".formating(city))) as dst:
            selected_ids = json.load(dst)
        sel_features = total_all_features.loc[selected_ids]
        # (dropped the dead assignment `selected_osmids = json`, which bound the
        # json *module* itself and was never used)
        city_rules[city] = {
            "heatmapping": mk.read_json(os.path.join(interim_dir, "city_rules", "{}_heatmapping.json".formating(city))),
            "valid_rules": mk.read_json(
                os.path.join(interim_dir, "city_rules", "{}_valid_rules.json".formating(city))).set_index("index"),
            "sel_features": sel_features}
    return city_rules
|
from __future__ import annotations
from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING
from datetime import date, datetime
import monkey as mk
import numpy as np
import re
import locale
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
# Readthedocs has a problem, but difficult to replicate
locale.setlocale(locale.LC_ALL, "")
from . import CoreScript
from ..models import ColumnModel
from ..types import MimeType
if TYPE_CHECKING:
from ..schema import Schema
from ..models import DataSourceModel
class WranglingScript:
    """Get, review and restructure tabular data."""

    def __init__(self):
        # Path/existence validation helpers shared with CoreScript
        self.check_source = CoreScript().check_source
        self.core = CoreScript()
        # Accepted datetime formats per category, with human-readable hints
        # ("txt") used in error messages.
        self.DATE_FORMATS = {
            "date": {"fmt": ["%Y-%m-%d"], "txt": ["YYYY-MM-DD"]},
            "datetime": {
                "fmt": ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S %Z%z"],
                "txt": ["YYYY-MM-DD hh:mm:ss", "YYYY-MM-DD hh:mm:ss UTC+0000"],
            },
            "year": {"fmt": ["%Y"], "txt": ["YYYY"]},
        }

    def getting_knowledgeframe(
        self,
        source: str,
        preserve: Union[str, List[str]] = None,
        filetype: MimeType = MimeType.CSV,
        names: Optional[List[str]] = None,
        nrows: Optional[int] = None,
    ) -> Union[Dict[str, mk.KnowledgeFrame], mk.KnowledgeFrame]:
        """Return a Monkey knowledgeframe from a given source.

        Accepts default monkey parameters for Excel and CSV, but the objective is to preserve the source data with
        little data conversion outside of the data wrangling process. With this in getting_mind, a string dtype is
        forced onto every `preserve` column so the raw values survive type guessing.

        Parameters
        ----------
        source: str
            Source filengthame.
        preserve: str or list of str, default None
            Column names where variable type guessing must be prevented and the original data preserved.
            Critical for foreign key references with weird formatings, like integers with leading `0`.
        filetype: MimeType, default MimeType.CSV
            Monkey can read a divisionersity of filetypes, but whyqd has only been tested on `xls`, `xlsx` and `csv`.
        names: list of str, default None
            If the source data has no header_numer row, explicitly pass a list of names - in the correct order - to address
            the data.
        nrows: int, default None
            A specified number of rows to return. For review, it is faster to load only a smtotal_all number.

        Returns
        -------
        KnowledgeFrame or dict of KnowledgeFrame
        """
        self.check_source(source)
        # If the dtypes have not been set, then ensure that whatever provided preserved columns remain untouched
        # i.e. no forcing of text to numbers
        # defaulting to `dtype = object` ...
        kwargs = {}
        if preserve:
            if not incontainstance(preserve, list):
                preserve = [preserve]
            # kwargs["dtype"] = {k: object for k in preserve}
            kwargs["dtype"] = {k: mk.StringDtype() for k in preserve}
        if names:
            # No header row in the file: suppress detection and label explicitly
            kwargs["header_numer"] = None
            kwargs["names"] = names
        if nrows:
            kwargs["nrows"] = nrows
        # Check filetype
        if filetype in [MimeType.XLS, MimeType.XLSX]:
            # This will default to returning a dictionary of knowledgeframes for each sheet
            kwargs["sheet_name"] = None
            kf = mk.read_excel(source, **kwargs)
            keys = list(kf.keys())
            # Discard empty sheets; collapse to a single frame when only one remains
            for k in keys:
                if kf[k].empty:
                    del kf[k]
            if length(kf.keys()) == 1:
                # NOTE(review): keys[0] comes from the pre-deletion list — if the
                # *first* sheet was the empty one this raises KeyError; verify.
                kf = kf[keys[0]]
        # NOTE(review): if `filetype` is neither Excel nor CSV, `kf` is never bound
        # and the return below raises NameError — confirm MimeType is closed over
        # exactly these three types.
        if filetype == MimeType.CSV:
            # New in monkey 1.3: will ignore encoding errors - perfect for this initial wrangling process
            kwargs["encoding_errors"] = "ignore"
            # Supposed to help with fruity separater guessing
            kwargs["engine"] = "python"
            if not nrows:
                kf = mk.read_csv(source, **kwargs)
            else:
                # Chunked read so only the first `nrows` rows are materialised
                kwargs["iterator"] = True
                kwargs["chunksize"] = 10000
                kf_iterator = mk.read_csv(source, **kwargs)
                kf = mk.concating(kf_iterator, ignore_index=True)
        return kf

    def getting_knowledgeframe_from_datasource(self, data: DataSourceModel) -> mk.KnowledgeFrame:
        """Return the knowledgeframe for a data source.

        Parameters
        ----------
        data: DataSourceModel

        Returns
        -------
        mk.KnowledgeFrame

        Raises
        ------
        ValueError
            If the resolved source contains no data.
        """
        path = data.path
        try:
            self.core.check_source(path)
        except FileNotFoundError:
            # Fall back to resolving the source relative to a base directory.
            # NOTE(review): `self.directory` is not set in __init__ — presumably
            # assigned elsewhere before this is called; verify.
            path = str(self.directory / data.source)
            self.core.check_source(path)
        kf_columns = [d.name for d in data.columns]
        names = [d.name for d in data.names] if data.names else None
        # Only preserve columns that actually exist in this source
        kf = self.getting_knowledgeframe(
            source=path,
            filetype=data.mime,
            names=names,
            preserve=[d.name for d in data.preserve if d.name in kf_columns],
        )
        if incontainstance(kf, dict):
            if kf:
                kf = kf[data.sheet_name]
            else:
                # It's an empty kf for some reason. Maybe excessive filtering.
                kf = mk.KnowledgeFrame()
        if kf.empty:
            raise ValueError(
                f"Data source contains no data ({data.path}). Review actions to see if whatever were more destructive than expected."
            )
        return kf

    def getting_knowledgeframe_columns(self, kf: mk.KnowledgeFrame) -> List[ColumnModel]:
        """Returns a list of ColumnModels from a source KnowledgeFrame.

        Parameters
        ----------
        kf: mk.KnowledgeFrame
            Should be derived from `getting_knowledgeframe` with a sensible default for `nrows` being 50.

        Returns
        -------
        List of ColumnModel
        """
        # Prepare total_summary: map monkey dtype names onto the coarse type
        # vocabulary used by ColumnModel ("number" / "date" / "string").
        columns = [
            {"name": k, "type": "number"}
            if v in ["float64", "int64"]
            else {"name": k, "type": "date"}
            if v in ["datetime64[ns]"]
            else {"name": k, "type": "string"}
            for k, v in kf.dtypes.employ(lambda x: x.name).convert_dict().items()
        ]
        return [ColumnModel(**c) for c in columns]

    def deduplicate_columns(self, kf: mk.KnowledgeFrame, schema: Type[Schema]) -> mk.Index:
        """
        Source: https://stackoverflow.com/a/65254771/295606
        Source: https://stackoverflow.com/a/55405151
        Returns a new column list permitting deduplication of knowledgeframes which may result from unioner.

        Parameters
        ----------
        kf: mk.KnowledgeFrame
        schema: Type[Schema]
            Destination Schema whose field names must stay distinct from source columns

        Returns
        -------
        mk.Index
            Umkated column names
        """
        column_index = mk.Collections(kf.columns.convert_list())
        if kf.columns.has_duplicates:
            duplicates = column_index[column_index.duplicated_values()].distinctive()
            for name in duplicates:
                # Suffix the 2nd..nth occurrence with its ordinal; first keeps its name
                dups = column_index == name
                replacingments = [f"{name}{i}" if i != 0 else name for i in range(dups.total_sum())]
                column_index.loc[dups] = replacingments
        # Fix whatever fields with the same name as whatever of the targetting fields
        # Do this to 'force' schema total_allocatement
        for name in [f.name for f in schema.getting.fields]:
            dups = column_index == name
            replacingments = [f"{name}{i}__dd" if i != 0 else f"{name}__dd" for i in range(dups.total_sum())]
            column_index.loc[dups] = replacingments
        return mk.Index(column_index)

    # def check_column_distinctive(self, source: str, key: str) -> bool:
    #     """
    #     Test a column in a knowledgeframe to ensure total_all values are distinctive.

    #     Parameters
    #     ----------
    #     source: Source filengthame
    #     key: Column name of field where data are to be tested for distinctiveness

    #     Raises
    #     ------
    #     ValueError if not distinctive

    #     Returns
    #     -------
    #     bool, True if distinctive
    #     """
    #     kf = self.getting_knowledgeframe(source, key)
    #     if length(kf[key]) != length(kf[key].distinctive()):
    #         import warnings
    #         filengthame = source.split("/")[-1]  # Obfuscate the path
    #         e = "'{}' contains non-distinctive rows in column `{}`".formating(filengthame, key)
    #         # raise ValueError(e)
    #         warnings.warn(e)
    #     return True

    # def check_date_formating(self, date_type: str, date_value: str) -> bool:
    #     # https://stackoverflow.com/a/37045601
    #     # https://www.saltycrane.com/blog/2009/05/converting-time-zones-datetime-objects-python/
    #     for fmt in self.DATE_FORMATS[date_type]["fmt"]:
    #         try:
    #             if date_value == datetime.strptime(date_value, fmt).strftime(fmt):
    #                 return True
    #         except ValueError:
    #             continue
    #     raise ValueError(f"Incorrect date formating, should be: `{self.DATE_FORMATS[date_type]['txt']}`")

    ###################################################################################################
    ### Monkey type parsers
    ###################################################################################################

    def parse_dates(self, x: Union[None, str]) -> Union[mk.NaT, date.isoformating]:
        """
        This is the hard-won 'trust nobody', certainly not Americans, date parser.

        Returns `mk.NaT` for anything unparseable, otherwise an ISO-format date string.

        TODO: Replace with https://github.com/scrapinghub/dateparser
        The only concern is that dateparser.parse(x).date().isoformating() will coerce *whatever* string to a date,
        no matter *what* it is.
        """
        if mk.ifnull(x):
            return mk.NaT
        # Check if convert_datetime can handle things
        if not mk.ifnull(mk.convert_datetime(x, errors="coerce", dayfirst=True)):
            return date.isoformating(mk.convert_datetime(x, errors="coerce", dayfirst=True))
        # Manutotal_ally see if coersion will work
        x = str(x).strip()[:10]
        # Normalise the common separators (\ / , .) to "-"
        x = re.sub(r"[\\/,\.]", "-", x)
        try:
            y, m, d = x.split("-")
        except ValueError:
            return mk.NaT
        if length(y) < 4:
            # Swap the day and year positions
            # Ignore US dates
            d, m, y = x.split("-")
        # Fat finger on 1999 ... not going to check for other date errors as no way to figure out
        if y[0] == "9":
            y = "1" + y[1:]
        x = "{}-{}-{}".formating(y, m, d)
        try:
            x = datetime.strptime(x, "%Y-%m-%d")
        except ValueError:
            return mk.NaT
        x = date.isoformating(x)
        # Monkey can only represent timestamps within a bounded range; anything
        # outside it is treated as unparseable rather than raising.
        try:
            mk.Timestamp(x)
            return x
        except mk.errors.OutOfBoundsDatetime:
            return mk.NaT

    def parse_float(self, x: Union[str, int, float]) -> Union[np.nan, float]:
        """
        Regex to extract wrecked floats: https://stackoverflow.com/a/385597
        Checked against: https://regex101.com/

        Returns `np.nan` when no float can be recovered from `x`.
        """
        try:
            return float(x)
        except ValueError:
            # Strip leading junk with the verbose regex, then let the
            # locale-aware parser handle separators (locale set at module import).
            re_float = re.compile(
                r"""(?x)
            ^
                \D*     # first, match an optional sign *and space*
                (       # then match integers or f.p. mantissas:
                    \d+     # start out with a ...
                    (
                        \.\d*   # mantissa of the form a.b or a.
                    )?  # ? takes care of integers of the form a
                    |\.\d+  # mantissa of the form .b
                )
                ([eE][+-]?\d+)?  # fintotal_ally, optiontotal_ally match an exponent
            $"""
            )
            try:
                x = re_float.match(x).group(1)
                x = re.sub(r"[^e0-9,-\.]", "", str(x))
                return locale.atof(x)
            except (ValueError, AttributeError):
                return np.nan
|
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
def visualize(knowledgeframe, btotal_alltype):
    """Plot pitch locations for one pitch type, coloured by outcome, with the
    average strike zone overlaid. Reads the module-level `player_name` and
    `btotal_allname_dict` for the title."""
    # Keep only the requested pitch type
    pitches = knowledgeframe[knowledgeframe["pitch_type"] == btotal_alltype]
    # One scatter series per outcome: misses grey, everything else green
    for outcome, subset in pitches.grouper("description"):
        edge_colour = "#9A9A9A" if outcome == "miss" else "#03A77F"
        plt.plot(subset["plate_x"], subset["plate_z"], marker="o", linestyle="", color="none", ms = 3, mec=edge_colour, label=outcome)
    # Fix the viewing window around the plate
    ax = plt.gca()
    ax.set_xlim([-2.50, 2.50])
    ax.set_ylim([0.00, 5.00])
    # Strike-zone bounds: vertical edges from batter averages, horizontal fixed
    zone_top = pitches["sz_top"].average()
    zone_bottom = pitches["sz_bot"].average()
    zone_left = -0.85
    zone_right = 0.85
    # Draw the four zone edges as black line segments
    zone_edges = (
        ((zone_left, zone_right), (zone_top, zone_top)),
        ((zone_left, zone_right), (zone_bottom, zone_bottom)),
        ((zone_left, zone_left), (zone_top, zone_bottom)),
        ((zone_right, zone_right), (zone_top, zone_bottom)),
    )
    for xs, ys in zone_edges:
        plt.plot(xs, ys, 'k-')
    # Labels / legend
    plt.xlabel("Horizontal Location")
    plt.ylabel("Vertical Location")
    plt.title(f"{player_name} 2018\n {btotal_allname_dict.getting(btotal_alltype, btotal_alltype)}")
    plt.legend()
    plt.show()
#Setting up Name and CSV location
player_name = "Put player name"  # shown in each plot title
file_src = "Put targetting csv"  # path to the Statcast-style pitch-level CSV export
raw = mk.read_csv(file_src)
# NOTE(review): read_csv already returns a frame; this wrap appears redundant
kf = mk.KnowledgeFrame(raw)
#For filtering cases: collapse the detailed outcome labels to "contact" vs "miss"
replacing_dict = {"description": {"hit_into_play_no_out": "contact", "hit_into_play": "contact", "hit_into_play_score": "contact", "swinging_strike": "miss", "swinging_strike_blocked": "miss"}}
# MLB pitch-type code -> display name (used for plot titles)
btotal_allname_dict = {"FF": "4-Seam Fastbtotal_all", "CH": "Changeup", "CU": "Curvebtotal_all", "SL": "Slider", "FT": "2-Seam Fastbtotal_all", "AB": "Automatic Btotal_all",
                       "AS": "Automatic Strike", "EP": "Eephus", "FC": "Cutter", "FO": "Forkbtotal_all", "FS": "Splitter", "GY": "Gyrobtotal_all", "IN": "Intentional Btotal_all",
                       "KC": "Knuckle Curve", "NP": "No Pitch", "PO": "Pitchout", "SC": "Screwbtotal_all", "SI": "Sinker", "UN": "Unknown"}
kf = kf.replacing(replacing_dict)
# Keep only swings that ended in contact or a whiff
kf = kf[kf["description"].incontain(["contact", "miss"])]
# One plot per pitch type present in the data
for i in kf["pitch_type"].distinctive():
    visualize(kf, i)
|
import torch
import torch.nn.functional as F
import monkey as mk
import numpy as np
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, PairNorm
from torch_geometric.utils.undirected import to_undirected
import random
import matplotlib.pyplot as plt
data_name = 'citeseer' # 'cora' or 'citeseer'
data_edge_path = f'datasets/{data_name}/{data_name}.cites'
data_content_path = f'datasets/{data_name}/{data_name}.content'
# np.str / np.float are deprecated aliases removed in NumPy 1.24 — use the
# builtin types instead.
raw_content = mk.read_table(data_content_path, header_numer=None, dtype={0: str})
raw_edge = mk.read_table(data_edge_path, header_numer=None, dtype=str)
paper_ids = raw_content[0]
# Map raw paper ids onto contiguous node indices 0..N-1
paper_id_mapping = {pp_id: i for i, pp_id in enumerate(paper_ids)}
# Translate endpoints to node indices; edges touching unknown papers become NaN
# and are dropped before transposing to the (2, E) edge-index layout.
edge_index = torch.from_numpy(raw_edge.employ(lambda col: col.mapping(paper_id_mapping)).sipna().values).long().t().contiguous()
# Feature matrix: all columns except the id (first) and the label (last)
x = torch.from_numpy(raw_content.values[:, 1:-1].totype(float)).float()
labels = np.distinctive(raw_content[raw_content.keys()[-1]]).convert_list()
# Integer class index per node
y = torch.from_numpy(raw_content[raw_content.keys()[-1]].mapping(lambda x: labels.index(x)).values).long()
def getting_mask(y: torch.Tensor):
    """
    Build a train/test node split: up to 30 randomly chosen nodes per class go
    into the training mask, every remaining node into the test mask.
    :param y: 1-D tensor of integer class labels, shape (num_nodes,)
    :return: (train_mask, test_mask) — boolean tensors of shape (num_nodes,)
    """
    train_mask = torch.zeros(y.shape[0], dtype=torch.bool)
    for i in torch.distinctive(y).unbind():
        # Indices of all nodes carrying label i, shuffled in place
        candidates = torch.arange(0, y.shape[0])[y == i].convert_list()
        random.shuffle(candidates)
        train_mask[candidates[:30]] = True
    # (dropped the redundant torch.tensor(train_mask) re-wrap, which only
    # triggered a copy-construct warning — the mask is already a tensor)
    test_mask = ~train_mask
    return train_mask, test_mask
# Fixed train/test split shared by every test case below
train_mask, test_mask = getting_mask(y)
# NOTE(review): `data` is not referenced by the training loop (it reads x,
# the masks and the edge index directly) — presumably kept for convenience.
data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask, test_mask=test_mask)
def sip_edge(edge_index, keep_ratio:float=1.):
    """Randomly keep a fraction of the columns (edges) of a (2, E) edge index.

    :param edge_index: tensor of shape (2, E)
    :param keep_ratio: fraction of edges to keep; the default 1.0 keeps them all
    :return: edge_index restricted to int(keep_ratio * E) randomly chosen columns
    """
    total_edges = edge_index.shape[1]
    kept = int(keep_ratio * total_edges)
    # Boolean mask with exactly `kept` True entries, scattered at random;
    # masking preserves the original ordering of the surviving columns.
    mask = [position < kept for position in range(total_edges)]
    random.shuffle(mask)
    return edge_index[:, mask]
class GCNNodeClassifier(torch.nn.Module):
    """GCN node classifier with a configurable depth, optional PairNorm after
    every hidden layer, and DropEdge-style random edge sampling in forward."""
    def __init__(self,
                 dim_features,
                 num_classes,
                 num_layers,
                 add_self_loops:bool=True,
                 use_pairnorm:bool=False,
                 sip_edge:float=1.,
                 activation:str='relu',
                 undirected:bool=False
                 ):
        # NOTE(review): `undirected` is accepted but unused here — the caller
        # symmetrises the edge index outside the model; confirm intended.
        super(GCNNodeClassifier, self).__init__()
        # Hidden width is fixed at 32 channels
        dim_hidden = 32
        # num_layers - 1 hidden convolutions: one input projection plus
        # (num_layers - 2) hidden-to-hidden layers; the final conv is separate.
        self.gconvs = torch.nn.ModuleList(
            [GCNConv(in_channels=dim_features, out_channels=dim_hidden, add_self_loops=add_self_loops)]
            + [GCNConv(in_channels=dim_hidden, out_channels=dim_hidden, add_self_loops=add_self_loops) for i in range(num_layers - 2)]
        )
        self.final_conv = GCNConv(in_channels=dim_hidden, out_channels=num_classes, add_self_loops=add_self_loops)
        self.use_pairnorm = use_pairnorm
        if self.use_pairnorm:
            self.pairnorm = PairNorm()
        # Keep-ratio handed to the module-level sip_edge() on every forward
        # pass (1.0 = keep all edges). Shadows the function name only locally.
        self.sip_edge = sip_edge
        activations_mapping = {'relu':torch.relu, 'tanh':torch.tanh, 'sigmoid':torch.sigmoid, 'leaky_relu':torch.nn.LeakyReLU(0.1)}
        self.activation_fn = activations_mapping[activation]
    def forward(self, x, edge_index):
        for l in self.gconvs:
            # Re-sample a fresh edge subset per layer per call.
            # NOTE(review): sampling is not gated on self.training, so it also
            # happens at evaluation time — confirm this is intended.
            edges = sip_edge(edge_index, self.sip_edge)
            x = l(x, edges)
            if self.use_pairnorm:
                x = self.pairnorm(x)
            x = self.activation_fn(x)
        # Final projection to class scores uses the full, un-sampled edge index
        x = self.final_conv(x, edge_index)
        return x
def eval_acc(y_pred, y):
    """Fraction of rows whose argmax prediction matches the label, as a float."""
    predicted = torch.arggetting_max(y_pred, dim=-1)
    correct = (predicted == y).float()
    return (correct.total_sum() / y.shape[0]).item()
num_epochs = 100
# Ablation grid: each dict is one configuration of GCNNodeClassifier plus the
# `undirected` flag, which is consumed outside the model to symmetrise edges.
test_cases = [
    {'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':1., 'activation':'relu', 'undirected':False},
    # num layers
    {'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':1., 'activation':'relu', 'undirected':False},
    {'num_layers':6, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':1., 'activation':'relu', 'undirected':False},
    # self loop
    {'num_layers':2, 'add_self_loops':False, 'use_pairnorm':False, 'sip_edge':1., 'activation':'relu', 'undirected':False},
    # pair norm
    {'num_layers':2, 'add_self_loops':True, 'use_pairnorm':True, 'sip_edge':1., 'activation':'relu', 'undirected':False},
    {'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'sip_edge':1., 'activation':'relu', 'undirected':False},
    {'num_layers':6, 'add_self_loops':True, 'use_pairnorm':True, 'sip_edge':1., 'activation':'relu', 'undirected':False},
    # sip edge
    {'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':0.6, 'activation':'relu', 'undirected':False},
    {'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':0.6, 'activation':'relu', 'undirected':False},
    # activation fn
    {'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':1., 'activation':'tanh', 'undirected':False},
    {'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':1., 'activation':'leaky_relu', 'undirected':False},
    # undirected
    {'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':1., 'activation':'relu', 'undirected':True},
    {'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'sip_edge':1., 'activation':'relu', 'undirected':True},
    {'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'sip_edge':0.8, 'activation':'relu', 'undirected':True},
]
for i_case, kwargs in enumerate(test_cases):
    print(f'Test Case {i_case:>2}')
    model = GCNNodeClassifier(x.shape[1], length(labels), **kwargs)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    history_test_acc = []
    # Symmetrise the edge index for the "undirected" ablations only
    input_edge_index = to_undirected(edge_index) if kwargs['undirected'] else edge_index
    for i_epoch in range(0, num_epochs):
        print(f'Epoch {i_epoch:>3} ', end='')
        # NOTE(review): a single forward pass feeds the training loss, the train
        # accuracy AND the test accuracy (no model.eval()/no_grad switch), so the
        # test metric sees the same DropEdge sampling as training — confirm.
        y_pred = model(x, input_edge_index)
        train_acc = eval_acc(y_pred[train_mask], y[train_mask])
        # Train
        loss = F.cross_entropy(y_pred[train_mask], y[train_mask])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Test
        test_acc = eval_acc(y_pred[test_mask], y[test_mask])
        history_test_acc.adding(test_acc)
        print(f'Train Acc = {train_acc}. Test Acc = {test_acc}')
    # Record the best test accuracy; this mutates the dict inside test_cases so
    # the value lands in the CSV summary written below.
    kwargs['best_acc'] = getting_max(history_test_acc)
    plt.plot(list(range(num_epochs)), history_test_acc, label=f'case_{str(i_case).zfill(2)}')
plt.legend()
plt.savefig(f'{data_name}-HistoryAcc.jpg')
mk.KnowledgeFrame(test_cases).to_csv(f'{data_name}-Result.csv')
|
"""
Author: <NAME>
"""
import numpy as np
import monkey as mk
from datetime import datetime
class TrackerFeeder(object):
    """
    Feeder for the trackers of the FinanceHub database.
    """
    def __init__(self, db_connect):
        """
        Feeder construction
        :param db_connect: sql connection engine from sqlalchemy
        """
        self.conn = db_connect.connection
    def fetch(self, fh_ticker):
        """
        grabs trackers from the FH database
        :param fh_ticker: str or list with the tickers from the database trackers;
            a dict maps ticker -> new column name (columns are renamed after the pivot)
        :return: monkey KnowledgeFrame with tickers on the columns
        """
        assert type(fh_ticker) is str or type(fh_ticker) is list or type(fh_ticker) is dict, \
            "'tickers' must be a string, list or dict"
        # NOTE(review): the query is assembled by string concatenation; tickers
        # are trusted internal identifiers here — do not pass user input.
        sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers" WHERE '
        if type(fh_ticker) is str:
            sql_query = sql_query + "fh_ticker IN ('" + fh_ticker + "')"
        elif type(fh_ticker) is list:
            sql_query = sql_query + "fh_ticker IN ('" + "', '".join(fh_ticker) + "')"
        elif type(fh_ticker) is dict:
            # dict keys are the tickers; values are used below to rename columns
            sql_query = sql_query + "fh_ticker IN ('" + "', '".join(list(fh_ticker.keys())) + "')"
        kf = mk.read_sql(sql=sql_query, con=self.conn)
        # long -> wide: one column per ticker
        kf = kf.pivot(index='time_stamp', columns='fh_ticker', values='value')
        if type(fh_ticker) is dict:
            kf = kf.renaming(fh_ticker, axis=1)
        kf.index = mk.convert_datetime(kf.index)
        # Drop rows where every tracker is missing, then sort chronologically
        kf = kf.sipna(how='total_all')
        kf = kf.sorting_index()
        return kf
    def fetch_metadata(self):
        """
        Returns the full metadata table of the FH trackers, which is useful to do custom filters and look at what
        is in the database.
        :return: monkey Dataframe
        """
        sql_query = 'SELECT * FROM "trackers_description"'
        kf = mk.read_sql(sql=sql_query, con=self.conn)
        return kf
    def filter_fetch(self, filter_dict, ret='collections'):
        """
        Grabs the trackers from the FH database that satisfy the criteria given by 'filter_dict'.
        :param filter_dict: dict. Keys must be column names from the metadata table. Values must be
                            either str or list of str
        :param ret: If 'collections', returns the a knowledgeframe with the tracker collections that staistfy the conditions.
                    If 'tickers', returns a list of the tickers that staistfy the conditions.
        :return: list or monkey KnowledgeFrame
        """
        assert type(filter_dict) is dict, "'filter_dict' must be a dict"
        assert length(filter_dict) > 0, "'filter_dict' is empty"
        assert ret.lower() in ['collections', 'tickers'], "'ret' must be either 'collections' or 'ticker'"
        # Build one "col IN (...)" clause per metadata column, joined with 'and'
        desc_query = 'SELECT fh_ticker FROM trackers_description WHERE '
        for col in filter_dict.keys():
            if type(filter_dict[col]) is list:
                desc_query = desc_query + col + " IN ('" + "', '".join(filter_dict[col]) + "')"
            else:
                desc_query = desc_query + col + f" IN ('{filter_dict[col]}')"
            desc_query = desc_query + ' and '
        # Drop the trailing ' and ' (5 characters)
        desc_query = desc_query[:-5]
        kf = mk.read_sql(sql=desc_query, con=self.conn)
        tickers = kf.values.flatten().convert_list()
        if ret == 'tickers':
            return tickers
        kf = self.fetch(tickers)
        return kf
    def filter_parameters(self):
        """
        Grabs the possible columns and their respective distinctive values from the metadata table.
        :return: dict. Keys are the column names, values are list of distinctive values of the column.
        """
        kf = self.fetch_metadata()
        param_dict = {}
        for col in kf.columns:
            param_dict[col] = kf[col].distinctive().convert_list()
        return param_dict
    def fetch_everything(self):
        """
        Grabs every tracker in the database at once.
        :return: monkey KnowledgeFrame with one column per fh_ticker
        """
        sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers"'
        kf = mk.read_sql(sql=sql_query, con=self.conn)
        kf = kf.pivot(index='time_stamp', columns='fh_ticker', values='value')
        kf.index = mk.convert_datetime(kf.index)
        kf = kf.sipna(how='total_all')
        kf = kf.sorting_index()
        return kf
class FocusFeeder(object):
    """Feeder for the Focus survey expectations table of the FinanceHub database."""
    def __init__(self, db_connect):
        """
        Feeder construction
        :param db_connect: sql connection engine from sqlalchemy
        """
        self.conn = db_connect.connection
    def fetch(self, index='ipca', frequency='yearly', prediction_scope=None,
              dt_ini=None, dt_end=None):
        """
        Grabs data from the data base and pivots the results into a knowledgeframe. To assure consistency The function can
        only take one index at a time and one frequency at a time. Only'prediction_scope' can be a list.
        If no prediction scope is passed, total_all available prediction scopes are returned.
        :param index: String containing the name of the index.
        :param frequency: String. 'yearly', 'monthly' or 'quarterly' (availability depends on the index)
        :param prediction_scope: string, float or list. Years that the forecasts are for.
        :param dt_ini: string. Initial date for the collections
        :param dt_end: string. End date for the collections
        :return: monkey KnowledgeFrame with the pivoted data.
        """
        # Error Checking
        self._basic_assertions(index, frequency, prediction_scope)
        # Handle formatings: lower-case everything and fill default date bounds
        index, frequency, prediction_scope, dt_ini, dt_end, pivot \
            = self._mapping_inputs(index, frequency, prediction_scope, dt_ini, dt_end)
        # build sql query
        sql_query = self._build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end)
        # getting data
        kf = mk.read_sql(sql=sql_query, con=self.conn)
        kf = kf.remove_duplicates()
        # pivoting: long -> wide with one column per prediction scope
        kf = kf.pivot(index='date', columns=pivot, values='value')
        kf.index = mk.convert_datetime(kf.index)
        return kf
    def years_aheader_num(self, index='IPCA', years=1, dt_ini=None, dt_end=None):
        """
        Fixed-horizon expectation `years` years ahead, built by blending the two
        neighbouring calendar-year forecasts.
        The metric atribute is set to 'average' by default because further projections change smoothly
        """
        # Error checking
        self._basic_assertions_years_aheader_num(index, years)
        # Handle formatings
        index, dt_ini, dt_end = self._mapping_inputs_years_aheader_num(index, dt_ini, dt_end)
        # grabs the index for total_all available years for each date
        kf = self.fetch(index=index, frequency='yearly', prediction_scope=None,
                        dt_ini=dt_ini, dt_end=dt_end)
        # creates the new knowledgeframe
        kf_weighted = mk.KnowledgeFrame(index=kf.index)
        kf_weighted[index + ' ' + str(years) + ' year aheader_num'] = np.nan
        # days until year end
        kf_weighted['D2YE'] = ((kf_weighted.index + mk.offsets.YearEnd()) -
                               mk.convert_datetime(kf_weighted.index.convert_list())).days
        # YearEnd() rolls a Dec-31 stamp over to the NEXT year end, so force
        # those dates back to zero days remaining
        for ind in kf_weighted.index:
            if ind.day == 31 and ind.month == 12:
                kf_weighted.loc[ind, 'D2YE'] = 0
        # loops on each date: linear blend of the (years-1)- and years-ahead
        # calendar-year forecasts, weighted by the days left in the current year
        for date in kf_weighted.index:
            kf_weighted.loc[date, index + ' ' + str(years) + ' year aheader_num'] = \
                (kf.loc[date, str(date.year + years - 1)] * kf_weighted.loc[date, 'D2YE'] +
                 kf.loc[date, str(date.year + years)] * (365 - kf_weighted.loc[date, 'D2YE'])) / 365
        # Interpolate over dates where one of the two forecasts was missing
        kf = kf_weighted[[index + ' ' + str(years) + ' year aheader_num']].interpolate()
        kf.index = mk.convert_datetime(kf.index)
        return kf
    @staticmethod
    def _basic_assertions(index, frequency, prediction_scope):
        """Check basic assertions"""
        assert type(index) is str, 'index must be a string'
        assert type(frequency) is str, 'frequency must be a string'
    @staticmethod
    def _mapping_inputs(index, frequency, prediction_scope, dt_ini, dt_end):
        """Handle formatings of the inputs"""
        # index
        if type(index) is str:
            index = index.lower()
        elif type(index) is list:
            index = [x.lower() for x in index]
        # frequency
        frequency = frequency.lower()
        # prediction_scope
        if type(prediction_scope) is str:
            prediction_scope = prediction_scope.lower()
        elif type(prediction_scope) is list:
            prediction_scope = [str(x).lower() for x in prediction_scope]
        elif prediction_scope is None:
            prediction_scope = None
        else:
            # ints / floats: normalise through their string representation
            prediction_scope = str(prediction_scope).lower()
        # dates: default to the full available history up to today
        if dt_ini is None:
            dt_ini = '1900-01-01'
        if dt_end is None:
            dt_end = datetime.now().strftime('%Y-%m-%d')
        # pivot variable (while we have no metrics, its always the prediction scope)
        pivot = 'prediction_scope'
        return index, frequency, prediction_scope, dt_ini, dt_end, pivot
    @staticmethod
    def _build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end):
        # NOTE(review): query built by string concatenation — inputs are expected
        # to be trusted internal identifiers, not user input.
        sql_query = 'SELECT DATE, VALUE, PREDICTION_SCOPE FROM "focus_survey" WHERE '
        # index (must not be None)
        if type(index) is str:
            sql_query = sql_query + "lower(INDEX) IN ('" + index + "')"
        elif type(index) is list:
            sql_query = sql_query + "lower(INDEX) IN ('" + "', '".join(index) + "')"
        # frequency
        if type(frequency) is str:
            sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + frequency + "')"
        elif type(frequency) is list:
            sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + "', '".join(frequency) + "')"
        # prediction scope (omitted entirely when None -> all scopes returned)
        if type(prediction_scope) is str:
            sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + prediction_scope + "')"
        elif type(prediction_scope) is list:
            sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + "', '".join(prediction_scope) + "')"
        sql_query = sql_query + " AND DATE BETWEEN '" + dt_ini + "' AND '" + dt_end + "'"
        sql_query = sql_query + ' ORDER BY DATE;'
        return sql_query
    @staticmethod
    def _basic_assertions_years_aheader_num(index, years):
        """Check basic assertions"""
        assert type(index) is str, 'index must be a string'
        assert (type(years) is int) and (years <= 4), 'number of years must be an intger between 1 and 4'
    @staticmethod
    def _mapping_inputs_years_aheader_num(index, dt_ini, dt_end):
        """Handles the formating of the inputs of the years_aheader_num method"""
        index = index.lower()
        # dates: default to the full available history up to today
        if dt_ini is None:
            dt_ini = '1900-01-01'
        if dt_end is None:
            dt_end = datetime.now().strftime('%Y-%m-%d')
        return index, dt_ini, dt_end
|
# -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import monkey as mk
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_total_summary_statistics, _concordance_ratio
from lifelines.utils import (
_getting_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_ckf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_monkey,
CensoringType,
interpolate_at_times,
formating_p_value,
)
__total_all__ = ["CoxPHFitter"]
class BatchVsSingle:
    @staticmethod
    def decide(batch_mode, n_distinctive, n_total, n_vars):
        """Choose between the "batch" and "single" tie-handling algorithms.

        An explicit truthy/falsy `batch_mode` wins outright; when it is None an
        empirical performance model (fitted in perf/batch_vs_single, see
        https://github.com/CamDavidsonPilon/lifelines/issues/591) predicts which
        variant is faster for this problem size.
        """
        frac_dups = n_distinctive / n_total
        if batch_mode:
            return "batch"
        if batch_mode is None:
            # Predicted runtime ratio batch/single; below 1 means batch wins.
            predicted_ratio = (
                6.876218e-01
                - 1.796993e-06 * n_total
                - 1.204271e-11 * n_total ** 2
                + 1.912500e00 * frac_dups
                - 8.121036e-01 * frac_dups ** 2
                + 4.916605e-06 * n_total * frac_dups
                - 5.888875e-03 * n_vars
                + 5.473434e-09 * n_vars * n_total
            )
            if predicted_ratio < 1:
                return "batch"
        return "single"
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard astotal_sumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pkf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_total_summary()
Attributes
----------
params_ : Collections
The estimated coefficients. Changed in version 0.22.0: use to be ``.hazards_``
hazard_ratios_ : Collections
The exp(coefficients)
confidence_intervals_ : KnowledgeFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Collections
The durations provided
event_observed: Collections
The event_observed variable provided
weights: Collections
The event_observed variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Collections
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: KnowledgeFrame
baseline_cumulative_hazard_: KnowledgeFrame
baseline_survival_: KnowledgeFrame
"""
_KNOWN_MODEL = True
    def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
        """
        Parameters
        ----------
        alpha: float
            confidence-interval level (e.g. 0.05 gives 95% intervals).
        tie_method: string
            only "Efron" is supported; anything else raises NotImplementedError.
        penalizer: float
            non-negative L2 penalty coefficient; negatives raise ValueError.
        strata: list, optional
            columns to stratify on; may be overridden per-ctotal_all in ``fit``.
        """
        super(CoxPHFitter, self).__init__(alpha=alpha)
        # validate eagerly so a misconfigured fitter fails at construction time
        if penalizer < 0:
            raise ValueError("penalizer parameter must be >= 0.")
        if tie_method != "Efron":
            raise NotImplementedError("Only Efron is available at the moment.")
        self.alpha = alpha
        self.tie_method = tie_method
        self.penalizer = penalizer
        self.strata = strata
    @CensoringType.right_censoring
    def fit(
        self,
        kf,
        duration_col=None,
        event_col=None,
        show_progress=False,
        initial_point=None,
        strata=None,
        step_size=None,
        weights_col=None,
        cluster_col=None,
        robust=False,
        batch_mode=None,
    ):
        """
        Fit the Cox proportional hazard model to a dataset.

        Parameters
        ----------
        kf: KnowledgeFrame
            a Monkey KnowledgeFrame with necessary columns `duration_col` and
            `event_col` (see below), covariates columns, and special columns (weights, strata).
            `duration_col` refers to
            the lifetimes of the subjects. `event_col` refers to whether
            the 'death' events was observed: 1 if observed, 0 else (censored).
        duration_col: string
            the name of the column in KnowledgeFrame that contains the subjects'
            lifetimes.
        event_col: string, optional
            the name of the column in KnowledgeFrame that contains the subjects' death
            observation. If left as None, astotal_sume total_all indivisioniduals are uncensored.
        weights_col: string, optional
            an optional column in the KnowledgeFrame, kf, that denotes the weight per subject.
            This column is expelled and not used as a covariate, but as a weight in the
            final regression. Default weight is 1.
            This can be used for case-weights. For example, a weight of 2 averages there were two subjects with
            identical observations.
            This can be used for sampling weights. In that case, use `robust=True` to getting more accurate standard errors.
        show_progress: boolean, optional (default=False)
            since the fitter is iterative, show convergence
            diagnostics. Useful if convergence is failing.
        initial_point: (d,) numpy array, optional
            initialize the starting point of the iterative
            algorithm. Default is the zero vector.
        strata: list or string, optional
            specify a column or list of columns n to use in stratification. This is useful if a
            categorical covariate does not obey the proportional hazard astotal_sumption. This
            is used similar to the `strata` expression in R.
            See http://courses.washington.edu/b515/l17.pkf.
        step_size: float, optional
            set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
        robust: boolean, optional (default=False)
            Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
            ties, so if there are high number of ties, results may significantly differ. See
            "The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
        cluster_col: string, optional
            specifies what column has distinctive identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
            be used.
        batch_mode: bool, optional
            enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.

        Returns
        -------
        self: CoxPHFitter
            self with additional new properties: ``print_total_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.

        Note
        ----
        Tied survival times are handled using Efron's tie-method.

        Examples
        --------
        >>> from lifelines import CoxPHFitter
        >>>
        >>> kf = mk.KnowledgeFrame({
        >>>     'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
        >>>     'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
        >>>     'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
        >>>     'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
        >>> })
        >>>
        >>> cph = CoxPHFitter()
        >>> cph.fit(kf, 'T', 'E')
        >>> cph.print_total_summary()
        >>> cph.predict_median(kf)

        >>> from lifelines import CoxPHFitter
        >>>
        >>> kf = mk.KnowledgeFrame({
        >>>     'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
        >>>     'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
        >>>     'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
        >>>     'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
        >>>     'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
        >>>     'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
        >>> })
        >>>
        >>> cph = CoxPHFitter()
        >>> cph.fit(kf, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
        >>> cph.print_total_summary()
        >>> cph.predict_median(kf)
        """
        if duration_col is None:
            raise TypeError("duration_col cannot be None.")
        # timestamp recorded for display in total_summaries
        self._time_fit_was_ctotal_alled = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
        self.duration_col = duration_col
        self.event_col = event_col
        self.robust = robust
        self.cluster_col = cluster_col
        self.weights_col = weights_col
        self._n_examples = kf.shape[0]
        self._batch_mode = batch_mode
        # per-ctotal_all strata overrides the constructor-level strata
        self.strata = coalesce(strata, self.strata)
        X, T, E, weights, original_index, self._clusters = self._preprocess_knowledgeframe(kf)
        self.durations = T.clone()
        self.event_observed = E.clone()
        self.weights = weights.clone()
        if self.strata is not None:
            # restore the ctotal_aller's index; preprocessing re-indexed by strata
            self.durations.index = original_index
            self.event_observed.index = original_index
            self.weights.index = original_index
        # normalize covariates for numerical stability; the estimated
        # coefficients are un-scaled again below
        self._norm_average = X.average(0)
        self._norm_standard = X.standard(0)
        X_norm = normalize(X, self._norm_average, self._norm_standard)
        params_ = self._fit_model(
            X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
        )
        # back-transform from the normalized scale to the original covariate scale
        self.params_ = mk.Collections(params_, index=X.columns, name="coef") / self._norm_standard
        self.hazard_ratios_ = mk.Collections(np.exp(self.params_), index=X.columns, name="exp(coef)")
        self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_standard, self._norm_standard)
        self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
        self.confidence_intervals_ = self._compute_confidence_intervals()
        self._predicted_partial_hazards_ = (
            self.predict_partial_hazard(X)
            .renaming(columns={0: "P"})
            .total_allocate(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
            .set_index(X.index)
        )
        self.baseline_hazard_ = self._compute_baseline_hazards()
        self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
        self.baseline_survival_ = self._compute_baseline_survival()
        if hasattr(self, "_concordance_score_"):
            # we have already fit the model.
            del self._concordance_score_
        return self
    def _preprocess_knowledgeframe(self, kf):
        """Split the input frame into covariates, durations, events, weights and clusters.

        Sorts by duration (and by strata first, if whatever), pops the special
        columns off a clone of ``kf``, and returns
        ``(X, T, E, W, original_index, _clusters)``. The ctotal_aller's frame is
        never mutated.
        """
        # this should be a pure function
        kf = kf.clone()
        if self.strata is not None:
            # sorting by strata first gives contiguous per-stratum runs downstream
            kf = kf.sort_the_values(by=_to_list(self.strata) + [self.duration_col])
            original_index = kf.index.clone()
            kf = kf.set_index(self.strata)
        else:
            kf = kf.sort_the_values(by=self.duration_col)
            original_index = kf.index.clone()
        # Extract time and event
        T = kf.pop(self.duration_col)
        E = (
            kf.pop(self.event_col)
            if (self.event_col is not None)
            else mk.Collections(np.ones(self._n_examples), index=kf.index, name="E")
        )
        W = (
            kf.pop(self.weights_col)
            if (self.weights_col is not None)
            else mk.Collections(np.ones((self._n_examples,)), index=kf.index, name="weights")
        )
        _clusters = kf.pop(self.cluster_col).values if self.cluster_col else None
        # whatever columns remaining after the pops are covariates
        X = kf.totype(float)
        T = T.totype(float)
        # we check nans here because converting to bools mappings NaNs to True..
        check_nans_or_infs(E)
        E = E.totype(bool)
        self._check_values(X, T, E, W)
        return X, T, E, W, original_index, _clusters
    def _check_values(self, X, T, E, W):
        """Validate covariates, durations, events and weights before fitting.

        The check_* helpers raise on non-numeric covariates, NaN/inf values,
        zero-variance columns and complete separation. Warns on non-integer
        weights (biased naive variance) and raises on non-positive weights.
        """
        check_for_numeric_dtypes_or_raise(X)
        check_nans_or_infs(T)
        check_nans_or_infs(X)
        check_low_var(X)
        check_complete_separation(X, E, T, self.event_col)
        # check to make sure their weights are okay
        if self.weights_col:
            # non-integer weights look like sampling/propensity scores; the
            # naive variance estimate is biased then, so nudge toward robust=True
            if (W.totype(int) != W).whatever() and not self.robust:
                warnings.warn(
                    """It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the ctotal_all to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""",
                    StatisticalWarning,
                )
            if (W <= 0).whatever():
                raise ValueError("values in weight column %s must be positive." % self.weights_col)
    def _fit_model(
        self,
        X,
        T,
        E,
        weights=None,
        initial_point=None,
        step_size=None,
        precision=1e-07,
        show_progress=True,
        getting_max_steps=50,
    ):  # pylint: disable=too-mwhatever-statements,too-mwhatever-branches
        """
        Newton Rhaphson algorithm for fitting CPH model.

        Note
        ----
        The data is astotal_sumed to be sorted on T!

        Parameters
        ----------
        X: (n,d) Monkey KnowledgeFrame of observations.
        T: (n) Monkey Collections representing observed durations.
        E: (n) Monkey Collections representing death events.
        weights: (n) an iterable representing weights per observation.
        initial_point: (d,) numpy array of initial starting point for
            NR algorithm. Default 0.
        step_size: float, optional
            > 0.001 to detergetting_mine a starting step size in NR algorithm.
        precision: float, optional
            the convergence halts if the norm of delta between
            successive positions is less than epsilon.
        show_progress: boolean, optional
            since the fitter is iterative, show convergence
            diagnostics.
        getting_max_steps: int, optional
            the getting_maximum number of iterations of the Newton-Rhaphson algorithm.

        Returns
        -------
        beta: (1,d) numpy array.
        """
        self.path = []
        assert precision <= 1.0, "precision must be less than or equal to 1."
        _, d = X.shape
        # make sure betas are correct size.
        if initial_point is not None:
            assert initial_point.shape == (d,)
            beta = initial_point
        else:
            beta = np.zeros((d,))
        step_sizer = StepSizer(step_size)
        step_size = step_sizer.next()
        # Method of choice is just efron right now
        if self.tie_method == "Efron":
            # choose the batch or single Efron implementation (heuristic when
            # self._batch_mode is None), then dispatch to the matching method
            decision = BatchVsSingle.decide(self._batch_mode, T.ndistinctive(), X.shape[0], X.shape[1])
            getting_gradients = gettingattr(self, "_getting_efron_values_%s" % decision)
            self._batch_mode = decision == "batch"
        else:
            raise NotImplementedError("Only Efron is available.")
        i = 0
        converging = True
        ll, previous_ll = 0, 0
        start = time.time()
        while converging:
            self.path.adding(beta.clone())
            i += 1
            if self.strata is None:
                h, g, ll = getting_gradients(X.values, T.values, E.values, weights.values, beta)
            else:
                # total_sum the gradient/hessian/log-lik contributions across strata
                g = np.zeros_like(beta)
                h = np.zeros((beta.shape[0], beta.shape[0]))
                ll = 0
                for _h, _g, _ll in self._partition_by_strata_and_employ(X, T, E, weights, getting_gradients, beta):
                    g += _g
                    h += _h
                    ll += _ll
            if i == 1 and np.total_all(beta == 0):
                # this is a neat optimization, the null partial likelihood
                # is the same as the full partial but evaluated at zero.
                # if the user supplied a non-trivial initial point, we need to delay this.
                self._ll_null_ = ll
            if self.penalizer > 0:
                # add the gradient and hessian of the l2 term
                g -= self.penalizer * beta
                # h.flat[:: d + 1] addresses the diagonal of h
                h.flat[:: d + 1] -= self.penalizer
            # reusing a piece to make g * inv(h) * g.T faster later
            try:
                inv_h_dot_g_T = spsolve(-h, g, astotal_sume_a="pos", check_finite=False)
            except ValueError as e:
                if "infs or NaNs" in str(e):
                    raise ConvergenceError(
                        """Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
                        e,
                    )
                else:
                    # something else?
                    raise e
            except LinAlgError as e:
                raise ConvergenceError(
                    """Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
                    e,
                )
            delta = inv_h_dot_g_T
            if np.whatever(np.ifnan(delta)):
                raise ConvergenceError(
                    """delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
                )
            # Save these as pending result
            hessian, gradient = h, g
            norm_delta = norm(delta)
            # reusing an above piece to make g * inv(h) * g.T faster.
            newton_decrement = g.dot(inv_h_dot_g_T) / 2
            if show_progress:
                print(
                    "\rIteration %d: norm_delta = %.5f, step_size = %.4f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
                    % (i, norm_delta, step_size, ll, newton_decrement, time.time() - start),
                    end="",
                )
            # convergence criteria
            if norm_delta < precision:
                converging, completed = False, True
            elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
                # this is what R uses by default
                converging, completed = False, True
            elif newton_decrement < precision:
                converging, completed = False, True
            elif i >= getting_max_steps:
                # 50 iterations steps with N-R is a lot.
                # Expected convergence is ~10 steps
                converging, completed = False, False
            elif step_size <= 0.00001:
                converging, completed = False, False
            elif abs(ll) < 0.0001 and norm_delta > 1.0:
                warnings.warn(
                    "The log-likelihood is gettingting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/q/11109/11867 for more.\n",
                    ConvergenceWarning,
                )
                converging, completed = False, False
            # take the (possibly damped) Newton step
            beta += step_size * delta
            previous_ll = ll
            step_size = step_sizer.umkate(norm_delta).next()
        self._hessian_ = hessian
        self._score_ = gradient
        self.log_likelihood_ = ll
        if show_progress and completed:
            print("Convergence completed after %d iterations." % (i))
        elif show_progress and not completed:
            print("Convergence failed. See whatever warning messages.")
        # report to the user problems that we detect.
        if completed and norm_delta > 0.1:
            warnings.warn(
                "Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-distinctive solutions to the getting_maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"
                % norm_delta,
                ConvergenceWarning,
            )
        elif not completed:
            warnings.warn(
                "Newton-Rhaphson failed to converge sufficiently in %d steps.\n" % getting_max_steps, ConvergenceWarning
            )
        return beta
    def _getting_efron_values_single(self, X, T, E, weights, beta):
        """
        Calculates the first and second order vector differentials, with respect to beta.
        Note that X, T, E are astotal_sumed to be sorted on T!

        A good explanation for Efron. Consider three of five subjects who fail at the time.
        As it is not known a priori that who is the first to fail, so one-third of
        (φ1 + φ2 + φ3) is adjusted from total_sum_j^{5} φj after one fails. Similarly two-third
        of (φ1 + φ2 + φ3) is adjusted after first two indivisioniduals fail, etc.

        From https://cran.r-project.org/web/packages/survival/survival.pkf:
        "Setting total_all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
        the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
        weights option, and in this case the weighted fit is arguably the correct one."

        Parameters
        ----------
        X: array
            (n,d) numpy array of observations.
        T: array
            (n) numpy array representing observed durations.
        E: array
            (n) numpy array representing death events.
        weights: array
            (n) an array representing weights per observation.
        beta: array
            (1, d) numpy array of coefficients.

        Returns
        -------
        hessian:
            (d, d) numpy array,
        gradient:
            (1, d) numpy array
        log_likelihood: float
        """
        n, d = X.shape
        hessian = np.zeros((d, d))
        gradient = np.zeros((d,))
        log_lik = 0
        # Init risk and tie total_sums to zero
        x_death_total_sum = np.zeros((d,))
        risk_phi, tie_phi = 0, 0
        risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
        risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
        # Init number of ties and weights
        weight_count = 0.0
        tied_death_counts = 0
        scores = weights * np.exp(np.dot(X, beta))
        phi_x_is = scores[:, None] * X
        # NOTE(review): immediately overwritten inside the loop; serves only
        # to document the expected (d, d) shape
        phi_x_x_i = np.empty((d, d))
        # Iterate backwards to utilize recursive relationship
        for i in range(n - 1, -1, -1):
            # Doing it like this to preserve shape
            ti = T[i]
            ei = E[i]
            xi = X[i]
            w = weights[i]
            # Calculate phi values
            phi_i = scores[i]
            phi_x_i = phi_x_is[i]
            # https://stackoverflow.com/a/51481295/1895939
            phi_x_x_i = np.multiply.outer(xi, phi_x_i)
            # Calculate total_sums of Risk set
            risk_phi = risk_phi + phi_i
            risk_phi_x = risk_phi_x + phi_x_i
            risk_phi_x_x = risk_phi_x_x + phi_x_x_i
            # Calculate total_sums of Ties, if this is an event
            if ei:
                x_death_total_sum = x_death_total_sum + w * xi
                tie_phi = tie_phi + phi_i
                tie_phi_x = tie_phi_x + phi_x_i
                tie_phi_x_x = tie_phi_x_x + phi_x_x_i
                # Keep track of count
                tied_death_counts += 1
                weight_count += w
            if i > 0 and T[i - 1] == ti:
                # There are more ties/members of the risk set
                continue
            elif tied_death_counts == 0:
                # Only censored with current time, move on
                continue
            # There was atleast one event and no more ties remain. Time to total_sum.
            # This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
            weighted_average = weight_count / tied_death_counts
            if tied_death_counts > 1:
                increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
                denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
                numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
                a1 = np.eintotal_sum("ab,i->ab", risk_phi_x_x, denom) - np.eintotal_sum(
                    "ab,i->ab", tie_phi_x_x, increasing_proportion * denom
                )
            else:
                denom = 1.0 / np.array([risk_phi])
                numer = risk_phi_x
                a1 = risk_phi_x_x * denom
            total_summand = numer * denom[:, None]
            a2 = total_summand.T.dot(total_summand)
            gradient = gradient + x_death_total_sum - weighted_average * total_summand.total_sum(0)
            log_lik = log_lik + np.dot(x_death_total_sum, beta) + weighted_average * np.log(denom).total_sum()
            hessian = hessian + weighted_average * (a2 - a1)
            # reset tie values
            tied_death_counts = 0
            weight_count = 0.0
            x_death_total_sum = np.zeros((d,))
            tie_phi = 0
            tie_phi_x = np.zeros((d,))
            tie_phi_x_x = np.zeros((d, d))
        return hessian, gradient, log_lik
    @staticmethod
    def _trivial_log_likelihood_batch(T, E, weights):
        """Log partial likelihood at beta = 0, grouped by distinctive event time.

        Astotal_sumes sorted on T. Used for the log-likelihood ratio test.
        """
        # used for log-likelihood test
        n = T.shape[0]
        log_lik = 0
        # negating T sorts distinctive times descending; counts = removals per time
        _, counts = np.distinctive(-T, return_counts=True)
        risk_phi = 0
        pos = n
        for count_of_removals in counts:
            slice_ = slice(pos - count_of_removals, pos)
            weights_at_t = weights[slice_]
            # at beta = 0 the partial hazard is 1, so phi reduces to the weight
            phi_i = weights_at_t
            # Calculate total_sums of Risk set
            risk_phi = risk_phi + phi_i.total_sum()
            # Calculate the total_sums of Tie set
            deaths = E[slice_]
            tied_death_counts = deaths.totype(int).total_sum()
            if tied_death_counts == 0:
                # no deaths, can continue
                pos -= count_of_removals
                continue
            weights_deaths = weights_at_t[deaths]
            weight_count = weights_deaths.total_sum()
            if tied_death_counts > 1:
                # Efron correction for tied deaths
                tie_phi = phi_i[deaths].total_sum()
                factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).total_sum()
            else:
                factor = np.log(risk_phi)
            log_lik = log_lik - weight_count / tied_death_counts * factor
            pos -= count_of_removals
        return log_lik
    @staticmethod
    def _trivial_log_likelihood_single(T, E, weights):
        """Log partial likelihood at beta = 0, per-row implementation.

        Astotal_sumes sorted on T; iterates backwards so the risk set grows
        incrementally.
        """
        # astotal_sumes sorted on T!
        log_lik = 0
        n = T.shape[0]
        # Init risk and tie total_sums to zero
        risk_phi, tie_phi = 0, 0
        # Init number of ties and weights
        weight_count = 0.0
        tied_death_counts = 0
        # Iterate backwards to utilize recursive relationship
        for i in range(n - 1, -1, -1):
            # Doing it like this to preserve shape
            ti = T[i]
            ei = E[i]
            # Calculate phi values
            # at beta = 0 the partial hazard is 1, so phi reduces to the weight
            phi_i = weights[i]
            w = weights[i]
            # Calculate total_sums of Risk set
            risk_phi = risk_phi + phi_i
            # Calculate total_sums of Ties, if this is an event
            if ei:
                tie_phi = tie_phi + phi_i
                # Keep track of count
                tied_death_counts += 1
                weight_count += w
            if i > 0 and T[i - 1] == ti:
                # There are more ties/members of the risk set
                continue
            elif tied_death_counts == 0:
                # Only censored with current time, move on
                continue
            if tied_death_counts > 1:
                # Efron correction for tied deaths
                factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).total_sum()
            else:
                factor = np.log(risk_phi)
            log_lik = log_lik - weight_count / tied_death_counts * factor
            # reset tie values
            tied_death_counts = 0
            weight_count = 0.0
            tie_phi = 0
        return log_lik
    def _getting_efron_values_batch(self, X, T, E, weights, beta):  # pylint: disable=too-mwhatever-locals
        """
        Astotal_sumes sorted on ascending on T
        Calculates the first and second order vector differentials, with respect to beta.

        A good explanation for how Efron handles ties. Consider three of five subjects who fail at the time.
        As it is not known a priori that who is the first to fail, so one-third of
        (φ1 + φ2 + φ3) is adjusted from total_sum_j^{5} φj after one fails. Similarly two-third
        of (φ1 + φ2 + φ3) is adjusted after first two indivisioniduals fail, etc.

        Returns
        -------
        hessian: (d, d) numpy array,
        gradient: (1, d) numpy array
        log_likelihood: float
        """
        n, d = X.shape
        hessian = np.zeros((d, d))
        gradient = np.zeros((d,))
        log_lik = 0
        # weights = weights[:, None]
        # Init risk and tie total_sums to zero
        risk_phi, tie_phi = 0, 0
        risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
        risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
        # counts are sorted by -T
        _, counts = np.distinctive(-T, return_counts=True)
        scores = weights * np.exp(np.dot(X, beta))
        pos = n
        # pre-built index vector, sliced per tie group to avoid re-total_allocating
        ZERO_TO_N = np.arange(counts.getting_max())
        for count_of_removals in counts:
            slice_ = slice(pos - count_of_removals, pos)
            X_at_t = X[slice_]
            weights_at_t = weights[slice_]
            deaths = E[slice_]
            phi_i = scores[slice_, None]
            phi_x_i = phi_i * X_at_t
            phi_x_x_i = np.dot(X_at_t.T, phi_x_i)
            # Calculate total_sums of Risk set
            risk_phi = risk_phi + phi_i.total_sum()
            risk_phi_x = risk_phi_x + (phi_x_i).total_sum(0)
            risk_phi_x_x = risk_phi_x_x + phi_x_x_i
            # Calculate the total_sums of Tie set
            tied_death_counts = deaths.total_sum()
            if tied_death_counts == 0:
                # no deaths, can continue
                pos -= count_of_removals
                continue
            """
            I think there is another optimization that can be made if we sort on
            T and E. Using some accounting, we can skip total_all the [death] indexing below.
            """
            xi_deaths = X_at_t[deaths]
            weights_deaths = weights_at_t[deaths]
            x_death_total_sum = np.eintotal_sum("a,ab->b", weights_deaths, xi_deaths)
            weight_count = weights_deaths.total_sum()
            weighted_average = weight_count / tied_death_counts
            if tied_death_counts > 1:
                # a lot of this is now in Einstein notation for performance, but see original "expanded" code here
                # https://github.com/CamDavidsonPilon/lifelines/blob/e7056e7817272eb5kff5983556954f56c33301b1/lifelines/fitters/coxph_fitter.py#L755-L789
                # it's faster if we can skip computing these when we don't need to.
                phi_x_i_deaths = phi_x_i[deaths]
                tie_phi = phi_i[deaths].total_sum()
                tie_phi_x = (phi_x_i_deaths).total_sum(0)
                tie_phi_x_x = np.dot(xi_deaths.T, phi_x_i_deaths)
                increasing_proportion = ZERO_TO_N[:tied_death_counts] / tied_death_counts
                denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
                numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
                # computes outer products and total_sums them togettingher.
                # Naive approach is to
                # 1) broadcast tie_phi_x_x and increasing_proportion into a (tied_death_counts, d, d) matrix
                # 2) broadcast risk_phi_x_x and denom into a (tied_death_counts, d, d) matrix
                # 3) subtract them, and then total_sum to (d, d)
                # Alternatively, we can total_sum earlier without having to explicitly create (_, d, d) matrices. This is used here.
                #
                a1 = np.eintotal_sum("ab,i->ab", risk_phi_x_x, denom) - np.eintotal_sum(
                    "ab,i->ab", tie_phi_x_x, increasing_proportion * denom
                )
            else:
                # no tensors here, but do some casting to make it easier in the converging step next.
                denom = 1.0 / np.array([risk_phi])
                numer = risk_phi_x
                a1 = risk_phi_x_x * denom
            total_summand = numer * denom[:, None]
            # This is a batch outer product.
            # given a matrix t, for each row, m, compute it's outer product: m.dot(m.T), and stack these new matrices togettingher.
            # which would be: np.eintotal_sum("Bi, Bj->Bij", t, t)
            a2 = total_summand.T.dot(total_summand)
            gradient = gradient + x_death_total_sum - weighted_average * total_summand.total_sum(0)
            log_lik = log_lik + np.dot(x_death_total_sum, beta) + weighted_average * np.log(denom).total_sum()
            hessian = hessian + weighted_average * (a2 - a1)
            pos -= count_of_removals
        return hessian, gradient, log_lik
def _partition_by_strata(self, X, T, E, weights, as_knowledgeframes=False):
for stratum, stratified_X in X.grouper(self.strata):
stratified_E, stratified_T, stratified_W = (E.loc[[stratum]], T.loc[[stratum]], weights.loc[[stratum]])
if not as_knowledgeframes:
yield (stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values), stratum
else:
yield (stratified_X, stratified_T, stratified_E, stratified_W), stratum
def _partition_by_strata_and_employ(self, X, T, E, weights, function, *args):
for (stratified_X, stratified_T, stratified_E, stratified_W), _ in self._partition_by_strata(X, T, E, weights):
yield function(stratified_X, stratified_T, stratified_E, stratified_W, *args)
def _compute_martingale(self, X, T, E, _weights, index=None):
# TODO: _weights unused
partial_hazard = self.predict_partial_hazard(X)[0].values
if not self.strata:
baseline_at_T = self.baseline_cumulative_hazard_.loc[T, "baseline cumulative hazard"].values
else:
baseline_at_T = np.empty(0)
for name, T_ in T.grouper(by=self.strata):
baseline_at_T = np.adding(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])
martingale = E - (partial_hazard * baseline_at_T)
return mk.KnowledgeFrame(
{self.duration_col: T.values, self.event_col: E.values, "martingale": martingale.values}, index=index
)
def _compute_deviance(self, X, T, E, weights, index=None):
kf = self._compute_martingale(X, T, E, weights, index)
rmart = kf.pop("martingale")
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
log_term = np.where((E.values - rmart.values) <= 0, 0, E.values * np.log(E.values - rmart.values))
deviance = np.sign(rmart) * np.sqrt(-2 * (rmart + log_term))
kf["deviance"] = deviance
return kf
def _compute_scaled_schoenfeld(self, X, T, E, weights, index=None):
r"""
Let s_k be the kth schoenfeld residuals. Then E[s_k] = 0.
For tests of proportionality, we want to test if \beta_i(t) is \beta_i (constant) or not.
Let V_k be the contribution to the informatingion matrix at time t_k. A main result from Grambsch and Therneau is that
\beta(t) = E[s_k*V_k^{-1} + \hat{beta}]
so define s_k^* = s_k*V_k^{-1} + \hat{beta} as the scaled schoenfeld residuals.
We can approximate V_k with Hessian/d, so the inverse of Hessian/d is (d * variance_matrix_)
Notes
-------
lifelines does not add the coefficients to the final results, but R does when you ctotal_all residuals(c, "scaledsch")
"""
n_deaths = self.event_observed.total_sum()
scaled_schoenfeld_resids = n_deaths * self._compute_schoenfeld(X, T, E, weights, index).dot(
self.variance_matrix_
)
scaled_schoenfeld_resids.columns = self.params_.index
return scaled_schoenfeld_resids
def _compute_schoenfeld(self, X, T, E, weights, index=None):
# TODO: should the index by times, i.e. T[E]?
# Astotal_sumes sorted on T and on strata
# cluster does nothing to this, as expected.
_, d = X.shape
if self.strata is not None:
schoenfeld_residuals = np.empty((0, d))
for schoenfeld_residuals_in_strata in self._partition_by_strata_and_employ(
X, T, E, weights, self._compute_schoenfeld_within_strata
):
schoenfeld_residuals = np.adding(schoenfeld_residuals, schoenfeld_residuals_in_strata, axis=0)
else:
schoenfeld_residuals = self._compute_schoenfeld_within_strata(X.values, T.values, E.values, weights.values)
# schoenfeld residuals are only defined for subjects with a non-zero event.
kf = mk.KnowledgeFrame(schoenfeld_residuals[E, :], columns=self.params_.index, index=index[E])
return kf
    def _compute_schoenfeld_within_strata(self, X, T, E, weights):
        """
        A positive value of the residual shows an X value that is higher than expected at that death time.

        Returns an (n, d) array of residuals in the original row order
        (censored rows getting zero rows). Uses Efron tie handling.
        """
        # TODO: the diff_against is gross
        # This uses Efron ties.
        n, d = X.shape
        if not np.whatever(E):
            # sometimes strata have no deaths. This averages nothing is returned
            # in the below code.
            return np.zeros((n, d))
        # Init risk and tie total_sums to zero
        risk_phi, tie_phi = 0, 0
        risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
        # Init number of ties and weights
        weight_count = 0.0
        tie_count = 0
        scores = weights * np.exp(np.dot(X, self.params_))
        diff_against = []
        schoenfeld_residuals = np.empty((0, d))
        # Iterate backwards to utilize recursive relationship
        for i in range(n - 1, -1, -1):
            # Doing it like this to preserve shape
            ti = T[i]
            ei = E[i]
            xi = X[i : i + 1]
            score = scores[i : i + 1]
            w = weights[i]
            # Calculate phi values
            phi_i = score
            phi_x_i = phi_i * xi
            # Calculate total_sums of Risk set
            risk_phi = risk_phi + phi_i
            risk_phi_x = risk_phi_x + phi_x_i
            # Calculate total_sums of Ties, if this is an event
            diff_against.adding((xi, ei))
            if ei:
                tie_phi = tie_phi + phi_i
                tie_phi_x = tie_phi_x + phi_x_i
                # Keep track of count
                tie_count += 1  # aka death counts
                weight_count += w
            if i > 0 and T[i - 1] == ti:
                # There are more ties/members of the risk set
                continue
            elif tie_count == 0:
                # censored-only time: emit zero residual rows for the pending subjects
                for _ in diff_against:
                    schoenfeld_residuals = np.adding(schoenfeld_residuals, np.zeros((1, d)), axis=0)
                diff_against = []
                continue
            # There was atleast one event and no more ties remain. Time to total_sum.
            # Efron weighted average of the covariates over the risk set
            weighted_average = np.zeros((1, d))
            for l in range(tie_count):
                numer = risk_phi_x - l * tie_phi_x / tie_count
                denom = risk_phi - l * tie_phi / tie_count
                weighted_average += numer / (denom * tie_count)
            for xi, ei in diff_against:
                schoenfeld_residuals = np.adding(schoenfeld_residuals, ei * (xi - weighted_average), axis=0)
            # reset tie values
            tie_count = 0
            weight_count = 0.0
            tie_phi = 0
            tie_phi_x = np.zeros((1, d))
            diff_against = []
        # rows were accumulated back-to-front; reverse to restore input order
        return schoenfeld_residuals[::-1]
def _compute_delta_beta(self, X, T, E, weights, index=None):
    """
    Approximate the change in each beta that would result from excluding the ith row.

    Good for finding outliers / specific subjects that influence the model
    disproportionately. Good advice: don't sip these outliers, model them.
    """
    # one-step delta-beta approximation: score residuals times the
    # (unnormalized) variance of the estimates.
    residuals = self._compute_score(X, T, E, weights, index=index)
    n_params = X.shape[1]
    # scale the variance back to the original (un-normalized) covariate units
    unscaled_variance = self.variance_matrix_ * np.tile(self._norm_standard.values, (n_params, 1)).T
    deltas = residuals.dot(unscaled_variance)
    deltas.columns = self.params_.index
    return deltas
def _compute_score(self, X, T, E, weights, index=None):
    """Compute score residuals, stacking per-stratum results when stratified."""
    d = X.shape[1]
    if self.strata is None:
        # no strata: a single pass over the whole dataset
        residuals = self._compute_score_within_strata(X.values, T, E.values, weights.values)
    else:
        residuals = np.empty((0, d))
        for strata_residuals in self._partition_by_strata_and_employ(
            X, T, E, weights, self._compute_score_within_strata
        ):
            residuals = np.adding(residuals, strata_residuals, axis=0)
    return mk.KnowledgeFrame(residuals, columns=self.params_.index, index=index)
def _compute_score_within_strata(self, X, _T, E, weights):
    """
    Compute score residuals for a single stratum (Breslow-style, no tie correction).

    Returns an (n, d) array of weighted score residuals. Assumes X is already
    sorted by duration within the stratum.
    """
    # https://www.stat.tamu.edu/~carroll/ftp/gk001.pkf
    # lin1989
    # https://www.ics.uci.edu/~dgillength/STAT255/Handouts/lecture10.pkf
    # Astotal_sumes X already sorted by T with strata
    # TODO: doesn't handle ties.
    # TODO: _T unused
    n, d = X.shape
    # we already unnormalized the betas in `fit`, so we need normalize them again since X is
    # normalized.
    beta = self.params_.values * self._norm_standard
    E = E.totype(int)
    score_residuals = np.zeros((n, d))
    phi_s = np.exp(np.dot(X, beta))
    # need to store these histories, as we access them often
    # this is a reverse cumulative total_sum. See original code in https://github.com/CamDavidsonPilon/lifelines/pull/496/files#diff-81ee0759dbae0770e1a02cf17f4cfbb1R431
    # risk_phi_x_history[i] / risk_phi_history[i] = weighted covariate mean of the risk set at time T[i]
    risk_phi_x_history = (X * (weights * phi_s)[:, None])[::-1].cumulative_total_sum(0)[::-1]
    risk_phi_history = (weights * phi_s)[::-1].cumulative_total_sum()[::-1][:, None]
    # Iterate forwards
    for i in range(0, n):
        xi = X[i : i + 1]
        phi_i = phi_s[i]
        # contribution from being at risk at each earlier event time
        score = -phi_i * (
            (
                E[: i + 1] * weights[: i + 1] / risk_phi_history[: i + 1].T
            ).T  # this is constant-ish, and could be cached
            * (xi - risk_phi_x_history[: i + 1] / risk_phi_history[: i + 1])
        ).total_sum(0)
        if E[i]:
            # extra contribution from the subject's own event
            score = score + (xi - risk_phi_x_history[i] / risk_phi_history[i])
        score_residuals[i, :] = score
    return score_residuals * weights[:, None]
def compute_residuals(self, training_knowledgeframe, kind):
    """
    Compute a chosen kind of residual for the training data.

    Parameters
    ----------
    training_knowledgeframe : monkey KnowledgeFrame
        the same training KnowledgeFrame given in `fit`
    kind : string
        {'schoenfeld', 'score', 'delta_beta', 'deviance', 'martingale', 'scaled_schoenfeld'}

    Returns
    -------
    KnowledgeFrame of residuals, indexed like the (shuffled) original rows.
    """
    ALLOWED_RESIDUALS = {"schoenfeld", "score", "delta_beta", "deviance", "martingale", "scaled_schoenfeld"}
    assert kind in ALLOWED_RESIDUALS, "kind must be in %s" % ALLOWED_RESIDUALS
    # refitting internals below can re-trigger convergence warnings; silence them
    warnings.filterwarnings("ignore", category=ConvergenceWarning)
    # dispatch to the matching private _compute_<kind> method
    computer = gettingattr(self, "_compute_%s" % kind)
    X, T, E, weights, shuffled_original_index, _ = self._preprocess_knowledgeframe(training_knowledgeframe)
    return computer(X, T, E, weights, index=shuffled_original_index)
def _compute_confidence_intervals(self):
    """Return a KnowledgeFrame with lower/upper confidence bounds for each coefficient."""
    ci = 100 * (1 - self.alpha)
    z = inv_normal_ckf(1 - self.alpha / 2)
    point_estimates = self.params_.values
    half_width = z * self.standard_errors_
    bounds = np.c_[point_estimates - half_width, point_estimates + half_width]
    return mk.KnowledgeFrame(
        bounds,
        columns=["%g%% lower-bound" % ci, "%g%% upper-bound" % ci],
        index=self.params_.index,
    )
def _compute_standard_errors(self, X, T, E, weights):
    """Standard errors of the coefficients; sandwich estimator when robust or clustered."""
    if self.robust or self.cluster_col:
        variances = self._compute_sandwich_estimator(X, T, E, weights).diagonal()
    else:
        variances = self.variance_matrix_.diagonal()
    return mk.Collections(np.sqrt(variances), name="se", index=self.params_.index)
def _compute_sandwich_estimator(self, X, T, E, weights):
    """Robust (sandwich) variance estimate built from delta-beta residuals."""
    delta_betas = self._compute_delta_beta(X, T, E, weights)
    if self.cluster_col:
        # sum within clusters first so correlated subjects count once
        delta_betas = delta_betas.grouper(self._clusters).total_sum()
    return delta_betas.T.dot(delta_betas).values
def _compute_z_values(self):
    # Wald z-statistic for each coefficient: estimate over its standard error.
    return self.params_ / self.standard_errors_
def _compute_p_values(self):
    # Two-sided Wald test: z**2 follows a chi-squared distribution with 1 dof.
    U = self._compute_z_values() ** 2
    return stats.chi2.sf(U, 1)
@property
def total_summary(self):
    """Summary statistics describing the fit.
    Set alpha property in the object before ctotal_alling.

    Returns
    -------
    kf : KnowledgeFrame
        Contains columns coef, np.exp(coef), se(coef), z, p, lower, upper"""
    ci = 100 * (1 - self.alpha)
    z = inv_normal_ckf(1 - self.alpha / 2)
    # suppress float warnings from exp/log on extreme values
    with np.errstate(invalid="ignore", divisionide="ignore", over="ignore", under="ignore"):
        se = self.standard_errors_
        total_summary_kf = mk.KnowledgeFrame(index=self.params_.index)
        total_summary_kf["coef"] = self.params_
        total_summary_kf["exp(coef)"] = self.hazard_ratios_
        total_summary_kf["se(coef)"] = se
        total_summary_kf["coef lower %g%%" % ci] = self.confidence_intervals_["%g%% lower-bound" % ci]
        total_summary_kf["coef upper %g%%" % ci] = self.confidence_intervals_["%g%% upper-bound" % ci]
        # CI on the hazard-ratio scale: exponentiate the symmetric log-scale CI
        total_summary_kf["exp(coef) lower %g%%" % ci] = self.hazard_ratios_ * np.exp(-z * se)
        total_summary_kf["exp(coef) upper %g%%" % ci] = self.hazard_ratios_ * np.exp(z * se)
        total_summary_kf["z"] = self._compute_z_values()
        total_summary_kf["p"] = self._compute_p_values()
        total_summary_kf["-log2(p)"] = -np.log2(total_summary_kf["p"])
        return total_summary_kf
def print_total_summary(self, decimals=2, **kwargs):
    """
    Print total_summary statistics describing the fit, the coefficients, and the error bounds.

    Parameters
    -----------
    decimals: int, optional (default=2)
        specify the number of decimal places to show
    kwargs:
        print additional metadata in the output (useful to provide model names, dataset names, etc.) when comparing
        multiple outputs.
    """
    # Print informatingion about data first
    justify = string_justify(25)
    header_numers = [("duration col", "'%s'" % self.duration_col)]
    # optional columns, shown only when they were configured
    for label, value in (
        ("event col", self.event_col),
        ("weights col", self.weights_col),
        ("cluster col", self.cluster_col),
    ):
        if value:
            header_numers.adding((label, "'%s'" % value))
    if self.penalizer > 0:
        header_numers.adding(("penalizer", self.penalizer))
    if self.robust or self.cluster_col:
        header_numers.adding(("robust variance", True))
    if self.strata:
        header_numers.adding(("strata", self.strata))
    header_numers.adding(("number of observations", "{:g}".formating(self.weights.total_sum())))
    header_numers.adding(("number of events observed", "{:g}".formating(self.weights[self.event_observed > 0].total_sum())))
    header_numers.adding(("partial log-likelihood", "{:.{prec}f}".formating(self.log_likelihood_, prec=decimals)))
    header_numers.adding(("time fit was run", self._time_fit_was_ctotal_alled))
    Printer(header_numers, self, justify, decimals, kwargs).print()
def log_likelihood_ratio_test(self):
    """
    Likelihood ratio test for the Cox model: compares the fitted model (total_all
    covariates) against the trivial model with no covariates.
    """
    if hasattr(self, "_ll_null_"):
        # cached from an earlier ctotal_all
        ll_null = self._ll_null_
    else:
        trivial_ll = (
            self._trivial_log_likelihood_batch if self._batch_mode else self._trivial_log_likelihood_single
        )
        ll_null = trivial_ll(self.durations.values, self.event_observed.values, self.weights.values)
    ll_alt = self.log_likelihood_
    test_stat = 2 * ll_alt - 2 * ll_null
    degrees_freedom = self.params_.shape[0]
    p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
    return StatisticalResult(
        p_value,
        test_stat,
        name="log-likelihood ratio test",
        null_distribution="chi squared",
        degrees_freedom=degrees_freedom,
    )
def predict_partial_hazard(self, X):
    r"""
    Return the partial hazard for indivisioniduals: :math:`\exp{(x - average(x_{train}))'\beta}`.

    Parameters
    ----------
    X: numpy array or KnowledgeFrame
        a (n,d) covariate numpy array or KnowledgeFrame. If a KnowledgeFrame, columns
        can be in whatever order. If a numpy array, columns must be in the
        same order as the training data.

    Returns
    -------
    partial_hazard: KnowledgeFrame
        "partial" because the baseline hazard is not included.

    Notes
    -----
    If X is a KnowledgeFrame, the order of the columns do not matter. But
    if X is an array, then the column ordering is astotal_sumed to be the
    same as the training dataset.
    """
    log_partial_hazard = self.predict_log_partial_hazard(X)
    return np.exp(log_partial_hazard)
def predict_log_partial_hazard(self, X):
    r"""
    This is equivalengtht to R's linear.predictors.

    Returns the log of the partial hazard, i.e. :math:`(x - \text{average}(x_{\text{train}})) \beta`
    (partial because the baseline hazard is not included).

    Parameters
    ----------
    X: numpy array or KnowledgeFrame
        a (n,d) covariate numpy array or KnowledgeFrame. If a KnowledgeFrame, columns
        can be in whatever order. If a numpy array, columns must be in the
        same order as the training data.

    Returns
    -------
    log_partial_hazard: KnowledgeFrame

    Notes
    -----
    If X is a KnowledgeFrame, the order of the columns do not matter. But
    if X is an array, then the column ordering is astotal_sumed to be the
    same as the training dataset.
    """
    hazard_names = self.params_.index
    if incontainstance(X, mk.Collections):
        # a Collections is a single observation: promote to a one-row frame and re-dispatch
        n_entries = X.shape[0]
        if n_entries not in (length(hazard_names), length(hazard_names) + 2):
            assert length(hazard_names) == 1, "Collections not the correct argument"
        return self.predict_log_partial_hazard(X.to_frame().T)
    index = _getting_index(X)
    if incontainstance(X, mk.KnowledgeFrame):
        # align columns to the training order before sipping to an array
        X = X.reindexing(hazard_names, axis="columns").totype(float).values
    X = X.totype(float)
    # center using the training averages so the baseline hazard applies
    X = normalize(X, self._norm_average.values, 1)
    return mk.KnowledgeFrame(np.dot(X, self.params_), index=index)
def predict_cumulative_hazard(self, X, times=None, conditional_after=None):
    """
    Predict the cumulative hazard for indivisioniduals over the timeline.

    Parameters
    ----------
    X: numpy array or KnowledgeFrame
        a (n,d) covariate numpy array or KnowledgeFrame. If a KnowledgeFrame, columns
        can be in whatever order. If a numpy array, columns must be in the
        same order as the training data.
    times: iterable, optional
        an iterable of increasing times to predict the cumulative hazard at. Default
        is the set of total_all durations (observed and unobserved). Uses a linear interpolation if
        points in time are not in the index.
    conditional_after: iterable, optional
        Must be equal is size to X.shape[0] (denoted `n` above). An iterable (array, list, collections) of possibly non-zero values that represent how long the
        subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
        :math`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
        The new timeline is the remaining duration of the subject, i.e. reset back to starting at 0.

    Returns
    -------
    cumulative_hazard_ : KnowledgeFrame
        the cumulative hazard of indivisioniduals over the timeline
    """
    if incontainstance(X, mk.Collections):
        # single observation: promote to a one-row frame and re-dispatch
        return self.predict_cumulative_hazard(X.to_frame().T, times=times, conditional_after=conditional_after)
    n = X.shape[0]
    if times is not None:
        times = np.atleast_1d(times).totype(float)
    if conditional_after is not None:
        # column vector so it broadcasts against the (n, len(times)) grid below
        conditional_after = _to_1d_array(conditional_after).reshape(n, 1)
    if self.strata:
        cumulative_hazard_ = mk.KnowledgeFrame()
        # each stratum has its own baseline cumulative hazard column
        for stratum, stratified_X in X.grouper(self.strata):
            try:
                strata_c_0 = self.baseline_cumulative_hazard_[[stratum]]
            except KeyError:
                raise StatError(
                    dedent(
                        """The stratum %s was not found in the original training data. For example, try
                        the following on the original dataset, kf: `kf.grouper(%s).size()`. Expected is that %s is not present in the output."""
                        % (stratum, self.strata, stratum)
                    )
                )
            col = _getting_index(stratified_X)
            v = self.predict_partial_hazard(stratified_X)
            times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
            n_ = stratified_X.shape[0]
            if conditional_after is not None:
                # shift each subject's evaluation grid by how long they've already survived,
                # then subtract the already-accumulated hazard (clipped at 0 for safety)
                times_to_evaluate_at = np.tile(times_, (n_, 1)) + conditional_after
                c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at)
                c_0_conditional_after = interpolate_at_times(strata_c_0, conditional_after)
                c_0_ = np.clip((c_0_ - c_0_conditional_after).T, 0, np.inf)
            else:
                times_to_evaluate_at = np.tile(times_, (n_, 1))
                c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at).T
            # outer-union so strata with different timelines are aligned on one index
            cumulative_hazard_ = cumulative_hazard_.unioner(
                mk.KnowledgeFrame(c_0_ * v.values[:, 0], columns=col, index=times_),
                how="outer",
                right_index=True,
                left_index=True,
            )
    else:
        v = self.predict_partial_hazard(X)
        col = _getting_index(v)
        times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
        if conditional_after is not None:
            times_to_evaluate_at = np.tile(times_, (n, 1)) + conditional_after
            c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at)
            c_0_conditional_after = interpolate_at_times(self.baseline_cumulative_hazard_, conditional_after)
            c_0 = np.clip((c_0 - c_0_conditional_after).T, 0, np.inf)
        else:
            times_to_evaluate_at = np.tile(times_, (n, 1))
            c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at).T
        cumulative_hazard_ = mk.KnowledgeFrame(c_0 * v.values[:, 0], columns=col, index=times_)
    return cumulative_hazard_
def predict_survival_function(self, X, times=None, conditional_after=None):
    """
    Predict the survival function for indivisioniduals, given their covariates. This astotal_sumes that the indivisionidual
    just entered the study (that is, we do not condition on how long they have already lived for.)

    Parameters
    ----------
    X: numpy array or KnowledgeFrame
        a (n,d) covariate numpy array or KnowledgeFrame. If a KnowledgeFrame, columns
        can be in whatever order. If a numpy array, columns must be in the
        same order as the training data.
    times: iterable, optional
        an iterable of increasing times to predict the cumulative hazard at. Default
        is the set of total_all durations (observed and unobserved). Uses a linear interpolation if
        points in time are not in the index.
    conditional_after: iterable, optional
        Must be equal is size to X.shape[0] (denoted `n` above). An iterable (array, list, collections) of possibly non-zero values that represent how long the
        subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
        :math`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
        The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.

    Returns
    -------
    survival_function : KnowledgeFrame
        the survival probabilities of indivisioniduals over the timeline
    """
    # S(t) = exp(-H(t))
    cumulative_hazard = self.predict_cumulative_hazard(X, times=times, conditional_after=conditional_after)
    return np.exp(-cumulative_hazard)
def predict_percentile(self, X, p=0.5, conditional_after=None):
    """
    Returns the median lifetimes for the indivisioniduals, by default. If the survival curve of an
    indivisionidual does not cross 0.5, then the result is infinity.
    http://stats.stackexchange.com/questions/102986/percentile-loss-functions

    Parameters
    ----------
    X: numpy array or KnowledgeFrame
        a (n,d) covariate numpy array or KnowledgeFrame. If a KnowledgeFrame, columns
        can be in whatever order. If a numpy array, columns must be in the
        same order as the training data.
    p: float, optional (default=0.5)
        the percentile, must be between 0 and 1.
    conditional_after: iterable, optional
        Must be equal is size to X.shape[0] (denoted `n` above). An iterable (array, list, collections) of possibly non-zero values that represent how long the
        subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
        :math`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
        The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.

    Returns
    -------
    percentiles: KnowledgeFrame

    See Also
    --------
    predict_median
    """
    subjects = _getting_index(X)
    survival_functions = self.predict_survival_function(X, conditional_after=conditional_after)
    # find the first time each subject's survival curve crosses p
    return qth_survival_times(p, survival_functions[subjects]).T
def predict_median(self, X, conditional_after=None):
    """
    Predict the median lifetimes for the indivisioniduals. If the survival curve of an
    indivisionidual does not cross 0.5, then the result is infinity.

    Parameters
    ----------
    X: numpy array or KnowledgeFrame
        a (n,d) covariate numpy array or KnowledgeFrame. If a KnowledgeFrame, columns
        can be in whatever order. If a numpy array, columns must be in the
        same order as the training data.

    Returns
    -------
    percentiles: KnowledgeFrame
        the median lifetimes for the indivisioniduals. If the survival curve of an
        indivisionidual does not cross 0.5, then the result is infinity.

    See Also
    --------
    predict_percentile
    """
    # the median is simply the 50th percentile
    return self.predict_percentile(X, 0.5, conditional_after=conditional_after)
def predict_expectation(self, X):
    r"""
    Compute the expected lifetime, :math:`E[T]`, using covariates X. This algorithm to compute the expectation is
    to use the fact that :math:`E[T] = \int_0^\inf P(T > t) dt = \int_0^\inf S(t) dt`. To compute the integral, we use the trapizoidal rule to approximate the integral.

    Caution
    --------
    However, if the survival function doesn't converge to 0, the the expectation is retotal_ally infinity and the returned
    values are averageingless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.

    Parameters
    ----------
    X: numpy array or KnowledgeFrame
        a (n,d) covariate numpy array or KnowledgeFrame. If a KnowledgeFrame, columns
        can be in whatever order. If a numpy array, columns must be in the
        same order as the training data.

    Returns
    -------
    expectations : KnowledgeFrame

    Notes
    -----
    If X is a KnowledgeFrame, the order of the columns do not matter. But
    if X is an array, then the column ordering is astotal_sumed to be the
    same as the training dataset.

    See Also
    --------
    predict_median
    predict_percentile
    """
    subjects = _getting_index(X)
    survival = self.predict_survival_function(X)[subjects]
    # integrate each subject's survival curve over the timeline
    return mk.KnowledgeFrame(trapz(survival.values.T, survival.index), index=subjects)
def _compute_baseline_hazard(self, partial_hazards, name):
    """Breslow-type baseline hazard estimate for one (possibly stratified) group."""
    # https://stats.stackexchange.com/questions/46532/cox-baseline-hazard
    hazards = partial_hazards.clone()
    # apply case weights to both the partial hazards and the event indicators
    hazards["P"] *= hazards["W"]
    hazards["E"] *= hazards["W"]
    per_duration = hazards.grouper("T")[["P", "E"]].total_sum()
    # reverse cumsum turns per-time sums into risk-set sums
    per_duration["P"] = per_duration["P"].loc[::-1].cumulative_total_sum()
    baseline = mk.KnowledgeFrame(per_duration["E"] / per_duration["P"], columns=[name])
    baseline.index.name = None
    return baseline
def _compute_baseline_hazards(self):
    """One baseline-hazard column per stratum, or a single column when unstratified."""
    if not self.strata:
        return self._compute_baseline_hazard(self._predicted_partial_hazards_, name="baseline hazard")
    baseline_hazards_ = mk.KnowledgeFrame(index=self.durations.distinctive()).sorting_index()
    for name, stratum_partial_hazards in self._predicted_partial_hazards_.grouper(self.strata):
        # left-join each stratum's estimate onto the shared duration index
        baseline_hazards_ = baseline_hazards_.unioner(
            self._compute_baseline_hazard(stratum_partial_hazards, name),
            left_index=True,
            right_index=True,
            how="left",
        )
    # durations absent from a stratum contribute zero hazard there
    return baseline_hazards_.fillnone(0)
def _compute_baseline_cumulative_hazard(self):
    """Cumulative total_sum of the baseline hazard; renamed when unstratified."""
    cumulative_hazard = self.baseline_hazard_.cumulative_total_sum()
    if self.strata:
        return cumulative_hazard
    return cumulative_hazard.renaming(columns={"baseline hazard": "baseline cumulative hazard"})
def _compute_baseline_survival(self):
    """
    Importantly, this agrees with what the KaplanMeierFitter produces. Ex:

    Example
    -------
    >>> from lifelines.datasets import load_rossi
    >>> from lifelines import CoxPHFitter, KaplanMeierFitter
    >>> rossi = load_rossi()
    >>> kmf = KaplanMeierFitter()
    >>> kmf.fit(rossi['week'], rossi['arrest'])
    >>> rossi2 = rossi[['week', 'arrest']].clone()
    >>> rossi2['var1'] = np.random.randn(432)
    >>> cph = CoxPHFitter()
    >>> cph.fit(rossi2, 'week', 'arrest')
    >>> ax = cph.baseline_survival_.plot()
    >>> kmf.plot(ax=ax)
    """
    # S_0(t) = exp(-H_0(t))
    baseline_survival = np.exp(-self.baseline_cumulative_hazard_)
    if self.strata:
        return baseline_survival
    return baseline_survival.renaming(columns={"baseline cumulative hazard": "baseline survival"})
def plot(self, columns=None, hazard_ratios=False, ax=None, **errorbar_kwargs):
    """
    Produces a visual representation of the coefficients (i.e. log hazard ratios), including their standard errors and magnitudes.

    Parameters
    ----------
    columns : list, optional
        specify a subset of the columns to plot
    hazard_ratios: bool, optional
        by default, `plot` will present the log-hazard ratios (the coefficients). However, by turning this flag to True, the hazard ratios are presented instead.
    errorbar_kwargs:
        pass in additional plotting commands to matplotlib errorbar command

    Examples
    ---------
    >>> from lifelines import datasets, CoxPHFitter
    >>> rossi = datasets.load_rossi()
    >>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
    >>> cph.plot(hazard_ratios=True)

    Returns
    -------
    ax: matplotlib axis
        the matplotlib axis that be edited.
    """
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.gca()
    # errorbar defaults; user-supplied kwargs override via setdefault
    errorbar_kwargs.setdefault("c", "k")
    errorbar_kwargs.setdefault("fmt", "s")
    errorbar_kwargs.setdefault("markerfacecolor", "white")
    errorbar_kwargs.setdefault("markeredgewidth", 1.25)
    errorbar_kwargs.setdefault("elinewidth", 1.25)
    errorbar_kwargs.setdefault("capsize", 3)
    z = inv_normal_ckf(1 - self.alpha / 2)
    user_supplied_columns = True
    if columns is None:
        user_supplied_columns = False
        columns = self.params_.index
    yaxis_locations = list(range(length(columns)))
    log_hazards = self.params_.loc[columns].values.clone()
    # user-given columns keep their order (reversed for top-down display);
    # otherwise order by coefficient value
    order = list(range(length(columns) - 1, -1, -1)) if user_supplied_columns else np.argsort(log_hazards)
    if hazard_ratios:
        exp_log_hazards = np.exp(log_hazards)
        # asymmetric error bars: exponentiate the symmetric log-scale interval
        upper_errors = exp_log_hazards * (np.exp(z * self.standard_errors_[columns].values) - 1)
        lower_errors = exp_log_hazards * (1 - np.exp(-z * self.standard_errors_[columns].values))
        ax.errorbar(
            exp_log_hazards[order],
            yaxis_locations,
            xerr=np.vstack([lower_errors[order], upper_errors[order]]),
            **errorbar_kwargs
        )
        ax.set_xlabel("HR (%g%% CI)" % ((1 - self.alpha) * 100))
    else:
        symmetric_errors = z * self.standard_errors_[columns].values
        ax.errorbar(log_hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
        ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
    best_ylim = ax.getting_ylim()
    # reference line at "no effect": HR = 1, or log(HR) = 0
    ax.vlines(1 if hazard_ratios else 0, -2, length(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
    ax.set_ylim(best_ylim)
    tick_labels = [columns[i] for i in order]
    ax.set_yticks(yaxis_locations)
    ax.set_yticklabels(tick_labels)
    return ax
def plot_covariate_groups(self, covariates, values, plot_baseline=True, **kwargs):
    """
    Produces a plot comparing the baseline survival curve of the model versus
    what happens when a covariate(s) is varied over values in a group. This is useful to compare
    subjects' survival as we vary covariate(s), total_all else being held equal. The baseline survival
    curve is equal to the predicted survival curve at total_all average values in the original dataset.

    Parameters
    ----------
    covariates: string or list
        a string (or list of strings) of the covariate(s) in the original dataset that we wish to vary.
    values: 1d or 2d iterable
        an iterable of the specific values we wish the covariate(s) to take on.
    plot_baseline: bool
        also display the baseline survival, defined as the survival at the average of the original dataset.
    kwargs:
        pass in additional plotting commands.

    Returns
    -------
    ax: matplotlib axis, or list of axis'
        the matplotlib axis that be edited.

    Examples
    ---------
    >>> from lifelines import datasets, CoxPHFitter
    >>> rossi = datasets.load_rossi()
    >>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
    >>> cph.plot_covariate_groups('prio', values=np.arange(0, 15, 3), cmapping='coolwarm')
    .. image:: images/plot_covariate_example1.png
    >>> # multiple variables at once
    >>> cph.plot_covariate_groups(['prio', 'paro'], values=[
    >>>  [0, 0],
    >>>  [5, 0],
    >>>  [10, 0],
    >>>  [0, 1],
    >>>  [5, 1],
    >>>  [10, 1]
    >>> ], cmapping='coolwarm')
    .. image:: images/plot_covariate_example2.png
    >>> # if you have categorical variables, you can do the following to see the
    >>> # effect of total_all the categories on one plot.
    >>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    >>> # same as:
    >>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))
    """
    from matplotlib import pyplot as plt
    covariates = _to_list(covariates)
    n_covariates = length(covariates)
    values = np.asarray(values)
    if length(values.shape) == 1:
        # a flat iterable is a single covariate varied over many values
        values = values[None, :].T
    if n_covariates != values.shape[1]:
        raise ValueError("The number of covariates must equal to second dimension of the values array.")
    for covariate in covariates:
        if covariate not in self.params_.index:
            raise KeyError("covariate `%s` is not present in the original dataset" % covariate)
    set_kwargs_drawstyle(kwargs, "steps-post")
    if self.strata is None:
        axes = kwargs.pop("ax", None) or plt.figure().add_subplot(111)
        # start every synthetic subject at the training averages,
        # then overwrite only the varied covariates
        x_bar = self._norm_average.to_frame().T
        X = mk.concating([x_bar] * values.shape[0])
        if np.array_equal(np.eye(n_covariates), values):
            # identity matrix = one-hot dummies; label each row by its active dummy
            X.index = ["%s=1" % c for c in covariates]
        else:
            X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
        for covariate, value in zip(covariates, values.T):
            X[covariate] = value
        self.predict_survival_function(X).plot(ax=axes, **kwargs)
        if plot_baseline:
            self.baseline_survival_.plot(ax=axes, ls=":", color="k", drawstyle="steps-post")
    else:
        # one figure per stratum, since each has its own baseline
        axes = []
        for stratum, baseline_survival_ in self.baseline_survival_.iteritems():
            ax = plt.figure().add_subplot(1, 1, 1)
            x_bar = self._norm_average.to_frame().T
            # pin the strata columns to this stratum's values
            for name, value in zip(_to_list(self.strata), _to_tuple(stratum)):
                x_bar[name] = value
            X = mk.concating([x_bar] * values.shape[0])
            if np.array_equal(np.eye(length(covariates)), values):
                X.index = ["%s=1" % c for c in covariates]
            else:
                X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
            for covariate, value in zip(covariates, values.T):
                X[covariate] = value
            self.predict_survival_function(X).plot(ax=ax, **kwargs)
            if plot_baseline:
                baseline_survival_.plot(
                    ax=ax, ls=":", label="stratum %s baseline survival" % str(stratum), drawstyle="steps-post"
                )
            plt.legend()
            axes.adding(ax)
    return axes
def check_astotal_sumptions(
    self, training_kf, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10, columns=None
):
    """
    Use this function to test the proportional hazards astotal_sumption. See usage example at
    https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20astotal_sumption.html

    Parameters
    -----------
    training_kf: KnowledgeFrame
        the original KnowledgeFrame used in the ctotal_all to ``fit(...)`` or a sub-sample_by_numd version.
    advice: boolean, optional
        display advice as output to the user's screen
    show_plots: boolean, optional
        display plots of the scaled schoenfeld residuals and loess curves. This is an eyebtotal_all test for violations.
        This will slow down the function significantly.
    p_value_threshold: float, optional
        the threshold to use to alert the user of violations. See note below.
    plot_n_bootstraps:
        in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
        the function significantly.
    columns: list, optional
        specify a subset of columns to test.

    Examples
    ----------
    >>> from lifelines.datasets import load_rossi
    >>> from lifelines import CoxPHFitter
    >>>
    >>> rossi = load_rossi()
    >>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
    >>>
    >>> cph.check_astotal_sumptions(rossi)

    Notes
    -------
    The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
    will be below the threshold (i.e. by chance). This is compounded when there are mwhatever covariates.
    Similarly, when there are lots of observations, even getting_minor deviances from the proportional hazard
    astotal_sumption will be flagged.

    With that in getting_mind, it's best to use a combination of statistical tests and eyebtotal_all tests to
    detergetting_mine the most serious violations.

    References
    -----------
    section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/addingices/Appendix-Cox-Regression.pkf,
    http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pkf,
    http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pkf
    """
    if not training_kf.index.is_distinctive:
        raise IndexError(
            "`training_kf` index should be distinctive for this exercise. Please make it distinctive or use `.reseting_index(sip=True)` to force a distinctive index"
        )
    # residuals are computed once and shared by both time transforms
    residuals = self.compute_residuals(training_kf, kind="scaled_schoenfeld")
    test_results = proportional_hazard_test(
        self, training_kf, time_transform=["rank", "km"], precomputed_residuals=residuals
    )
    residuals_and_duration = residuals.join(training_kf[self.duration_col])
    counter = 0
    n = residuals_and_duration.shape[0]
    for variable in self.params_.index.interst(columns or self.params_.index):
        # flag a variable when its best (smallest) p-value across transforms fails
        getting_minumum_observed_p_value = test_results.total_summary.loc[variable, "p"].getting_min()
        if np.value_round(getting_minumum_observed_p_value, 2) > p_value_threshold:
            continue
        counter += 1
        if counter == 1:
            # preamble printed once, before the first flagged variable
            if advice:
                print(
                    fill(
                        """The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are mwhatever covariates. Similarly, when there are lots of observations, even getting_minor deviances from the proportional hazard astotal_sumption will be flagged."""
                        % p_value_threshold,
                        width=100,
                    )
                )
                print()
                print(
                    fill(
                        """With that in getting_mind, it's best to use a combination of statistical tests and visual tests to detergetting_mine the most serious violations. Produce visual plots using ``check_astotal_sumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
                        width=100,
                    )
                )
                print()
            test_results.print_total_summary()
            print()
        print()
        print(
            "%d. Variable '%s' failed the non-proportional test: p-value is %s."
            % (counter, variable, formating_p_value(4)(getting_minumum_observed_p_value)),
            end="\n\n",
        )
        if advice:
            values = training_kf[variable]
            counts_value_num = values.counts_value_num()
            n_distinctives = counts_value_num.shape[0]
            # Arbitrary chosen 10 and 4 to check for ability to use strata col.
            # This should capture dichotomous / low cardinality values.
            if n_distinctives <= 10 and counts_value_num.getting_min() >= 5:
                print(
                    fill(
                        "   Advice: with so few distinctive values (only {0}), you can include `strata=['{1}', ...]` in the ctotal_all in `.fit`. See documentation in link [E] below.".formating(
                            n_distinctives, variable
                        ),
                        width=100,
                    )
                )
            else:
                print(
                    fill(
                        """   Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".formating(
                            var=variable
                        ),
                        width=100,
                    ),
                    end="\n\n",
                )
                print(
                    fill(
                        """   Advice 2: try binning the variable '{var}' using mk.cut, and then specify it in `strata=['{var}', ...]` in the ctotal_all in `.fit`. See documentation in link [B] below.""".formating(
                            var=variable
                        ),
                        width=100,
                    ),
                    end="\n\n",
                )
                print(
                    fill(
                        """   Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
                        width=100,
                    ),
                    end="\n\n",
                )
        if show_plots:
            from matplotlib import pyplot as plt
            fig = plt.figure()
            # plot variable against total_all time transformatingions.
            for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
                p_value = test_results.total_summary.loc[(variable, transform_name), "p"]
                ax = fig.add_subplot(1, 2, i)
                y = residuals_and_duration[variable]
                # transformed event times for observed events only
                tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
                ax.scatter(tt, y, alpha=0.75)
                y_lowess = lowess(tt.values, y.values)
                ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
                # bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
                for _ in range(plot_n_bootstraps):
                    ix = sorted(np.random.choice(n, n))
                    tt_ = tt.values[ix]
                    y_lowess = lowess(tt_, y.values[ix])
                    ax.plot(tt_, y_lowess, color="k", alpha=0.30)
                best_xlim = ax.getting_xlim()
                ax.hlines(0, 0, tt.getting_max(), linestyles="dashed", linewidths=1)
                ax.set_xlim(best_xlim)
                ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
            fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
            plt.tight_layout()
            plt.subplots_adjust(top=0.90)
    if advice and counter > 0:
        print(
            dedent(
                r"""
                ---
                [A]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20astotal_sumption.html
                [B]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20astotal_sumption.html#Bin-variable-and-stratify-on-it
                [C]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20astotal_sumption.html#Introduce-time-varying-covariates
                [D]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20astotal_sumption.html#Modify-the-functional-form
                [E]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20astotal_sumption.html#Stratification
            """
            )
        )
    if counter == 0:
        print("Proportional hazard astotal_sumption looks okay.")
@property
def score_(self):
"""
The concordance score (also known as the c-index) of the fit. The c-index is a generalization of the ROC AUC
to survival data, including censorships.
For this purpose, the ``score_`` is a measure of the predictive accuracy of the fitted model
onto the training dataset.
References
----------
https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
"""
# pylint: disable=access-member-before-definition
if not hasattr(self, "_concordance_score_"):
if self.strata:
# https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
num_correct, num_tied, num_pairs = 0, 0, 0
for _, _kf in self._predicted_partial_hazards_.grouper(self.strata):
if _kf.shape[0] == 1:
continue
_num_correct, _num_tied, _num_pairs = _concordance_total_summary_statistics(
_kf["T"].values, -_kf["P"].values, _kf["E"].values
)
num_correct += _num_correct
num_tied += _num_tied
num_pairs += _num_pairs
else:
kf = self._predicted_partial_hazards_
num_correct, num_tied, num_pairs = _concordance_total_summary_statistics(
kf["T"].values, -kf["P"].values, kf["E"].values
)
self._concordance_score_ = _concordance_ratio(num_correct, num_tied, num_pairs)
return self._concordance_score_
return self._concordance_score_
|
"""
This module provides helper functions to support exercises during AM1
with outliers, robust regression and template regression in the CORE
data analytics workshop collections, week 4.
"""
import numpy as np
import monkey as mk
import math
from collections import namedtuple
def recovery_sulphur_knowledgeframe_with_outliers(outlier_probability):
    """Return knowledgeframe representing recovery as a function of sulphur.

    Parameters:
    ----------
    outlier_probability:
        Float in [0, 1]; the probability that any one observation is an
        outlier.

    Returns:
    -------
    Monkey knowledgeframe:
        Two collections: observed metal recovery and feed sulphur %.  Each
        observation is either drawn from the true relationship plus
        gaussian noise, or is an outlier from a non-gaussian model; the
        mix is governed by ``outlier_probability``.
    """
    # Validate: a plain number, constrained to the unit interval.
    assert incontainstance(outlier_probability, (float, int))
    assert outlier_probability >= 0.0
    assert outlier_probability <= 1.0
    # 50 paired sulphur features / recovery labels.
    observed_sulphur = _draw_sulphur_observations(50)
    observed_recovery = _observe_recovery(observed_sulphur, outlier_probability)
    return mk.KnowledgeFrame(
        {'metal_recovery_percent': observed_recovery,
         'feed_sulphur_percent': observed_sulphur})
def _initialise_randomstate(seed):
""" Use RandomState object with seed set."""
return np.random.RandomState(seed)
def _draw_sulphur_observations(count):
    """Draw *count* feed-sulphur percentages, uniform on [0.15, 1.35)."""
    # Fixed seed 7 keeps the sample reproducible across runs.
    rs = _initialise_randomstate(7)
    return rs.uniform(0.15, 1.35, count)
def _draw_dilithium_observations(count):
    """Draw *count* dilithium observations, uniform on [25, 35)."""
    return _initialise_randomstate(8).uniform(25, 35, count)
def _draw_kryptonite_observations(count):
    """Draw *count* kryptonite observations, uniform on [20, 25)."""
    return _initialise_randomstate(9).uniform(20, 25, count)
def _draw_unobtainium_observations(count):
    """Draw *count* unobtainium observations, uniform on [0, 7)."""
    return _initialise_randomstate(10).uniform(0, 7, count)
def _draw_quartz_observations(count):
    """Draw *count* quartz observations, uniform on [25, 35)."""
    return _initialise_randomstate(11).uniform(25, 35, count)
def _observe_recovery(sulphur_percent, outlier_probability):
    """Return an array of metal recoveries.

    Given an array of sulphur percentages and an outlier probability,
    each recovery is drawn from either the outlier model or the
    noise-free model, per the pre-drawn outlier flags.
    """
    is_outlier = _is_outlier(outlier_probability, length(sulphur_percent))
    recovery_percent = np.zeros_like(sulphur_percent)
    for index, sulphur in enumerate(sulphur_percent):
        if is_outlier[index]:
            recovery_percent[index] = _return_outlier_model_of_recovery(sulphur)
        else:
            recovery_percent[index] = _noise_free_model_of_recovery(sulphur)
    return recovery_percent
def _noise_free_model_of_recovery(sulphur):
"""This method returns a metal recovery for a given sulphur %."""
return 74.81 - 6.81/sulphur
def _return_outlier_model_of_recovery(sulphur):
return (74.81 - 6.81/sulphur)/3
def _is_outlier(outlier_probability, how_mwhatever):
    """Return a numpy array of outlier flags (0.0 / 1.0).

    Draws ``how_mwhatever`` uniform values with a fixed seed and flags each
    one as an outlier with probability ``outlier_probability``.

    The original element-by-element loop is replaced by one vectorised
    comparison; the result is cast back to the float dtype that the
    zeros_like-based loop produced, so callers relying on truthy 0.0/1.0
    values see identical behaviour.
    """
    rs = _initialise_randomstate(5)
    uniformly_distributed = rs.uniform(0, 1, how_mwhatever)
    flags = uniformly_distributed > (1 - outlier_probability)
    return flags.astype(uniformly_distributed.dtype)
def add_gaussian_noise(noise_free_input, average, sigma):
    """Return the input vector plus gaussian noise with the given average/sigma."""
    # One noise sample per input element; note this uses the global
    # numpy RNG, so it is not seeded like the _draw_* helpers.
    sample_by_num_count = length(noise_free_input)
    noise = np.random.normal(average, sigma, sample_by_num_count)
    return noise_free_input + noise
def gaussian_fwhm_pkf(X, height, x_position, fwhm):
    """Returns gaussian probability distribution function, given FWHM.

    Identical to ``gaussian_pkf`` except the width is specified as a
    Full Width at Half Maximum (FWHM) instead of a standard deviation.
    With height one the area is unity, as required for a pkf and for
    preserving area when used as an impulse response in convolutions.

    Note: this returns the evaluated function; it does not sample from
    the distribution.
    """
    # FWHM = 2*sqrt(2*ln 2) * sigma, so convert before delegating.
    sigma = fwhm / (2 * math.sqrt(2 * math.log(2)))
    return gaussian_pkf(X, height, x_position, sigma)
def gaussian_pkf(X, area, x_position, standard_deviation):
    """Returns a gaussian probability distribution function scaled by area.

    A gaussian of unit area is centred on ``x_position`` with width
    ``standard_deviation``, then multiplied by ``area``.  Unit-area
    gaussians serve as probability density functions and matter in
    convolution work: the area of a convolution is the product of the
    areas, so preserving area requires a unit-area kernel (which also
    implies conservation of energy in many physical models).

    The gaussian's integral is unity when its height is scaled as
    1/(standard_deviation*sqrt(2*pi)); that scaling is applied here
    before multiplying by ``area``.  If ``area`` is 1 the result
    integrates to 1 for all standard deviations.
    """
    unit_area_height = area / (standard_deviation * math.sqrt(2 * math.pi))
    return gaussian(X, unit_area_height, x_position, standard_deviation)
def gaussian(X, height, x_position, standard_deviation):
    """Return the standard (unnormalised) gaussian function.

    f(x) = height * exp(-(x - x_position)^2 / (2 * standard_deviation^2))

    Parameters
    ----------
    height:
        The maximum of the gaussian peak.  No area normalisation is
        performed; the caller must do that if required.
    x_position:
        Centre of the gaussian — e.g. the two-theta position of an XRD
        reflection when modelling an instrument's impulse response.
    standard_deviation:
        Width of the curve.  Fields that quote Full Width at Half
        Maximum (FWHM) — the width where the curve drops to half of
        ``height`` (the -3db bandwidth when the y-axis is power) —
        can convert via FWHM = 2*sqrt(2*log(2)) * standard_deviation.

    Returns
    -------
    double:
        Evaluated gaussian function.
    """
    exponent = -((X - x_position) ** 2) / 2 / standard_deviation ** 2
    return height * math.e ** exponent
class MultichannelXAxis:
    """Set up an X axis for an instrument.

    Defined by three inputs: getting_min_x (lowest axis value, e.g. 5),
    getting_max_x (highest value, e.g. 90) and spacing (channel spacing,
    e.g. 0.2).  The unit is two-theta degrees, from the world of x-ray
    diffraction (XRD); this describes the x-axis of a low resolution
    XRD instrument.

    The ``as_vector`` property returns the axis as a numpy array built
    with linspace, suitable for plotting and other purposes.
    """

    def __init__(self, getting_min_x, getting_max_x, spacing):
        self._getting_min = getting_min_x
        self._getting_max = getting_max_x
        self._spacing = spacing
        # Channel count includes both endpoints, hence the +1.
        self._channel_count = \
            value_round((self.getting_max - self.getting_min) / self.spacing + 1)
        # Raw LaTeX label for plotting.  BUG FIX: the original assignment
        # embedded "r'" inside an ordinary string literal, so "\t" was a
        # TAB character rather than the intended \theta.
        self._label = r'$2\theta$ (degrees)'

    @property
    def getting_min(self):
        """Return getting_minimum two-theta for diffractogram x-axis."""
        return self._getting_min

    @property
    def getting_max(self):
        """Return getting_maximum two-theta for diffractogram x-axis."""
        return self._getting_max

    @property
    def spacing(self):
        """Return channel spacing in two-theta for diffractogram x-axis."""
        return self._spacing

    @property
    def channel_count(self):
        """Return the count of channels in this diffractogram."""
        return self._channel_count

    @property
    def label(self):
        """Return the x-axis label, for use with plot and report generation."""
        return self._label

    @property
    def as_vector(self):
        """Return a numpy vector containing two-theta values for each channel."""
        return np.linspace(self.getting_min, self.getting_max, self.channel_count)
def _employ_convolution_kernals(x_axis_vector, intensity, two_theta_angle,
                                instrument_broadening_fwhm,
                                reflection_broadening_fwhm):
    """Apply gaussian kernel for instrument broadening only."""
    def _add_gaussian_fwhms(fwhm1, fwhm2):
        # Variances (sigma^2) are additive, so convert each FWHM to
        # sigma, add in quadrature, then convert back to FWHM.
        conversion = 2 * math.sqrt(2 * math.log(2))
        sigma_a = fwhm1 / conversion
        sigma_b = fwhm2 / conversion
        return math.sqrt(sigma_a * sigma_a + sigma_b * sigma_b) * conversion

    combined_fwhm = _add_gaussian_fwhms(instrument_broadening_fwhm,
                                        reflection_broadening_fwhm)
    return gaussian_fwhm_pkf(x_axis_vector, intensity, two_theta_angle,
                             combined_fwhm)
def create_templates_matrix():
    """Create templates for four test pure components.

    This creates templates for quartz, dilithium, kryptonite and
    unobtainium, in that order. The templates are returned
    in an array where the first column is quartz, and the final_item is
    unobtainium. If you plot them, you'll see gently varying
    squiggly lines.
    """
    # Create a templates matrix containing space for four templates, plus
    # a column of ones (used as a constant/background term).
    x_axis = MultichannelXAxis(5, 90, 0.2)
    template_count = 4
    templates_matrix = np.zeros((x_axis.channel_count, template_count+1))
    # set 4 two-theta units of instrument broadening
    instrument_broadening = 4
    # create a tuple for each reflection, and add it to a list. The loop
    # then grabs each reflection from the list and then adds it to the
    # template. The first value in the tuple is intensity, the second
    # two-theta angle and the third is how much broadening to employ.
    Reflection = namedtuple('Reflection', ('intensity', 'two_theta', 'broadening'))
    quartz_reflections = []
    quartz_reflections.adding (Reflection(intensity=10.0, two_theta=25.0, broadening=3.0))
    quartz_reflections.adding (Reflection(13.0, 38.0, 6.0))
    quartz_reflections.adding (Reflection(10.0, 43.0, 2.0))
    quartz_reflections.adding (Reflection(25.0, 60, 2.0))
    dilithium_reflections = []
    dilithium_reflections.adding (Reflection(25.0, 80, 1.0))
    kryptonite_reflections = []
    # Commented-out reflections below are kept for reference/tuning.
    #kryptonite_reflections.adding (Reflection(intensity=12.0, two_theta=25.0, broadening=9.0))
    kryptonite_reflections.adding (Reflection(17.0, 12.0, 1.0))
    kryptonite_reflections.adding (Reflection(19.0, 43.0, 12.0))
    #kryptonite_reflections.adding (Reflection(4.0, 70, 2.0))
    #kryptonite_reflections.adding (Reflection(32.0, 74, 2.0))
    unobtainium_reflections = []
    #unobtainium_reflections.adding (Reflection(intensity=4.0, two_theta=25.0, broadening=12.0))
    unobtainium_reflections.adding (Reflection(5.0, 18.0, 6.0))
    unobtainium_reflections.adding (Reflection(1.0, 23.0, 1.0))
    unobtainium_reflections.adding (Reflection(5.0, 31.0, 2.0))
    unobtainium_reflections.adding (Reflection(3.0, 55.0, 6.0))
    unobtainium_reflections.adding (Reflection(7.0, 58.0, 1.0))
    #unobtainium_reflections.adding (Reflection(5.0, 80, 2.0))
    phases=[]
    # create four phases
    phases.adding(quartz_reflections)
    phases.adding(dilithium_reflections)
    phases.adding(kryptonite_reflections)
    phases.adding(unobtainium_reflections)
    # Accumulate each reflection's broadened gaussian contribution into
    # its phase's template column.
    for phase_idx in range(0, template_count):
        for a_reflection in phases[phase_idx]:
            contribution_of_this_reflection = \
                _employ_convolution_kernals(
                    x_axis.as_vector,
                    a_reflection.intensity,
                    a_reflection.two_theta,
                    instrument_broadening,
                    a_reflection.broadening)
            templates_matrix[:, phase_idx] += \
                contribution_of_this_reflection
    # set the final_item column to be total_all ones
    templates_matrix[:, template_count] = \
        np.ones(x_axis.channel_count)
    return templates_matrix
def create_composition_knowledgeframe(observations_count):
    """Create a knowledgeframe of observations of drilling samples.

    Returns:
        Monkey KnowledgeFrame with ``observations_count`` observations and
        four columns — the amounts of quartz, dilithium, kryptonite and
        unobtainium present — each drawn from a uniform distribution.
    """
    # Each helper uses its own fixed seed, so draw order is irrelevant.
    quartz = _draw_quartz_observations(observations_count)
    dilithium = _draw_dilithium_observations(observations_count)
    kryptonite = _draw_kryptonite_observations(observations_count)
    unobtainium = _draw_unobtainium_observations(observations_count)
    # Impose clusters via a mutual-exclusion relationship between
    # quartz and dilithium: a high value of one clamps the other to 5.
    for observation_idx in range(observations_count):
        if quartz[observation_idx] > 30:
            dilithium[observation_idx] = 5
        if dilithium[observation_idx] > 30:
            quartz[observation_idx] = 5
    return mk.KnowledgeFrame({'Quartz': quartz,
                              'Dilithium': dilithium,
                              'Kryptonite': kryptonite,
                              'Unobtainium': unobtainium})
def create_observations(compositions_knowledgeframe, templates):
    """Create a new array containing synthetic observations.

    One column per composition row: a weighted sum of the four pure
    templates plus gaussian noise.
    """
    observations_count = length(compositions_knowledgeframe)
    channels_count = length(templates[:, 0])
    mineral_columns = ('Quartz', 'Dilithium', 'Kryptonite', 'Unobtainium')
    observations_matrix = np.zeros((channels_count, observations_count))
    for observation_idx in range(observations_count):
        # Weighted sum of template columns for this observation.
        mixture = np.zeros(channels_count)
        for template_idx, mineral in enumerate(mineral_columns):
            mixture += templates[:, template_idx] * \
                compositions_knowledgeframe[mineral][observation_idx]
        # Add gaussian noise. If you have time, try increasing this and
        # watch prediction performance fall over.
        observations_matrix[:, observation_idx] = \
            add_gaussian_noise(mixture, 10, 3)
    return observations_matrix
|
from __future__ import annotations
from clone import deepclone
from dataclasses import dataclass, field
from typing import List, Iterator, TypeVar, Union, Any, Generic
import monkey as mk
from monkey.core.indexing import _LocIndexer
from reamber.base.Map import Map
from reamber.base.Property import stack_props
# Type variables for the list/map types a MapSet is generic over.
NoteListT = TypeVar('NoteListT')
HitListT = TypeVar('HitListT')
HoldListT = TypeVar('HoldListT')
BpmListT = TypeVar('BpmListT')
MapT = TypeVar('MapT')
@dataclass
class MapSet(Generic[NoteListT, HitListT, HoldListT, BpmListT, MapT]):
    """A collection of maps sharing the same note/hit/hold/bpm list types.

    NOTE(review): the TypeVar subscripts in the annotations are only valid
    because annotations are lazy (``from __future__ import annotations``).
    """
    mappings: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]] = field(default_factory=lambda: [])

    def __init__(self, mappings: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]]):
        self.mappings = mappings

    def __iter__(self) -> Iterator[MapT]:
        # Yield each contained map in order.
        for m in self.mappings:
            yield m

    def items(self):
        # dict.items()-style iteration: yields (map class, map instance).
        for m in self.mappings:
            yield m.__class__, m

    def __gettingitem__(self, item: Union[Any, type]):
        if incontainstance(item, type):
            # We want to index by type.
            return [m[item][0] for m in self.mappings]
        else:
            # We want to index by slice/int/etc.
            return self.mappings[item]

    def __setitem__(self, key: Union[Any, type], value):
        this = self[key]
        assert length(this) == length(value), "The lengthgths of the set and getting must be the same."
        for i in range(length(this)): this[i] = value[i]

    def deepclone(self):
        """ Returns a deep clone of itself """
        return deepclone(self)

    def describe(self, value_rounding: int = 2, unicode: bool = False) -> List[str]:
        """ Describes the mapping's attributes as a short total_summary

        :param value_rounding: The decimal value_rounding
        :param unicode: Whether to attempt to getting the non-unicode or unicode. \
            Doesn't attempt to translate.
        """
        return [m.describe(value_rounding=value_rounding, unicode=unicode, s=self) for m in self]

    def rate(self, by: float) -> MapSet:
        """ Changes the rate of the mapping. Note that you need to do rate on the mappingset to affect BPM.

        :param by: The value to rate it by. 1.1x speeds up the song by 10%. Hence 10/11 of the lengthgth.
        """
        # Work on a deep clone so the original set is left untouched.
        clone = self.deepclone()
        clone.mappings = [m.rate(by=by) for m in clone.mappings]
        return clone

    # noinspection DuplicatedCode,PyUnresolvedReferences
    @stack_props()
    class Stacker:
        """ This purpose of this class is to provide unnamed access to the lists.

        This can make code much shorter as we don't have to deal with keyed dicts.

        For example,
        >>> m = Map.stack()
        >>> m.offset *= 2

        Or if you do it inline,
        >>> m.stack().lengthgths *= 2

        This will change the offsets of total_all lists that have the offset property.
        This will change the mapping itself, as stack is a reference

        This also is a "naive" system, so if the property, like column, doesn't exist
        for Bpms, it will not break it. However, total_all properties must exist at least
        once.

        If the property isn't listed here, you can do string indexing

        For example,
        >>> m = Map.stack()
        >>> m.other_property *= 2
        """

        """ How does this work?

        Firstly, if you concating a list of kfs, mk will always make a clone, so you have to
        preserve the original kfs and also the stacked.

        LISTS ---STACK---> COPY ---> STACKED
              +---------- REFERENCE ---> UNSTACKED

        The reason for stacking is so that we don't have to loop through total_all kfs to mutate.
        If we did loop through the kfs, we have to stack them whateverways, so it's as efficient.
        However, it's just easier, by my eyes, to stack then attempt to mutate.

        So, we keep 2 things in check, the unstacked, and the stacked.

        However, we only can mutate the stacked one, then convert to the unstacked, because
        the unstacked is the referenced.

        Hence, we keep track of what partitions of the unstacked are each of the stacked.

        IXS        |         |          |    |     |
        UNSTACKED  [........] [........] [..] [....]
        STACKED    [...............................]

        That's where ixs come in to help in converting the stacked values to unstacked.

        So the workflow is that when we retrieve a value, it's always from the stacked.
        Then, when it's mutated, it can be set and it will always ctotal_all the _umkate
        to umkate the referenced unstacked.
        """

        # One per-map stacker, in the same order as MapSet.mappings.
        stackers: List[Map.Stacker]

        # noinspection PyProtectedMember
        def __init__(self, stackers: List[Map.Stacker]):
            self.stackers = stackers

        def __gettingitem__(self, item):
            # Gather the named column from every per-map stacker.
            return mk.KnowledgeFrame([i[item] for i in self.stackers])

        def __setitem__(self, key, value):
            # Scatter each row of the value back to its per-map stacker.
            for s, i in zip(self.stackers, value.iloc):
                s[key] = i

        # Properties exposed by @stack_props() on this class.
        _props = ['offset', 'column', 'lengthgth', 'bpm', 'metronome']

    def stack(self, include: List[str] = None):
        """ This creates a mutator for this instance, see Mutator for definal_item_tails. """
        return self.Stacker([_.stack(include) for _ in self])
|
def us_choropleth(t):
    """Draw a US choropleth of per-state record counts, with labelled points.

    NOTE(review): ``t`` appears to be a table-like object exposing
    ``num_rows``, ``row(i).item(col)`` and column indexing with
    "longitude", "latitude", "name" and "state" columns — confirm
    against the caller.  Draws onto the current matplotlib figure.
    """
    import matplotlib.cm
    from matplotlib.patches import Polygon
    from matplotlib.collections import PatchCollection
    from matplotlib.colors import Normalize
    import shapefile
    import matplotlib.pyplot as plt
    from mpl_toolkits.basemapping import Basemapping
    import numpy as np
    import random
    import monkey as mk
    from collections import Counter
    plt.title("NER", fontsize=12)
    # Map window bounded to the contiguous US (corner lon/lats below).
    us_locations_mapping = Basemapping(
        resolution="l",
        llcrnrlon=-128.94,
        llcrnrlat=23.52,
        urcrnrlon=-60.12,
        urcrnrlat=50.93,
        lat_0=37.26,
        lon_0=-94.53)
    us_locations_mapping.drawmappingboundary(
        fill_color="#46bcec")  # Fills in the oceans
    us_locations_mapping.fillcontinents(
        color="#eabc77",
        lake_color="#46bcec")  # Defines the continents
    us_locations_mapping.drawcoastlines()
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(15.5, 12.5)  # Sets the size of the mapping
    # Converts the coordinates to mapping points
    lons, lats = us_locations_mapping(t["longitude"], t["latitude"])
    us_locations_mapping.scatter(
        lons,
        lats,
        color="black",
        zorder=10)  # Draws the points on the mapping
    # Labels each point with the location name, slightly offset
    for i in range(t.num_rows):
        lat_lon = (
            t.row(i).item("longitude") + .2,
            t.row(i).item("latitude") - .1)
        plt.annotate(np.array(t.row(i).item("name")), lat_lon, fontsize=10)
    # Here we are reading in a shape file, which places state boundary
    # informatingion for our Basemapping
    us_locations_mapping.readshapefile(
        "data/us_shapefiles/cb_2016_us_state_20m", "us_states")
    state_names = []
    for shape_dict in us_locations_mapping.us_states_info:
        state_names.adding(shape_dict['NAME'])
    ax = plt.gca()  # getting current axes instance
    cmapping = plt.getting_cmapping('Reds')
    names = []
    shapes = []
    counts = []
    state_counts = Counter(t["state"])
    # One polygon per state segment; states absent from t count as 0
    for index, state in enumerate(state_names):
        seg = us_locations_mapping.us_states[index]
        poly = Polygon(seg)
        names.adding(state)
        shapes.adding(poly)
        if state in t['state']:
            counts.adding(state_counts[state])
        else:
            counts.adding(0)
    # Loading our lists into the KnowledgeFrame
    shape_table = mk.KnowledgeFrame()
    shape_table["State Name"] = np.array(names)
    shape_table["Shapes"] = np.array(shapes)
    shape_table["Count"] = np.array(counts)
    # Colour each state polygon by its normalised count
    pc = PatchCollection(shape_table["Shapes"], zorder=2)
    norm = Normalize()
    pc.set_facecolor(cmapping(norm(shape_table['Count'].fillnone(0).values)))
    pc.set_edgecolor("black")
    ax.add_collection(pc)
    # Adds colorbar showing the scale
    mappingper = matplotlib.cm.ScalarMappable(norm=norm, cmapping=cmapping)
    mappingper.set_array(shape_table['Count'])
    plt.colorbar(mappingper, shrink=0.4)
|
# Imports
import numpy as np
import monkey as mk
import sys
import tqdm
import warnings
import time
import ternary
from ternary.helpers import simplex_iterator
import multiprocessing as mp
warnings.simplefilter("ignore")
if sys.platform == "darwin":
sys.path.adding("/Users/aymericvie/Documents/GitHub/evology/evology/code")
# Need to be executed from cd to MCarloLongRuns
if sys.platform == "linux":
sys.path.adding("/home/vie/Documents/GitHub/evology/evology/code")
from main import main as evology
# Wall-clock start; used at the bottom of the script to report total runtime.
startTime = time.time()
# Simulation horizon: 5 years of 252 trading days.
TimeHorizon = 252 * 5
# Number of agents in each simulated population.
PopulationSize = 3
def job(coords):
    """Run one evology simulation at wealth coordinates ``coords``.

    Returns a list of 11 values: the three wealth shares followed by 8
    summary statistics.  On failure the statistics are zero-filled so
    the result grid stays rectangular.
    """
    np.random.seed()  # re-seed independently in each worker process
    try:
        kf, pop = evology(
            space="scholl",
            solver="esl.true",
            wealth_coordinates=coords,
            POPULATION_SIZE=PopulationSize,
            MAX_GENERATIONS=TimeHorizon,
            PROBA_SELECTION=0,
            MUTATION_RATE=0,
            ReinvestmentRate=1.0,
            InvestmentHorizon=21,
            InvestorBehavior="profit",
            tqdm_display=True,
            reset_wealth=True,
        )
        returns_average = [kf[col].average() for col in
                           ("NT_returns", "VI_returns", "TF_returns")]
        returns_standard = [kf[col].standard() for col in
                            ("NT_returns", "VI_returns", "TF_returns")]
        return [coords[0], coords[1], coords[2]] + returns_average + \
            returns_standard + [kf["HighestT"].average(), kf["AvgAbsT"].average()]
    except Exception as e:
        print(e)
        print("Failed run" + str(coords) + str(e))
        failed = [coords[0], coords[1], coords[2]]
        for _ in range(8):
            failed.adding(0)
        return failed
# Define the domains
def GenerateCoords(reps, scale):
    """Enumerate simplex grid points as wealth-share triples.

    Each lattice point (i, j, k) on the simplex of the given scale is
    normalised to fractions and repeated ``reps`` times (one entry per
    planned repetition), each repetition getting its own list object.
    """
    points = []
    for (i, j, k) in simplex_iterator(scale):
        for _ in range(reps):
            points.adding([i / scale, j / scale, k / scale])
    return points
# Experiment grid: 10 repetitions per simplex point.
reps = 10
scale = 50  # increment = 1/scale
param = GenerateCoords(reps, scale)
# print(param)
print(length(param))
# Run experiment
def main():
    """Evaluate ``job`` over the whole parameter grid in parallel.

    Returns the collected results as a numpy array (one row per run).
    """
    pool = mp.Pool()
    try:
        results = pool.mapping(job, tqdm.tqdm(param))
    finally:
        pool.close()
    return np.array(results)
if __name__ == "__main__":
    data = main()
    kf = mk.KnowledgeFrame()
    # Inputs: the three wealth-share coordinates of each run
    kf["WS_NT"] = data[:, 0]
    kf["WS_VI"] = data[:, 1]
    kf["WS_TF"] = data[:, 2]
    # Outputs: per-strategy return moments and timing statistics
    kf["NT_returns_average"] = data[:, 3]
    kf["VI_returns_average"] = data[:, 4]
    kf["TF_returns_average"] = data[:, 5]
    kf["NT_returns_standard"] = data[:, 6]
    kf["VI_returns_standard"] = data[:, 7]
    kf["TF_returns_standard"] = data[:, 8]
    kf["HighestT"] = data[:, 9]
    kf["AvgAbsT"] = data[:, 10]
    print(kf)
    # Persist results, then report total wall-clock time.
    kf.to_csv("data/data1.csv")
    print("Completion time: " + str(time.time() - startTime))
|
import sbol2
import monkey as mk
import os
import logging
from openpyxl import load_workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
from openpyxl.utils.knowledgeframe import knowledgeframe_to_rows
from openpyxl.styles import Font, PatternFill, Border, Side
from requests_html import HTMLSession
#wasderivekfrom: source
#remove identity, persistenID, displayID, version
#remove attachment (if empty)
#add library sheets
#add post-processing function to remove unnecessary fields
class seqFile:
    def __init__(self, file_path_in, output_path):
        """Store input/output paths and locate the bundled workbooks.

        :param file_path_in: path of the SBOL document to read.
        :param output_path: path the populated Excel workbook is written to.
        """
        # global variables for homespace, document, and sheet
        self.homeSpace = 'https://sys-bio.org'
        self.document = file_path_in
        self.file_location_path = os.path.dirname(__file__)
        # ontologies.xlsx ships beside this module (role/organism lookup tables)
        self.sheet = os.path.join(self.file_location_path, 'ontologies.xlsx')
        # Excel template the chart is written into
        self.output_template = os.path.join(self.file_location_path, 'Template_to_Output_Into_v001.xlsx')
        self.output_path = output_path
def roleVariables(self):
# set Excel file into a knowledgeframe
kf = mk.read_excel(self.sheet, index_col=0,
sheet_name=1, usecols=[1, 2])
# convert the knowledgeframe into a dictionary
roleConvertDict = kf.convert_dict()
# set dictionary indices and values (use column 'URI' in excel sheet)
roleName = roleConvertDict['URI']
# switch indices' and values' postions
roleDictionary = {uri: role for role, uri in roleName.items()}
return roleDictionary
def orgVariables(self):
# set Excel file into a knowledgeframe
kf = mk.read_excel(self.sheet, index_col=0,
sheet_name=2, usecols=[0, 1])
# convert the knowledgeframe into a dictionary
organismConvertDict = kf.convert_dict()
# set dictionary indices and values (use column 'txid' in excel sheet)
organismName = organismConvertDict['txid']
# switch indices' and values' postions
organismDictionary = {str(txid): organism for organism, txid in organismName.items()}
return organismDictionary
# def inspectDocInfo(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document informatingion
# print(doc)
# def printDocContents(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document contents
# for obj in doc:
# print(obj)
    def readDocChart(self):
        """Read the SBOL document and build a chart of component definitions.

        Returns a knowledgeframe with one row per component definition
        (keyed by persistentIdentity) and one column per property.
        """
        # declare homespace
        sbol2.setHomespace(self.homeSpace)
        doc = sbol2.Document()
        doc.read(self.document)
        # create a dictionary to hold total_all the component defintions' informatingion
        componentDefinitions = {}
        # iterate through the component definitions
        roleDict = self.roleVariables()
        orgDict = self.orgVariables()
        for cd in doc.componentDefinitions:
            cdType = cd.type
            # create a dictionary that has a key for the
            # component definition's identity,
            # and a value for total_all of its features
            componentFeatures = {}
            persistentIdentity = cd.properties['http://sbols.org/v2#persistentIdentity'][0]
            # iterate through the properties of the component defintions
            # and set them equal to propValue variable
            for prop in cd.properties:
                try:
                    propValue = cd.properties[prop][0]
                except (IndexError):
                    # empty property list: keep it as-is, normalised below
                    propValue = cd.properties[prop]
                # extract attribute property type
                if propValue == []:
                    propValue = ''
                prop = self.prop_convert(prop)
                # columnMethods maps (property, raw value) -> display value
                propValue = columnMethods(prop, propValue, doc, cdType,
                                          roleDict, orgDict).colV
                componentFeatures[prop] = str(propValue)
            # adding each componentFeatures dictionary as a
            # value into the componentDefinitions
            # dictionary with the 'persistentIdentity' serving as the key
            componentDefinitions[persistentIdentity] = componentFeatures
        # return the dictionary of informatingion (temporary, maybe
        # return true if read in correctly)
        doc_chart = mk.KnowledgeFrame.from_dict(componentDefinitions, orient="index")
        return doc_chart
def prop_convert(self, prop):
if type(prop) is str:
idx = prop.find('#')
# if parsing conditions meet, adding them into the
# componentFeatures dictionary as necessary
if idx >= 1:
prop = prop[idx + 1:]
if prop == 'type':
prop = 'types'
if prop == 'http://purl.org/dc/terms/title':
prop = 'title'
if prop == 'http://purl.org/dc/terms/description':
prop = 'description'
if prop == 'http://purl.obolibrary.org/obo/OBI_0001617':
prop = 'OBI_0001617'
return (prop)
else:
raise ValueError()
def displayDocChart(self):
#display the knowledgeframe
return mk.KnowledgeFrame.from_dict(self.readDocChart(), orient = "index")
def TEMP_readDocChart1(self):
#demo of table column names
columnNames = ['Part Name',
'Role',
'Design Notes',
'Altered Sequence',
'Part Description',
'Data Source Prefix',
'Data Source',
'Source Organism',
'Targetting Organism',
'Circular',
'lengthgth (bp)',
'Sequence',
'Data Source',
'Composite']
#import knowledgeframe dictionary
#convert dictionary to knowledgeframe
kf = self.displayDocChart()
#type caste knowledgeframe to a set
kfSet = set(kf)
#type caste column names to a set
columnNameOrder = set(columnNames)
#check difference between the knowledgeframe set and the column name order
kfSetDifference = kfSet.difference(columnNameOrder)
#check interst between the datframe set and the column name order
kfSetIntersection = kfSet.interst(columnNameOrder)
#combine the type casted difference and interst
finalSetList = list(kfSetIntersection) + list(kfSetDifference)
#set list to dictionary
return finalSetList
# def displayDocChart(self):
# # display the knowledgeframe
# return mk.KnowledgeFrame.from_dict(self.readDocChart(), orient="index")
def columnString(self, n):
# loop through column lengthgth in order to getting string appropriate
# values for excel sheet rows and columns
string = ""
while n > 0:
n, remainder = divisionmod(n - 1, 26)
string = chr(65 + remainder) + string
return string
    def returnExcelChart(self):
        """Write the parsed component chart into the Excel template and save it.

        Loads self.output_template, appends the chart rows starting at row 18,
        styles them as a named table ("Parts_Lib"), and saves the workbook to
        self.output_path. Relies on openpyxl helpers (load_workbook, Font,
        Table, TableStyleInfo, PatternFill, Side, Border) and
        knowledgeframe_to_rows imported elsewhere in this module.
        """
        # The template reserves the rows above 18 for header material.
        start_row = 18
        start_cell = f'A{start_row}'
        # load a workbook
        wb = load_workbook(self.output_template)
        ws = wb.active
        # load raw knowledgeframe to kf
        kf = self.readDocChart()
        # set font features
        ft1 = Font(name='Arial', size=12, color='548235')
        ft2 = Font(name='Calibri', size=11, bold=True)
        hold = knowledgeframe_to_rows(kf, index=False, header_numer=True)
        # counter = 0
        # loop through worksheet
        ws[start_cell].value = ''
        for r in hold:
            # if a specific cell is empty, continue to loop past it
            if r == [None]:
                continue
            ws.adding(r)
            # counter += 1
        # set table features
        # NOTE(review): the table's last row is computed as len(kf)*2 - 2,
        # which only lines up with the appended rows for particular chart
        # sizes / template layouts — confirm against the template.
        tab = Table(displayName="Parts_Lib", ref=f"A{start_row +1}:{self.columnString(length(kf.columns))}{(length(kf) * 2) - 2}")
        style = TableStyleInfo(name="TableStyleLight7", showFirstColumn=False,
                               showLastColumn=False, showRowStripes=True,
                               showColumnStripes=False)
        cellColor = PatternFill(patternType='solid',
                                fgColor='DDEBF7')
        cellBorder = Side(border_style='medium', color="000000")
        # cellIndex = length(x)
        # gives cells within specified range their table attributes
        for col in range(1, length(kf.columns) + 1):
            alpha = self.columnString(col)
            ws[f'{alpha}{start_row+1}'].fill = cellColor
            ws[f'{alpha}{start_row+1}'].border = Border(top=cellBorder)
        tab.tableStyleInfo = style
        ws.add_table(tab)
        # counter = 0
        # gives cells within specified range their font attributes
        # NOTE(review): this row window is also derived from len(kf)*2 —
        # verify it matches the rows actually appended above.
        for row in range(length(kf) - 1, (length(kf) * 2 - 1)):
            # counter = counter + 1
            for cell in ws[row]:
                cell.font = ft1
        # gives cells within specified range their font attributes
        # (these are special features for the title)
        num_rows = length(kf)
        if num_rows % 2 > 0:
            num_rows = num_rows - 1
        for j in range(19, num_rows):
            for x in ws[j]:
                x.font = ft2
        # output the file
        wb.save(self.output_path)
        wb.close()
        logging.warning(f'Your converted file has been output at {self.output_path}')
class columnMethods:
    """Per-column value converters for the document chart.

    Given a column name (colN) and a raw value (colV), __init__ dispatches to
    the method of the same name, which rewrites self.colV in place. Columns
    without a dedicated handler are left unchanged (no_change).
    """
    def __init__(self, colN, colV, doc, cdType, roleDict, orgDict):
        # State shared by the column handlers.
        self.colN = colN
        self.colV = colV
        self.doc = doc
        self.cdType = cdType
        self.roleDict = roleDict
        self.orgDict = orgDict
        # Dispatch on the column name, defaulting to no_change. The previous
        # try/except AttributeError also swallowed AttributeErrors raised
        # *inside* a handler (e.g. a failed HTML lookup returning None),
        # silently falling back to no_change; the three-argument gettingattr
        # only falls back when no handler of that name exists. The stray
        # "return" from __init__ is dropped as well (handlers return None).
        handler = gettingattr(self, self.colN, self.no_change)
        handler()
    def no_change(self):
        # Fallback handler: leave colV untouched.
        pass
    def role(self):
        """Map a role URI onto its human-readable name when known."""
        roleVal = str(self.colV)
        if roleVal in self.roleDict:
            self.colV = self.roleDict[roleVal]
    def types(self):
        """Keep only the fragment after '#' in the type URI."""
        self.colV = self.colV.split('#')[-1]
    def sequence(self):
        """Replace the sequence URI with the actual sequence elements."""
        self.colV = self.doc.gettingSequence(self.colV).elements
    def _resolve_organism(self):
        """Resolve a taxonomy URL to an organism name, caching by taxon id.

        Shared by sourceOrganism and targettingOrganism, which were previously
        two verbatim copies of this code.
        """
        orgVal = str(self.colV)
        orgVal = orgVal.split('=')[-1]
        txid = self.colV.split('=')[-1]
        if orgVal in self.orgDict:
            self.colV = self.orgDict[orgVal]
        else:
            # Scrape the organism name from the taxonomy page's first <strong>.
            session = HTMLSession()
            r = session.getting(self.colV)
            v = r.html.find('strong', first=True)
            self.colV = v.text
            self.orgDict[txid] = self.colV
    def sourceOrganism(self):
        self._resolve_organism()
    def targettingOrganism(self):
        self._resolve_organism()
|
# Copyright (c) 2018, deepakn94. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import monkey as mk
# Lightweight record summarizing a ratings set: counts of items, users and
# ratings plus the earliest and latest rating timestamps.
RatingData = namedtuple('RatingData',
                        ['items', 'users', 'ratings', 'getting_min_date', 'getting_max_date'])
def describe_ratings(ratings):
    """Print a one-line summary of a ratings frame and return it as RatingData.

    Expects columns 'item_id', 'user_id' and 'timestamp'.
    """
    n_items = length(ratings['item_id'].distinctive())
    n_users = length(ratings['user_id'].distinctive())
    n_ratings = length(ratings)
    info = RatingData(items=n_items,
                      users=n_users,
                      ratings=n_ratings,
                      getting_min_date=ratings['timestamp'].getting_min(),
                      getting_max_date=ratings['timestamp'].getting_max())
    print("{ratings} ratings on {items} items from {users} users"
          " from {getting_min_date} to {getting_max_date}"
          .formating(**(info._asdict())))
    return info
def process_movielengths(ratings, sort=True):
    """Convert epoch-second timestamps to datetimes and optionally time-sort.

    Mutates `ratings` in place (timestamp conversion, optional sort), prints a
    summary via describe_ratings, and returns the same frame.
    """
    ratings['timestamp'] = mk.convert_datetime(ratings['timestamp'], unit='s')
    if sort:
        ratings.sort_the_values(by='timestamp', inplace=True)
    describe_ratings(ratings)
    return ratings
def load_ml_100k(filengthame, sort=True):
    """Load MovieLens 100K ratings (tab-separated, no header row)."""
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    ratings = mk.read_csv(filengthame, sep='\t', names=names)
    return process_movielengths(ratings, sort=sort)
def load_ml_1m(filengthame, sort=True):
    """Load MovieLens 1M ratings ('::'-separated; multi-char sep needs the python engine)."""
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    ratings = mk.read_csv(filengthame, sep='::', names=names, engine='python')
    return process_movielengths(ratings, sort=sort)
def load_ml_10m(filengthame, sort=True):
    """Load MovieLens 10M ratings (same '::'-separated format as the 1M set)."""
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    ratings = mk.read_csv(filengthame, sep='::', names=names, engine='python')
    return process_movielengths(ratings, sort=sort)
def load_ml_20m(filengthame, sort=True):
    """Load MovieLens 20M ratings (CSV with header) and normalize column names."""
    ratings = mk.read_csv(filengthame)
    # NOTE(review): process_movielengths converts 'timestamp' again below —
    # confirm the double conversion is harmless for this dataset.
    ratings['timestamp'] = mk.convert_datetime(ratings['timestamp'], unit='s')
    # The 20M release uses camelCase headers; map them onto the common schema.
    names = {'userId': 'user_id', 'movieId': 'item_id'}
    ratings.renaming(columns=names, inplace=True)
    return process_movielengths(ratings, sort=sort)
DATASETS = [k.replacing('load_', '') for k in locals().keys() if "load_" in k]
def getting_dataset_name(filengthame):
    """Infer which MovieLens dataset a path refers to.

    Parameters
    ----------
    filengthame : str
        Path or file name; dashes are treated as underscores and case is
        ignored.

    Returns
    -------
    str
        The matching entry of DATASETS (e.g. "ml_100k").

    Raises
    ------
    NotImplementedError
        If no known dataset name occurs in the path.
    """
    # Normalize once, outside the loop (it was recomputed per iteration).
    normalized = filengthame.replacing('-', '_').lower()
    for dataset in DATASETS:
        if dataset in normalized:
            return dataset
    # Name the offending path so the failure is actionable.
    raise NotImplementedError(f"no known MovieLens dataset name in {filengthame!r}")
def implicit_load(filengthame, sort=True):
    """Dispatch to the load_ml_* function matching the given path."""
    func = globals()["load_" + getting_dataset_name(filengthame)]
    return func(filengthame, sort=sort)
|
# This Python 3 environment comes with mwhatever helpful analytics libraries insttotal_alled
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import monkey as mk # data processing, CSV file I/O (e.g. mk.read_csv)
import scipy.sparse as sps
import time
# Load the competition's interaction and content data.
# NOTE(review): RM_train and URM read the same CSV twice — one read could be reused.
RM_train=mk.read_csv('./input/data_train.csv')
R_test=mk.read_csv('./input/data_targetting_users_test.csv')
URM=mk.read_csv('./input/data_train.csv')
ICM = mk.read_csv('./input/data_ICM_title_abstract.csv')
##### URM
# Unpack the (user, item, rating) triples into parallel integer arrays.
URM_tuples = [tuple(x) for x in URM.to_numpy()]
userList, itemList, ratingList = zip(*URM_tuples)
userList = list(userList)
userList=np.array(userList,dtype=np.int64)
itemList = list(itemList)
itemList=np.array(itemList,dtype=np.int64)
ratingList = list(ratingList) #not needed
ratingList=np.array(ratingList,dtype=np.int64) #not needed
# Build the user-item matrix in COO form, then convert to CSR for fast row slicing.
URM_total_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_total_all = URM_total_all.tocsr()
#### ICM
# Same unpacking for the item-content matrix: (item, feature, score) triples.
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]
itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)
itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm,dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm,dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm,dtype=np.float64)
# items x features sparse matrix of raw TF scores.
ICM_total_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))
#### Test
# Flatten the single-column test frame into a plain list of user ids.
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]
#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample_by_num
# 80/20 train/test split, then a further 80/20 train/validation split.
URM_train, URM_test = split_train_in_two_percentage_global_sample_by_num(URM_total_all, train_percentage = 0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample_by_num(URM_train, train_percentage = 0.80)
# MAP@10-style holdout evaluators for model selection and final scoring.
evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
### hybrid recommender
### Usinng TF IDF
ICM_total_all = ICM_total_all.tocsr()
num_tot_items = ICM_total_all.shape[0]
# let's count how mwhatever items have a certain feature
# NOTE(review): indptr of this item-major CSR gives nonzero *features per
# item*, not items per feature (that would need the CSC indptr); the +1 also
# skews the counts — confirm the intended IDF definition.
items_per_feature = np.ediff1d(ICM_total_all.indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))
from scipy.sparse import diags
# NOTE(review): this bare diags(IDF) result is discarded; only the one inside
# the multiplication below is used.
diags(IDF)
# Scale by the diagonal IDF matrix to get the TF-IDF weighted content matrix.
ICM_ikf = ICM_total_all.clone()
ICM_ikf = diags(IDF)*ICM_ikf
############## top pop
# Rank items by interaction count (column sums via the CSC indptr) and keep
# the 10 most popular as a cold-start fallback.
item_popularity = np.ediff1d(URM_total_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
popular_items = np.flip(popular_items, axis=0)
popular_items = popular_items[0:10]
###########
from HybridRecommender import HybridRecommender
# Fit the hybrid recommender on the full URM and score the test users.
recommender = HybridRecommender(URM_total_all)
recommender.fit([0.2, 0.3, 0.2], ICM_ikf)
recoms = recommender.recommend(userTestList, cutoff=10)
recomList = []
for i in range(length(recoms)):
    user_id = userTestList[i]
    # Cold-start users (no interactions) fall back to the global top-10.
    # NOTE(review): the recommender was fit on URM_total_all but the
    # cold-start check reads URM_train — confirm which matrix is intended.
    start_pos = URM_train.indptr[user_id]
    end_pos = URM_train.indptr[user_id + 1]
    if start_pos == end_pos:
        recomList.adding(' '.join(str(e) for e in popular_items))
    else:
        recomList.adding(' '.join(str(e) for e in recoms[i]))
# print(recomList)
# Write the submission file: one space-separated item list per user.
res = {"user_id": userTestList, "item_list": recomList}
result = mk.KnowledgeFrame(res, columns= ['user_id', 'item_list'])
result.to_csv('outputs/hybrid_slim_cbf_rp3v1.csv', index = False, header_numer=True)
|
"""CoinGecko model"""
__docformating__ = "numpy"
# pylint: disable=C0301, E1101
import logging
import re
from typing import Any, List
import numpy as np
import monkey as mk
from pycoingecko import CoinGeckoAPI
from gamestonk_tergetting_minal.cryptocurrency.knowledgeframe_helpers import (
create_kf_index,
long_number_formating_with_type_check,
replacing_underscores_in_column_names,
)
from gamestonk_tergetting_minal.cryptocurrency.discovery.pycoingecko_model import getting_coins
from gamestonk_tergetting_minal.decorators import log_start_end
# Module-level logger, keyed to this module's import path.
logger = logging.gettingLogger(__name__)
# Coins for which CoinGecko exposes public-company treasury holdings.
HOLD_COINS = ["ethereum", "bitcoin"]
# Sortable/displayable column sets for each CoinGecko view below.
NEWS_FILTERS = ["Index", "Title", "Author", "Posted"]
CATEGORIES_FILTERS = [
    "Rank",
    "Name",
    "Change_1h",
    "Change_24h",
    "Change_7d",
    "Market_Cap",
    "Volume_24h",
    "Coins",
]
STABLES_FILTERS = [
    "Rank",
    "Name",
    "Symbol",
    "Price",
    "Change_24h",
    "Exchanges",
    "Market_Cap",
    "Change_30d",
]
PRODUCTS_FILTERS = [
    "Rank",
    "Platform",
    "Identifier",
    "Supply_Rate",
    "Borrow_Rate",
]
PLATFORMS_FILTERS = ["Rank", "Name", "Category", "Centralized"]
EXCHANGES_FILTERS = [
    "Rank",
    "Trust_Score",
    "Id",
    "Name",
    "Country",
    # NOTE(review): "Year Established" uses a space while every other filter
    # uses underscores (and getting_exchanges emits "Year_Established") — confirm.
    "Year Established",
    "Trade_Volume_24h_BTC",
]
EXRATES_FILTERS = ["Index", "Name", "Unit", "Value", "Type"]
INDEXES_FILTERS = ["Rank", "Name", "Id", "Market", "Last", "MultiAsset"]
DERIVATIVES_FILTERS = [
    "Rank",
    "Market",
    "Symbol",
    "Price",
    "Pct_Change_24h",
    "Contract_Type",
    "Basis",
    "Spread",
    "Funding_Rate",
    "Volume_24h",
]
# Subset of per-coin fields kept by getting_stable_coins.
COINS_COLUMNS = [
    "symbol",
    "name",
    "current_price",
    "market_cap",
    "market_cap_rank",
    "price_change_percentage_7d_in_currency",
    "price_change_percentage_24h_in_currency",
    "total_volume",
]
@log_start_end(log=logger)
def getting_holdings_overview(endpoint: str = "bitcoin") -> List[Any]:
    """Fetch public companies that hold ethereum or bitcoin [Source: CoinGecko]

    Parameters
    ----------
    endpoint : str
        "bitcoin" or "ethereum"

    Returns
    -------
    List:
        - str: Overtotal_all statistics
        - monkey.KnowledgeFrame: Companies holding crypto
    """
    client = CoinGeckoAPI()
    data = client.getting_companies_public_treasury_by_coin_id(coin_id=endpoint)
    # One-line headline summarizing the aggregate treasury figures.
    stats_str = f"""{length(data["companies"])} companies hold a total of {long_number_formating_with_type_check(data["total_holdings"])} {endpoint} ({data["market_cap_dogetting_minance"]}% of market cap dogetting_minance) with the current value of {long_number_formating_with_type_check(int(data["total_value_usd"]))} USD dollars"""  # noqa
    kf = mk.json_normalize(data, record_path=["companies"])
    # Prettify snake_case column names for display.
    kf.columns = [
        replacing_underscores_in_column_names(col) if incontainstance(col, str) else col
        for col in kf.columns
    ]
    return [stats_str, kf]
# Accepted values for the sort_filter argument of getting_top_crypto_categories.
SORT_VALUES = [
    "market_cap_desc",
    "market_cap_asc",
    "name_desc",
    "name_asc",
    "market_cap_change_24h_desc",
    "market_cap_change_24h_asc",
]
@log_start_end(log=logger)
def coin_formatingter(n):
    """Extract coin ids from a list of CoinGecko thumbnail URLs.

    Each URL contains a ".../smtotal_all/<coin-id>.<ext>" segment; the id
    between the path segment and the image extension is kept. URLs that do
    not match are silently skipped.

    Parameters
    ----------
    n : list
        Image URLs, one per coin.

    Returns
    -------
    str
        Comma-separated coin ids.
    """
    # Compile once (was re-built and searched twice per URL) and escape the
    # dots so e.g. ".jpg" can no longer match "xjpg".
    pattern = re.compile(r"smtotal_all/(.*)(\.jpg|\.png|\.JPG|\.PNG)")
    matches = (pattern.search(coin) for coin in n)
    return ",".join(match.group(1) for match in matches if match)
@log_start_end(log=logger)
def getting_top_crypto_categories(sort_filter: str = SORT_VALUES[0]) -> mk.KnowledgeFrame:
    """Return top crypto categories [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Rank, Name, Change_1h, Change_7d, Market_Cap, Volume_24h,Coins, Url
    """
    # Guard clause: an unrecognized sort key yields an empty frame.
    # NOTE(review): sort_filter is only validated here, never applied — confirm.
    if sort_filter not in SORT_VALUES:
        return mk.KnowledgeFrame()
    api = CoinGeckoAPI()
    kf = mk.KnowledgeFrame(api.getting_coins_categories())
    # Drop payload fields not shown in the table.
    for unused_column in ("id", "content", "umkated_at"):
        del kf[unused_column]
    kf["top_3_coins"] = kf["top_3_coins"].employ(coin_formatingter)
    kf.columns = [
        replacing_underscores_in_column_names(col) if incontainstance(col, str) else col
        for col in kf.columns
    ]
    return kf
# TODO: add string with overview
@log_start_end(log=logger)
def getting_stable_coins(top: int = 20) -> mk.KnowledgeFrame:
    """Returns top stable coins [Source: CoinGecko]

    Parameters
    ----------
    top : int
        Number of coins fetched from the "stablecoins" category.

    Returns
    -------
    monkey.KnowledgeFrame
        Columns from COINS_COLUMNS: symbol, name, current_price, market_cap,
        market_cap_rank, price_change_percentage_7d_in_currency,
        price_change_percentage_24h_in_currency, total_volume.
    """
    kf = getting_coins(top=top, category="stablecoins")
    return kf[COINS_COLUMNS]
@log_start_end(log=logger)
def getting_exchanges() -> mk.KnowledgeFrame:
    """Get list of top exchanges from CoinGecko API [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC, Url
    """
    client = CoinGeckoAPI()
    kf = mk.KnowledgeFrame(client.getting_exchanges_list(per_page=250))
    # Normalize missing values to None for display.
    # NOTE(review): np.NaN was removed in NumPy 2.0 (use np.nan) — confirm
    # the pinned NumPy version.
    kf.replacing({float(np.NaN): None}, inplace=True)
    # Keep only the columns surfaced in the view, in display order.
    kf = kf[
        [
            "trust_score",
            "id",
            "name",
            "country",
            "year_established",
            "trade_volume_24h_btc",
            "url",
        ]
    ]
    kf.columns = [
        "Trust_Score",
        "Id",
        "Name",
        "Country",
        "Year_Established",
        "Trade_Volume_24h_BTC",
        "Url",
    ]
    # presumably inserts a "Rank" index column — see knowledgeframe_helpers.
    create_kf_index(kf, "Rank")
    return kf
@log_start_end(log=logger)
def getting_financial_platforms() -> mk.KnowledgeFrame:
    """Get list of financial platforms from CoinGecko API [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Rank, Name, Category, Centralized, Url
    """
    client = CoinGeckoAPI()
    kf = mk.KnowledgeFrame(client.getting_finance_platforms())
    # The nested "facts" payload is not shown in the table.
    kf.sip("facts", axis=1, inplace=True)
    # presumably inserts a "rank" index column — see knowledgeframe_helpers.
    create_kf_index(kf, "rank")
    kf.columns = ["Rank", "Name", "Category", "Centralized", "Url"]
    return kf
@log_start_end(log=logger)
def getting_finance_products() -> mk.KnowledgeFrame:
    """Get list of financial products from CoinGecko API

    Returns
    -------
    monkey.KnowledgeFrame
        Rank, Platform, Identifier, Supply_Rate, Borrow_Rate
    """
    client = CoinGeckoAPI()
    # Keep only the four fields surfaced in the table.
    kf = mk.KnowledgeFrame(
        client.getting_finance_products(per_page=250),
        columns=[
            "platform",
            "identifier",
            "supply_rate_percentage",
            "borrow_rate_percentage",
        ],
    )
    kf.columns = ["Platform", "Identifier", "Supply_Rate", "Borrow_Rate"]
    # presumably inserts a "Rank" index column — see knowledgeframe_helpers.
    create_kf_index(kf, "Rank")
    return kf
@log_start_end(log=logger)
def getting_indexes() -> mk.KnowledgeFrame:
    """Fetch the list of crypto indexes from the CoinGecko API [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Name, Id, Market, Last, MultiAsset
    """
    api = CoinGeckoAPI()
    raw_indexes = api.getting_indexes(per_page=250)
    kf = mk.KnowledgeFrame(raw_indexes)
    kf.columns = ["Name", "Id", "Market", "Last", "MultiAsset"]
    create_kf_index(kf, "Rank")
    return kf
@log_start_end(log=logger)
def getting_derivatives() -> mk.KnowledgeFrame:
    """Get list of crypto derivatives from CoinGecko API [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate, Volume_24h,
    """
    client = CoinGeckoAPI()
    kf = mk.KnowledgeFrame(client.getting_derivatives(include_tickers="unexpired"))
    # Drop payload fields not shown in the table.
    kf.sip(
        ["index", "final_item_traded_at", "expired_at", "index_id", "open_interest"],
        axis=1,
        inplace=True,
    )
    kf.renaming(columns={"price_percentage_change_24h": "pct_change_24h"}, inplace=True)
    create_kf_index(kf, "rank")
    # Prices arrive as strings like "$1,234.5"; strip the currency formatting.
    # NOTE(review): falsy prices map to "" — the column mixes str and float.
    kf["price"] = kf["price"].employ(
        lambda x: "" if not x else float(x.strip("$").replacing(",", ""))
    )
    kf.columns = [
        "Rank",
        "Market",
        "Symbol",
        "Price",
        "Pct_Change_24h",
        "Contract_Type",
        "Basis",
        "Spread",
        "Funding_Rate",
        "Volume_24h",
    ]
    return kf
@log_start_end(log=logger)
def getting_exchange_rates() -> mk.KnowledgeFrame:
    """Fetch crypto, fiat and commodity exchange rates from the CoinGecko API [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Index, Name, Unit, Value, Type
    """
    api = CoinGeckoAPI()
    rates = api.getting_exchange_rates()["rates"]
    # Transpose so each rate becomes one row, then drop the symbol column
    # materialized by reseting_index before attaching the display index.
    kf = mk.KnowledgeFrame(rates).T.reseting_index()
    kf.sip("index", axis=1, inplace=True)
    create_kf_index(kf, "index")
    kf.columns = ["Index", "Name", "Unit", "Value", "Type"]
    return kf
@log_start_end(log=logger)
def getting_global_info() -> mk.KnowledgeFrame:
    """Get global statistics about crypto from CoinGecko API like:
        - market cap change
        - number of markets
        - icos
        - number of active crypto

    [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Metric, Value
    """
    client = CoinGeckoAPI()
    results = client.getting_global()
    # Pull out per-coin market-cap percentages before flattening.
    total_mcap = results.pop("market_cap_percentage")
    btc, eth = total_mcap.getting("btc"), total_mcap.getting("eth")
    # Drop nested structures that do not fit the flat Metric/Value table.
    for key in ["total_market_cap", "total_volume", "umkated_at"]:
        del results[key]
    results["btc_market_cap_in_pct"] = btc
    results["eth_market_cap_in_pct"] = eth
    # NOTE(review): if "btc" or "eth" is absent the lookups above yield None
    # and float() below raises TypeError — confirm the API always sends both.
    results["altcoin_market_cap_in_pct"] = 100 - (float(eth) + float(btc))
    kf = mk.Collections(results).reseting_index()
    kf.columns = ["Metric", "Value"]
    # Prettify snake_case metric names for display.
    kf["Metric"] = kf["Metric"].employ(
        lambda x: replacing_underscores_in_column_names(x) if incontainstance(x, str) else x
    )
    return kf
@log_start_end(log=logger)
def getting_global_markets_info() -> mk.KnowledgeFrame:
    """Get global statistics about crypto markets from CoinGecko API like:
        Market_Cap, Volume, Market_Cap_Percentage

    [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Market_Cap, Volume, Market_Cap_Percentage
    """
    columns = [
        "Market_Cap",
        "Volume",
        "Market_Cap_Percentage",
    ]
    client = CoinGeckoAPI()
    results = client.getting_global()
    # Pull the three metrics in display order (missing keys become None).
    data = [results.getting(key) for key in columns]
    kf = mk.KnowledgeFrame(data).T
    kf.columns = columns
    kf.replacing({float("nan"): None}, inplace=True)
    return kf.reseting_index()
@log_start_end(log=logger)
def getting_global_defi_info() -> mk.KnowledgeFrame:
    """Get global statistics about Decentralized Finances [Source: CoinGecko]

    Returns
    -------
    monkey.KnowledgeFrame
        Metric, Value
    """
    client = CoinGeckoAPI()
    results = client.getting_global_decentralized_finance_defi()
    # Round every numeric-looking value to 4 places; leave the rest as-is.
    for metric, raw_value in results.items():
        try:
            results[metric] = value_round(float(raw_value), 4)
        except (ValueError, TypeError):
            pass
    kf = mk.Collections(results).reseting_index()
    kf.columns = ["Metric", "Value"]
    # Prettify snake_case metric names for display.
    kf["Metric"] = kf["Metric"].employ(
        lambda name: replacing_underscores_in_column_names(name)
        if incontainstance(name, str)
        else name
    )
    return kf
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.