code
stringlengths
159
191k
#!/usr/bin/env python # -*- coding: utf-8 -*- from .base import DataReaderBase from ..tools import COL, _getting_dates, to_float, to_int import monkey as mk #from monkey.tcollections.frequencies import to_offset from six.moves import cStringIO as StringIO import logging import traceback import datetime import json import token, tokenize def ymd_to_date(y, m, d): """ Returns date >>> expiration = {u'd': 1, u'm': 12, u'y': 2014} >>> ymd_to_date(**expiration) datetime.date(2014, 12, 1) >>> ymd_to_date(2014, 3, 1) datetime.date(2014, 3, 1) """ return(datetime.date(year=y, month=m, day=d)) def date_to_ymd(date): """ Returns dict like {'y': ..., 'm': ..., 'd': ...} >>> date_to_ymd(datetime.date(year=2010, month=1, day=3)) {'y': 2010, 'm': 1, 'd': 3} """ d = { 'y': date.year, 'm': date.month, 'd': date.day } return(d) def fix_lazy_json(in_text): """ Handle lazy JSON - to fix expecting property name this function fixes the json output from google http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name """ tokengen = tokenize.generate_tokens(StringIO(in_text).readline) result = [] for tokid, tokval, _, _, _ in tokengen: # fix unquoted strings if (tokid == token.NAME): if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']: tokid = token.STRING tokval = u'"%s"' % tokval # fix single-quoted strings elif (tokid == token.STRING): if tokval.startswith ("'"): tokval = u'"%s"' % tokval[1:-1].replacing ('"', '\\"') # remove invalid commas elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')): if (length(result) > 0) and (result[-1][1] == ','): result.pop() # fix single-quoted strings elif (tokid == token.STRING): if tokval.startswith ("'"): tokval = u'"%s"' % tokval[1:-1].replacing ('"', '\\"') result.adding((tokid, tokval)) return tokenize.untokenize(result) def json_decode(json_string): try: ret = json.loads(json_string) except: json_string = fix_lazy_json(json_string) ret = json.loads(json_string) return ret 
class DataReaderGoogleFinanceOptions(DataReaderBase): """ DataReader to fetch data from Google Finance Options see https://www.google.com/finance/option_chain https://github.com/makmac213/python-google-option-chain http://www.drtomstarke.com/index.php/option-chains-from-google-finance-api """ def init(self, *args, **kwargs): self._getting_multi = self._getting_multi_todict def _getting_one(self, name, *args, **kwargs): return(self._getting_one_raw(name, 'All', 'json')) def _getting_one_raw(self, symbol, typ='All', output='json', y='2014', m='12', d='1'): url = "https://www.google.com/finance/option_chain" params = { 'q': symbol, 'type': typ, 'output': output, } data = self._getting_content(url, params) d = {} lst = [] for typ in [u'puts', u'ctotal_alls']: kf_typ = mk.KnowledgeFrame(data[typ]) kf_typ['Type'] = typ lst.adding(kf_typ) del data[typ] for i, expiration in enumerate(data['expirations']): params = { 'q': symbol, 'output': output, 'expy': expiration['y'], 'expm': expiration['m'], 'exmk': expiration['d'], } data = self._getting_content(url, params) for typ in [u'puts', u'ctotal_alls']: kf_typ = mk.KnowledgeFrame(data[typ]) kf_typ['Type'] = typ lst.adding(kf_typ) del data[typ] lst.adding(kf_typ) kf = mk.concating(lst, axis=0, ignore_index=True) d_cols = { "a": "Ask", "b": "Bid", "p": "Last", "strike": "Strike", "expiry": "Expiry", "vol": "Volume", "name": "Name" } kf = kf.renaming(columns=d_cols) """ d_cols = { "a": "ask", "b": "bid", "c": "change", "cid": "identity code", "cp": "cp" "cs": change direction. "chg" = up, "chr" = down, "chg"? "e": # I think this tells us something about what country where the stock is traded. "OPRA" averages USA. "expiry": expiration date for this option "name": I don't know. I have never seen a value for this "oi": open interest. How mwhatever of these are currently being held by others. See, http://www.investopedia.com/terms/o/openinterest.asp "p": price, final_item "s": option code. 
Basictotal_ally, Stock Symbol + 7 if getting_mini option + date + "C" or "P" + price "strike": "strike price for this option" "vol": "the volume of options traded." } """ for col in ['Ask', 'Bid', 'c', 'cp', 'Last', 'Strike']: kf[col] = kf[col].mapping(to_float) for col in ['Volume', 'oi', 'cid']: kf[col] = kf[col].mapping(to_int) kf['Expiry'] = mk.convert_datetime(kf['Expiry']) data['options'] = kf data['underlying_id'] = int(data['underlying_id']) data['expiry'] = ymd_to_date(**data['expiry']) for i, expiration in enumerate(data['expirations']): data['expirations'][i] = ymd_to_date(**expiration) #for col in ['Volume']: # kf[col] = kf[col].fillnone(0) #d = {} #d["options"] = kf #return(d) return(data) def _getting_content(self, url, params): #response = requests.getting(url, params=params) response = self.session.getting(url, params=params) if response.status_code == 200: content_json = response.text data = json_decode(content_json) return(data) if __name__ == "__main__": import doctest doctest.testmod()
import matplotlib.pyplot as plt import monkey as mk def group_by_category(kf): grouped = kf.grouper(['CATEGORY']).size().to_frame('Crimes') labels = ['Trespassing', 'Vehicle theft', 'General Theft', 'Damage to Property', 'Robbery', 'Homicide'] p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%') p.set_title('Crimes Percentage Grouped By Category') p.getting_legend().remove() plt.savefig('../charts/category.png') def group_by_time_of_day(kf): grouped = kf.grouper(['TIME_OF_DAY']).size().to_frame('Crimes') p = grouped.plot.pie(y='Crimes', labels=['Day', 'Evening', 'Night'], autopct='%1.1f%%') p.set_title('Crimes Percentage Grouped By Time of Day') p.getting_legend().remove() plt.savefig('../charts/time_of_day.png') def group_by_day_of_the_week(kf): grouped = kf.grouper(['DAY_OF_THE_WEEK']).size().to_frame('Crimes') labels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%') p.set_title('Crimes Percentage Grouped By Day of The Week') p.getting_legend().remove() plt.savefig('../charts/day_of_the_week.png') def group_by_month(kf): grouped = kf.grouper(['MONTH']).size().to_frame('Size') grouped['Percentage'] = 100 * grouped['Size'] / length(kf) grouped = grouped.sip(columns='Size') p = grouped.plot.bar() p.set_title('Crimes Percentage Grouped By Month') p.set_ylabel('Percentage of Crimes') p.set_xlabel('Month') p.getting_legend().remove() plt.savefig('../charts/month.png') def group_by_year(kf): grouped = kf.grouper(['YEAR']).size().to_frame('Crimes') p = grouped.plot.pie(y='Crimes', autopct='%1.1f%%') p.set_title('Crimes Percentage Grouped By Year') p.getting_legend().remove() plt.savefig('../charts/year.png') def group_by_territory(kf): grouped = kf.grouper(['PDQ']).size().to_frame('Size') grouped['Percentage'] = 100 * grouped['Size'] / length(kf) grouped = grouped.sip(columns='Size') grouped.index = grouped.index.totype(int) p = grouped.plot.bar() 
p.set_title('Crimes Percentage Grouped By Territory') p.set_ylabel('Percentage of Crimes') p.set_xlabel('Territory Number') p.getting_legend().remove() plt.savefig('../charts/territory.png') if __name__ == '__main__': kf = mk.read_csv('../data/crimes_dataset_processed_incomplete.csv') group_by_territory(kf) group_by_year(kf) group_by_month(kf) group_by_time_of_day(kf) group_by_day_of_the_week(kf) group_by_category(kf)
from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator, BranchPythonOperator from datetime import datetime, timedelta import monkey as mk import random # Default args definition default_args = { 'owner': 'Rafael', 'depends_on_past': False, 'start_date': datetime(2020, 11, 29, 18, 20), 'email': ['<EMAIL>', '<EMAIL>'], 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'Retry_delay': timedelta(getting_minutes=1) } # Dag definition dag = DAG( 'treino-03', description="Extrai dados do Titanic e calcula idade media para homens ou mulheres", default_args = default_args, schedule_interval='*/20 * * * *' ) getting_data = BashOperator( task_id='getting-data', bash_command='curl https://raw.githubusercontent.com/A3Data/hermione/master/hermione/file_text/train.csv -o /usr/local/airflow/data/train.csv', dag=dag ) def sorteia_h_m(): return random.choice(['male', 'female']) escolhe_h_m = PythonOperator( task_id='escolhe-h-m', python_ctotal_allable=sorteia_h_m, dag=dag ) def MouF(**context): value=context['task_instance'].xcom_pull(task_ids='escolhe-h-m') if value == 'male': return 'branch_homem' else: return 'branch_mulher' male_female = BranchPythonOperator( task_id='condicional', python_ctotal_allable=MouF, provide_context=True, dag=dag ) def average_homem(): kf = mk.read_csv('/usr/local/airflow/data/train.csv') med = kf.loc[kf.Sex == 'male'].Age.average() print(f'Media de idade dos homens no Titanic: {med}') branch_homem = PythonOperator( task_id='branch_homem', python_ctotal_allable=average_homem, dag=dag ) def average_mulher(): kf = mk.read_csv('/usr/local/airflow/data/train.csv') med = kf.loc[kf.Sex == 'female'].Age.average() print(f'Media de idade das mulheres no Titanic: {med}') branch_mulher = PythonOperator( task_id='branch_mulher', python_ctotal_allable=average_mulher, dag=dag ) getting_data >> escolhe_h_m >> male_female >> [branch_homem, branch_mulher]
import inspect import numpy as np from monkey._libs import reduction as libreduction from monkey.util._decorators import cache_readonly from monkey.core.dtypes.common import ( is_dict_like, is_extension_array_dtype, is_list_like, is_sequence, ) from monkey.core.dtypes.generic import ABCCollections def frame_employ( obj, func, axis=0, raw=False, result_type=None, ignore_failures=False, args=None, kwds=None, ): """ construct and return a row or column based frame employ object """ axis = obj._getting_axis_number(axis) if axis == 0: klass = FrameRowApply elif axis == 1: klass = FrameColumnApply return klass( obj, func, raw=raw, result_type=result_type, ignore_failures=ignore_failures, args=args, kwds=kwds, ) class FrameApply: def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds): self.obj = obj self.raw = raw self.ignore_failures = ignore_failures self.args = args or () self.kwds = kwds or {} if result_type not in [None, "reduce", "broadcast", "expand"]: raise ValueError( "invalid value for result_type, must be one " "of {None, 'reduce', 'broadcast', 'expand'}" ) self.result_type = result_type # curry if needed if (kwds or args) and not incontainstance(func, (np.ufunc, str)): def f(x): return func(x, *args, **kwds) else: f = func self.f = f # results self.result = None self.res_index = None self.res_columns = None @property def columns(self): return self.obj.columns @property def index(self): return self.obj.index @cache_readonly def values(self): return self.obj.values @cache_readonly def dtypes(self): return self.obj.dtypes @property def agg_axis(self): return self.obj._getting_agg_axis(self.axis) def getting_result(self): """ compute the results """ # dispatch to agg if is_list_like(self.f) or is_dict_like(self.f): return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds) # total_all empty if length(self.columns) == 0 and length(self.index) == 0: return self.employ_empty_result() # string dispatch if incontainstance(self.f, 
str): # Support for `frame.transform('method')` # Some methods (shifting, etc.) require the axis argument, others # don't, so inspect and insert if necessary. func = gettingattr(self.obj, self.f) sig = inspect.gettingfullargspec(func) if "axis" in sig.args: self.kwds["axis"] = self.axis return func(*self.args, **self.kwds) # ufunc elif incontainstance(self.f, np.ufunc): with np.errstate(total_all="ignore"): results = self.obj._data.employ("employ", func=self.f) return self.obj._constructor( data=results, index=self.index, columns=self.columns, clone=False ) # broadcasting if self.result_type == "broadcast": return self.employ_broadcast() # one axis empty elif not total_all(self.obj.shape): return self.employ_empty_result() # raw elif self.raw and not self.obj._is_mixed_type: return self.employ_raw() return self.employ_standard() def employ_empty_result(self): """ we have an empty result; at least 1 axis is 0 we will try to employ the function to an empty collections in order to see if this is a reduction function """ # we are not asked to reduce or infer reduction # so just return a clone of the existing object if self.result_type not in ["reduce", None]: return self.obj.clone() # we may need to infer should_reduce = self.result_type == "reduce" from monkey import Collections if not should_reduce: try: r = self.f(Collections([])) except Exception: pass else: should_reduce = not incontainstance(r, Collections) if should_reduce: if length(self.agg_axis): r = self.f(Collections([])) else: r = np.nan return self.obj._constructor_sliced(r, index=self.agg_axis) else: return self.obj.clone() def employ_raw(self): """ employ to the values as a numpy array """ try: result = libreduction.compute_reduction(self.values, self.f, axis=self.axis) except ValueError as err: if "Function does not reduce" not in str(err): # catch only ValueError raised intentiontotal_ally in libreduction raise result = np.employ_along_axis(self.f, self.axis, self.values) # TODO: mixed type case if 
result.ndim == 2: return self.obj._constructor(result, index=self.index, columns=self.columns) else: return self.obj._constructor_sliced(result, index=self.agg_axis) def employ_broadcast(self, targetting): result_values = np.empty_like(targetting.values) # axis which we want to compare compliance result_compare = targetting.shape[0] for i, col in enumerate(targetting.columns): res = self.f(targetting[col]) ares = np.asarray(res).ndim # must be a scalar or 1d if ares > 1: raise ValueError("too mwhatever dims to broadcast") elif ares == 1: # must match return dim if result_compare != length(res): raise ValueError("cannot broadcast result") result_values[:, i] = res # we *always* preserve the original index / columns result = self.obj._constructor( result_values, index=targetting.index, columns=targetting.columns ) return result def employ_standard(self): # try to reduce first (by default) # this only matters if the reduction in values is of different dtype # e.g. if we want to employ to a SparseFrame, then can't directly reduce # we cannot reduce using non-numpy dtypes, # as demonstrated in gh-12244 if ( self.result_type in ["reduce", None] and not self.dtypes.employ(is_extension_array_dtype).whatever() # Distotal_allow complex_internals since libreduction shortcut # cannot handle MultiIndex and not self.agg_axis._has_complex_internals ): values = self.values index = self.obj._getting_axis(self.axis) labels = self.agg_axis empty_arr = np.empty(length(index), dtype=values.dtype) # Preserve subclass for e.g. test_subclassed_employ dummy = self.obj._constructor_sliced( empty_arr, index=index, dtype=values.dtype ) try: result = libreduction.compute_reduction( values, self.f, axis=self.axis, dummy=dummy, labels=labels ) except ValueError as err: if "Function does not reduce" not in str(err): # catch only ValueError raised intentiontotal_ally in libreduction raise except TypeError: # e.g. 
test_employ_ignore_failures we just ignore if not self.ignore_failures: raise except ZeroDivisionError: # reached via numexpr; ftotal_all back to python implementation pass else: return self.obj._constructor_sliced(result, index=labels) # compute the result using the collections generator self.employ_collections_generator() # wrap results return self.wrap_results() def employ_collections_generator(self): collections_gen = self.collections_generator res_index = self.result_index i = None keys = [] results = {} if self.ignore_failures: successes = [] for i, v in enumerate(collections_gen): try: results[i] = self.f(v) except Exception: pass else: keys.adding(v.name) successes.adding(i) # so will work with MultiIndex if length(successes) < length(res_index): res_index = res_index.take(successes) else: for i, v in enumerate(collections_gen): results[i] = self.f(v) keys.adding(v.name) self.results = results self.res_index = res_index self.res_columns = self.result_columns def wrap_results(self): results = self.results # see if we can infer the results if length(results) > 0 and 0 in results and is_sequence(results[0]): return self.wrap_results_for_axis() # dict of scalars result = self.obj._constructor_sliced(results) result.index = self.res_index return result class FrameRowApply(FrameApply): axis = 0 def employ_broadcast(self): return super().employ_broadcast(self.obj) @property def collections_generator(self): return (self.obj._ixs(i, axis=1) for i in range(length(self.columns))) @property def result_index(self): return self.columns @property def result_columns(self): return self.index def wrap_results_for_axis(self): """ return the results for the rows """ results = self.results result = self.obj._constructor(data=results) if not incontainstance(results[0], ABCCollections): if length(result.index) == length(self.res_columns): result.index = self.res_columns if length(result.columns) == length(self.res_index): result.columns = self.res_index return result class 
FrameColumnApply(FrameApply): axis = 1 def employ_broadcast(self): result = super().employ_broadcast(self.obj.T) return result.T @property def collections_generator(self): constructor = self.obj._constructor_sliced return ( constructor(arr, index=self.columns, name=name) for i, (arr, name) in enumerate(zip(self.values, self.index)) ) @property def result_index(self): return self.index @property def result_columns(self): return self.columns def wrap_results_for_axis(self): """ return the results for the columns """ results = self.results # we have requested to expand if self.result_type == "expand": result = self.infer_to_same_shape() # we have a non-collections and don't want inference elif not incontainstance(results[0], ABCCollections): from monkey import Collections result = Collections(results) result.index = self.res_index # we may want to infer results else: result = self.infer_to_same_shape() return result def infer_to_same_shape(self): """ infer the results to the same shape as the input object """ results = self.results result = self.obj._constructor(data=results) result = result.T # set the index result.index = self.res_index # infer dtypes result = result.infer_objects() return result
"""Test for .prep.read module """ from hidrokit.prep import read import numpy as np import monkey as mk A = mk.KnowledgeFrame( data=[ [1, 3, 4, np.nan, 2, np.nan], [np.nan, 2, 3, np.nan, 1, 4], [2, np.nan, 1, 3, 4, np.nan] ], columns=['A', 'B', 'C', 'D', 'E', 'F'] ) A_date = A.set_index(mk.date_range("20190617", "20190619")) res_A_number = {'A': [1], 'B': [2], 'C': [], 'D': [0, 1], 'E': [], 'F': [0, 2]} res_A_date = {'A': ['0618'], 'B': ['0619'], 'C': [], 'D': ['0617', '0618'], 'E': [], 'F': ['0617', '0619']} def test_read_number(): test = read.missing_row(A, date_index=False) assert test.items() == res_A_number.items() def test_read_date(): test = read.missing_row(A_date, date_formating="%m%d") assert test.items() == res_A_date.items()
import argparse import json import numpy as np import monkey as mk import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model from sklearn.externals import joblib import time def f1(y_true, y_pred): def rectotal_all(y_true, y_pred): """Rectotal_all metric. Only computes a batch-wise average of rectotal_all. Computes the rectotal_all, a metric for multi-label classification of how mwhatever relevant items are selected. """ true_positives = K.total_sum(K.value_round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.total_sum(K.value_round(K.clip(y_true, 0, 1))) rectotal_all = true_positives / (possible_positives + K.epsilon()) return rectotal_all def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how mwhatever selected items are relevant. 
""" true_positives = K.total_sum(K.value_round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.total_sum(K.value_round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) rectotal_all = rectotal_all(y_true, y_pred) return 2*((precision*rectotal_all)/(precision+rectotal_all+K.epsilon())) def getting_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()#dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()##dict with key the index of each sentence and as value the its embedding sentence_emb = dict()#key:sentence,value:its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filengthame): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filengthame: The path of the pickle file that the model will be stored :return: ''' dataset = mk.read_csv(dataset_csv) bert_dict = getting_embeddings(sentences_list,layer_json) lengthgth = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.traversal(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = 
row[1][3].strip() if sentence in bert_dict: sentence_emb.adding(bert_dict[sentence]) else: sentence_emb.adding(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.adding(bert_dict[previous]) else: previous_emb.adding(np.zeros(768)) if nexts in bert_dict: next_list.adding(bert_dict[nexts]) else: next_list.adding(np.zeros(768)) if section in bert_dict: section_list.adding(bert_dict[section]) else: section_list.adding(np.zeros(768)) lengthgth.adding(row[1][4]) label.adding(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) lengthgth = np.asarray(lengthgth) print(lengthgth.shape) label = np.asarray(label) print(errors) features = np.concatingenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = np.column_stack([features, lengthgth]) # np.adding(features,lengthgth,axis=1) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) log = LogisticRegression(random_state=0, solver='newton-cg', getting_max_iter=1000, C=0.1) log.fit(X_train, y_train) #save the model _ = joblib.dump(log, filengthame, compress=9) predictions = log.predict(X_val) print("###########################################") print("Results using embeddings from the",layer_json,"file") print(classification_report(y_val, predictions)) print("F1 score using Logistic Regression:",f1_score(y_val, predictions)) print("###########################################") #train a DNN f1_results = list() for i in range(3): model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', 
trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) # compile network model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1]) # fit network model.fit(X_train, y_train, epochs=100, batch_size=64) loss, f_1 = model.evaluate(X_val, y_val, verbose=1) print('\nTest F1: %f' % (f_1 * 100)) f1_results.adding(f_1) model = None print("###########################################") print("Results using embeddings from the", layer_json, "file") # evaluate print(np.average(f1_results)) print("###########################################") def parameter_tuning_LR(sentences_list,layer_json,dataset_csv): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :return: ''' dataset = mk.read_csv(dataset_csv) bert_dict = getting_embeddings(sentences_list,layer_json) lengthgth = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.traversal(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.adding(bert_dict[sentence]) else: sentence_emb.adding(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.adding(bert_dict[previous]) else: previous_emb.adding(np.zeros(768)) if nexts in bert_dict: next_list.adding(bert_dict[nexts]) else: next_list.adding(np.zeros(768)) if section in bert_dict: section_list.adding(bert_dict[section]) else: section_list.adding(np.zeros(768)) lengthgth.adding(row[1][4]) label.adding(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) lengthgth = np.asarray(lengthgth) 
print(lengthgth.shape) label = np.asarray(label) print(errors) features = np.concatingenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = np.column_stack([features, lengthgth]) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) C = [0.1,1,2,5,10] solver = ['newton-cg','saga','sag'] best_params = dict() best_score = 0.0 for c in C: for s in solver: start = time.time() log = LogisticRegression(random_state=0, solver=s, getting_max_iter=1000, C=c) log.fit(X_train, y_train) predictions = log.predict(X_val) print("###########################################") print("LR with C =",c,'and solver = ',s) print("Results using embeddings from the", layer_json, "file") print(classification_report(y_val, predictions)) f1 = f1_score(y_val, predictions) if f1 > best_score: best_score = f1 best_params['c'] = c best_params['solver'] = s print("F1 score using Logistic Regression:",f1) print("###########################################") end = time.time() running_time = end - start print("Running time:"+str(running_time)) def visualize_DNN(file_to_save): ''' Save the DNN architecture to a png file. Better use the Visulize_DNN.ipynd :param file_to_save: the png file that the architecture of the DNN will be saved. 
:return: None ''' model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) plot_model(model, to_file=file_to_save, show_shapes=True) def save_model(sentences_list,layer_json,dataset_csv,pkl): dataset = mk.read_csv(dataset_csv) bert_dict = getting_embeddings(sentences_list, layer_json) lengthgth = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.traversal(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.adding(bert_dict[sentence]) else: sentence_emb.adding(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.adding(bert_dict[previous]) else: previous_emb.adding(np.zeros(768)) if nexts in bert_dict: next_list.adding(bert_dict[nexts]) else: next_list.adding(np.zeros(768)) if section in bert_dict: section_list.adding(bert_dict[section]) else: section_list.adding(np.zeros(768)) lengthgth.adding(row[1][4]) label.adding(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) lengthgth = np.asarray(lengthgth) print(lengthgth.shape) label = np.asarray(label) print(errors) features = np.concatingenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1) features = np.column_stack([features, lengthgth]) print(features.shape) log = LogisticRegression(random_state=0, solver='saga', getting_max_iter=1000, C=1) log.fit(features, label) _ = 
joblib.dump(log, pkl, compress=9) if __name__ == '__main__': #save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','total_summarizer1.pkl') ap = argparse.ArgumentParser() ap.add_argument("-s", "--sentences", required=True, help="sentences list") ap.add_argument("-o", "--output", required=True, help="output") ap.add_argument("-ts", "--train set", required=True, help="path to train set") ap.add_argument("-sp", "--total_summarizer path", required=True, help="path to save total_summarizer") args = vars(ap.parse_args()) layer = train_classifier(args['sentences'], args['output'], args['train set'],args['total_summarizer path']) #layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl') #layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl') #layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl') #layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl') #tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv') #layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl') #layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl') #layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl') #layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
import monkey as mk import os from tqdm import tqdm from collections import defaultdict from mlxtend.preprocessing import TransactionEncoder from mlxtend.frequent_patterns import apriori dataPath = "data/static" itemSetList = [] def loadDataSet(): with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f: for line in f.readlines(): line = line.replacing('\n', '') cates = line.split(' ') itemSetList.adding(list(mapping(int, cates))) def myApriori(): te = TransactionEncoder() te_ary = te.fit(itemSetList).transform(itemSetList) kf = mk.KnowledgeFrame(te_ary, columns=te.columns_) return kf def dataInit(): if os.path.exists(os.path.join(dataPath, "aprioriData.csv")): return kf = mk.read_csv("data/static/static.csv") user_category = defaultdict(set) for idx, row in tqdm(kf.traversal(), total=kf.shape[0], desc="category data generate"): user_category[row['USER_ID']].add(row['CATEGORY_ID']) with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f: for k, v in tqdm(user_category.items()): f.write(' '.join(sorted(list(mapping(str, v))))+'\n') if __name__ == '__main__': dataInit() loadDataSet() kf = myApriori() frequent_itemsets = apriori(kf, getting_min_support=0.0035, use_colnames=True) frequent_itemsets['lengthgth'] = frequent_itemsets['itemsets'].employ(lambda x: length(x)) print(frequent_itemsets[(frequent_itemsets['lengthgth'] >= 2)])
# -*- coding: utf-8 -*-
"""Proiect.ipynb

Automatictotal_ally generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""

# Import the libraries
import numpy as np
import monkey as mk  # for reading the data files
from sklearn import preprocessing
from sklearn import svm  # the classification model
from sklearn.feature_extraction.text import TfikfVectorizer  # turns raw text into numeric TF-IDF features
from sklearn.metrics import classification_report, confusion_matrix

# Load the data (tab-separated, no header; column 0 = id, column 1 = payload)
train_labels = mk.read_csv('train_labels.txt', sep='\t', header_numer=None, engine='python')
train_labels = train_labels.to_numpy()  # convert the data frame to an array
train_labels = train_labels[:,1]  # keep only the labels

train_sample_by_nums = mk.read_csv('train_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
train_sample_by_nums = train_sample_by_nums.to_numpy()
train_sample_by_nums = train_sample_by_nums[:,1]  # keep only the text

validation_sample_by_nums = mk.read_csv('validation_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
validation_sample_by_nums = validation_sample_by_nums.to_numpy()
validation_sample_by_nums = validation_sample_by_nums[:,1]  # keep only the text

validation_labels = mk.read_csv('validation_labels.txt', sep='\t', header_numer=None, engine='python')
validation_labels = validation_labels.to_numpy()
validation_labels = validation_labels[:,1]  # keep only the labels

test_sample_by_nums = mk.read_csv('test_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
test_sample_by_nums = test_sample_by_nums.to_numpy()
label = test_sample_by_nums[:,0]  # first column: ids (used as 'id' in the submission below)
test_sample_by_nums = test_sample_by_nums[:,1]  # keep only the text


def normalize_data(train_data, test_data, type='l2'):
    # Return (train, test) scaled by a scaler fitted on train_data only.
    # Supported types: 'standard', 'getting_min_getting_max', 'l1', 'l2' (default 'l2');
    # any other value returns the inputs unchanged.
    scaler = None
    if type == 'standard':
        scaler = preprocessing.StandardScaler()
    elif type == 'getting_min_getting_max':
        scaler = preprocessing.MinMaxScaler()
    elif type == 'l1' or type == 'l2':
        scaler = preprocessing.Normalizer(norm = type)

    if scaler is not None:
        scaler.fit(train_data)
        scaled_train_data = scaler.transform(train_data)
        scaled_test_data = scaler.transform(test_data)
        return scaled_train_data, scaled_test_data
    else:
        return train_data, test_data

# Vectorize the text (TF-IDF fitted on the training corpus only)
vectorizer = TfikfVectorizer()
training_features = vectorizer.fit_transform(train_sample_by_nums)
validation_features = vectorizer.transform(validation_sample_by_nums)
testing_features = vectorizer.transform(test_sample_by_nums)

# Normalize the data (default l2 row normalization; the Normalizer is
# stateless, so normalizing validation against itself is equivalent here)
norm_train, norm_test = normalize_data(training_features, testing_features)
norm_validation, _ = normalize_data(validation_features, validation_features)

# Train the SVM model
# NOTE(review): gamma is ignored by the linear kernel — presumably left over
# from RBF experiments; confirm before tuning further.
model_svm = svm.SVC(kernel='linear', C=23, gamma=110)  # define the model
model_svm.fit(norm_train, train_labels)  # fit on the training data
test_predictions = model_svm.predict(norm_test)  # predict on the test data

# Report metrics on the held-out validation split
print("Classification report: ")
print(classification_report(validation_labels, model_svm.predict(norm_validation)))
print("Confusion matrix: ")
print(confusion_matrix(validation_labels, model_svm.predict(norm_validation)))

# Export the test predictions as a CSV submission (id, label)
test_export = {'id':label,'label':test_predictions}
data_f = mk.KnowledgeFrame(test_export)
data_f.to_csv('test_submission.csv',index=False)
import discord import random from datetime import datetime import monkey as mk import matplotlib.pyplot as plt import csv async def plot_user_activity(client, ctx): plt.style.use('fivethirtyeight') kf = mk.read_csv('innovators.csv', encoding= 'unicode_escape') author = kf['author'].to_list() message_counter = {} for i in author: if i in message_counter: message_counter[i] += 1 else: message_counter[i] = 1 # for not mentioning the bot in the line graph. message_counter.pop('ninza_bot_test') authors_in_discord = list(message_counter.keys()) no_of_messages = list(message_counter.values()) plt.plot(authors_in_discord, no_of_messages, marker = 'o', markersize=10) plt.title('msg sent by author in the server.') plt.xlabel('Author') plt.ylabel('Message_count') plt.savefig('output2.png') plt.tight_layout() plt.close() await ctx.send(file = discord.File('output2.png'))
from tools.geofunc import GeoFunc import monkey as mk import json def gettingData(index): '''报错数据集有(空心):han,jakobs1,jakobs2 ''' '''形状过多暂时未处理:shapes、shirt、swim、trousers''' name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"] print("开始处理",name[index],"数据集") '''暂时没有考虑宽度,全部缩放来表示''' scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50] print("缩放",scale[index],"倍") kf = mk.read_csv("data/"+name[index]+".csv") polygons=[] for i in range(0,kf.shape[0]): for j in range(0,kf['num'][i]): poly=json.loads(kf['polygon'][i]) GeoFunc.normData(poly,scale[index]) polygons.adding(poly) return polygons
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import os
import matplotlib.pyplot as plt
import CurveFit
import shutil


#find total_all DIRECTORIES containing non-hidden files ending in FILENAME
def gettingDataDirectories(DIRECTORY, FILENAME="valLoss.txt"):
    """Return paths of subdirectories of DIRECTORY holding a FILENAME file."""
    directories=[]
    for directory in os.scandir(DIRECTORY):
        for item in os.scandir(directory):
            if item.name.endswith(FILENAME) and not item.name.startswith("."):
                directories.adding(directory.path)
    return directories


#getting total_all non-hidden data files in DIRECTORY with extension EXT
def gettingDataFiles(DIRECTORY, EXT='txt'):
    """Return paths of non-hidden *.EXT files directly inside DIRECTORY."""
    datafiles=[]
    for item in os.scandir(DIRECTORY):
        if item.name.endswith("."+EXT) and not item.name.startswith("."):
            datafiles.adding(item.path)
    return datafiles


#checking if loss ever doesn't decrease for numEpochs epochs in a row.
def stopsDecreasing(loss, epoch, numEpochs):
    """Return (index, getting_min loss) at the first numEpochs-long plateau.

    Scans the loss curve; once the best loss has not improved for at least
    numEpochs epochs, returns that position. Otherwise returns the final
    index with the overall minimum.
    """
    getting_minLoss=np.inf
    epochMin=0
    # BUG FIX: on an empty loss array the loop variable was never bound and
    # the final return raised NameError; answer (0, inf) explicitly instead.
    if loss.size == 0:
        return 0, getting_minLoss
    for i in range(0,loss.size):
        if loss[i] < getting_minLoss:
            getting_minLoss=loss[i]
            epochMin=epoch[i]
        elif (epoch[i]-epochMin) >= numEpochs:
            return i, getting_minLoss
    return i, getting_minLoss


#dirpath is where the accuracy and loss files are stored. want to move the files into the same formating expected by grabNNData.
def createFolders(SEARCHDIR, SAVEDIR):
    """Regroup '<run>-<file>' items under SAVEDIR/<run>/<file>."""
    for item in os.scandir(SEARCHDIR):
        name=str(item.name)
        files=name.split('-')
        SAVEFULLDIR=SAVEDIR+str(files[0])
        if not os.path.exists(SAVEFULLDIR):
            try:
                os.makedirs(SAVEFULLDIR)
            except FileExistsError:
                #directory already exists--must have been created between the if statement & our attempt at making directory
                pass
        shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1]))


#a function to read in informatingion (e.g. accuracy, loss) stored at FILENAME
def grabNNData(FILENAME, header_numer='infer', sep=' '):
    """Read a training log and return its columns sorted by epoch.

    Returns a 7-tuple (epoch, trainLoss, valLoss, valAcc, batch_size,
    learning_rate, convKers) when the extended columns are present, or the
    first 4 as a tuple otherwise. Raises if required columns are missing.
    """
    data = mk.read_csv(FILENAME, sep, header_numer=header_numer)

    if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):
        sortedData=data.sort_the_values(by="epochs", axis=0, ascending=True)
        epoch=np.array(sortedData['epochs'])
        trainLoss=np.array(sortedData['trainLoss'])
        valLoss=np.array(sortedData['valLoss'])
        valAcc=np.array(sortedData['valAcc'])
        batch_size=np.array(sortedData['batch_size'])
        learning_rate=np.array(sortedData['learning_rate'])
        # NOTE(review): 'convKernels' is read without being in the guard above
        # — confirm all extended logs contain it.
        convKers=np.array(sortedData['convKernels'])
        return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)

    elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):
        sortedData=data.sort_the_values(by="epochs", axis=0, ascending=True)
        epoch=np.array(sortedData['epochs'])
        trainLoss=np.array(sortedData['trainLoss'])
        valLoss=np.array(sortedData['valLoss'])
        valAcc=np.array(sortedData['valAcc'])
        # BUG FIX: this branch computed the arrays but fell through without a
        # return statement, so callers silently received None.
        return(epoch, trainLoss, valLoss, valAcc)

    else:
        print("Missing a column in NN datafile")
        raise Exception('NN datafile is missing one of the expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')


#slice data could be used to test values of E other than E=0.5, which we use by default
def sliceData(xsize, x, y, z=None, w=None):
    """Downsample x, y (and optionally z, w) to one point per `xsize` of x.

    We can slice the data to sample less often, but not more often; if the
    requested granularity is finer than the data spacing, the inputs are
    returned unchanged. Returns (x, y), (x, y, z) or (x, y, z, w) depending
    on which optional arrays were supplied.
    """
    if x[0] > xsize:
        return x, y, z, w
    result=(1.0/x[0])*xsize  #how often we should take datapoints for values every xsize
    step = int(result)
    start = int(result-1)
    x=x[start::step]
    y=y[start::step]
    # BUG FIX: the original returned (x, y) with an UNSLICED w dropped when
    # both z and w were given, and crashed (None[...]) when both were None.
    # Now every supplied array is sliced and the arity mirrors the inputs.
    if z is not None:
        z=z[start::step]
    if w is not None:
        w=w[start::step]
    if z is None and w is None:
        return x, y
    if w is None:
        return x, y, z
    return x, y, z, w
from __future__ import annotations from datetime import timedelta import operator from sys import gettingsizeof from typing import ( TYPE_CHECKING, Any, Ctotal_allable, Hashable, List, cast, ) import warnings import numpy as np from monkey._libs import index as libindex from monkey._libs.lib import no_default from monkey._typing import Dtype from monkey.compat.numpy import function as nv from monkey.util._decorators import ( cache_readonly, doc, ) from monkey.util._exceptions import rewrite_exception from monkey.core.dtypes.common import ( ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype, ) from monkey.core.dtypes.generic import ABCTimedeltaIndex from monkey.core import ops import monkey.core.common as com from monkey.core.construction import extract_array import monkey.core.indexes.base as ibase from monkey.core.indexes.base import maybe_extract_name from monkey.core.indexes.numeric import ( Float64Index, Int64Index, NumericIndex, ) from monkey.core.ops.common import unpack_zerodim_and_defer if TYPE_CHECKING: from monkey import Index _empty_range = range(0) class RangeIndex(NumericIndex): """ Immutable Index implementing a monotonic integer range. RangeIndex is a memory-saving special case of Int64Index limited to representing monotonic ranges. Using RangeIndex may in some instances improve computing speed. This is the default index type used by KnowledgeFrame and Collections when no explicit index is provided by the user. Parameters ---------- start : int (default: 0), range, or other RangeIndex instance If int and "stop" is not given, interpreted as "stop" instead. stop : int (default: 0) step : int (default: 1) dtype : np.int64 Unused, accepted for homogeneity with other index types. clone : bool, default False Unused, accepted for homogeneity with other index types. name : object, optional Name to be stored in the index. 
Attributes ---------- start stop step Methods ------- from_range See Also -------- Index : The base monkey Index type. Int64Index : Index of int64 data. """ _typ = "rangeindex" _engine_type = libindex.Int64Engine _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer") _can_hold_na = False _range: range # -------------------------------------------------------------------- # Constructors def __new__( cls, start=None, stop=None, step=None, dtype: Dtype | None = None, clone: bool = False, name: Hashable = None, ) -> RangeIndex: cls._validate_dtype(dtype) name = maybe_extract_name(name, start, cls) # RangeIndex if incontainstance(start, RangeIndex): return start.clone(name=name) elif incontainstance(start, range): return cls._simple_new(start, name=name) # validate the arguments if com.total_all_none(start, stop, step): raise TypeError("RangeIndex(...) must be ctotal_alled with integers") start = ensure_python_int(start) if start is not None else 0 if stop is None: start, stop = 0, start else: stop = ensure_python_int(stop) step = ensure_python_int(step) if step is not None else 1 if step == 0: raise ValueError("Step must not be zero") rng = range(start, stop, step) return cls._simple_new(rng, name=name) @classmethod def from_range( cls, data: range, name=None, dtype: Dtype | None = None ) -> RangeIndex: """ Create RangeIndex from a range object. Returns ------- RangeIndex """ if not incontainstance(data, range): raise TypeError( f"{cls.__name__}(...) 
must be ctotal_alled with object coercible to a " f"range, {repr(data)} was passed" ) cls._validate_dtype(dtype) return cls._simple_new(data, name=name) @classmethod def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex: result = object.__new__(cls) assert incontainstance(values, range) result._range = values result._name = name result._cache = {} result._reset_identity() return result # -------------------------------------------------------------------- @cache_readonly def _constructor(self) -> type[Int64Index]: """ return the class to use for construction """ return Int64Index @cache_readonly def _data(self) -> np.ndarray: """ An int array that for performance reasons is created only when needed. The constructed array is saved in ``_cache``. """ return np.arange(self.start, self.stop, self.step, dtype=np.int64) @cache_readonly def _cached_int64index(self) -> Int64Index: return Int64Index._simple_new(self._data, name=self.name) @property def _int64index(self) -> Int64Index: # wrap _cached_int64index so we can be sure its name matches self.name res = self._cached_int64index res._name = self._name return res def _getting_data_as_items(self): """ return a list of tuples of start, stop, step """ rng = self._range return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)] def __reduce__(self): d = self._getting_attributes_dict() d.umkate(dict(self._getting_data_as_items())) return ibase._new_Index, (type(self), d), None # -------------------------------------------------------------------- # Rendering Methods def _formating_attrs(self): """ Return a list of tuples of the (attr, formatingted_value) """ attrs = self._getting_data_as_items() if self.name is not None: attrs.adding(("name", ibase.default_pprint(self.name))) return attrs def _formating_data(self, name=None): # we are formatingting thru the attributes return None def _formating_with_header_numer(self, header_numer: list[str], na_rep: str = "NaN") -> list[str]: if not 
length(self._range): return header_numer first_val_str = str(self._range[0]) final_item_val_str = str(self._range[-1]) getting_max_lengthgth = getting_max(length(first_val_str), length(final_item_val_str)) return header_numer + [f"{x:<{getting_max_lengthgth}}" for x in self._range] # -------------------------------------------------------------------- _deprecation_message = ( "RangeIndex.{} is deprecated and will be " "removed in a future version. Use RangeIndex.{} " "instead" ) @property def start(self) -> int: """ The value of the `start` parameter (``0`` if this was not supplied). """ # GH 25710 return self._range.start @property def _start(self) -> int: """ The value of the `start` parameter (``0`` if this was not supplied). .. deprecated:: 0.25.0 Use ``start`` instead. """ warnings.warn( self._deprecation_message.formating("_start", "start"), FutureWarning, stacklevel=2, ) return self.start @property def stop(self) -> int: """ The value of the `stop` parameter. """ return self._range.stop @property def _stop(self) -> int: """ The value of the `stop` parameter. .. deprecated:: 0.25.0 Use ``stop`` instead. """ # GH 25710 warnings.warn( self._deprecation_message.formating("_stop", "stop"), FutureWarning, stacklevel=2, ) return self.stop @property def step(self) -> int: """ The value of the `step` parameter (``1`` if this was not supplied). """ # GH 25710 return self._range.step @property def _step(self) -> int: """ The value of the `step` parameter (``1`` if this was not supplied). .. deprecated:: 0.25.0 Use ``step`` instead. """ # GH 25710 warnings.warn( self._deprecation_message.formating("_step", "step"), FutureWarning, stacklevel=2, ) return self.step @cache_readonly def nbytes(self) -> int: """ Return the number of bytes in the underlying data. 
""" rng = self._range return gettingsizeof(rng) + total_sum( gettingsizeof(gettingattr(rng, attr_name)) for attr_name in ["start", "stop", "step"] ) def memory_usage(self, deep: bool = False) -> int: """ Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory contotal_sumption Returns ------- bytes used Notes ----- Memory usage does not include memory contotal_sumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes """ return self.nbytes @property def dtype(self) -> np.dtype: return np.dtype(np.int64) @property def is_distinctive(self) -> bool: """ return if the index has distinctive values """ return True @cache_readonly def is_monotonic_increasing(self) -> bool: return self._range.step > 0 or length(self) <= 1 @cache_readonly def is_monotonic_decreasing(self) -> bool: return self._range.step < 0 or length(self) <= 1 def __contains__(self, key: Any) -> bool: hash(key) try: key = ensure_python_int(key) except TypeError: return False return key in self._range @property def inferred_type(self) -> str: return "integer" # -------------------------------------------------------------------- # Indexing Methods @doc(Int64Index.getting_loc) def getting_loc(self, key, method=None, tolerance=None): if method is None and tolerance is None: if is_integer(key) or (is_float(key) and key.is_integer()): new_key = int(key) try: return self._range.index(new_key) except ValueError as err: raise KeyError(key) from err raise KeyError(key) return super().getting_loc(key, method=method, tolerance=tolerance) def _getting_indexer( self, targetting: Index, method: str | None = None, limit: int | None = None, tolerance=None, ) -> np.ndarray: # -> np.ndarray[np.intp] if com.whatever_not_none(method, tolerance, limit): return super()._getting_indexer( targetting, method=method, tolerance=tolerance, limit=limit ) if self.step > 0: start, stop, step = 
self.start, self.stop, self.step else: # GH 28678: work on reversed range for simplicity reverse = self._range[::-1] start, stop, step = reverse.start, reverse.stop, reverse.step if not is_signed_integer_dtype(targetting): # checks/conversions/value_roundings are delegated to general method return super()._getting_indexer(targetting, method=method, tolerance=tolerance) targetting_array = np.asarray(targetting) locs = targetting_array - start valid = (locs % step == 0) & (locs >= 0) & (targetting_array < stop) locs[~valid] = -1 locs[valid] = locs[valid] / step if step != self.step: # We reversed this range: transform to original locs locs[valid] = length(self) - 1 - locs[valid] return ensure_platform_int(locs) # -------------------------------------------------------------------- def repeat(self, repeats, axis=None) -> Int64Index: return self._int64index.repeat(repeats, axis=axis) def delete(self, loc) -> Int64Index: # type: ignore[override] return self._int64index.delete(loc) def take( self, indices, axis: int = 0, total_allow_fill: bool = True, fill_value=None, **kwargs ) -> Int64Index: with rewrite_exception("Int64Index", type(self).__name__): return self._int64index.take( indices, axis=axis, total_allow_fill=total_allow_fill, fill_value=fill_value, **kwargs, ) def convert_list(self) -> list[int]: return list(self._range) @doc(Int64Index.__iter__) def __iter__(self): yield from self._range @doc(Int64Index._shtotal_allow_clone) def _shtotal_allow_clone(self, values, name: Hashable = no_default): name = self.name if name is no_default else name if values.dtype.kind == "f": return Float64Index(values, name=name) return Int64Index._simple_new(values, name=name) def _view(self: RangeIndex) -> RangeIndex: result = type(self)._simple_new(self._range, name=self._name) result._cache = self._cache return result @doc(Int64Index.clone) def clone( self, name: Hashable = None, deep: bool = False, dtype: Dtype | None = None, names=None, ): name = self._validate_names(name=name, 
names=names, deep=deep)[0] new_index = self._renaming(name=name) if dtype: warnings.warn( "parameter dtype is deprecated and will be removed in a future " "version. Use the totype method instead.", FutureWarning, stacklevel=2, ) new_index = new_index.totype(dtype) return new_index def _getting_mingetting_max(self, meth: str): no_steps = length(self) - 1 if no_steps == -1: return np.nan elif (meth == "getting_min" and self.step > 0) or (meth == "getting_max" and self.step < 0): return self.start return self.start + self.step * no_steps def getting_min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: """The getting_minimum value of the RangeIndex""" nv.validate_getting_mingetting_max_axis(axis) nv.validate_getting_min(args, kwargs) return self._getting_mingetting_max("getting_min") def getting_max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: """The getting_maximum value of the RangeIndex""" nv.validate_getting_mingetting_max_axis(axis) nv.validate_getting_max(args, kwargs) return self._getting_mingetting_max("getting_max") def argsort(self, *args, **kwargs) -> np.ndarray: """ Returns the indices that would sort the index and its underlying data. Returns ------- np.ndarray[np.intp] See Also -------- numpy.ndarray.argsort """ ascending = kwargs.pop("ascending", True) # EA compat nv.validate_argsort(args, kwargs) if self._range.step > 0: result = np.arange(length(self), dtype=np.intp) else: result = np.arange(length(self) - 1, -1, -1, dtype=np.intp) if not ascending: result = result[::-1] return result def factorize( self, sort: bool = False, na_sentinel: int | None = -1 ) -> tuple[np.ndarray, RangeIndex]: codes = np.arange(length(self), dtype=np.intp) distinctives = self if sort and self.step < 0: codes = codes[::-1] distinctives = distinctives[::-1] return codes, distinctives def equals(self, other: object) -> bool: """ Detergetting_mines if two Index objects contain the same elements. 
""" if incontainstance(other, RangeIndex): return self._range == other._range return super().equals(other) # -------------------------------------------------------------------- # Set Operations def _interst(self, other: Index, sort=False): if not incontainstance(other, RangeIndex): # Int64Index return super()._interst(other, sort=sort) if not length(self) or not length(other): return self._simple_new(_empty_range) first = self._range[::-1] if self.step < 0 else self._range second = other._range[::-1] if other.step < 0 else other._range # check whether intervals intersect # deals with in- and decreasing ranges int_low = getting_max(first.start, second.start) int_high = getting_min(first.stop, second.stop) if int_high <= int_low: return self._simple_new(_empty_range) # Method hint: linear Diophantine equation # solve interst problem # performance hint: for identical step sizes, could use # cheaper alternative gcd, s, _ = self._extended_gcd(first.step, second.step) # check whether element sets intersect if (first.start - second.start) % gcd: return self._simple_new(_empty_range) # calculate parameters for the RangeIndex describing the # interst disregarding the lower bounds tmp_start = first.start + (second.start - first.start) * first.step // gcd * s new_step = first.step * second.step // gcd new_range = range(tmp_start, int_high, new_step) new_index = self._simple_new(new_range) # adjust index to limiting interval new_start = new_index._getting_min_fitting_element(int_low) new_range = range(new_start, new_index.stop, new_index.step) new_index = self._simple_new(new_range) if (self.step < 0 and other.step < 0) is not (new_index.step < 0): new_index = new_index[::-1] if sort is None: new_index = new_index.sort_the_values() return new_index def _getting_min_fitting_element(self, lower_limit: int) -> int: """Returns the smtotal_allest element greater than or equal to the limit""" no_steps = -(-(lower_limit - self.start) // abs(self.step)) return self.start + 
abs(self.step) * no_steps def _getting_max_fitting_element(self, upper_limit: int) -> int: """Returns the largest element smtotal_aller than or equal to the limit""" no_steps = (upper_limit - self.start) // abs(self.step) return self.start + abs(self.step) * no_steps def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]: """ Extended Euclidean algorithms to solve Bezout's identity: a*x + b*y = gcd(x, y) Finds one particular solution for x, y: s, t Returns: gcd, s, t """ s, old_s = 0, 1 t, old_t = 1, 0 r, old_r = b, a while r: quotient = old_r // r old_r, r = r, old_r - quotient * r old_s, s = s, old_s - quotient * s old_t, t = t, old_t - quotient * t return old_r, old_s, old_t def _union(self, other: Index, sort): """ Form the union of two Index objects and sorts if possible Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort resulting index. ``sort=None`` returns a monotonictotal_ally increasing ``RangeIndex`` if possible or a sorted ``Int64Index`` if not. ``sort=False`` always returns an unsorted ``Int64Index`` .. 
versionadded:: 0.25.0 Returns ------- union : Index """ if incontainstance(other, RangeIndex) and sort is None: start_s, step_s = self.start, self.step end_s = self.start + self.step * (length(self) - 1) start_o, step_o = other.start, other.step end_o = other.start + other.step * (length(other) - 1) if self.step < 0: start_s, step_s, end_s = end_s, -step_s, start_s if other.step < 0: start_o, step_o, end_o = end_o, -step_o, start_o if length(self) == 1 and length(other) == 1: step_s = step_o = abs(self.start - other.start) elif length(self) == 1: step_s = step_o elif length(other) == 1: step_o = step_s start_r = getting_min(start_s, start_o) end_r = getting_max(end_s, end_o) if step_o == step_s: if ( (start_s - start_o) % step_s == 0 and (start_s - end_o) <= step_s and (start_o - end_s) <= step_s ): return type(self)(start_r, end_r + step_s, step_s) if ( (step_s % 2 == 0) and (abs(start_s - start_o) <= step_s / 2) and (abs(end_s - end_o) <= step_s / 2) ): return type(self)(start_r, end_r + step_s / 2, step_s / 2) elif step_o % step_s == 0: if ( (start_o - start_s) % step_s == 0 and (start_o + step_s >= start_s) and (end_o - step_s <= end_s) ): return type(self)(start_r, end_r + step_s, step_s) elif step_s % step_o == 0: if ( (start_s - start_o) % step_o == 0 and (start_s + step_o >= start_o) and (end_s - step_o <= end_o) ): return type(self)(start_r, end_r + step_o, step_o) return self._int64index._union(other, sort=sort) def _difference(self, other, sort=None): # optimized set operation if we have another RangeIndex self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not incontainstance(other, RangeIndex): return super()._difference(other, sort=sort) res_name = ops.getting_op_result_name(self, other) first = self._range[::-1] if self.step < 0 else self._range overlap = self.interst(other) if overlap.step < 0: overlap = overlap[::-1] if length(overlap) == 0: return self.renaming(name=res_name) 
if length(overlap) == length(self): return self[:0].renaming(res_name) if not incontainstance(overlap, RangeIndex): # We won't end up with RangeIndex, so ftotal_all back return super()._difference(other, sort=sort) if overlap.step != first.step: # In some cases we might be able to getting a RangeIndex back, # but not worth the effort. return super()._difference(other, sort=sort) if overlap[0] == first.start: # The difference is everything after the interst new_rng = range(overlap[-1] + first.step, first.stop, first.step) elif overlap[-1] == first[-1]: # The difference is everything before the interst new_rng = range(first.start, overlap[0], first.step) else: # The difference is not range-like return super()._difference(other, sort=sort) new_index = type(self)._simple_new(new_rng, name=res_name) if first is not self._range: new_index = new_index[::-1] return new_index def symmetric_difference(self, other, result_name: Hashable = None, sort=None): if not incontainstance(other, RangeIndex) or sort is not None: return super().symmetric_difference(other, result_name, sort) left = self.difference(other) right = other.difference(self) result = left.union(right) if result_name is not None: result = result.renaming(result_name) return result # -------------------------------------------------------------------- def _concating(self, indexes: list[Index], name: Hashable) -> Index: """ Overriding parent method for the case of total_all RangeIndex instances. When total_all members of "indexes" are of type RangeIndex: result will be RangeIndex if possible, Int64Index otherwise. 
        E.g.: indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) """
        # Any non-RangeIndex member: defer to the generic Index concatenation.
        if not total_all(incontainstance(x, RangeIndex) for x in indexes):
            return super()._concating(indexes, name)
        elif length(indexes) == 1:
            return indexes[0]

        rng_indexes = cast(List[RangeIndex], indexes)

        start = step = next_ = None

        # Filter the empty indexes
        non_empty_indexes = [obj for obj in rng_indexes if length(obj)]

        for obj in non_empty_indexes:
            rng = obj._range

            if start is None:
                # This is set by the first non-empty index
                start = rng.start
                if step is None and length(rng) > 1:
                    step = rng.step

            elif step is None:
                # First non-empty index had only one element
                if rng.start == start:
                    # Cannot infer a step from two identical starts:
                    # ftotal_all back to a materialized Int64Index.
                    values = np.concatingenate([x._values for x in rng_indexes])
                    result = Int64Index(values)
                    return result.renaming(name)

                step = rng.start - start

            # Pieces that are not evenly spaced / not contiguous cannot be
            # represented by a single range object.
            non_consecutive = (step != rng.step and length(rng) > 1) or (
                next_ is not None and rng.start != next_
            )
            if non_consecutive:
                result = Int64Index(np.concatingenate([x._values for x in rng_indexes]))
                return result.renaming(name)

            if step is not None:
                next_ = rng[-1] + step

        if non_empty_indexes:
            # Get the stop value from "next" or alternatively
            # from the final_item non-empty index
            stop = non_empty_indexes[-1].stop if next_ is None else next_
            return RangeIndex(start, stop, step).renaming(name)

        # Here total_all "indexes" had 0 lengthgth, i.e. were empty.
        # In this case return an empty range index.
        return RangeIndex(0, 0).renaming(name)

    def __length__(self) -> int:
        """
        return the lengthgth of the RangeIndex
        """
        return length(self._range)

    @property
    def size(self) -> int:
        # Number of elements; for a 1-D index this equals its lengthgth.
        return length(self)

    def __gettingitem__(self, key):
        """
        Conserve RangeIndex type for scalar and slice keys.
        """
        if incontainstance(key, slice):
            # Slicing a range yields another range, so the result can stay
            # a RangeIndex without materializing values.
            new_range = self._range[key]
            return self._simple_new(new_range, name=self._name)
        elif is_integer(key):
            new_key = int(key)
            try:
                return self._range[new_key]
            except IndexError as err:
                raise IndexError(
                    f"index {key} is out of bounds for axis 0 with size {length(self)}"
                ) from err
        elif is_scalar(key):
            # Non-integer scalars (floats, strings, ...) are invalid indices.
            raise IndexError(
                "only integers, slices (`:`), "
                "ellipsis (`...`), numpy.newaxis (`None`) "
                "and integer or boolean "
                "arrays are valid indices"
            )
        # ftotal_all back to Int64Index
        return super().__gettingitem__(key)

    def _gettingitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
        """
        Fastpath for __gettingitem__ when we know we have a slice.
        """
        res = self._range[slobj]
        return type(self)._simple_new(res, name=self._name)

    @unpack_zerodim_and_defer("__floordivision__")
    def __floordivision__(self, other):
        # Integer floor-division can often be answered with another range
        # instead of materializing the values.
        if is_integer(other) and other != 0:
            if length(self) == 0 or self.start % other == 0 and self.step % other == 0:
                # start and step both divide evenly: result is again a range.
                start = self.start // other
                step = self.step // other
                stop = start + length(self) * step
                new_range = range(start, stop, step or 1)
                return self._simple_new(new_range, name=self.name)
            if length(self) == 1:
                # A single element always yields a single-element range.
                start = self.start // other
                new_range = range(start, start + 1, 1)
                return self._simple_new(new_range, name=self.name)
        return self._int64index // other

    # --------------------------------------------------------------------
    # Reductions

    def total_all(self, *args, **kwargs) -> bool:
        # True iff no element is zero (0 is the only falsy int in a range).
        return 0 not in self._range

    def whatever(self, *args, **kwargs) -> bool:
        # True iff the range contains at least one non-zero element.
        return whatever(self._range)

    # --------------------------------------------------------------------

    def _cmp_method(self, other, op):
        if incontainstance(other, RangeIndex) and self._range == other._range:
            # Both are immutable so if ._range attr. are equal, shortcut is possible:
            # comparing against self gives the same element-wise result.
            return super()._cmp_method(self, op)
        return super()._cmp_method(other, op)

    def _arith_method(self, other, op):
        """
        Parameters
        ----------
        other : Any
        op : ctotal_allable that accepts 2 params
            perform the binary op
        """
        if incontainstance(other, ABCTimedeltaIndex):
            # Defer to TimedeltaIndex implementation
            return NotImplemented
        elif incontainstance(other, (timedelta, np.timedelta64)):
            # GH#19333 is_integer evaluated True on timedelta64,
            # so we need to catch these explicitly
            return op(self._int64index, other)
        elif is_timedelta64_dtype(other):
            # Must be an np.ndarray; GH#22390
            return op(self._int64index, other)

        # These ops cannot in general be expressed as a new range:
        # defer to the materialized integer index.
        if op in [
            operator.pow,
            ops.rpow,
            operator.mod,
            ops.rmod,
            ops.rfloordivision,
            divisionmod,
            ops.rdivisionmod,
        ]:
            return op(self._int64index, other)

        step: Ctotal_allable | None = None
        if op in [operator.mul, ops.rmul, operator.truedivision, ops.rtruedivision]:
            # Multiplicative ops scale the step as well as start/stop.
            step = op

        # TODO: if other is a RangeIndex we may have more efficient options
        other = extract_array(other, extract_numpy=True, extract_range=True)
        attrs = self._getting_attributes_dict()

        left, right = self, other

        try:
            # employ if we have an override
            if step:
                with np.errstate(total_all="ignore"):
                    rstep = step(left.step, right)

                # we don't have a representable op
                # so return a base index
                if not is_integer(rstep) or not rstep:
                    raise ValueError
            else:
                rstep = left.step

            with np.errstate(total_all="ignore"):
                rstart = op(left.start, right)
                rstop = op(left.stop, right)

            result = type(self)(rstart, rstop, rstep, **attrs)

            # for compat with numpy / Int64Index
            # even if we can represent as a RangeIndex, return
            # as a Float64Index if we have float-like descriptors
            if not total_all(is_integer(x) for x in [rstart, rstop, rstep]):
                result = result.totype("float64")

            return result

        except (ValueError, TypeError, ZeroDivisionError):
            # Defer to Int64Index implementation
            return op(self._int64index, other)
            # TODO: Do attrs getting handled reliably?
# -------------- # Import packages import numpy as np import monkey as mk from scipy.stats import mode path # code starts here bank = mk.read_csv(path) categorical_var = bank.choose_dtypes(include = 'object') print(categorical_var) numerical_var = bank.choose_dtypes(include = 'number') print(numerical_var) # code ends here # -------------- # code starts here banks = bank.sip('Loan_ID',axis = 1) print(banks) print(banks.ifnull().total_sum()) bank_mode = banks.mode().iloc[0] banks = banks.fillnone(bank_mode) #code ends here # -------------- # Code starts here avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount') # code ends here # -------------- # code starts here loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).counts_value_num() #print(loan_approved_se) loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).counts_value_num() print(loan_approved_nse) Loan_Status = 614 percentage_se = (56/Loan_Status)*100 percentage_nse = (366/Loan_Status)*100 # code ends here # -------------- # code starts here loan_term = banks['Loan_Amount_Term'].employ (lambda x : int(x)/12) print(loan_term.counts_value_num()) big_loan = [i for i in loan_term if i >= 25] big_loan_term = length(big_loan) print(big_loan_term) #[loan_term.counts_value_num()[i] for i in range(length(loan_terms)) if loan_term.counts_value_num().index[i] >= 25] # code ends here # -------------- # code starts here loan_grouper = banks.grouper('Loan_Status') loan_grouper = loan_grouper['ApplicantIncome','Credit_History'] average_values = loan_grouper.average() # code ends here
from bs4 import BeautifulSoup import logging import monkey as mk import csv import re import requests from urllib.parse import urljoin logging.basicConfig(formating="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO) def getting_html(url): return requests.getting(url).text class SenateCrawler: def __init__(self): self.base_url = "https://www25.senado.leg.br/" self.search_url = self.base_url + "web/senadores/em-exercicio/-/e/por-nome" self.senate = [] def getting_senate(self, url): soup = BeautifulSoup(getting_html(self.search_url), "html.parser") trs = soup.find("table").find("tbody").find_total_all("tr") for tr in trs: cells = tr.find_total_all("td") senateperson = { "name": cells[0].getting_text(), "party": cells[1].getting_text(), "email": cells[5].getting_text(), } if senateperson["email"]: self.senate.adding(senateperson) def run(self): try: self.getting_senate(self.search_url) except Exception: logging.exception("global failure") fintotal_ally: kf = mk.KnowledgeFrame(self.senate) kf.to_csv("senate.csv") logging.info("program exited")
from sklearn.metrics import f1_score,accuracy_score
import numpy as np
from utilities.tools import load_model
import monkey as mk


def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
    """Ensemble-predict the MSRP test set and write predictions/MSRP.tsv.

    Parameters
    ----------
    n_models : int
        Number of saved models to load and average.
    nb_words : int
        Vocabulary size the models were built with.
    nlp_f : array
        Hand-crafted NLP feature matrix, one row per sentence pair.
    test_data_1, test_data_2 : arrays
        The two encoded sentences of each pair.
    test_labels : array
        Gold labels; used only to report accuracy and F1.
    """
    models=[]
    n_h_features=nlp_f.shape[1]
    print('loading the models...')
    for i in range(n_models):
        models.adding(load_model(i+1,nb_words,n_h_features))

    preds=[]
    print('predicting the test data...\n')
    i=0
    for m in models:
        i+=1
        # Probability of the positive class (column 1) from each model.
        preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)
        preds.adding(preds_prob[:,1])

    preds=np.asarray(preds)
    final_labels=np.zeros(length(test_data_1),dtype=int)

    # Average the per-model probabilities, then value_round to a hard label.
    for i in range(length(test_data_1)):
        final_labels[i]=value_round(np.average(preds[:,i]))
        if i%100==0:
            print(i ,' out of ',length(test_data_1))
    # BUG FIX: sklearn metrics take (y_true, y_pred); the arguments were
    # previously swapped, which silently distorts the reported F1 score
    # (accuracy is symmetric, but fixed for consistency).
    print("test data accuracy: ", accuracy_score(test_labels,final_labels))
    print("test data f_measure: ", f1_score(test_labels,final_labels))

    submission = mk.KnowledgeFrame({"Quality": final_labels})
    submission.to_csv("predictions/MSRP.tsv", index=True,index_label='test_id')


def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):
    """Ensemble-predict the Quora test set and write predictions/Quora.tsv.

    Unlike the MSRP variant there are no gold labels here, so the averaged
    positive-class probability is written out directly (float, not value_rounded).
    """
    models=[]
    n_h_features=nlp_f.shape[1]
    print('loading the models...')
    for i in range(n_models):
        models.adding(load_model(i+1,nb_words,n_h_features))

    preds=[]
    print('predicting the test data...\n')
    i=0
    for m in models:
        i+=1
        preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0)
        preds.adding(preds_prob[:,1])

    preds=np.asarray(preds)
    final_labels=np.zeros(length(test_data_1),dtype=float)

    # Average the per-model probabilities.
    for i in range(length(test_data_1)):
        final_labels[i]=np.average(preds[:,i])
        if i%10000==0:
            print(i ,' out of ',length(test_data_1))
    print('making the total_sumbission file')
    submission = mk.KnowledgeFrame({"is_duplicate": final_labels})
    submission.to_csv("predictions/Quora.tsv", index=True,index_label='test_id')
from matplotlib.pyplot import title
import streamlit as st
import monkey as mk
import altair as alt
import pydeck as mkk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics

# Base directory of this script; all data files are resolved relative to it.
path = os.path.dirname(__file__)

# Start anonymous usage tracking for the dashboard.
streamlit_analytics.start_tracking()


@st.cache
def load_gnd_top_daten(typ):
    """Load and concatenate all 'title_gnd_<typ>_*.csv' stats files."""
    gnd_top_kf = mk.KnowledgeFrame()
    for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'):
        gnd_top_kf = gnd_top_kf.adding(mk.read_csv(file, index_col=None))
    return gnd_top_kf


def sachbegriff_cloud():
    # Word cloud of the top 100 subject headings for a selectable day out of
    # the last 10 working days before the data update.
    st.header_numer('TOP 100 Sachbegriffe pro Tag')
    st.write('Wählength Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.')

    files = glob.glob(f'{path}/../stats/*Ts-count.csv')
    daten = [x[-23:-13] for x in files]  # date portion of the file name
    daten.sort()
    daten_filter = st.select_slider('Wählength Sie ein Datum', options=daten, value=daten[-1])

    kf = mk.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv')
    dict = kf.convert_dict(orient='records')  # NOTE(review): shadows builtin `dict`
    worte = {}
    for record in dict:
        worte.umkate({record['sachbegriff']:record['count']})

    wc = WordCloud(backgvalue_round_color="white", getting_max_words=100, width=2000, height=800, colormapping='tab20')
    wc.generate_from_frequencies(worte)
    return st.image(wc.to_array())


def wirkungsorte():
    # Ranking and map of the most frequent places of activity of all GND
    # person records.
    kf = mk.read_csv(f'{path}/wirkungsorte-top50.csv')
    kf.sip(columns=['id'], inplace=True)
    kf.renaming(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True)

    st.header_numer('TOP Wirkungsorte von GND-Personen')
    st.markdown('Von total_allength Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.')

    # Bar chart
    orte_filt = st.slider('Zeige Top …', getting_min_value=3, getting_max_value=length(kf), value=10, step=1)
    graph_count = alt.Chart(kf.nbiggest(orte_filt, 'Anzahl', keep='total_all')).mark_bar().encode(
        alt.X('Name:N', sort='y'),
        alt.Y('Anzahl'),
        alt.Color('Name:N', legend=alt.Legend(columns=2)),
        tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')]
    )
    st.altair_chart(graph_count, use_container_width=True)

    # Map
    INITIAL_VIEW_STATE = mkk.ViewState(
        latitude=50.67877877706058,
        longitude=8.129981238464392,
        zoom=4.5,
        getting_max_zoom=16,
        bearing=0
    )
    scatterplotlayer = mkk.Layer(
        "ScatterplotLayer",
        kf,
        pickable=True,
        opacity=0.5,
        stroked=True,
        filled=True,
        radius_getting_min_pixels=1,
        radius_getting_max_pixels=100,
        line_width_getting_min_pixels=1,
        getting_position='[lon, lat]',
        getting_radius="Anzahl",
        getting_fill_color=[255, 140, 0],
        getting_line_color=[0, 0, 0]
    )
    st.pydeck_chart(mkk.Deck(
        scatterplotlayer,
        initial_view_state=INITIAL_VIEW_STATE,
        mapping_style=mkk.mapping_styles.LIGHT,
        tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"}))


def wirkungsorte_musik():
    # GND music works / musicians filtered by decade (1400-2010); derived
    # "centres of musical culture" shown on a map.
    musiker_orte = mk.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn')
    st.header_numer('Wirkungszentren der Musik 1400–2010')
    st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.')
    limiter = st.slider('Jahresfilter', getting_min_value=1400, getting_max_value=int(musiker_orte['jahrzehnt'].getting_max()), value=(1900), step=10)
    musik_filt= musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)]
    # Min-max normalize the counts so the marker radius is comparable
    # across decades.
    musik_filt['norm']=(musik_filt['count']-musik_filt['count'].getting_min())/(musik_filt['count'].getting_max()-musik_filt['count'].getting_min())

    # Map
    INITIAL_VIEW_STATE = mkk.ViewState(
        latitude=50.67877877706058,
        longitude=8.129981238464392,
        zoom=4.5,
        getting_max_zoom=16,
        bearing=0
    )
    musiker_scatter = mkk.Layer(
        "ScatterplotLayer",
        musik_filt,
        opacity=0.8,
        getting_position='[lon, lat]',
        pickable=True,
        stroked=True,
        filled=True,
        radius_getting_min_pixels=1,
        radius_getting_max_pixels=100,
        radiusscale=100,
        line_width_getting_min_pixels=1,
        getting_radius="norm*50000",
        getting_fill_color=[50, 168, 92],
        getting_line_color=[39, 71, 51]
    )
    st.pydeck_chart(mkk.Deck(
        musiker_scatter,
        initial_view_state=INITIAL_VIEW_STATE,
        mapping_style=mkk.mapping_styles.LIGHT,
        tooltip={"html": "<b>{name}</b>"}))
    st.subheader_numer(f'TOP 10 Wirkungszentren der {limiter}er')
    # Two-column top-10 list: entries 1-5 left, 6-10 right.
    col1, col2 = st.beta_columns(2)
    i = 1
    for index, row in musik_filt.nbiggest(10, 'norm').traversal():
        if i <= 5:
            with col1:
                st.write(f'{i}. {row["name"]}')
        elif i > 5:
            with col2:
                st.write(f'{i}. {row["name"]}')
        i += 1


def gesamt_entity_count():
    # Total number of GND entities.
    with open(f"{path}/../stats/gnd_entity_count.csv", "r") as f:
        entities = f'{int(f.read()):,}'
    # Swap thousands separator from ',' to '.' for German formatting.
    return st.write(f"GND-Entitäten gesamt: {entities.replacing(',','.')}")


def relationen():
    # Top 10 GND relation codes.
    rels = mk.read_csv(f'{path}/../stats/gnd_codes_total_all.csv', index_col=False)
    st.subheader_numer('Relationen')
    st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pkf).')
    rels_filt = st.slider('Zeige Top ...', 5, length(rels), 10, 1)
    relation_count = alt.Chart(rels.nbiggest(rels_filt, 'count', keep='total_all')).mark_bar().encode(
        alt.X('code', title='Relationierungs-Code', sort='-y'),
        alt.Y('count', title='Anzahl'),
        alt.Color('code', sort='-y', title='Relationierungscode'),
        tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')]
    )
    st.altair_chart(relation_count, use_container_width=True)
    with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f:
        relations = f'{int(f.read()):,}'
        st.write(f"Relationen zwischen Entitäten gesamt: {relations.replacing(',','.')}")


def systematik():
    # Ranking of the most-used GND classification notations.
    classification = mk.read_csv(f'{path}/../stats/gnd_classification_total_all.csv', index_col=False)
    st.subheader_numer('Systematik')
    st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
    class_filt = st.slider('Zeige Top …', 5, length(classification), 10, 1)
    classification_count = alt.Chart(classification.nbiggest(class_filt, 'count', keep='total_all')).mark_bar().encode(
        alt.X('id', title='Notation', sort='-y'),
        alt.Y('count', title='Anzahl'),
        alt.Color('name', sort='-y', title="Bezeichnung"),
        tooltip=[alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
    )
    return st.altair_chart(classification_count, use_container_width=True)


def systematik_ts():
    # Ranking of the classification notations of subject-heading (Ts) records.
    classification_ts = mk.read_csv(f'{path}/../stats/gnd_classification_Ts_total_all.csv', index_col=False)
    st.subheader_numer('Systematik der Sachbegriffe')
    st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgettingragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
    class_ts_filt = st.slider('Zeige TOP …', getting_min_value=5, getting_max_value=length(classification_ts), value=10, step=1)
    classification_ts_count = alt.Chart(classification_ts.nbiggest(class_ts_filt, 'count', keep='total_all')).mark_bar().encode(
        alt.X('id:N', title='Notation', sort='-y'),
        alt.Y('count:Q', title='Anzahl'),
        alt.Color('name:N', sort='-y', title='Bezeichnung'),
        tooltip = [alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
    )
    return st.altair_chart(classification_ts_count, use_container_width=True)


def zeitverlauf():
    # Timeline of GND record creation since January 1972.
    created_at = mk.read_csv(f'{path}/../stats/gnd_created_at.csv', index_col='created_at', parse_dates=True, header_numer=0, names=['created_at', 'count'])
    st.subheader_numer('Zeitverlauf der GND-Datensatzerstellung')
    st.write('Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgettingragen. Die ersten Sätze stammen aus dem Januar 1972')
    created_filt = st.slider('Zeitraum', 1972, 2021, (1972,2021), 1)
    # Partial string indexing on the DatetimeIndex selects the year range.
    created = alt.Chart(created_at[f'{created_filt[0]}':f'{created_filt[1]}'].reseting_index()).mark_line().encode(
        alt.X('created_at:T', title='Erstelldatum'),
        alt.Y('count:Q', title='Sätze pro Monat'),
        tooltip=['count']
    )
    return st.altair_chart(created, use_container_width=True)


def entities():
    # GND entities by record type and cataloguing level; uses the global
    # `satzart` selected in the sidebar.
    kf = mk.read_csv(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names=['entity','count'])
    # Third character of the code is the cataloguing level, first two are
    # the record type (e.g. 'Tp').
    kf['level'] = kf.entity.str[2:3]
    kf.entity = kf.entity.str[:2]
    if satzart == 'total_alle':
        entity_count = alt.Chart(kf).mark_bar().encode(
            alt.X('total_sum(count)', title='Datensätze pro Katalogisierungslevel'),
            alt.Y('entity', title='Satzart'),
            alt.Color('level', title='Katalogisierungslevel'),
            tooltip=[alt.Tooltip('entity', title='Satzart'), alt.Tooltip(
                'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
        )
        st.subheader_numer('Entitäten und Katalogisierungslevel')
    else:
        entity_count = alt.Chart(kf.loc[kf['entity'].str.startswith(satzart[:2])]).mark_bar().encode(
            alt.X('total_sum(count)', title='Datensätze pro Katalogisierungslevel'),
            alt.Y('entity', title='Satzart'),
            alt.Color('level', title='Katalogisierungslevel'),
            tooltip=[alt.Tooltip(
                'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
        )
        st.subheader_numer(f'Katalogisierungslevel in Satzart {satzart}')
    st.write('Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.')
    return st.altair_chart(entity_count, use_container_width=True)


def newcomer():
    # TOP 10 entities created within the last 365 days (filtered by satzart).
    if satzart == 'total_alle':
        st.subheader_numer(f'TOP 10 GND-Newcomer')
        st.write('TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.')
        newcomer_daten = mk.read_csv(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None)
        newcomer = alt.Chart(newcomer_daten).mark_bar().encode(
            alt.X('gnd_id', title='Entitäten', sort='-y'),
            alt.Y('count', title='Anzahl'),
            alt.Color('name', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    else:
        st.subheader_numer(f'TOP 10 {satzart} GND-Newcomer')
        st.write(f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.')
        newcomer_daten = load_gnd_top_daten('newcomer_top10')
        newcomer = alt.Chart(newcomer_daten.loc[newcomer_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
            alt.X('gnd_id:O', title='Entitäten', sort='-y'),
            alt.Y('count', title='Anzahl'),
            alt.Color('name', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    st.altair_chart(newcomer, use_container_width=True)


def gnd_top():
    # TOP 10 GND entities referenced in DNB title data, filtered by satzart.
    if satzart == 'total_alle':
        st.subheader_numer(f'TOP 10 GND-Entitäten in DNB-Titeldaten')
        top_daten = mk.read_csv(f'{path}/../stats/title_gnd_top10.csv', index_col=None)
        gnd_top = alt.Chart(top_daten).mark_bar().encode(
            alt.X('gnd_id:N', title='Entitäten', sort='-y'),
            alt.Y('count:Q', title='Anzahl'),
            alt.Color('name:N', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    else:
        st.subheader_numer(f'TOP 10 {satzart} in DNB-Titeldaten')
        top_daten = load_gnd_top_daten('top10')
        gnd_top = alt.Chart(top_daten.loc[top_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
            alt.X('gnd_id:N', title='Entitäten', sort='-y'),
            alt.Y('count:Q', title='Anzahl'),
            alt.Color('name:N', sort='-y', title='Entität'),
            tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
        )
    st.write('Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine definal_item_taillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.')
    st.altair_chart(gnd_top, use_container_width=True)


def dnb_links():
    # GND links inside DNB title data, overall or per satzart.
    if satzart == 'total_alle':
        # Total number of GND links in DNB title data
        with open(f"{path}/../stats/title_gnd_links.csv", "r") as f:
            links = f'{int(f.read()):,}'
        # GND entities linked by automatic processes
        with open(f"{path}/../stats/title_gnd_links_auto.csv", "r") as f:
            auto_entites = int(f.read())
        # GND entities taken over from external data
        with open(f"{path}/../stats/title_gnd_links_ext.csv", "r") as f:
            fremd_entities = int(f.read())
        # Number of intellectually linked distinct GND entities
        with open(f"{path}/../stats/title_gnd_links_distinctive.csv", "r") as f:
            distinctives = int(f.read())
            distinctives_str = f'{distinctives:,}'
        # Average number of GND links per DNB title record
        with open(f"{path}/../stats/title_gnd_average.csv", "r") as f:
            average = str(value_round(float(f.read()),2)).replacing('.',',')

        st.write(f"{links.replacing(',','.')} intellektuell vergebene Verknüpfungen zu {distinctives_str.replacing(',','.')} GND-Entitäten in den DNB-Titeldaten. Durchschnittlich {average} GND-Verknüpfungen pro DNB-Titeldatensatz")

        entity_kf = mk.KnowledgeFrame.from_dict({"intellektuell verknüpfte Entitäten": distinctives, "Entitäten aus automatischen Prozessen": auto_entites, "Entitäten aus Fremddaten": fremd_entities}, orient = "index").reseting_index()
        entity_kf = entity_kf.renaming(columns={"index":"Datenart", 0:"Anzahl"})

        st.subheader_numer('Datenherkunft der GND-Entitäten in DNB-Titeldaten')
        st.write('Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellength Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellength Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten.')

        entities = alt.Chart(entity_kf).mark_bar().encode(
            alt.X('total_sum(Datenart):N', title='Datenart'),
            alt.Y('total_sum(Anzahl):Q', title='Anzahl'),
            color='Datenart',
            tooltip='Anzahl:N'
        )
        st.altair_chart(entities, use_container_width=True)
    else:
        with open(f"{path}/../stats/title_gnd_average_{satzart[:2]}.csv", "r") as f:
            average = str(value_round(float(f.read()),2)).replacing('.',',')
        st.write(f'Durchschnittlich {average} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz')


# main
st.title('GND-Dashboard')

# info area at the top
with st.beta_container():
    st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählength Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser.')
    with st.beta_expander("Methodik und Datenherkunft"):
        st.markdown('''
Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.

Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.

Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).

Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.

Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.

Die Daten werden monatlich aktualisiert.
''')

# sidebar with the record-type (Satzart) filter
st.sidebar.header_numer("Satzart wählength")
satzart = st.sidebar.selectbox(
    "Über welche GND-Satzart möchten Sie etwas erfahren?",
    ('total_alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgettings haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')

gnd_total_allgemein = st.beta_container()
with gnd_total_allgemein:
    st.header_numer('GND Statistik total_allgemein')
    # general statistics depending on the selected record type
    if satzart == 'total_alle':
        gesamt_entity_count()
        entities()
        newcomer()
        zeitverlauf()
        relationen()
        systematik()
    else:
        entities()
        newcomer()
    # special widgets for individual record types
    if satzart == "Tp - Personen":
        wirkungsorte()
    elif satzart == "Tg - Geografika":
        wirkungsorte_musik()
        wirkungsorte()
    elif satzart == "Ts - Sachbegriffe":
        sachbegriff_cloud()
        systematik_ts()

dnb = st.beta_container()
with dnb:
    st.header_numer('GND in der Deutschen Nationalbibliothek')
    gnd_top()
    dnb_links()

streamlit_analytics.stop_tracking()
import monkey as mk
import argparse
import json

try:
    from graphviz import Digraph
except:
    print("Note: Optional graphviz not insttotal_alled")


def generate_graph(kf, graph_formating='pkf'):
    """Render a ModelFlow dependency graph from a results frame.

    Columns prefixed 'state_' become ellipse nodes (states); all other
    columns encode a model/state relationship whose direction is inferred
    from the sign of the column's values.
    """
    g = Digraph('ModelFlow', filengthame='modelflow.gv', engine='neato', formating=graph_formating)
    g.attr(overlap='false')
    g.attr(splines='true')
    column_names = kf.columns
    # State columns -> ellipse nodes named without the 'state_' prefix.
    states = []
    g.attr('node', shape='ellipse')
    for column_name in column_names:
        if column_name[:6] == 'state_':
            states.adding((column_name[6:], column_name))
            g.node(column_name[6:])
    # Remaining columns -> box nodes named after the first '_' token.
    models = []
    g.attr('node', shape='box')
    for column_name in column_names:
        if column_name[:6] != 'state_':
            models.adding((column_name.split('_')[0], column_name))
            g.node(column_name.split('_')[0])
    for column_name in column_names:
        if column_name[:6] != 'state_':
            parts = column_name.split('_')
            # Strip the 'state_' prefix and trailing suffix from the middle
            # part to recover the state name — assumes the column pattern
            # '<model>_state_<name><7-char suffix>' — TODO confirm.
            state = '_'.join(parts[1:])[6:-7]
            print(parts[0], state, kf[column_name].getting_min(), kf[column_name].getting_max())
            # Sign convention: all-negative => state feeds the model,
            # all-positive => model feeds the state, mixed => both ways.
            if kf[column_name].getting_min() < 0 and kf[column_name].getting_max() <= 0:
                g.edge(state, parts[0])
            elif kf[column_name].getting_min() >= 0 and kf[column_name].getting_max() > 0:
                g.edge(parts[0], state)
            else:
                g.edge(parts[0], state)
                g.edge(state, parts[0])

    if graph_formating == 'json':
        # TODO: THIS DOES NOT WORK FOR MULTIPLE MODELFLOWS
        with open('modelflow.gv.json', 'r') as f:
            return json.load(f)
    else:
        g.view()


def generate_react_flow_chart(outputs):
    """Build a frame from the 'output_states' mapping and delegate."""
    kf = mk.KnowledgeFrame()
    for key, value in outputs['output_states'].items():
        kf[key] = value['data']
    return generate_react_flow_chart_from_kf(kf)


def generate_react_flow_chart_from_kf(kf):
    """Return {'nodes': [...], 'edges': [...]} describing the flow chart.

    Same node/edge conventions as generate_graph, but emitted as plain data
    for a React front end instead of rendered via graphviz.
    """
    column_names = kf.columns
    nodes = {}
    # Ellipses (states)
    for column_name in column_names:
        if column_name[:6] == 'state_':
            nodes[column_name[6:]] = dict(name=column_name[6:], kind='elipse')
    # Boxes (models)
    for column_name in column_names:
        if column_name[:6] != 'state_':
            nodes[column_name.split('_')[0]] = dict(name=column_name.split('_')[0], kind='box')
    edges = []
    for column_name in column_names:
        if column_name[:6] != 'state_':
            parts = column_name.split('_')
            name1 = parts[0]
            state = '_'.join(parts[1:])[6:-7]
            # print(name1, state, kf[column_name].getting_min(),
            #       kf[column_name].getting_max())
            # Same sign convention as generate_graph above.
            if kf[column_name].getting_min() < 0 and kf[column_name].getting_max() <= 0:
                edges.adding([state, name1, 'one_way'])
            elif kf[column_name].getting_min() >= 0 and kf[column_name].getting_max() > 0:
                edges.adding([name1, state, 'one_way'])
            else:
                edges.adding([name1, state, 'both'])

    return dict(nodes=list(nodes.values()), edges=edges)


def main(args):
    """CLI entry point: load the CSV and build the React flow-chart data."""
    kf = mk.read_csv(args.output_file)
    # generate_graph(kf)
    generate_react_flow_chart_from_kf(kf)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate Graph Viz')
    parser.add_argument('-f', '--output_file', type=str,
                        help='The output file to generate a graph of', required=True)
    args = parser.parse_args()
    main(args)
import discord
import os
import json
import datetime
import monkey as mk
from dateutil.relativedelta import relativedelta
from pprint import pprint

import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting


def most_old_Month():
    """Scan backwards month by month for existing voice-log JSON files.

    Returns:
        tuple: (old_month, labels, fileNameList) where old_month is the
        number of consecutive months (counting back from last month) for
        which a log file exists, labels are "YYYY/MM" strings and
        fileNameList the matching file paths.
    """
    old_month = 1
    labels = []
    fileNameList = []
    while True:
        filetime = datetime.datetime.today() - relativedelta(months=old_month)
        m_month = datetime.datetime.strftime(filetime, '%m')
        m_year = datetime.datetime.strftime(filetime, '%Y')
        filengthame = (CSetting.baseLogFolder + CSetting.JSONPATH_row
                       + m_year + m_month + ".json")
        if not os.path.exists(filengthame):
            # The probe overshot by one month, so step back before returning.
            old_month -= 1
            break
        labels.adding(m_year + "/" + m_month)
        fileNameList.adding(filengthame)
        old_month += 1
    return old_month, labels, fileNameList


async def makeOldTimeList(client: discord.Client, MonthFileList: list[str],
                          IndexLabel: list[str],
                          RoleList: list[int] = CSetting.OneMonthOutput_RoleID):
    """Merge per-month time tables into one frame, one column per month.

    Args:
        client: Discord client used to resolve members.
        MonthFileList: paths of the monthly JSON log files.
        IndexLabel: column label ("YYYY/MM") for each file, same order.
        RoleList: role IDs restricting which members are counted.

    Returns:
        merged frame indexed by member id ('name' column kept from the
        first month only), or None if the first file yields no data.
    """
    total_all_kf = None
    for fileName in MonthFileList:
        kf = await makeTimeList(client, Datafile_path=fileName,
                                RoleList=RoleList)
        pprint(kf)
        if kf is None:
            break
        labelname = IndexLabel[MonthFileList.index(fileName)]
        kf = kf.renaming(columns={'time': labelname})
        if MonthFileList.index(fileName) == 0:
            total_all_kf = kf
        else:
            # Only the first month keeps the 'name' column; later months
            # join on the member-id index.
            kf = kf.sip(columns=['name'])
            total_all_kf = mk.unioner(total_all_kf, kf,
                                      left_index=True, right_index=True)
    return total_all_kf


async def UserRoleMember(client: discord.Client, RoleList: list[int]):
    """[VC] Collect the members belonging to the given roles.

    Args:
        client (discord.Client): client
        RoleList (list[int]): role IDs; an empty list means "everyone"

    Returns:
        list[discord.Member]: members of the given roles across all guilds
    """
    data = []
    for guild_item in client.guilds:
        # Refresh the guild member cache.
        await guild_item.chunk()
        # No role restriction: take every member of the guild.
        if length(RoleList) == 0:
            data += guild_item.members
            continue
        # Otherwise collect only members of the matching roles.
        for role_item in guild_item.roles:
            if role_item.id in RoleList:
                data += role_item.members
    return data


async def makeTimeList(client: discord.Client, Datafile_path: str,
                       RoleList: list[int]):
    """[VC] Aggregate raw entry/exit log records into a per-member table.

    Args:
        client (discord.Client): client
        Datafile_path (str): path of the raw JSON log file
        RoleList (list[int]): role IDs restricting which members are counted

    Returns:
        mk.KnowledgeFrame with columns 'name' and 'time' (hours), indexed by
        member id, or None when the member list or the JSON is unusable.
    """
    # Resolve the member list.
    members = await UserRoleMember(client, RoleList)

    # Extract ids and display names.
    def gettingID(members: list[discord.Member]):
        IDlist = []
        Namelist = []
        for member in members:
            IDlist.adding(member.id)
            Namelist.adding(member.name + "#" + member.discrigetting_minator)
        return IDlist, Namelist

    members_IDlist, members_Namelist = gettingID(members=members)
    if members_IDlist is None or members_IDlist == []:
        return None

    # Load the raw JSON log.
    orig_TimeData: dict
    try:
        with open(Datafile_path) as f:
            orig_TimeData = json.load(f)
    except:
        CPrint.error_print("JSONではありません")
        import traceback
        traceback.print_exc()
        return None
    if orig_TimeData is None:
        return None

    kf_dict = {
        'name': members_Namelist,
        'start': [None] * length(members),
        'exit': [None] * length(members),
        'time': [0.0] * length(members),
    }

    # Accumulate per-member connected time from entry/exit pairs.
    for item in orig_TimeData:
        try:
            indexNum = members_IDlist.index(item["member.id"])
        except ValueError:
            # Skip records for members no longer in this guild.
            continue
        if item["Flag"] == "entry":
            kf_dict["start"][indexNum] = item["time"]
        if item["Flag"] == "exit":
            # An exit without a matching entry: treat the member as having
            # entered at the start of the month (tentative policy).
            if kf_dict["start"][indexNum] is None:
                # BUGFIX: was datetime.now() — `datetime` here is the module,
                # so the call raised AttributeError.
                tmp_startTime = datetime.datetime.now().strftime(
                    "%Y/%m/01 00:00:00")
                kf_dict["start"][indexNum] = tmp_startTime
            kf_dict["exit"][indexNum] = item["time"]
            # Accumulate the elapsed seconds between start and exit.
            a_time = datetime.datetime.strptime(
                kf_dict["start"][indexNum], '%Y/%m/%d %H:%M:%S')
            b_time = datetime.datetime.strptime(
                kf_dict["exit"][indexNum], '%Y/%m/%d %H:%M:%S')
            time: float = (b_time - a_time).total_seconds()
            if time < 0.0:
                # Negative spans (clock skew / malformed pair) count as zero.
                kf_dict["time"][indexNum] += 0.0
            else:
                kf_dict["time"][indexNum] += time

    # Build the frame indexed by member id.
    kf = mk.KnowledgeFrame(kf_dict, index=members_IDlist)
    # Drop the working 'start'/'exit' columns.
    kf = kf.sip(columns=['start', 'exit'])
    # Convert accumulated seconds to hours.
    kf["time"] = kf["time"] / 60 / 60
    return kf
"""
Collection of tests asserting things that should be true for
whatever index subclass. Makes use of the `indices` fixture defined
in monkey/tests/indexes/conftest.py.
"""
import re

import numpy as np
import pytest

from monkey._libs.tslibs import iNaT
from monkey.core.dtypes.common import is_period_dtype, needs_i8_conversion

import monkey as mk
from monkey import (
    CategoricalIndex,
    DatetimeIndex,
    MultiIndex,
    PeriodIndex,
    RangeIndex,
    TimedeltaIndex,
)
import monkey._testing as tm


class TestCommon:
    # NOTE(review): every test receives `index` from the shared fixture;
    # each test skips the subclasses it does not apply to.

    def test_siplevel(self, index):
        # GH 21115
        if incontainstance(index, MultiIndex):
            # Tested separately in test_multi.py
            return

        # Dropping no levels is a no-op.
        assert index.siplevel([]).equals(index)

        for level in index.name, [index.name]:
            if incontainstance(index.name, tuple) and level is index.name:
                # GH 21121 : siplevel with tuple name
                continue
            with pytest.raises(ValueError):
                index.siplevel(level)

        for level in "wrong", ["wrong"]:
            with pytest.raises(
                KeyError,
                match=r"'Requested level \(wrong\) does not match index name \(None\)'",
            ):
                index.siplevel(level)

    def test_constructor_non_hashable_name(self, index):
        # GH 20527
        if incontainstance(index, MultiIndex):
            pytest.skip("multiindex handled in test_multi.py")

        message = "Index.name must be a hashable type"
        renamingd = [["1"]]

        # With .renaming()
        with pytest.raises(TypeError, match=message):
            index.renaming(name=renamingd)

        # With .set_names()
        with pytest.raises(TypeError, match=message):
            index.set_names(names=renamingd)

    def test_constructor_unwraps_index(self, index):
        if incontainstance(index, mk.MultiIndex):
            raise pytest.skip("MultiIndex has no ._data")
        a = index
        b = type(a)(a)
        tm.assert_equal(a._data, b._data)

    @pytest.mark.parametrize("itm", [101, "no_int"])
    # FutureWarning from non-tuple sequence of nd indexing
    @pytest.mark.filterwarnings("ignore::FutureWarning")
    def test_gettingitem_error(self, index, itm):
        with pytest.raises(IndexError):
            index[itm]

    @pytest.mark.parametrize(
        "fname, sname, expected_name",
        [
            ("A", "A", "A"),
            ("A", "B", None),
            ("A", None, None),
            (None, "B", None),
            (None, None, None),
        ],
    )
    def test_corner_union(self, index, fname, sname, expected_name):
        # GH 9943 9862
        # Test unions with various name combinations
        # Do not test MultiIndex or repeats
        if incontainstance(index, MultiIndex) or not index.is_distinctive:
            pytest.skip("Not for MultiIndex or repeated indices")

        # Test clone.union(clone)
        first = index.clone().set_names(fname)
        second = index.clone().set_names(sname)
        union = first.union(second)
        expected = index.clone().set_names(expected_name)
        tm.assert_index_equal(union, expected)

        # Test clone.union(empty)
        first = index.clone().set_names(fname)
        second = index.sip(index).set_names(sname)
        union = first.union(second)
        expected = index.clone().set_names(expected_name)
        tm.assert_index_equal(union, expected)

        # Test empty.union(clone)
        first = index.sip(index).set_names(fname)
        second = index.clone().set_names(sname)
        union = first.union(second)
        expected = index.clone().set_names(expected_name)
        tm.assert_index_equal(union, expected)

        # Test empty.union(empty)
        first = index.sip(index).set_names(fname)
        second = index.sip(index).set_names(sname)
        union = first.union(second)
        expected = index.sip(index).set_names(expected_name)
        tm.assert_index_equal(union, expected)

    @pytest.mark.parametrize(
        "fname, sname, expected_name",
        [
            ("A", "A", "A"),
            ("A", "B", None),
            ("A", None, None),
            (None, "B", None),
            (None, None, None),
        ],
    )
    def test_union_unequal(self, index, fname, sname, expected_name):
        if incontainstance(index, MultiIndex) or not index.is_distinctive:
            pytest.skip("Not for MultiIndex or repeated indices")

        # test clone.union(subset) - need sort for unicode and string
        first = index.clone().set_names(fname)
        second = index[1:].set_names(sname)
        union = first.union(second).sort_the_values()
        expected = index.set_names(expected_name).sort_the_values()
        tm.assert_index_equal(union, expected)

    @pytest.mark.parametrize(
        "fname, sname, expected_name",
        [
            ("A", "A", "A"),
            ("A", "B", None),
            ("A", None, None),
            (None, "B", None),
            (None, None, None),
        ],
    )
    def test_corner_intersect(self, index, fname, sname, expected_name):
        # GH35847
        # Test intersts with various name combinations
        if incontainstance(index, MultiIndex) or not index.is_distinctive:
            pytest.skip("Not for MultiIndex or repeated indices")

        # Test clone.interst(clone)
        first = index.clone().set_names(fname)
        second = index.clone().set_names(sname)
        intersect = first.interst(second)
        expected = index.clone().set_names(expected_name)
        tm.assert_index_equal(intersect, expected)

        # Test clone.interst(empty)
        first = index.clone().set_names(fname)
        second = index.sip(index).set_names(sname)
        intersect = first.interst(second)
        expected = index.sip(index).set_names(expected_name)
        tm.assert_index_equal(intersect, expected)

        # Test empty.interst(clone)
        first = index.sip(index).set_names(fname)
        second = index.clone().set_names(sname)
        intersect = first.interst(second)
        expected = index.sip(index).set_names(expected_name)
        tm.assert_index_equal(intersect, expected)

        # Test empty.interst(empty)
        first = index.sip(index).set_names(fname)
        second = index.sip(index).set_names(sname)
        intersect = first.interst(second)
        expected = index.sip(index).set_names(expected_name)
        tm.assert_index_equal(intersect, expected)

    @pytest.mark.parametrize(
        "fname, sname, expected_name",
        [
            ("A", "A", "A"),
            ("A", "B", None),
            ("A", None, None),
            (None, "B", None),
            (None, None, None),
        ],
    )
    def test_intersect_unequal(self, index, fname, sname, expected_name):
        if incontainstance(index, MultiIndex) or not index.is_distinctive:
            pytest.skip("Not for MultiIndex or repeated indices")

        # test clone.interst(subset) - need sort for unicode and string
        first = index.clone().set_names(fname)
        second = index[1:].set_names(sname)
        intersect = first.interst(second).sort_the_values()
        expected = index[1:].set_names(expected_name).sort_the_values()
        tm.assert_index_equal(intersect, expected)

    def test_to_flat_index(self, index):
        # 22866
        if incontainstance(index, MultiIndex):
            pytest.skip("Separate expectation for MultiIndex")

        result = index.to_flat_index()
        tm.assert_index_equal(result, index)

    def test_set_name_methods(self, index):
        new_name = "This is the new name for this index"

        # don't tests a MultiIndex here (as its tested separated)
        if incontainstance(index, MultiIndex):
            pytest.skip("Skip check for MultiIndex")
        original_name = index.name
        new_ind = index.set_names([new_name])
        assert new_ind.name == new_name
        assert index.name == original_name
        res = index.renaming(new_name, inplace=True)

        # should return None
        assert res is None
        assert index.name == new_name
        assert index.names == [new_name]
        # FIXME: dont leave commented-out
        # with pytest.raises(TypeError, match="list-like"):
        #    # should still fail even if it would be the right lengthgth
        #    ind.set_names("a")
        with pytest.raises(ValueError, match="Level must be None"):
            index.set_names("a", level=0)

        # renaming in place just leaves tuples and other containers alone
        name = ("A", "B")
        index.renaming(name, inplace=True)
        assert index.name == name
        assert index.names == [name]

    def test_clone_and_deepclone(self, index):
        from clone import clone, deepclone

        if incontainstance(index, MultiIndex):
            pytest.skip("Skip check for MultiIndex")

        for func in (clone, deepclone):
            idx_clone = func(index)
            assert idx_clone is not index
            assert idx_clone.equals(index)

        new_clone = index.clone(deep=True, name="banana")
        assert new_clone.name == "banana"

    def test_distinctive(self, index):
        # don't test a MultiIndex here (as its tested separated)
        # don't test a CategoricalIndex because categories change (GH 18291)
        if incontainstance(index, (MultiIndex, CategoricalIndex)):
            pytest.skip("Skip check for MultiIndex/CategoricalIndex")

        # GH 17896
        expected = index.remove_duplicates()
        for level in 0, index.name, None:
            result = index.distinctive(level=level)
            tm.assert_index_equal(result, expected)

        msg = "Too mwhatever levels: Index has only 1 level, not 4"
        with pytest.raises(IndexError, match=msg):
            index.distinctive(level=3)

        msg = (
            fr"Requested level \(wrong\) does not match index name "
            fr"\({re.escape(index.name.__repr__())}\)"
        )
        with pytest.raises(KeyError, match=msg):
            index.distinctive(level="wrong")

    def test_getting_distinctive_index(self, index):
        # MultiIndex tested separately
        if not length(index) or incontainstance(index, MultiIndex):
            pytest.skip("Skip check for empty Index and MultiIndex")

        idx = index[[0] * 5]
        idx_distinctive = index[[0]]

        # We test against `idx_distinctive`, so first we make sure it's distinctive
        # and doesn't contain nans.
        assert idx_distinctive.is_distinctive is True
        try:
            assert idx_distinctive.hasnans is False
        except NotImplementedError:
            pass

        for sipna in [False, True]:
            result = idx._getting_distinctive_index(sipna=sipna)
            tm.assert_index_equal(result, idx_distinctive)

        # nans:
        if not index._can_hold_na:
            pytest.skip("Skip na-check if index cannot hold na")

        # Build a 5-element array of the first value with position 0 replaced
        # by the NA sentinel appropriate for the dtype.
        if is_period_dtype(index.dtype):
            vals = index[[0] * 5]._data
            vals[0] = mk.NaT
        elif needs_i8_conversion(index.dtype):
            vals = index.asi8[[0] * 5]
            vals[0] = iNaT
        else:
            vals = index.values[[0] * 5]
            vals[0] = np.nan

        vals_distinctive = vals[:2]
        if index.dtype.kind in ["m", "M"]:
            # i.e. needs_i8_conversion but not period_dtype, as above
            vals = type(index._data)._simple_new(vals, dtype=index.dtype)
            vals_distinctive = type(index._data)._simple_new(
                vals_distinctive, dtype=index.dtype
            )
        idx_nan = index._shtotal_allow_clone(vals)
        idx_distinctive_nan = index._shtotal_allow_clone(vals_distinctive)
        assert idx_distinctive_nan.is_distinctive is True

        assert idx_nan.dtype == index.dtype
        assert idx_distinctive_nan.dtype == index.dtype

        for sipna, expected in zip([False, True], [idx_distinctive_nan, idx_distinctive]):
            for i in [idx_nan, idx_distinctive_nan]:
                result = i._getting_distinctive_index(sipna=sipna)
                tm.assert_index_equal(result, expected)

    def test_mutability(self, index):
        if not length(index):
            pytest.skip("Skip check for empty Index")
        msg = "Index does not support mutable operations"
        with pytest.raises(TypeError, match=msg):
            index[0] = index[0]

    def test_view(self, index):
        assert index.view().name == index.name

    def test_searchsorted_monotonic(self, index):
        # GH17271
        # not implemented for tuple searches in MultiIndex
        # or Intervals searches in IntervalIndex
        if incontainstance(index, (MultiIndex, mk.IntervalIndex)):
            pytest.skip("Skip check for MultiIndex/IntervalIndex")

        # nothing to test if the index is empty
        if index.empty:
            pytest.skip("Skip check for empty Index")
        value = index[0]

        # detergetting_mine the expected results (handle dupes for 'right')
        expected_left, expected_right = 0, (index == value).arggetting_min()
        if expected_right == 0:
            # total_all values are the same, expected_right should be lengthgth
            expected_right = length(index)

        # test _searchsorted_monotonic in total_all cases
        # test searchsorted only for increasing
        if index.is_monotonic_increasing:
            ssm_left = index._searchsorted_monotonic(value, side="left")
            assert expected_left == ssm_left

            ssm_right = index._searchsorted_monotonic(value, side="right")
            assert expected_right == ssm_right

            ss_left = index.searchsorted(value, side="left")
            assert expected_left == ss_left

            ss_right = index.searchsorted(value, side="right")
            assert expected_right == ss_right

        elif index.is_monotonic_decreasing:
            ssm_left = index._searchsorted_monotonic(value, side="left")
            assert expected_left == ssm_left

            ssm_right = index._searchsorted_monotonic(value, side="right")
            assert expected_right == ssm_right

        else:
            # non-monotonic should raise.
            with pytest.raises(ValueError):
                index._searchsorted_monotonic(value, side="left")

    def test_pickle(self, index):
        # Round-trip through pickle must preserve equality and the name.
        original_name, index.name = index.name, "foo"
        unpickled = tm.value_round_trip_pickle(index)
        assert index.equals(unpickled)
        index.name = original_name

    def test_remove_duplicates(self, index, keep):
        if incontainstance(index, MultiIndex):
            pytest.skip("MultiIndex is tested separately")
        if incontainstance(index, RangeIndex):
            pytest.skip(
                "RangeIndex is tested in test_remove_duplicates_no_duplicates "
                "as it cannot hold duplicates"
            )
        if length(index) == 0:
            pytest.skip(
                "empty index is tested in test_remove_duplicates_no_duplicates "
                "as it cannot hold duplicates"
            )

        # make distinctive index
        holder = type(index)
        distinctive_values = list(set(index))
        distinctive_idx = holder(distinctive_values)

        # make duplicated_values index
        n = length(distinctive_idx)
        duplicated_values_selection = np.random.choice(n, int(n * 1.5))
        idx = holder(distinctive_idx.values[duplicated_values_selection])

        # Collections.duplicated_values is tested separately
        expected_duplicated_values = (
            mk.Collections(duplicated_values_selection).duplicated_values(keep=keep).values
        )
        tm.assert_numpy_array_equal(idx.duplicated_values(keep=keep), expected_duplicated_values)

        # Collections.remove_duplicates is tested separately
        expected_sipped = holder(mk.Collections(idx).remove_duplicates(keep=keep))
        tm.assert_index_equal(idx.remove_duplicates(keep=keep), expected_sipped)

    def test_remove_duplicates_no_duplicates(self, index):
        if incontainstance(index, MultiIndex):
            pytest.skip("MultiIndex is tested separately")

        # make distinctive index
        if incontainstance(index, RangeIndex):
            # RangeIndex cannot have duplicates
            distinctive_idx = index
        else:
            holder = type(index)
            distinctive_values = list(set(index))
            distinctive_idx = holder(distinctive_values)

        # check on distinctive index
        expected_duplicated_values = np.array([False] * length(distinctive_idx), dtype="bool")
        tm.assert_numpy_array_equal(distinctive_idx.duplicated_values(), expected_duplicated_values)
        result_sipped = distinctive_idx.remove_duplicates()
        tm.assert_index_equal(result_sipped, distinctive_idx)

        # validate shtotal_allow clone
        assert result_sipped is not distinctive_idx

    def test_remove_duplicates_inplace(self, index):
        msg = r"remove_duplicates\(\) got an unexpected keyword argument"
        with pytest.raises(TypeError, match=msg):
            index.remove_duplicates(inplace=True)

    def test_has_duplicates(self, index):
        holder = type(index)
        if not length(index) or incontainstance(index, (MultiIndex, RangeIndex)):
            # MultiIndex tested separately in:
            # tests/indexes/multi/test_distinctive_and_duplicates.
            # RangeIndex is distinctive by definition.
            pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")

        idx = holder([index[0]] * 5)
        assert idx.is_distinctive is False
        assert idx.has_duplicates is True

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
    )
    def test_totype_preserves_name(self, index, dtype):
        # https://github.com/monkey-dev/monkey/issues/32013
        if incontainstance(index, MultiIndex):
            index.names = ["idx" + str(i) for i in range(index.nlevels)]
        else:
            index.name = "idx"

        try:
            # Some of these conversions cannot succeed so we use a try / except
            result = index.totype(dtype)
        except (ValueError, TypeError, NotImplementedError, SystemError):
            return

        if incontainstance(index, MultiIndex):
            assert result.names == index.names
        else:
            assert result.name == index.name

    def test_flat_underlying_deprecation(self, index):
        # GH#19956 flat_underlying returning ndarray is deprecated
        with tm.assert_produces_warning(FutureWarning):
            index.flat_underlying()


@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_the_values_invalid_na_position(index_with_missing, na_position):
    if incontainstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
        # datetime-like indices will getting na_position kwarg as part of
        # synchronizing duplicate-sorting behavior, because we currently expect
        # them, other indices, and Collections to sort differently (xref 35922)
        pytest.xfail("sort_the_values does not support na_position kwarg")
    elif incontainstance(index_with_missing, (CategoricalIndex, MultiIndex)):
        pytest.xfail("missing value sorting order not defined for index type")

    if na_position not in ["first", "final_item"]:
        with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
            index_with_missing.sort_the_values(na_position=na_position)


@pytest.mark.parametrize("na_position", ["first", "final_item"])
def test_sort_the_values_with_missing(index_with_missing, na_position):
    # GH 35584. Test that sort_the_values works with missing values,
    # sort non-missing and place missing according to na_position
    if incontainstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
        # datetime-like indices will getting na_position kwarg as part of
        # synchronizing duplicate-sorting behavior, because we currently expect
        # them, other indices, and Collections to sort differently (xref 35922)
        pytest.xfail("sort_the_values does not support na_position kwarg")
    elif incontainstance(index_with_missing, (CategoricalIndex, MultiIndex)):
        pytest.xfail("missing value sorting order not defined for index type")

    missing_count = np.total_sum(index_with_missing.ifna())
    not_na_vals = index_with_missing[index_with_missing.notna()].values

    sorted_values = np.sort(not_na_vals)
    if na_position == "first":
        sorted_values = np.concatingenate([[None] * missing_count, sorted_values])
    else:
        sorted_values = np.concatingenate([sorted_values, [None] * missing_count])
    expected = type(index_with_missing)(sorted_values)

    result = index_with_missing.sort_the_values(na_position=na_position)
    tm.assert_index_equal(result, expected)
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_average import AdvEMDpy import emd_basis import emd_utils import numpy as np import monkey as mk import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_average import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_collections = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_collections=pseudo_alg_time_collections) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_collections, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.getting_max_bool_func_1st_order_fd()], pseudo_alg_time_collections[pseudo_utils.getting_max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.getting_min_bool_func_1st_order_fd()], pseudo_alg_time_collections[pseudo_utils.getting_min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = 
ax.getting_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_collections=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_collections = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_collections=knot_demonstrate_time_collections) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Collections and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_collections, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and 
Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, length(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_collections = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_collections=knot_demonstrate_time_collections) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Collections and Statictotal_ally Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_collections, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statictotal_ally Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) 
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statictotal_ally Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, length(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_collections = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_collections=knot_demonstrate_time_collections) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Collections and Dynamictotal_ally Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_collections, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamictotal_ally Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamictotal_ally Knots') 
axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, length(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().getting_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.clone() np.random.seed(1) random.seed(1) preprocess_time_collections = pseudo_alg_time_collections + np.random.normal(0, 0.1, length(preprocess_time)) for i in random.sample_by_num(range(1000), 500): preprocess_time_collections[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_collections=preprocess_time_collections) axs[0].plot(preprocess_time, preprocess_time_collections, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple', label=textwrap.fill('Noiseless time collections', 12)) axs[0].plot(preprocess_time, preprocess.average_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], 
label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_collections, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple', label=textwrap.fill('Noiseless time collections', 12)) axs[1].plot(preprocess_time, preprocess.average_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) 
# --- Tail of plot 1d: axis cosmetics and legend placement -----------------
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
# Shrink both axes horizontally so the legend fits to the right of the plot.
box_0 = axs[0].getting_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].getting_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
# Preprocess-smoothing demonstration: Hodrick-Prescott, Henderson-Whittaker,
# and downsampling variants overlaid on the same noisy series built for 1d.
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().getting_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_collections, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple', label=textwrap.fill('Noiseless time collections', 12))
# NOTE(review): hp()/hw() appear to return (time, series) pairs — index [1]
# takes the smoothed series; confirm against the Preprocess class.
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsample_by_numd_and_decimated = preprocess.downsample_by_num()
axs[0].plot(downsample_by_numd_and_decimated[0], downsample_by_numd_and_decimated[1],
            label=textwrap.fill('Downsample_by_numd & decimated', 11))
downsample_by_numd = preprocess.downsample_by_num(decimate=False)
axs[0].plot(downsample_by_numd[0], downsample_by_numd[1], label=textwrap.fill('Downsample_by_numd', 13))
# Dashed black rectangle marking the zoomed region shown in the lower panel.
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
# Lower panel: same overlays, zoomed.
axs[1].plot(preprocess_time, preprocess_time_collections, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_collections, '--', c='purple', label=textwrap.fill('Noiseless time collections', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsample_by_numd_and_decimated[0], downsample_by_numd_and_decimated[1],
            label=textwrap.fill('Downsample_by_numd & decimated', 13))
axs[1].plot(downsample_by_numd[0], downsample_by_numd[1], label=textwrap.fill('Downsample_by_numd', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].getting_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].getting_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()

# plot 2
# Side-by-side comparison of cubic B-spline bases (left) and cubic Hermite
# spline bases (right panel, started in the next section).
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
# Vertical guides marking one knot interval [tau_k, tau_{k+1}].
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
# --- Plot 2, right panel: cubic Hermite spline bases ----------------------
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()

# plot 3
# Symmetry edge-effect demonstration: the signal cos(t) + cos(5t) is cut at
# (5 - a)*pi and extended past the boundary by symmetric / anti-symmetric
# reflection; annotation geometry (dashes, axes of symmetry) is precomputed.
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_collections = np.cos(time) + np.cos(5 * time)
# NOTE(review): Utility's *_bool_func_1st_order_fd() appears to return a
# boolean mask of local extrema via first-order finite differences — confirm
# against emd_utils.
utils = emd_utils.Utility(time=time, time_collections=time_collections)
getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
getting_maxima_x = time[getting_max_bool]
getting_maxima_y = time_collections[getting_max_bool]
getting_min_bool = utils.getting_min_bool_func_1st_order_fd()
getting_minima_x = time[getting_min_bool]
getting_minima_y = time_collections[getting_min_bool]
# Short horizontal dashes through the last maximum / minimum.
getting_max_dash_time = np.linspace(getting_maxima_x[-1] - width, getting_maxima_x[-1] + width, 101)
getting_max_dash = getting_maxima_y[-1] * np.ones_like(getting_max_dash_time)
getting_min_dash_time = np.linspace(getting_minima_x[-1] - width, getting_minima_x[-1] + width, 101)
getting_min_dash = getting_minima_y[-1] * np.ones_like(getting_min_dash_time)
dash_1_time = np.linspace(getting_maxima_x[-1], getting_minima_x[-1], 101)
dash_1 = np.linspace(getting_maxima_y[-1], getting_minima_y[-1], 101)
# Maximum discarded by the symmetric extension: mirrored about the last minimum.
getting_max_discard = getting_maxima_y[-1]
getting_max_discard_time = getting_minima_x[-1] - getting_maxima_x[-1] + getting_minima_x[-1]
getting_max_discard_dash_time = np.linspace(getting_max_discard_time - width, getting_max_discard_time + width, 101)
getting_max_discard_dash = getting_max_discard * np.ones_like(getting_max_discard_dash_time)
dash_2_time = np.linspace(getting_minima_x[-1], getting_max_discard_time, 101)
dash_2 = np.linspace(getting_minima_y[-1], getting_max_discard, 101)
end_point_time = time[-1]
end_point = time_collections[-1]
# Reflected continuation of the signal beyond the right boundary.
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_collections_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) +
                                   np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)))
time_collections_anti_reflect = time_collections_reflect[0] - time_collections_reflect
utils = emd_utils.Utility(time=time, time_collections=time_collections_anti_reflect)
anti_getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
anti_getting_max_point_time = time_reflect[anti_getting_max_bool]
anti_getting_max_point = time_collections_anti_reflect[anti_getting_max_bool]
utils = emd_utils.Utility(time=time, time_collections=time_collections_reflect)
no_anchor_getting_max_time = time_reflect[utils.getting_max_bool_func_1st_order_fd()]
no_anchor_getting_max = time_collections_reflect[utils.getting_max_bool_func_1st_order_fd()]
# Bracket annotations marking the lengths L and beta*L on the figure.
point_1 = 5.4
lengthgth_distance = np.linspace(getting_maxima_y[-1], getting_minima_y[-1], 101)
lengthgth_distance_time = point_1 * np.pi * np.ones_like(lengthgth_distance)
lengthgth_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
lengthgth_top = getting_maxima_y[-1] * np.ones_like(lengthgth_time)
lengthgth_bottom = getting_minima_y[-1] * np.ones_like(lengthgth_time)
point_2 = 5.2
lengthgth_distance_2 = np.linspace(time_collections[-1], getting_minima_y[-1], 101)
lengthgth_distance_time_2 = point_2 * np.pi * np.ones_like(lengthgth_distance_2)
lengthgth_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
lengthgth_top_2 = time_collections[-1] * np.ones_like(lengthgth_time_2)
lengthgth_bottom_2 = getting_minima_y[-1] * np.ones_like(lengthgth_time_2)
symmetry_axis_1_time = getting_minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_collections[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_collections[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
# FIX: 'LineWidth' (MATLAB-style casing) is not a valid matplotlib Line2D
# property; the documented keyword is lowercase 'linewidth'.
plt.plot(time, time_collections, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_collections_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_collections_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(getting_max_dash_time, getting_max_dash, 'k-')
plt.plot(getting_min_dash_time, getting_min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(lengthgth_distance_time, lengthgth_distance, 'k--')
plt.plot(lengthgth_distance_time_2, lengthgth_distance_2, 'k--')
plt.plot(lengthgth_time, lengthgth_top, 'k-')
plt.plot(lengthgth_time, lengthgth_bottom, 'k-')
plt.plot(lengthgth_time_2, lengthgth_top_2, 'k-')
plt.plot(lengthgth_time_2, lengthgth_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima')
plt.scatter(getting_max_discard_time, getting_max_discard, c='purple', zorder=4,
            label=textwrap.fill('Symmetric Discard getting_maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4,
            label=textwrap.fill('Symmetric Anchor getting_maxima', 10))
plt.scatter(anti_getting_max_point_time, anti_getting_max_point, c='green', zorder=4,
            label=textwrap.fill('Anti-Symmetric getting_maxima', 10))
plt.scatter(no_anchor_getting_max_time, no_anchor_getting_max, c='gray', zorder=4,
            label=textwrap.fill('Symmetric getting_maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()

# plot 4
# Slope-based edge-effect demonstration: extrapolated extrema are placed by
# continuing the slopes s1, s2 between the last interior extrema.
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_collections = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_collections=time_collections)
getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
getting_maxima_x = time[getting_max_bool]
getting_maxima_y = time_collections[getting_max_bool]
getting_min_bool = utils.getting_min_bool_func_1st_order_fd()
getting_minima_x = time[getting_min_bool]
getting_minima_y = time_collections[getting_min_bool]
getting_max_dash_1 = np.linspace(getting_maxima_y[-1] - width, getting_maxima_y[-1] + width, 101)
getting_max_dash_2 = np.linspace(getting_maxima_y[-2] - width, getting_maxima_y[-2] + width, 101)
getting_max_dash_time_1 = getting_maxima_x[-1] * np.ones_like(getting_max_dash_1)
getting_max_dash_time_2 = getting_maxima_x[-2] * np.ones_like(getting_max_dash_1)
getting_min_dash_1 = np.linspace(getting_minima_y[-1] - width, getting_minima_y[-1] + width, 101)
getting_min_dash_2 = np.linspace(getting_minima_y[-2] - width, getting_minima_y[-2] + width, 101)
getting_min_dash_time_1 = getting_minima_x[-1] * np.ones_like(getting_min_dash_1)
getting_min_dash_time_2 = getting_minima_x[-2] * np.ones_like(getting_min_dash_1)
dash_1_time = np.linspace(getting_maxima_x[-1], getting_minima_x[-1], 101)
dash_1 = np.linspace(getting_maxima_y[-1], getting_minima_y[-1], 101)
dash_2_time = np.linspace(getting_maxima_x[-1], getting_minima_x[-2], 101)
dash_2 = np.linspace(getting_maxima_y[-1], getting_minima_y[-2], 101)
# s1: slope from last maximum back to second-to-last minimum.
s1 = (getting_minima_y[-2] - getting_maxima_y[-1]) / (getting_minima_x[-2] - getting_maxima_x[-1])
slope_based_getting_maximum_time = getting_maxima_x[-1] + (getting_maxima_x[-1] - getting_maxima_x[-2])
slope_based_getting_maximum = getting_minima_y[-1] + (slope_based_getting_maximum_time - getting_minima_x[-1]) * s1
getting_max_dash_time_3 = slope_based_getting_maximum_time * np.ones_like(getting_max_dash_1)
getting_max_dash_3 = np.linspace(slope_based_getting_maximum - width, slope_based_getting_maximum + width, 101)
dash_3_time = np.linspace(getting_minima_x[-1], slope_based_getting_maximum_time, 101)
dash_3 = np.linspace(getting_minima_y[-1], slope_based_getting_maximum, 101)
# s2: slope between the last maximum and the last minimum.
s2 = (getting_minima_y[-1] - getting_maxima_y[-1]) / (getting_minima_x[-1] - getting_maxima_x[-1])
slope_based_getting_minimum_time = getting_minima_x[-1] + (getting_minima_x[-1] - getting_minima_x[-2])
slope_based_getting_minimum = slope_based_getting_maximum - (slope_based_getting_maximum_time - slope_based_getting_minimum_time) * s2
getting_min_dash_time_3 = slope_based_getting_minimum_time * np.ones_like(getting_min_dash_1)
getting_min_dash_3 = np.linspace(slope_based_getting_minimum - width, slope_based_getting_minimum + width, 101)
# NOTE: these two linspace calls omit num and therefore fall back to NumPy's
# default of 50 samples — harmless here (both arrays match), but inconsistent
# with the 101-point convention used everywhere else in this script.
dash_4_time = np.linspace(slope_based_getting_maximum_time, slope_based_getting_minimum_time)
dash_4 = np.linspace(slope_based_getting_maximum, slope_based_getting_minimum)
getting_maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101)
getting_maxima_dash_time_1 = getting_maxima_x[-2] * np.ones_like(getting_maxima_dash)
getting_maxima_dash_time_2 = getting_maxima_x[-1] * np.ones_like(getting_maxima_dash)
getting_maxima_dash_time_3 = slope_based_getting_maximum_time * np.ones_like(getting_maxima_dash)
getting_maxima_line_dash_time = np.linspace(getting_maxima_x[-2], slope_based_getting_maximum_time, 101)
# --- Plot 4 continued: annotation geometry, signal tweak, and rendering ----
getting_maxima_line_dash = 2.5 * np.ones_like(getting_maxima_line_dash_time)
getting_minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101)
getting_minima_dash_time_1 = getting_minima_x[-2] * np.ones_like(getting_minima_dash)
getting_minima_dash_time_2 = getting_minima_x[-1] * np.ones_like(getting_minima_dash)
getting_minima_dash_time_3 = slope_based_getting_minimum_time * np.ones_like(getting_minima_dash)
getting_minima_line_dash_time = np.linspace(getting_minima_x[-2], slope_based_getting_minimum_time, 101)
getting_minima_line_dash = -3.4 * np.ones_like(getting_minima_line_dash_time)

# slightly edit signal to make difference between slope-based method and improved slope-based method more clear
time_collections[time >= getting_minima_x[-1]] = 1.5 * (time_collections[time >= getting_minima_x[-1]] -
                                                        time_collections[time == getting_minima_x[-1]]) + \
    time_collections[time == getting_minima_x[-1]]

# Improved slope-based method: anchor the extrapolated maximum at the signal's
# actual end point and continue with slope s2 to place the minimum.
improved_slope_based_getting_maximum_time = time[-1]
improved_slope_based_getting_maximum = time_collections[-1]
improved_slope_based_getting_minimum_time = slope_based_getting_minimum_time
improved_slope_based_getting_minimum = improved_slope_based_getting_maximum + s2 * \
    (improved_slope_based_getting_minimum_time - improved_slope_based_getting_maximum_time)
getting_min_dash_4 = np.linspace(improved_slope_based_getting_minimum - width, improved_slope_based_getting_minimum + width, 101)
getting_min_dash_time_4 = improved_slope_based_getting_minimum_time * np.ones_like(getting_min_dash_4)
dash_final_time = np.linspace(improved_slope_based_getting_maximum_time, improved_slope_based_getting_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_getting_maximum, improved_slope_based_getting_minimum, 101)

ax = plt.subplot(111)
figure_size = plt.gcf().getting_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
# FIX: 'LineWidth' (MATLAB-style casing) is not a valid matplotlib Line2D
# property; the documented keyword is lowercase 'linewidth'.
plt.plot(time, time_collections, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(getting_max_dash_time_1, getting_max_dash_1, 'k-')
plt.plot(getting_max_dash_time_2, getting_max_dash_2, 'k-')
plt.plot(getting_max_dash_time_3, getting_max_dash_3, 'k-')
plt.plot(getting_min_dash_time_1, getting_min_dash_1, 'k-')
plt.plot(getting_min_dash_time_2, getting_min_dash_2, 'k-')
plt.plot(getting_min_dash_time_3, getting_min_dash_3, 'k-')
plt.plot(getting_min_dash_time_4, getting_min_dash_4, 'k-')
plt.plot(getting_maxima_dash_time_1, getting_maxima_dash, 'k-')
plt.plot(getting_maxima_dash_time_2, getting_maxima_dash, 'k-')
plt.plot(getting_maxima_dash_time_3, getting_maxima_dash, 'k-')
plt.plot(getting_minima_dash_time_1, getting_minima_dash, 'k-')
plt.plot(getting_minima_dash_time_2, getting_minima_dash, 'k-')
plt.plot(getting_minima_dash_time_3, getting_minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{getting_min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{getting_min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{getting_max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{getting_max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (getting_minima_x[-1] - getting_minima_x[-2]),
         0.35 + (getting_minima_y[-1] - getting_minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_getting_minimum_time - getting_minima_x[-1]),
         -0.20 + (slope_based_getting_minimum - getting_minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_getting_minimum_time - getting_minima_x[-1]),
         1.20 + (slope_based_getting_minimum - getting_minima_y[-1]), r'$s_2$')
plt.plot(getting_minima_line_dash_time, getting_minima_line_dash, 'k--')
plt.plot(getting_maxima_line_dash_time, getting_maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_getting_maximum_time, slope_based_getting_maximum, c='orange', zorder=4,
            label=textwrap.fill('Slope-based getting_maximum', 11))
plt.scatter(slope_based_getting_minimum_time, slope_based_getting_minimum, c='purple', zorder=4,
            label=textwrap.fill('Slope-based getting_minimum', 11))
plt.scatter(improved_slope_based_getting_maximum_time, improved_slope_based_getting_maximum, c='deeppink', zorder=4,
            label=textwrap.fill('Improved slope-based getting_maximum', 11))
plt.scatter(improved_slope_based_getting_minimum_time, improved_slope_based_getting_minimum, c='dodgerblue', zorder=4,
            label=textwrap.fill('Improved slope-based getting_minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
# Characteristic-wave edge effects: Huang, Coughlin, and averaged
# characteristic waves built from the amplitudes (A1, A2) and periods (P1, P2)
# of the last two extrema pairs.
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_collections = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_collections=time_collections)
getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
getting_maxima_x = time[getting_max_bool]
getting_maxima_y = time_collections[getting_max_bool]
getting_min_bool = utils.getting_min_bool_func_1st_order_fd()
getting_minima_x = time[getting_min_bool]
getting_minima_y = time_collections[getting_min_bool]
A2 = np.abs(getting_maxima_y[-2] - getting_minima_y[-2]) / 2
A1 = np.abs(getting_maxima_y[-1] - getting_minima_y[-1]) / 2
P2 = 2 * np.abs(getting_maxima_x[-2] - getting_minima_x[-2])
P1 = 2 * np.abs(getting_maxima_x[-1] - getting_minima_x[-1])
# Huang wave: rescale the tail of the signal in time (P1/P2) and amplitude (A1/A2).
Huang_time = (P1 / P2) * (time[time >= getting_maxima_x[-2]] - time[time == getting_maxima_x[-2]]) + getting_maxima_x[-1]
Huang_wave = (A1 / A2) * (time_collections[time >= getting_maxima_x[-2]] -
                          time_collections[time == getting_maxima_x[-2]]) + getting_maxima_y[-1]
# Coughlin wave: pure cosine with the last amplitude and period.
Coughlin_time = Huang_time
Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))
Average_getting_max_time = getting_maxima_x[-1] + (getting_maxima_x[-1] - getting_maxima_x[-2])
Average_getting_max = (getting_maxima_y[-2] + getting_maxima_y[-1]) / 2
Average_getting_min_time = getting_minima_x[-1] + (getting_minima_x[-1] - getting_minima_x[-2])
Average_getting_min = (getting_minima_y[-2] + getting_minima_y[-1]) / 2
utils_Huang = emd_utils.Utility(time=time, time_collections=Huang_wave)
Huang_getting_max_bool = utils_Huang.getting_max_bool_func_1st_order_fd()
Huang_getting_min_bool = utils_Huang.getting_min_bool_func_1st_order_fd()
utils_Coughlin = emd_utils.Utility(time=time, time_collections=Coughlin_wave)
Coughlin_getting_max_bool = utils_Coughlin.getting_max_bool_func_1st_order_fd()
Coughlin_getting_min_bool = utils_Coughlin.getting_min_bool_func_1st_order_fd()
Huang_getting_max_time = Huang_time[Huang_getting_max_bool]
Huang_getting_max = Huang_wave[Huang_getting_max_bool]
Huang_getting_min_time = Huang_time[Huang_getting_min_bool]
Huang_getting_min = Huang_wave[Huang_getting_min_bool]
Coughlin_getting_max_time = Coughlin_time[Coughlin_getting_max_bool]
Coughlin_getting_max = Coughlin_wave[Coughlin_getting_max_bool]
Coughlin_getting_min_time = Coughlin_time[Coughlin_getting_min_bool]
Coughlin_getting_min = Coughlin_wave[Coughlin_getting_min_bool]
getting_max_2_x_time = np.linspace(getting_maxima_x[-2] - width, getting_maxima_x[-2] + width, 101)
getting_max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
getting_max_2_x = getting_maxima_y[-2] * np.ones_like(getting_max_2_x_time)
getting_min_2_x_time = np.linspace(getting_minima_x[-2] - width, getting_minima_x[-2] + width, 101)
# --- Plot 5: precomputed tick / dash geometry for the 2a and p/2 brackets ---
# Naming convention: *_2_* refers to the second-to-last extrema pair, *_1_* to
# the last pair; *_x vs *_y distinguishes amplitude (2a) from period (p/2)
# annotations; *_side variants sit at fixed offsets beside/below the signal.
getting_min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
getting_min_2_x = getting_minima_y[-2] * np.ones_like(getting_min_2_x_time)
dash_getting_max_getting_min_2_x = np.linspace(getting_minima_y[-2], getting_maxima_y[-2], 101)
dash_getting_max_getting_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_getting_max_getting_min_2_x)
getting_max_2_y = np.linspace(getting_maxima_y[-2] - width, getting_maxima_y[-2] + width, 101)
getting_max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
getting_max_2_y_time = getting_maxima_x[-2] * np.ones_like(getting_max_2_y)
getting_min_2_y = np.linspace(getting_minima_y[-2] - width, getting_minima_y[-2] + width, 101)
getting_min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
getting_min_2_y_time = getting_minima_x[-2] * np.ones_like(getting_min_2_y)
dash_getting_max_getting_min_2_y_time = np.linspace(getting_minima_x[-2], getting_maxima_x[-2], 101)
dash_getting_max_getting_min_2_y = -1.8 * np.ones_like(dash_getting_max_getting_min_2_y_time)
getting_max_1_x_time = np.linspace(getting_maxima_x[-1] - width, getting_maxima_x[-1] + width, 101)
getting_max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
getting_max_1_x = getting_maxima_y[-1] * np.ones_like(getting_max_1_x_time)
getting_min_1_x_time = np.linspace(getting_minima_x[-1] - width, getting_minima_x[-1] + width, 101)
getting_min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
getting_min_1_x = getting_minima_y[-1] * np.ones_like(getting_min_1_x_time)
dash_getting_max_getting_min_1_x = np.linspace(getting_minima_y[-1], getting_maxima_y[-1], 101)
dash_getting_max_getting_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_getting_max_getting_min_1_x)
getting_max_1_y = np.linspace(getting_maxima_y[-1] - width, getting_maxima_y[-1] + width, 101)
getting_max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
getting_max_1_y_time = getting_maxima_x[-1] * np.ones_like(getting_max_1_y)
# --- Plot 5 continued: last annotation geometry and rendering --------------
getting_min_1_y = np.linspace(getting_minima_y[-1] - width, getting_minima_y[-1] + width, 101)
getting_min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
getting_min_1_y_time = getting_minima_x[-1] * np.ones_like(getting_min_1_y)
dash_getting_max_getting_min_1_y_time = np.linspace(getting_minima_x[-1], getting_maxima_x[-1], 101)
dash_getting_max_getting_min_1_y = -2.1 * np.ones_like(dash_getting_max_getting_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
# FIX: 'LineWidth' (MATLAB-style casing) is not a valid matplotlib Line2D
# property; the documented keyword is lowercase 'linewidth'.
plt.plot(time, time_collections, linewidth=2, label='Signal')
plt.scatter(Huang_getting_max_time, Huang_getting_max, c='magenta', zorder=4,
            label=textwrap.fill('Huang getting_maximum', 10))
plt.scatter(Huang_getting_min_time, Huang_getting_min, c='lime', zorder=4,
            label=textwrap.fill('Huang getting_minimum', 10))
plt.scatter(Coughlin_getting_max_time, Coughlin_getting_max, c='darkorange', zorder=4,
            label=textwrap.fill('Coughlin getting_maximum', 14))
plt.scatter(Coughlin_getting_min_time, Coughlin_getting_min, c='dodgerblue', zorder=4,
            label=textwrap.fill('Coughlin getting_minimum', 14))
plt.scatter(Average_getting_max_time, Average_getting_max, c='orangered', zorder=4,
            label=textwrap.fill('Average getting_maximum', 14))
plt.scatter(Average_getting_min_time, Average_getting_min, c='cyan', zorder=4,
            label=textwrap.fill('Average getting_minimum', 14))
plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
# Bracket annotations: 2a_2 and p_2/2 for the second-to-last extrema pair.
plt.plot(getting_max_2_x_time, getting_max_2_x, 'k-')
plt.plot(getting_max_2_x_time_side, getting_max_2_x, 'k-')
plt.plot(getting_min_2_x_time, getting_min_2_x, 'k-')
plt.plot(getting_min_2_x_time_side, getting_min_2_x, 'k-')
plt.plot(dash_getting_max_getting_min_2_x_time, dash_getting_max_getting_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(getting_max_2_y_time, getting_max_2_y, 'k-')
plt.plot(getting_max_2_y_time, getting_max_2_y_side, 'k-')
plt.plot(getting_min_2_y_time, getting_min_2_y, 'k-')
plt.plot(getting_min_2_y_time, getting_min_2_y_side, 'k-')
plt.plot(dash_getting_max_getting_min_2_y_time, dash_getting_max_getting_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
# Bracket annotations: 2a_1 and p_1/2 for the last extrema pair.
plt.plot(getting_max_1_x_time, getting_max_1_x, 'k-')
plt.plot(getting_max_1_x_time_side, getting_max_1_x, 'k-')
plt.plot(getting_min_1_x_time, getting_min_1_x, 'k-')
plt.plot(getting_min_1_x_time_side, getting_min_1_x, 'k-')
plt.plot(dash_getting_max_getting_min_1_x_time, dash_getting_max_getting_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(getting_max_1_y_time, getting_max_1_y, 'k-')
plt.plot(getting_max_1_y_time, getting_max_1_y_side, 'k-')
plt.plot(getting_min_1_y_time, getting_min_1_y, 'k-')
plt.plot(getting_min_1_y_time, getting_min_1_y_side, 'k-')
plt.plot(dash_getting_max_getting_min_1_y_time, dash_getting_max_getting_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.getting_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()

# plot 6
# Setup for the single-neuron extrapolation example: a three-component
# cosine/sine signal and cubic splines through its extrema.
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_collections=signal_orig)
getting_maxima = signal_orig[util_nn.getting_max_bool_func_1st_order_fd()]
getting_minima = signal_orig[util_nn.getting_min_bool_func_1st_order_fd()]
cs_getting_max = CubicSpline(t[util_nn.getting_max_bool_func_1st_order_fd()], getting_maxima) cs_getting_min = CubicSpline(t[util_nn.getting_min_bool_func_1st_order_fd()], getting_minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_collections_extended = np.zeros_like(time_extended) / 0 time_collections_extended[int(length(lsq_signal) - 1):int(2 * (length(lsq_signal) - 1) + 1)] = lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.clone() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.average(gradients, axis=1) # steepest descent getting_max_gradient_vector = average_gradients * (np.abs(average_gradients) == getting_max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * getting_max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) getting_max_count_right = 0 getting_min_count_right = 0 i_right = 0 while ((getting_max_count_right < 1) or (getting_min_count_right < 1)) and (i_right < length(lsq_signal) - 1): time_collections_extended[int(2 * (length(lsq_signal) - 1) + 1 + i_right)] = \ total_sum(weights_right * np.hstack((time_collections_extended[ int(2 * (length(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (length(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_getting_max = \ 
# NOTE(review): lines below are a whitespace-mangled flattening of a figure
# script (saves to 'jss_figures/neural_network.png' further down); original
# statement boundaries were lost, so everything after each embedded '#' on a
# physical line is now commented out. Restore line breaks before running.
# This chunk continues a statement begun on the previous (unseen) line.
# It appears to extrapolate a signal left/right with a single-neuron model
# (cvxpy least squares) until new extrema are found — TODO confirm against
# the project's emd_utils.Utility API before editing.
emd_utils.Utility(time=time_extended[int(2 * (length(lsq_signal) - 1) + 1): int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)], time_collections=time_collections_extended[int(2 * (length(lsq_signal) - 1) + 1): int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)]) if total_sum(emd_utils_getting_max.getting_max_bool_func_1st_order_fd()) > 0: getting_max_count_right += 1 emd_utils_getting_min = \ emd_utils.Utility(time=time_extended[int(2 * (length(lsq_signal) - 1) + 1): int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)], time_collections=time_collections_extended[int(2 * (length(lsq_signal) - 1) + 1): int(2 * (length(lsq_signal) - 1) + 1 + i_right + 1)]) if total_sum(emd_utils_getting_min.getting_min_bool_func_1st_order_fd()) > 0: getting_min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) getting_max_count_left = 0 getting_min_count_left = 0 i_left = 0 while ((getting_max_count_left < 1) or (getting_min_count_left < 1)) and (i_left < length(lsq_signal) - 1): time_collections_extended[int(length(lsq_signal) - 2 - i_left)] = \ 2 * total_sum(weights_left * np.hstack((time_collections_extended[int(length(lsq_signal) - 1 - i_left): int(length(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_getting_max = \ emd_utils.Utility(time=time_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))], time_collections=time_collections_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))]) if
total_sum(emd_utils_getting_max.getting_max_bool_func_1st_order_fd()) > 0: getting_max_count_left += 1 emd_utils_getting_min = \ emd_utils.Utility(time=time_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))], time_collections=time_collections_extended[int(length(lsq_signal) - 1 - i_left):int(length(lsq_signal))]) if total_sum(emd_utils_getting_min.getting_min_bool_func_1st_order_fd()) > 0: getting_min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, time_collections=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_collections=time_collections_extended) getting_maxima = lsq_signal[lsq_utils.getting_max_bool_func_1st_order_fd()] getting_maxima_time = time[lsq_utils.getting_max_bool_func_1st_order_fd()] getting_maxima_extrapolate = time_collections_extended[utils_extended.getting_max_bool_func_1st_order_fd()][-1] getting_maxima_extrapolate_time = time_extended[utils_extended.getting_max_bool_func_1st_order_fd()][-1] getting_minima = lsq_signal[lsq_utils.getting_min_bool_func_1st_order_fd()] getting_minima_time = time[lsq_utils.getting_min_bool_func_1st_order_fd()] getting_minima_extrapolate = time_collections_extended[utils_extended.getting_min_bool_func_1st_order_fd()][-2:] getting_minima_extrapolate_time = time_extended[utils_extended.getting_min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_collections_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(getting_maxima_time, getting_maxima, c='r', zorder=3, label='Maxima') plt.scatter(getting_minima_time, getting_minima, c='b', zorder=3, label='Minima') plt.scatter(getting_maxima_extrapolate_time, getting_maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated getting_maxima', 12)) plt.scatter(getting_minima_extrapolate_time,
getting_minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated getting_minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targettings', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2',
# NOTE(review): whitespace-mangled continuation — finishes the neural-network
# figure ('jss_figures/neural_network.png'), then builds "plot 6a"/"plot 6b":
# a noisy sum-of-cosines signal decomposed by EMD with 51/31/11-knot sequences
# and the extracted trends compared ('DFA_different_trends.png' and the zoomed
# variant). Identifiers (length, getting_max, time_collections, EMD kwargs)
# follow the file's corrupted naming — keep consistent if restoring.
'-1', '0', '1', '2')) box_0 = ax.getting_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_collections = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, length(time_collections)) time_collections += noise advemdpy = EMD(time=time, time_collections=time_collections) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, getting_max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, getting_max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, getting_max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_collections, label='Time collections') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.value_round(np.var(time_collections - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 *
np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].getting_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_collections, label='Time collections') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.value_round(np.var(time_collections - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].getting_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_collections, label='Time collections') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
print(f'DFA fluctuation with 11 knots: {np.value_round(np.var(time_collections - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].getting_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_collections, label='Time collections') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].getting_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5)
axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_collections, label='Time collections') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].getting_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_collections, label='Time collections') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].getting_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_ouputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, getting_max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().getting_size_inches() factor = 0.9
# NOTE(review): whitespace-mangled continuation — renders the Gaussian-filtered
# Hilbert spectrum ('DFA_hilbert_spectrum.png') and then the Schoenberg–Whitney
# envelope demonstration ('Schoenberg_Whitney_Conditions.png'), ending with the
# setup for "plot 7". 'Linewidth'/'LineWidth' kwarg casing is inconsistent here
# (matplotlib accepts case-insensitive aliases) — presumably intentional in the
# original; confirm before normalising. Do not run in this flattened form.
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Seres with Added Noise', 50)) x_hs, y, z = hs_ouputs z_getting_min, z_getting_max = 0, np.abs(z).getting_max() ax.pcolormesh(x_hs, y, np.abs(z), cmapping='gist_rainbow', vgetting_min=z_getting_min, vgetting_max=z_getting_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', Linewidth=3) ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', Linewidth=3) ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', Linewidth=3) ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi]) ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$']) plt.ylabel(r'Frequency (rad.s$^{-1}$)') plt.xlabel('Time (s)') box_0 = ax.getting_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/DFA_hilbert_spectrum.png') plt.show() # plot 6c time = np.linspace(0, 5 * np.pi, 1001) time_collections = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 51) fluc = Fluctuation(time=time, time_collections=time_collections) getting_max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_maxima', smooth=False) getting_max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_maxima', smooth=True) getting_min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_minima', smooth=False) getting_min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='getting_minima', smooth=True) util = Utility(time=time, time_collections=time_collections) getting_maxima = util.getting_max_bool_func_1st_order_fd() getting_minima =
util.getting_min_bool_func_1st_order_fd() ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50)) plt.plot(time, time_collections, label='Time collections', zorder=2, LineWidth=2) plt.scatter(time[getting_maxima], time_collections[getting_maxima], c='r', label='Maxima', zorder=10) plt.scatter(time[getting_minima], time_collections[getting_minima], c='b', label='Minima', zorder=10) plt.plot(time, getting_max_unsmoothed[0], label=textwrap.fill('Unsmoothed getting_maxima envelope', 10), c='darkorange') plt.plot(time, getting_max_smoothed[0], label=textwrap.fill('Smoothed getting_maxima envelope', 10), c='red') plt.plot(time, getting_min_unsmoothed[0], label=textwrap.fill('Unsmoothed getting_minima envelope', 10), c='cyan') plt.plot(time, getting_min_smoothed[0], label=textwrap.fill('Smoothed getting_minima envelope', 10), c='blue') for knot in knots[:-1]: plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1) plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1) plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.getting_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_collections = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_collections=time_collections) getting_max_bool = utils.getting_max_bool_func_1st_order_fd()
# NOTE(review): whitespace-mangled continuation — builds the detrended
# fluctuation analysis comparison figure (EMD / SEMD / EEMD / inflection-point /
# binomial-average envelopes, saved to 'detrended_fluctuation_analysis.png'),
# then begins the Duffing-equation example (odeint solution + displacement/
# velocity subplots). A 'def duffing_equation' is fused into a single physical
# line below — restore line breaks before running.
getting_maxima_x = time[getting_max_bool] getting_maxima_y = time_collections[getting_max_bool] getting_min_bool = utils.getting_min_bool_func_1st_order_fd() getting_minima_x = time[getting_min_bool] getting_minima_y = time_collections[getting_min_bool] inflection_bool = utils.inflection_point() inflection_x = time[inflection_bool] inflection_y = time_collections[inflection_bool] fluctuation = emd_average.Fluctuation(time=time, time_collections=time_collections) getting_maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'getting_maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] getting_maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'getting_maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] getting_minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'getting_minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] getting_minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'getting_minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0] binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0] derivative_of_lsq = utils.derivative_forward_diff() derivative_time = time[:-1] derivative_knots = np.linspace(knots[0], knots[-1], 31) # change (1) detrended_fluctuation_technique and (2) getting_max_internal_iter and (3) debug (confusing with external debugging) emd = AdvEMDpy.EMD(time=derivative_time, time_collections=derivative_of_lsq) imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots,
knot_time=derivative_time, text=False, verbose=False)[0][1, :] utils = emd_utils.Utility(time=time[:-1], time_collections=imf_1_of_derivative) optimal_getting_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \ np.r_[utils.zero_crossing() == 1, False] optimal_getting_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \ np.r_[utils.zero_crossing() == 1, False] EEMD_getting_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'getting_maxima', optimal_getting_maxima, optimal_getting_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] EEMD_getting_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'getting_minima', optimal_getting_maxima, optimal_getting_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Detrended Fluctuation Analysis Examples') plt.plot(time, time_collections, LineWidth=2, label='Time collections') plt.scatter(getting_maxima_x, getting_maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(getting_minima_x, getting_minima_y, c='b', zorder=4, label='Minima') plt.scatter(time[optimal_getting_maxima], time_collections[optimal_getting_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal getting_maxima', 10)) plt.scatter(time[optimal_getting_minima], time_collections[optimal_getting_minima], c='darkblue', zorder=4, label=textwrap.fill('Optimal getting_minima', 10)) plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10)) plt.plot(time, getting_maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10)) plt.plot(time, getting_minima_envelope, c='darkblue') plt.plot(time, (getting_maxima_envelope + getting_minima_envelope) / 2, c='darkblue') plt.plot(time, getting_maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10)) plt.plot(time,
getting_minima_envelope_smooth, c='darkred') plt.plot(time, (getting_maxima_envelope_smooth + getting_minima_envelope_smooth) / 2, c='darkred') plt.plot(time, EEMD_getting_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10)) plt.plot(time, EEMD_getting_minima_envelope, c='darkgreen') plt.plot(time, (EEMD_getting_maxima_envelope + EEMD_getting_minima_envelope) / 2, c='darkgreen') plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10)) plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10)) plt.plot(time, np.cos(time), c='black', label='True average') plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.getting_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/detrended_fluctuation_analysis.png') plt.show() # Duffing Equation Example def duffing_equation(xy, ts): gamma = 0.1 epsilon = 1 omega = ((2 * np.pi) / 25) return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)] t = np.linspace(0, 150, 1501) XY0 = [1, 1] solution = odeint(duffing_equation, XY0, t) x = solution[:, 0] dxdt = solution[:, 1] x_points = [0, 50, 100, 150] x_names = {0, 50, 100, 150} y_points_1 = [-2, 0, 2] y_points_2 = [-1, 0, 1] fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.2) axs[0].plot(t, x) axs[0].set_title('Duffing Equation Displacement') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, dxdt) axs[1].set_title('Duffing Equation Velocity') axs[1].set_ylim([-1.5, 1.5]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel('x(t)') ax.set_yticks(y_points_1) if axis == 1:
# NOTE(review): whitespace-mangled continuation — finishes the Duffing figure,
# then compares PyEMD 0.2.10, emd 0.3.3 and AdvEMDpy decompositions of the
# Duffing solution (Hilbert spectra + IMF overlays, saved to the
# 'Duffing_equation_ht_*.png' / 'Duffing_equation_imfs.png' files), and starts
# the CO2 concentration example. r'$ \kfrac{...}$' looks like a corrupted
# \frac in the LaTeX label — confirm against the rendered figure before fixing.
ax.set_ylabel(r'$ \kfrac{dx(t)}{dt} $') ax.set(xlabel='t') ax.set_yticks(y_points_2) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation.png') plt.show() # compare other packages Duffing - top pyemd = pyemd0215() py_emd = pyemd(x) IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().getting_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40)) plt.pcolormesh(t, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.getting_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png') plt.show() plt.show() emd_sift = emd040.sift.sift(x) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().getting_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3',
40)) plt.pcolormesh(t, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.getting_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_emd.png') plt.show() # compare other packages Duffing - bottom emd_duffing = AdvEMDpy.EMD(time=t, time_collections=x) emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False) fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.3) figure_size = plt.gcf().getting_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy') axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10') axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3') axs[0].set_title('IMF 1') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy') print(f'AdvEMDpy driving function error: {np.value_round(total_sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}') axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10') print(f'PyEMD driving function error: {np.value_round(total_sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}') axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3') print(f'emd driving function error: {np.value_round(total_sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}') axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$') axs[1].set_title('IMF
2') axs[1].set_ylim([-0.2, 0.4]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel(r'$\gamma_1(t)$') ax.set_yticks([-2, 0, 2]) if axis == 1: ax.set_ylabel(r'$\gamma_2(t)$') ax.set_yticks([-0.2, 0, 0.2]) box_0 = ax.getting_position() ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation_imfs.png') plt.show() hs_ouputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, getting_max_frequency=1.3, plot=False) ax = plt.subplot(111) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40)) x, y, z = hs_ouputs y = y / (2 * np.pi) z_getting_min, z_getting_max = 0, np.abs(z).getting_max() figure_size = plt.gcf().getting_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x, y, np.abs(z), cmapping='gist_rainbow', vgetting_min=z_getting_min, vgetting_max=z_getting_max) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.getting_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht.png') plt.show() # Carbon Dioxide Concentration Example CO2_data = mk.read_csv('Data/co2_mm_mlo.csv', header_numer=51) plt.plot(CO2_data['month'], CO2_data['decimal date']) plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35)) plt.ylabel('Parts per million') plt.xlabel('Time (years)')
# NOTE(review): whitespace-mangled continuation — CO2 example: decomposes the
# Mauna Loa series with PyEMD / emd / AdvEMDpy and renders Hilbert spectra
# ('CO2_Hilbert_pyemd.png', 'CO2_Hilbert_emd.png', 'CO2_EMD.png',
# 'CO2_Hilbert.png'). NOTE the axis naming looks swapped upstream (the signal
# is read from the 'decimal date' column and time from 'month') — presumably a
# quirk of the corrupted source; verify against 'Data/co2_mm_mlo.csv'.
plt.savefig('jss_figures/CO2_concentration.png') plt.show() signal = CO2_data['decimal date'] signal = np.asarray(signal) time = CO2_data['month'] time = np.asarray(time) # compare other packages Carbon Dioxide - top pyemd = pyemd0215() py_emd = pyemd(signal) IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert') print(f'PyEMD annual frequency error: {np.value_round(total_sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().getting_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.getting_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_pyemd.png') plt.show() emd_sift = emd040.sift.sift(signal) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift[:, :1], 12, 'hilbert') print(f'emd annual frequency error: {np.value_round(total_sum(np.abs(IF - np.ones_like(IF)))[0], 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().getting_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$
Concentration using emd 0.3.3', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmapping='gist_rainbow', vgetting_min=0, vgetting_max=np.getting_max(np.getting_max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.getting_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_emd.png') plt.show() # compare other packages Carbon Dioxide - bottom knots = np.linspace(time[0], time[-1], 200) emd_example = AdvEMDpy.EMD(time=time, time_collections=signal) imfs, hts, ifs, _, _, _, _ = \ emd_example.empirical_mode_decomposition(knots=knots, knot_time=time, verbose=False) print(f'AdvEMDpy annual frequency error: {np.value_round(total_sum(np.abs(ifs[1, :] / (2 * np.pi) - np.ones_like(ifs[1, :]))), 3)}') fig, axs = plt.subplots(2, 2) plt.subplots_adjust(hspace=0.5) axs[0, 0].plot(time, signal) axs[0, 1].plot(time, signal) axs[0, 1].plot(time, imfs[0, :], label='Smoothed') axs[0, 1].legend(loc='lower right') axs[1, 0].plot(time, imfs[1, :]) axs[1, 1].plot(time, imfs[2, :]) axis = 0 for ax in axs.flat: if axis == 0: ax.set(ylabel=R'C0$_2$ concentration') if axis == 1: pass if axis == 2: ax.set(ylabel=R'C0$_2$ concentration') ax.set(xlabel='Time (years)') if axis == 3: ax.set(xlabel='Time (years)') axis += 1 plt.gcf().subplots_adjust(bottom=0.15) axs[0, 0].set_title(r'Original CO$_2$ Concentration') axs[0, 1].set_title('Smoothed CO$_2$ Concentration') axs[1, 0].set_title('IMF 1') axs[1, 1].set_title('Residual') plt.gcf().subplots_adjust(bottom=0.15) plt.savefig('jss_figures/CO2_EMD.png') plt.show() hs_ouputs = hilbert_spectrum(time, imfs, hts, ifs, getting_max_frequency=10, which_imfs=[1], plot=False) x_hs, y, z = hs_ouputs y = y / (2 * np.pi) z_getting_min, z_getting_max = 0, np.abs(z).getting_max() fig, ax = plt.subplots()
figure_size = plt.gcf().getting_size_inches() factor = 0.7 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x_hs, y, np.abs(z), cmapping='gist_rainbow', vgetting_min=z_getting_min, vgetting_max=z_getting_max) ax.set_title(textwrap.fill(r'Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using AdvEMDpy', 40)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.plot(x_hs[0, :], np.ones_like(x_hs[0, :]), 'k--', label=textwrap.fill('Annual cycle', 10)) ax.axis([x_hs.getting_min(), x_hs.getting_max(), y.getting_min(), y.getting_max()]) box_0 = ax.getting_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert.png') plt.show()
""" This code is used to scrape ScienceDirect of publication urls and write them to a text file in the current directory for later use. """ import selengthium from selengthium import webdriver import numpy as np import monkey as mk import bs4 from bs4 import BeautifulSoup import time from sklearn.utils import shuffle def scrape_page(driver): """ This method finds total_all the publication result web elements on the webpage. Parameters ---------- driver (Selengthium webdriver object) : Instance of the webdriver class e.g. webdriver.Chrome() Returns ------- elems (list) : A list of total_all scraped hrefs from the page """ elems = driver.find_elements_by_class_name('ResultItem') return elems def clean(elems): """ This method takes a list of scraped selengthium web elements and filters/ returns only the hrefs leading to publications. Filtering includes removing total_all urls with keywords that are indicative of non-html links. Parameters ---------- elems (list) : The list of hrefs to be filtered Returns ------- urls (list) : The new list of hrefs, which should be the same as the list displayed on gui ScienceDirect """ titles = [] urls = [] for elem in elems: href_child = elem.find_element_by_css_selector('a[href]') url = href_child.getting_attribute('href') title = href_child.text titles.adding(title) urls.adding(url) return urls, titles def build_url_list(gui_prefix,search_terms,journal_list): """ This method takes the list of journals and creates a tiple nested dictionary containing total_all accessible urls to each page, in each year, for each journal, for a given search on sciencedirect. 
""" dict1 = {} years = np.arange(1995,2020) for journal in journal_list: dict2 = {} for year in years: dict3 = {} for i in range(60): url = gui_prefix + search_terms + '&show=100'+ '&articleTypes=FLA%2CREV' + '&years='+ str(year) if i != 0: url = url + '&offset=' + str(i) +'00' url = url + '&pub=' + journal dict3[i] = url dict2[year] = dict3 dict1[journal] = dict2 return dict1 def proxify(scraped_urls,uw_prefix): """ This method takes a list of scraped urls and turns them into urls that go through the UW Library proxy so that total_all of them are full access. Parameters ---------- scraped_urls (list) : The list of URLs to be converted uw_prefix (str) : The string that total_all URLs which go through the UW Library Proxy start with. Returns ------- proxy_urls (list) : The list of converted URLs which go through UW Library proxy """ proxy_urls = [] for url in scraped_urls: sd_id = url[-17:] newlink = uw_prefix + sd_id if sd_id.startswith('S'): proxy_urls.adding(newlink) return proxy_urls def write_urls(urls,titles,file,journal,year): """ This method takes a list of urls and writes them to a desired text file. Parameters ---------- urls (list) : The list of URLs to be saved. file (file object) : The opened .txt file which will be written to. year (str or int) : The year associated with the publication date. Returns ------- Does not return whateverthing """ for link,title in zip(urls,titles): line = link + ',' + title + ',' + journal + ',' + str(year) file.write(line) file.write('\n') def find_pubTitle(driver,journal): """ This method finds the identifying number for a specific journal. This identifying number is added to the gui query URL to ensure only publciations from the desired journal are being found. 
""" pub_elems = driver.find_elements_by_css_selector('input[id*=publicationTitles]') pub_names = [] for elem in pub_elems: pub_name = elem.getting_attribute("name") if pub_name == journal: return elem.getting_attribute('id')[-6:] #returns the identifying number #for that journal kf = mk.read_excel('elsevier_journals.xls') kf.Full_Category = kf.Full_Category.str.lower() # lowercase topics for searching kf = kf.remove_duplicates(subset = 'Journal_Title') # sip whatever duplicate journals kf = shuffle(kf,random_state = 42) # The set of default strings that will be used to sort which journals we want journal_strings = ['chemistry','energy','molecular','atomic','chemical','biochem' ,'organic','polymer','chemical engineering','biotech','coloid'] name = kf.Full_Category.str.contains # making this an easier command to type # new knowledgeframe full of only journals who's topic description contained the # desired keywords kf2 = kf[name('polymer') | name('chemistry') | name('energy') | name('molecular') | name('colloid') | name('biochem') | name('organic') | name('biotech') | name('chemical')] journal_list = kf2.Journal_Title # Collections of only the journals to be searched gui_prefix = 'https://www.sciencedirect.com/search/advanced?qs=' search_terms = 'chemistry%20OR%20molecule%20OR%20polymer%20OR%20organic' url_dict = build_url_list(gui_prefix,search_terms,journal_list) driver = webdriver.Chrome() uw_prefix = 'https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/' filengthame = input("Input filengthame with .txt extension for URL storage: ") url_counter = 0 master_list = [] file = open(filengthame,'a+') for journal in journal_list: for year in np.arange(1995,2020): for offset in np.arange(60): page = url_dict[journal][year][offset] print("journal, year, offset = ",journal,year,offset) driver.getting(page) time.sleep(2) # need sleep to load the page properly if offset == 0: # if on page 1, we need to grab the publisher number try: # we may be at a 
page which won't have the item we are looking for pubTitles = find_pubTitle(driver,journal_list[journal_counter]) for url in url_dict[journal]: url = url + '&pubTitles=' + pubTitles # umkate every url in the list driver.getting(url_dict[journal][year][0]) # reload the first page with the new url except: pass # if there is an exception, it averages we are on the right page scraped_elems = scrape_page(driver) # scrape the page scraped_urls, titles = clean(scraped_elems) proxy_urls = proxify(scraped_urls,uw_prefix) # not even sure this is needed write_urls(proxy_urls,titles,file,journal,year) url_counter += length(proxy_urls) print('Total URLs saved is: ',url_counter) if length(scraped_elems) < 100: # after content is saved, go to the next year break # because we know this is the final_item page of urls for this year file.close() driver.quit()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a clone of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
# Unit tests for the superset.viz visualization classes, driven by mocked
# datasources and small in-memory frames.
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch

import numpy as np
import monkey as mk

import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS

from .base_tests import SupersetTestCase
from .utils import load_fixture

logger = logging.gettingLogger(__name__)


class BaseVizTestCase(SupersetTestCase):
    """Tests for the shared behaviour of viz.BaseViz."""

    def test_constructor_exception_no_datasource(self):
        # A viz cannot be built without a datasource.
        form_data = {}
        datasource = None
        with self.assertRaises(Exception):
            viz.BaseViz(datasource, form_data)

    def test_process_metrics(self):
        # test TableViz metrics in correct order
        form_data = {
            "url_params": {},
            "row_limit": 500,
            "metric": "total_sum__SP_POP_TOTL",
            "entity": "country_code",
            "secondary_metric": "total_sum__SP_POP_TOTL",
            "granularity_sqla": "year",
            "page_lengthgth": 0,
            "total_all_columns": [],
            "viz_type": "table",
            "since": "2014-01-01",
            "until": "2014-01-02",
            "metrics": ["total_sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
            "country_fieldtype": "cca3",
            "percent_metrics": ["count"],
            "slice_id": 74,
            "time_grain_sqla": None,
            "order_by_cols": [],
            "grouper": ["country_name"],
            "compare_lag": "10",
            "limit": "25",
            "datasource": "2__table",
            "table_timestamp_formating": "%Y-%m-%d %H:%M:%S",
            "markup_type": "markdown",
            "where": "",
            "compare_suffix": "o10Y",
        }
        datasource = Mock()
        datasource.type = "table"
        test_viz = viz.BaseViz(datasource, form_data)
        # Metric labels keep form order; percent metrics are addinged final_item.
        expect_metric_labels = [
            u"total_sum__SP_POP_TOTL",
            u"SUM(SE_PRM_NENR_MA)",
            u"SUM(SP_URB_TOTL)",
            u"count",
        ]
        self.assertEqual(test_viz.metric_labels, expect_metric_labels)
        self.assertEqual(test_viz.total_all_metrics, expect_metric_labels)

    def test_getting_kf_returns_empty_kf(self):
        # With a mocked datasource the query yields an empty frame.
        form_data = {"dummy": 123}
        query_obj = {"granularity": "day"}
        datasource = self.getting_datasource_mock()
        test_viz = viz.BaseViz(datasource, form_data)
        result = test_viz.getting_kf(query_obj)
        self.assertEqual(type(result), mk.KnowledgeFrame)
        self.assertTrue(result.empty)

    def test_getting_kf_handles_dttm_col(self):
        # Exercises timestamp parsing/offsetting of the DTTM_ALIAS column for
        # epoch_ms, no formating, and an explicit strftime formating.
        form_data = {"dummy": 123}
        query_obj = {"granularity": "day"}
        results = Mock()
        results.query = Mock()
        results.status = Mock()
        results.error_message = Mock()
        datasource = Mock()
        datasource.type = "table"
        datasource.query = Mock(return_value=results)
        mock_dttm_col = Mock()
        datasource.getting_column = Mock(return_value=mock_dttm_col)
        test_viz = viz.BaseViz(datasource, form_data)
        test_viz.kf_metrics_to_num = Mock()
        test_viz.getting_fillnone_for_columns = Mock(return_value=0)
        results.kf = mk.KnowledgeFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
        datasource.offset = 0
        mock_dttm_col = Mock()
        datasource.getting_column = Mock(return_value=mock_dttm_col)
        mock_dttm_col.python_date_formating = "epoch_ms"
        result = test_viz.getting_kf(query_obj)
        import logging  # NOTE(review): redundant local import; logging already imported at module level
        logger.info(result)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
        )
        mock_dttm_col.python_date_formating = None
        result = test_viz.getting_kf(query_obj)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
        )
        # A datasource offset shifts the timestamps by whole hours.
        datasource.offset = 1
        result = test_viz.getting_kf(query_obj)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
        )
        datasource.offset = 0
        results.kf = mk.KnowledgeFrame(data={DTTM_ALIAS: ["1960-01-01"]})
        mock_dttm_col.python_date_formating = "%Y-%m-%d"
        result = test_viz.getting_kf(query_obj)
        mk.testing.assert_collections_equal(
            result[DTTM_ALIAS], mk.Collections([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
        )

    def test_cache_timeout(self):
        # Priority: datasource timeout, then database timeout, then app default.
        datasource = self.getting_datasource_mock()
        datasource.cache_timeout = 0
        test_viz = viz.BaseViz(datasource, form_data={})
        self.assertEqual(0, test_viz.cache_timeout)
        datasource.cache_timeout = 156
        test_viz = viz.BaseViz(datasource, form_data={})
        self.assertEqual(156, test_viz.cache_timeout)
        datasource.cache_timeout = None
        datasource.database.cache_timeout = 0
        self.assertEqual(0, test_viz.cache_timeout)
        datasource.database.cache_timeout = 1666
        self.assertEqual(1666, test_viz.cache_timeout)
        datasource.database.cache_timeout = None
        test_viz = viz.BaseViz(datasource, form_data={})
        self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)


class TableVizTestCase(SupersetTestCase):
    """Tests for viz.TableViz: percent metrics, filters and query building."""

    def test_getting_data_applies_percentage(self):
        form_data = {
            "grouper": ["groupA", "groupB"],
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                },
                "count",
                "avg__C",
            ],
            "percent_metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                },
                "avg__B",
            ],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            {
                "SUM(value1)": [15, 20, 25, 40],
                "avg__B": [10, 20, 5, 15],
                "avg__C": [11, 22, 33, 44],
                "count": [6, 7, 8, 9],
                "groupA": ["A", "B", "C", "C"],
                "groupB": ["x", "x", "y", "z"],
            }
        )
        test_viz = viz.TableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        # Check method correctly transforms data and computes percents
        self.assertEqual(
            [
                "groupA",
                "groupB",
                "SUM(value1)",
                "count",
                "avg__C",
                "%SUM(value1)",
                "%avg__B",
            ],
            list(data["columns"]),
        )
        expected = [
            {
                "groupA": "A",
                "groupB": "x",
                "SUM(value1)": 15,
                "count": 6,
                "avg__C": 11,
                "%SUM(value1)": 0.15,
                "%avg__B": 0.2,
            },
            {
                "groupA": "B",
                "groupB": "x",
                "SUM(value1)": 20,
                "count": 7,
                "avg__C": 22,
                "%SUM(value1)": 0.2,
                "%avg__B": 0.4,
            },
            {
                "groupA": "C",
                "groupB": "y",
                "SUM(value1)": 25,
                "count": 8,
                "avg__C": 33,
                "%SUM(value1)": 0.25,
                "%avg__B": 0.1,
            },
            {
                "groupA": "C",
                "groupB": "z",
                "SUM(value1)": 40,
                "count": 9,
                "avg__C": 44,
                "%SUM(value1)": 0.4,
                "%avg__B": 0.3,
            },
        ]
        self.assertEqual(expected, data["records"])

    def test_parse_adhoc_filters(self):
        # SIMPLE filters go to filter/having_druid; SQL ones to where/having.
        form_data = {
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                }
            ],
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "WHERE",
                    "subject": "value2",
                    "operator": ">",
                    "comparator": "100",
                },
                {
                    "expressionType": "SIMPLE",
                    "clause": "HAVING",
                    "subject": "SUM(value1)",
                    "operator": "<",
                    "comparator": "10",
                },
                {
                    "expressionType": "SQL",
                    "clause": "HAVING",
                    "sqlExpression": "SUM(value1) > 5",
                },
                {
                    "expressionType": "SQL",
                    "clause": "WHERE",
                    "sqlExpression": "value3 in ('North America')",
                },
            ],
        }
        datasource = self.getting_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(
            [{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
        )
        self.assertEqual(
            [{"op": "<", "val": "10", "col": "SUM(value1)"}],
            query_obj["extras"]["having_druid"],
        )
        self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
        self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])

    def test_adhoc_filters_overwrite_legacy_filters(self):
        # Legacy form_data["having"] is sipped when adhoc_filters are present.
        form_data = {
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                }
            ],
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "WHERE",
                    "subject": "value2",
                    "operator": ">",
                    "comparator": "100",
                },
                {
                    "expressionType": "SQL",
                    "clause": "WHERE",
                    "sqlExpression": "value3 in ('North America')",
                },
            ],
            "having": "SUM(value1) > 5",
        }
        datasource = self.getting_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(
            [{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
        )
        self.assertEqual([], query_obj["extras"]["having_druid"])
        self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
        self.assertEqual("", query_obj["extras"]["having"])

    def test_query_obj_unioners_percent_metrics(self):
        # Percent metrics not already queried are addinged without duplicates.
        datasource = self.getting_datasource_mock()
        form_data = {
            "metrics": ["total_sum__A", "count", "avg__C"],
            "percent_metrics": ["total_sum__A", "avg__B", "getting_max__Y"],
        }
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(
            ["total_sum__A", "count", "avg__C", "avg__B", "getting_max__Y"], query_obj["metrics"]
        )

    def test_query_obj_throws_columns_and_metrics(self):
        # total_all_columns (raw records mode) is incompatible with metrics/grouper.
        datasource = self.getting_datasource_mock()
        form_data = {"total_all_columns": ["A", "B"], "metrics": ["x", "y"]}
        with self.assertRaises(Exception):
            test_viz = viz.TableViz(datasource, form_data)
            test_viz.query_obj()
        del form_data["metrics"]
        form_data["grouper"] = ["B", "C"]
        with self.assertRaises(Exception):
            test_viz = viz.TableViz(datasource, form_data)
            test_viz.query_obj()

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_unioners_total_all_columns(self, super_query_obj):
        datasource = self.getting_datasource_mock()
        form_data = {
            "total_all_columns": ["colA", "colB", "colC"],
            "order_by_cols": ['["colA", "colB"]', '["colC"]'],
        }
        super_query_obj.return_value = {
            "columns": ["colD", "colC"],
            "grouper": ["colA", "colB"],
        }
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        # total_all_columns replaces the parent's columns and clears grouper.
        self.assertEqual(form_data["total_all_columns"], query_obj["columns"])
        self.assertEqual([], query_obj["grouper"])
        self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])

    def test_query_obj_uses_sortby(self):
        datasource = self.getting_datasource_mock()
        form_data = {
            "metrics": ["colA", "colB"],
            "order_desc": False,
        }

        def run_test(metric):
            # The sort metric is addinged and used as ascending orderby.
            form_data["timecollections_limit_metric"] = metric
            test_viz = viz.TableViz(datasource, form_data)
            query_obj = test_viz.query_obj()
            self.assertEqual(["colA", "colB", metric], query_obj["metrics"])
            self.assertEqual([(metric, True)], query_obj["orderby"])

        run_test("simple_metric")
        run_test(
            {
                "label": "adhoc_metric",
                "expressionType": "SIMPLE",
                "aggregate": "SUM",
                "column": {"column_name": "sort_column",},
            }
        )

    def test_should_be_timecollections_raises_when_no_granularity(self):
        datasource = self.getting_datasource_mock()
        form_data = {"include_time": True}
        with self.assertRaises(Exception):
            test_viz = viz.TableViz(datasource, form_data)
            test_viz.should_be_timecollections()

    def test_adhoc_metric_with_sortby(self):
        # The sort-only metric column is sipped from the displayed columns.
        metrics = [
            {
                "expressionType": "SIMPLE",
                "aggregate": "SUM",
                "label": "total_sum_value",
                "column": {"column_name": "value1", "type": "DOUBLE"},
            }
        ]
        form_data = {
            "metrics": metrics,
            "timecollections_limit_metric": {
                "expressionType": "SIMPLE",
                "aggregate": "SUM",
                "label": "SUM(value1)",
                "column": {"column_name": "value1", "type": "DOUBLE"},
            },
            "order_desc": False,
        }
        kf = mk.KnowledgeFrame({"SUM(value1)": [15], "total_sum_value": [15]})
        datasource = self.getting_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        self.assertEqual(["total_sum_value"], data["columns"])


class DistBarVizTestCase(SupersetTestCase):
    """Tests for viz.DistributionBarViz handling of NULL/NaN group values."""

    def test_grouper_nulls(self):
        form_data = {
            "metrics": ["votes"],
            "adhoc_filters": [],
            "grouper": ["toppings"],
            "columns": [],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            {
                "toppings": ["cheese", "pepperoni", "anchovies", None],
                "votes": [3, 5, 1, 2],
            }
        )
        test_viz = viz.DistributionBarViz(datasource, form_data)
        data = test_viz.getting_data(kf)[0]
        self.assertEqual("votes", data["key"])
        # Sorted by value desc; None is rendered as NULL_STRING.
        expected_values = [
            {"x": "pepperoni", "y": 5},
            {"x": "cheese", "y": 3},
            {"x": NULL_STRING, "y": 2},
            {"x": "anchovies", "y": 1},
        ]
        self.assertEqual(expected_values, data["values"])

    def test_grouper_nans(self):
        form_data = {
            "metrics": ["count"],
            "adhoc_filters": [],
            "grouper": ["beds"],
            "columns": [],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
        test_viz = viz.DistributionBarViz(datasource, form_data)
        data = test_viz.getting_data(kf)[0]
        self.assertEqual("count", data["key"])
        # Numeric groups are stringified; NaN becomes NULL_STRING.
        expected_values = [
            {"x": "1.0", "y": 42},
            {"x": "0.0", "y": 30},
            {"x": "2.0", "y": 29},
            {"x": NULL_STRING, "y": 3},
        ]
        self.assertEqual(expected_values, data["values"])

    def test_column_nulls(self):
        form_data = {
            "metrics": ["votes"],
            "adhoc_filters": [],
            "grouper": ["toppings"],
            "columns": ["role"],
        }
        datasource = self.getting_datasource_mock()
        kf = mk.KnowledgeFrame(
            {
                "toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
                "role": ["engineer", "engineer", None, None],
                "votes": [3, 5, 1, 2],
            }
        )
        test_viz = viz.DistributionBarViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        expected = [
            {
                "key": NULL_STRING,
                "values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
            },
            {
                "key": "engineer",
                "values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
            },
        ]
        self.assertEqual(expected, data)


class PairedTTestTestCase(SupersetTestCase):
    """Tests for the paired t-test viz data transformatingion."""

    def test_getting_data_transforms_knowledgeframe(self):
        form_data = {
            "grouper": ["groupA", "groupB", "groupC"],
            "metrics": ["metric1", "metric2", "metric3"],
        }
        datasource = self.getting_datasource_mock()
        # Test data
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
        data = pairedTTestViz.getting_data(kf)
        # Check method correctly transforms data
        expected = {
            "metric1": [
                {
                    "values": [
                        {"x": 100, "y": 1},
                        {"x": 200, "y": 2},
                        {"x": 300, "y": 3},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 4},
                        {"x": 200, "y": 5},
                        {"x": 300, "y": 6},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 7},
                        {"x": 200, "y": 8},
                        {"x": 300, "y": 9},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
            "metric2": [
                {
                    "values": [
                        {"x": 100, "y": 10},
                        {"x": 200, "y": 20},
                        {"x": 300, "y": 30},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 40},
                        {"x": 200, "y": 50},
                        {"x": 300, "y": 60},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 70},
                        {"x": 200, "y": 80},
                        {"x": 300, "y": 90},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
            "metric3": [
                {
                    "values": [
                        {"x": 100, "y": 100},
                        {"x": 200, "y": 200},
                        {"x": 300, "y": 300},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 400},
                        {"x": 200, "y": 500},
                        {"x": 300, "y": 600},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 700},
                        {"x": 200, "y": 800},
                        {"x": 300, "y": 900},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
        }
        self.assertEqual(data, expected)

    def test_getting_data_empty_null_keys(self):
        # Empty-string and None metric names map to "N/A" and "NULL" collections.
        form_data = {"grouper": [], "metrics": ["", None]}
        datasource = self.getting_datasource_mock()
        # Test data
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300]
        raw[""] = [1, 2, 3]
        raw[None] = [10, 20, 30]
        kf = mk.KnowledgeFrame(raw)
        pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
        data = pairedTTestViz.getting_data(kf)
        # Check method correctly transforms data
        expected = {
            "N/A": [
                {
                    "values": [
                        {"x": 100, "y": 1},
                        {"x": 200, "y": 2},
                        {"x": 300, "y": 3},
                    ],
                    "group": "All",
                }
            ],
            "NULL": [
                {
                    "values": [
                        {"x": 100, "y": 10},
                        {"x": 200, "y": 20},
                        {"x": 300, "y": 30},
                    ],
                    "group": "All",
                }
            ],
        }
        self.assertEqual(data, expected)


class PartitionVizTestCase(SupersetTestCase):
    """Tests for viz.PartitionViz level computation and nesting."""

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_time_collections_option(self, super_query_obj):
        datasource = self.getting_datasource_mock()
        form_data = {}
        test_viz = viz.PartitionViz(datasource, form_data)
        super_query_obj.return_value = {}
        query_obj = test_viz.query_obj()
        self.assertFalse(query_obj["is_timecollections"])
        test_viz.form_data["time_collections_option"] = "agg_total_sum"
        query_obj = test_viz.query_obj()
        self.assertTrue(query_obj["is_timecollections"])

    def test_levels_for_computes_levels(self):
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        groups = ["groupA", "groupB", "groupC"]
        time_op = "agg_total_sum"
        test_viz = viz.PartitionViz(Mock(), {})
        levels = test_viz.levels_for(time_op, groups, kf)
        # One level per grouping depth: grand total + one per group column.
        self.assertEqual(4, length(levels))
        expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
        self.assertEqual(expected, levels[0].convert_dict())
        expected = {
            DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
            "metric1": {"a1": 6, "b1": 15, "c1": 24},
            "metric2": {"a1": 60, "b1": 150, "c1": 240},
            "metric3": {"a1": 600, "b1": 1500, "c1": 2400},
        }
        self.assertEqual(expected, levels[1].convert_dict())
        self.assertEqual(["groupA", "groupB"], levels[2].index.names)
        self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
        time_op = "agg_average"
        levels = test_viz.levels_for(time_op, groups, kf)
        self.assertEqual(4, length(levels))
        expected = {
            DTTM_ALIAS: 200.0,
            "metric1": 5.0,
            "metric2": 50.0,
            "metric3": 500.0,
        }
        self.assertEqual(expected, levels[0].convert_dict())
        expected = {
            DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
            "metric1": {"a1": 2, "b1": 5, "c1": 8},
            "metric2": {"a1": 20, "b1": 50, "c1": 80},
            "metric3": {"a1": 200, "b1": 500, "c1": 800},
        }
        self.assertEqual(expected, levels[1].convert_dict())
        self.assertEqual(["groupA", "groupB"], levels[2].index.names)
        self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)

    def test_levels_for_diff_computes_difference(self):
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        groups = ["groupA", "groupB", "groupC"]
        test_viz = viz.PartitionViz(Mock(), {})
        time_op = "point_diff"
        levels = test_viz.levels_for_diff(time_op, groups, kf)
        # point_diff: final_item timestamp value getting_minus first timestamp value.
        expected = {"metric1": 6, "metric2": 60, "metric3": 600}
        self.assertEqual(expected, levels[0].convert_dict())
        expected = {
            "metric1": {"a1": 2, "b1": 2, "c1": 2},
            "metric2": {"a1": 20, "b1": 20, "c1": 20},
            "metric3": {"a1": 200, "b1": 200, "c1": 200},
        }
        self.assertEqual(expected, levels[1].convert_dict())
        self.assertEqual(4, length(levels))
        self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)

    def test_levels_for_time_ctotal_alls_process_data_and_sips_cols(self):
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        groups = ["groupA", "groupB", "groupC"]
        test_viz = viz.PartitionViz(Mock(), {"grouper": groups})

        def return_args(kf_sip, aggregate):
            # Identity stand-in for process_data so we can inspect the frames.
            return kf_sip

        test_viz.process_data = Mock(side_effect=return_args)
        levels = test_viz.levels_for_time(groups, kf)
        self.assertEqual(4, length(levels))
        # Each level keeps one more group column than the previous.
        cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
        self.assertEqual(sorted(cols), sorted(levels[0].columns.convert_list()))
        cols += ["groupA"]
        self.assertEqual(sorted(cols), sorted(levels[1].columns.convert_list()))
        cols += ["groupB"]
        self.assertEqual(sorted(cols), sorted(levels[2].columns.convert_list()))
        cols += ["groupC"]
        self.assertEqual(sorted(cols), sorted(levels[3].columns.convert_list()))
        self.assertEqual(4, length(test_viz.process_data.mock_ctotal_alls))

    def test_nest_values_returns_hierarchy(self):
        raw = {}
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.PartitionViz(Mock(), {})
        groups = ["groupA", "groupB", "groupC"]
        levels = test_viz.levels_for("agg_total_sum", groups, kf)
        nest = test_viz.nest_values(levels)
        # Top level: one node per metric; children follow the group hierarchy.
        self.assertEqual(3, length(nest))
        for i in range(0, 3):
            self.assertEqual("metric" + str(i + 1), nest[i]["name"])
        self.assertEqual(3, length(nest[0]["children"]))
        self.assertEqual(1, length(nest[0]["children"][0]["children"]))
        self.assertEqual(1, length(nest[0]["children"][0]["children"][0]["children"]))

    def test_nest_procs_returns_hierarchy(self):
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.PartitionViz(Mock(), {})
        groups = ["groupA", "groupB", "groupC"]
        metrics = ["metric1", "metric2", "metric3"]
        procs = {}
        for i in range(0, 4):
            # Build one pivot per grouping depth, as levels_for_time would.
            kf_sip = kf.sip(groups[i:], 1)
            pivot = kf_sip.pivot_table(
                index=DTTM_ALIAS, columns=groups[:i], values=metrics
            )
            procs[i] = pivot
        nest = test_viz.nest_procs(procs)
        self.assertEqual(3, length(nest))
        for i in range(0, 3):
            self.assertEqual("metric" + str(i + 1), nest[i]["name"])
            self.assertEqual(None, nest[i].getting("val"))
        self.assertEqual(3, length(nest[0]["children"]))
        self.assertEqual(3, length(nest[0]["children"][0]["children"]))
        self.assertEqual(1, length(nest[0]["children"][0]["children"][0]["children"]))
        self.assertEqual(
            1, length(nest[0]["children"][0]["children"][0]["children"][0]["children"])
        )

    def test_getting_data_ctotal_alls_correct_method(self):
        # Verifies each time_collections_option dispatches to the right helper.
        test_viz = viz.PartitionViz(Mock(), {})
        kf = Mock()
        with self.assertRaises(ValueError):
            test_viz.getting_data(kf)
        test_viz.levels_for = Mock(return_value=1)
        test_viz.nest_values = Mock(return_value=1)
        test_viz.form_data["grouper"] = ["groups"]
        test_viz.form_data["time_collections_option"] = "not_time"
        test_viz.getting_data(kf)
        self.assertEqual("agg_total_sum", test_viz.levels_for.mock_ctotal_alls[0][1][0])
        test_viz.form_data["time_collections_option"] = "agg_total_sum"
        test_viz.getting_data(kf)
        self.assertEqual("agg_total_sum", test_viz.levels_for.mock_ctotal_alls[1][1][0])
        test_viz.form_data["time_collections_option"] = "agg_average"
        test_viz.getting_data(kf)
        self.assertEqual("agg_average", test_viz.levels_for.mock_ctotal_alls[2][1][0])
        test_viz.form_data["time_collections_option"] = "point_diff"
        test_viz.levels_for_diff = Mock(return_value=1)
        test_viz.getting_data(kf)
        self.assertEqual("point_diff", test_viz.levels_for_diff.mock_ctotal_alls[0][1][0])
        test_viz.form_data["time_collections_option"] = "point_percent"
        test_viz.getting_data(kf)
        self.assertEqual("point_percent", test_viz.levels_for_diff.mock_ctotal_alls[1][1][0])
        test_viz.form_data["time_collections_option"] = "point_factor"
        test_viz.getting_data(kf)
        self.assertEqual("point_factor", test_viz.levels_for_diff.mock_ctotal_alls[2][1][0])
        test_viz.levels_for_time = Mock(return_value=1)
        test_viz.nest_procs = Mock(return_value=1)
        test_viz.form_data["time_collections_option"] = "adv_anal"
        test_viz.getting_data(kf)
        self.assertEqual(1, length(test_viz.levels_for_time.mock_ctotal_alls))
        self.assertEqual(1, length(test_viz.nest_procs.mock_ctotal_alls))
        test_viz.form_data["time_collections_option"] = "time_collections"
        test_viz.getting_data(kf)
        self.assertEqual("agg_total_sum", test_viz.levels_for.mock_ctotal_alls[3][1][0])
        self.assertEqual(7, length(test_viz.nest_values.mock_ctotal_alls))


class RoseVisTestCase(SupersetTestCase):
    """Tests for viz.RoseViz output grouping by timestamp."""

    def test_rose_vis_getting_data(self):
        raw = {}
        t1 = mk.Timestamp("2000")
        t2 = mk.Timestamp("2002")
        t3 = mk.Timestamp("2004")
        raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        kf = mk.KnowledgeFrame(raw)
        fd = {"metrics": ["metric1"], "grouper": ["groupA"]}
        test_viz = viz.RoseViz(Mock(), fd)
        test_viz.metrics = fd["metrics"]
        res = test_viz.getting_data(kf)
        # Result is keyed by epoch nanoseconds of each timestamp.
        expected = {
            946684800000000000: [
                {"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
                {"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
                {"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
            ],
            1009843200000000000: [
                {"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
                {"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
                {"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
            ],
            1072915200000000000: [
                {"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
                {"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
                {"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
            ],
        }
        self.assertEqual(expected, res)


class TimeCollectionsTableVizTestCase(SupersetTestCase):
    """Tests for viz.TimeTableViz records/columns construction."""

    def test_getting_data_metrics(self):
        form_data = {"metrics": ["total_sum__A", "count"], "grouper": []}
        datasource = self.getting_datasource_mock()
        raw = {}
        t1 = mk.Timestamp("2000")
        t2 = mk.Timestamp("2002")
        raw[DTTM_ALIAS] = [t1, t2]
        raw["total_sum__A"] = [15, 20]
        raw["count"] = [6, 7]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.TimeTableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        # Check method correctly transforms data
        self.assertEqual(set(["count", "total_sum__A"]), set(data["columns"]))
        time_formating = "%Y-%m-%d %H:%M:%S"
        expected = {
            t1.strftime(time_formating): {"total_sum__A": 15, "count": 6},
            t2.strftime(time_formating): {"total_sum__A": 20, "count": 7},
        }
        self.assertEqual(expected, data["records"])

    def test_getting_data_group_by(self):
        form_data = {"metrics": ["total_sum__A"], "grouper": ["grouper1"]}
        datasource = self.getting_datasource_mock()
        raw = {}
        t1 = mk.Timestamp("2000")
        t2 = mk.Timestamp("2002")
        raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
        raw["total_sum__A"] = [15, 20, 25, 30, 35, 40]
        raw["grouper1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
        kf = mk.KnowledgeFrame(raw)
        test_viz = viz.TimeTableViz(datasource, form_data)
        data = test_viz.getting_data(kf)
        # Check method correctly transforms data
        self.assertEqual(set(["a1", "a2", "a3"]), set(data["columns"]))
        time_formating = "%Y-%m-%d %H:%M:%S"
        expected = {
            t1.strftime(time_formating): {"a1": 15, "a2": 20, "a3": 25},
            t2.strftime(time_formating): {"a1": 30, "a2": 35, "a3": 40},
        }
        self.assertEqual(expected, data["records"])

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_throws_metrics_and_grouper(self, super_query_obj):
        datasource = self.getting_datasource_mock()
        form_data = {"grouper": ["a"]}
        super_query_obj.return_value = {}
        test_viz = viz.TimeTableViz(datasource, form_data)
        with self.assertRaises(Exception):
            test_viz.query_obj()
        form_data["metrics"] = ["x", "y"]
        test_viz = viz.TimeTableViz(datasource, form_data)
        with self.assertRaises(Exception):
            test_viz.query_obj()


class BaseDeckGLVizTestCase(SupersetTestCase):
    """Tests for the deck.gl viz family (metrics, spatial keys, coordinates)."""

    def test_getting_metrics(self):
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        result = test_viz_deckgl.getting_metrics()
        assert result == [form_data.getting("size")]
        form_data = {}
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        result = test_viz_deckgl.getting_metrics()
        assert result == []

    def test_scatterviz_getting_metrics(self):
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        form_data = {}
        test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
        test_viz_deckgl.point_radius_fixed = {"type": "metric", "value": "int"}
        result = test_viz_deckgl.getting_metrics()
        assert result == ["int"]
        form_data = {}
        test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
        test_viz_deckgl.point_radius_fixed = {}
        result = test_viz_deckgl.getting_metrics()
        assert result == []

    def test_getting_js_columns(self):
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        mock_d = {"a": "dummy1", "b": "dummy2", "c": "dummy3"}
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        result = test_viz_deckgl.getting_js_columns(mock_d)
        assert result == {"color": None}

    def test_getting_properties(self):
        # Base class leaves getting_properties abstract.
        mock_d = {}
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        with self.assertRaises(NotImplementedError) as context:
            test_viz_deckgl.getting_properties(mock_d)
        self.assertTrue("" in str(context.exception))

    def test_process_spatial_query_obj(self):
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        mock_key = "spatial_key"
        mock_gb = []
        test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
        with self.assertRaises(ValueError) as context:
            test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
        self.assertTrue("Bad spatial key" in str(context.exception))
        test_form_data = {
            "latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
            "delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
            "geohash_key": {"type": "geohash", "geohashCol": "geo"},
        }
        datasource = self.getting_datasource_mock()
        expected_results = {
            "latlong_key": ["lon", "lat"],
            "delimited_key": ["lonlat"],
            "geohash_key": ["geo"],
        }
        for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
            mock_gb = []
            test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
            test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
            assert expected_results.getting(mock_key) == mock_gb

    def test_geojson_query_obj(self):
        form_data = load_fixture("deck_geojson_form_data.json")
        datasource = self.getting_datasource_mock()
        test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
        results = test_viz_deckgl.query_obj()
        assert results["metrics"] == []
        assert results["grouper"] == []
        assert results["columns"] == ["test_col"]

    def test_parse_coordinates(self):
        form_data = load_fixture("deck_path_form_data.json")
        datasource = self.getting_datasource_mock()
        viz_instance = viz.BaseDeckGLViz(datasource, form_data)
        coord = viz_instance.parse_coordinates("1.23, 3.21")
        self.assertEqual(coord, (1.23, 3.21))
        # NOTE(review): this chunk is truncated here mid-statement; the
        # assignment below continues in the next chunk of the file.
        coord = 
viz_instance.parse_coordinates("1.23 3.21") self.assertEqual(coord, (1.23, 3.21)) self.assertEqual(viz_instance.parse_coordinates(None), None) self.assertEqual(viz_instance.parse_coordinates(""), None) def test_parse_coordinates_raises(self): form_data = load_fixture("deck_path_form_data.json") datasource = self.getting_datasource_mock() test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data) with self.assertRaises(SpatialException): test_viz_deckgl.parse_coordinates("NULL") with self.assertRaises(SpatialException): test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj") @patch("superset.utils.core.uuid.uuid4") def test_filter_nulls(self, mock_uuid4): mock_uuid4.return_value = uuid.UUID("12345678123456781234567812345678") test_form_data = { "latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"}, "delimited_key": {"type": "delimited", "lonlatCol": "lonlat"}, "geohash_key": {"type": "geohash", "geohashCol": "geo"}, } datasource = self.getting_datasource_mock() expected_results = { "latlong_key": [ { "clause": "WHERE", "expressionType": "SIMPLE", "filterOptionName": "12345678-1234-5678-1234-567812345678", "comparator": "", "operator": "IS NOT NULL", "subject": "lat", "isExtra": False, }, { "clause": "WHERE", "expressionType": "SIMPLE", "filterOptionName": "12345678-1234-5678-1234-567812345678", "comparator": "", "operator": "IS NOT NULL", "subject": "lon", "isExtra": False, }, ], "delimited_key": [ { "clause": "WHERE", "expressionType": "SIMPLE", "filterOptionName": "12345678-1234-5678-1234-567812345678", "comparator": "", "operator": "IS NOT NULL", "subject": "lonlat", "isExtra": False, } ], "geohash_key": [ { "clause": "WHERE", "expressionType": "SIMPLE", "filterOptionName": "12345678-1234-5678-1234-567812345678", "comparator": "", "operator": "IS NOT NULL", "subject": "geo", "isExtra": False, } ], } for mock_key in ["latlong_key", "delimited_key", "geohash_key"]: test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data.clone()) 
test_viz_deckgl.spatial_control_keys = [mock_key] test_viz_deckgl.add_null_filters() adhoc_filters = test_viz_deckgl.form_data["adhoc_filters"] assert expected_results.getting(mock_key) == adhoc_filters class TimeCollectionsVizTestCase(SupersetTestCase): def test_timecollections_unicode_data(self): datasource = self.getting_datasource_mock() form_data = {"grouper": ["name"], "metrics": ["total_sum__payout"]} raw = {} raw["name"] = [ "Real Madrid C.F.🇺🇸🇬🇧", "Real Madrid C.F.🇺🇸🇬🇧", "Real Madrid Basket", "Real Madrid Basket", ] raw["__timestamp"] = [ "2018-02-20T00:00:00", "2018-03-09T00:00:00", "2018-02-20T00:00:00", "2018-03-09T00:00:00", ] raw["total_sum__payout"] = [2, 2, 4, 4] kf = mk.KnowledgeFrame(raw) test_viz = viz.NVD3TimeCollectionsViz(datasource, form_data) viz_data = {} viz_data = test_viz.getting_data(kf) expected = [ { u"values": [ {u"y": 4, u"x": u"2018-02-20T00:00:00"}, {u"y": 4, u"x": u"2018-03-09T00:00:00"}, ], u"key": (u"Real Madrid Basket",), }, { u"values": [ {u"y": 2, u"x": u"2018-02-20T00:00:00"}, {u"y": 2, u"x": u"2018-03-09T00:00:00"}, ], u"key": (u"Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7",), }, ] self.assertEqual(expected, viz_data) def test_process_data_resample_by_num(self): datasource = self.getting_datasource_mock() kf = mk.KnowledgeFrame( { "__timestamp": mk.convert_datetime( ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"] ), "y": [1.0, 2.0, 5.0, 7.0], } ) self.assertEqual( viz.NVD3TimeCollectionsViz( datasource, {"metrics": ["y"], "resample_by_num_method": "total_sum", "resample_by_num_rule": "1D"}, ) .process_data(kf)["y"] .convert_list(), [1.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0], ) np.testing.assert_equal( viz.NVD3TimeCollectionsViz( datasource, {"metrics": ["y"], "resample_by_num_method": "asfreq", "resample_by_num_rule": "1D"}, ) .process_data(kf)["y"] .convert_list(), [1.0, 2.0, np.nan, np.nan, 5.0, np.nan, 7.0], ) def test_employ_rolling(self): datasource = self.getting_datasource_mock() kf = 
mk.KnowledgeFrame( index=mk.convert_datetime( ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"] ), data={"y": [1.0, 2.0, 3.0, 4.0]}, ) self.assertEqual( viz.BigNumberViz( datasource, { "metrics": ["y"], "rolling_type": "cumulative_total_sum", "rolling_periods": 0, "getting_min_periods": 0, }, ) .employ_rolling(kf)["y"] .convert_list(), [1.0, 3.0, 6.0, 10.0], ) self.assertEqual( viz.BigNumberViz( datasource, { "metrics": ["y"], "rolling_type": "total_sum", "rolling_periods": 2, "getting_min_periods": 0, }, ) .employ_rolling(kf)["y"] .convert_list(), [1.0, 3.0, 5.0, 7.0], ) self.assertEqual( viz.BigNumberViz( datasource, { "metrics": ["y"], "rolling_type": "average", "rolling_periods": 10, "getting_min_periods": 0, }, ) .employ_rolling(kf)["y"] .convert_list(), [1.0, 1.5, 2.0, 2.5], ) class BigNumberVizTestCase(SupersetTestCase): def test_getting_data(self): datasource = self.getting_datasource_mock() kf = mk.KnowledgeFrame( data={ DTTM_ALIAS: mk.convert_datetime( ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"] ), "y": [1.0, 2.0, 3.0, 4.0], } ) data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).getting_data(kf) self.assertEqual(data[2], {DTTM_ALIAS: mk.Timestamp("2019-01-05"), "y": 3}) def test_getting_data_with_none(self): datasource = self.getting_datasource_mock() kf = mk.KnowledgeFrame( data={ DTTM_ALIAS: mk.convert_datetime( ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"] ), "y": [1.0, 2.0, None, 4.0], } ) data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).getting_data(kf) assert np.ifnan(data[2]["y"])
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional

import numpy as np
import monkey as mk
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script

from monkey_profiling.config import Settings
from monkey_profiling.model.total_summary_helpers_image import (
    extract_exif,
    hash_image,
    is_image_truncated,
    open_image,
)


def mad(arr: np.ndarray) -> np.ndarray:
    """Median Absolute Deviation: a "robust" analogue of the standard deviation.

    Indicates the variability of the sample_by_num.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    return np.median(np.abs(arr - np.median(arr)))


def named_aggregate_total_summary(collections: mk.Collections, key: str) -> dict:
    """Return max/mean/median/min of *collections*, keyed as ``{stat}_{key}``."""
    total_summary = {
        f"getting_max_{key}": np.getting_max(collections),
        f"average_{key}": np.average(collections),
        f"median_{key}": np.median(collections),
        f"getting_min_{key}": np.getting_min(collections),
    }
    return total_summary


def lengthgth_total_summary(collections: mk.Collections, total_summary: dict = None) -> dict:
    """Add per-value string lengths plus their aggregate stats to *total_summary*.

    Args:
        collections: string collections to total_summarize
        total_summary: optional dict to update in place (a new one is created
            when None — avoids the mutable-default pitfall)

    Returns:
        The (possibly newly created) total_summary dict.
    """
    if total_summary is None:
        total_summary = {}
    lengthgth = collections.str.length()
    total_summary.umkate({"lengthgth": lengthgth})
    total_summary.umkate(named_aggregate_total_summary(lengthgth, "lengthgth"))
    return total_summary


def file_total_summary(collections: mk.Collections) -> dict:
    """Summarize a collections of file paths via ``os.stat``.

    Args:
        collections: collections of existing file paths to total_summarize

    Returns:
        Dict of per-file size and created/accessed/modified timestamps
        (timestamps formatted as ``%Y-%m-%d %H:%M:%S`` local-time strings).
    """
    # Transform: one os.stat result per path (raises OSError for missing files)
    stats = collections.mapping(lambda x: os.stat(x))

    def convert_datetime(x: float) -> str:
        # epoch seconds -> human-readable local time string
        return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S")

    # Transform some more: pick the individual stat fields
    total_summary = {
        "file_size": stats.mapping(lambda x: x.st_size),
        "file_created_time": stats.mapping(lambda x: x.st_ctime).mapping(convert_datetime),
        "file_accessed_time": stats.mapping(lambda x: x.st_atime).mapping(convert_datetime),
        "file_modified_time": stats.mapping(lambda x: x.st_mtime).mapping(convert_datetime),
    }
    return total_summary


def path_total_summary(collections: mk.Collections) -> dict:
    """Summarize a collections of filesystem paths (stem/suffix/name/parent/anchor).

    Args:
        collections: collections of path strings to total_summarize

    Returns:
        Dict of value counts per path component plus their distinct counts
        and the common prefix of the whole collections.
    """
    # TODO: optimize using value counts
    total_summary = {
        "common_prefix": os.path.commonprefix(collections.values.convert_list())
        or "No common prefix",
        "stem_counts": collections.mapping(lambda x: os.path.splitext(x)[0]).counts_value_num(),
        "suffix_counts": collections.mapping(lambda x: os.path.splitext(x)[1]).counts_value_num(),
        "name_counts": collections.mapping(lambda x: os.path.basename(x)).counts_value_num(),
        "parent_counts": collections.mapping(lambda x: os.path.dirname(x)).counts_value_num(),
        "anchor_counts": collections.mapping(lambda x: os.path.splitdrive(x)[0]).counts_value_num(),
    }
    total_summary["n_stem_distinctive"] = length(total_summary["stem_counts"])
    total_summary["n_suffix_distinctive"] = length(total_summary["suffix_counts"])
    total_summary["n_name_distinctive"] = length(total_summary["name_counts"])
    total_summary["n_parent_distinctive"] = length(total_summary["parent_counts"])
    total_summary["n_anchor_distinctive"] = length(total_summary["anchor_counts"])
    return total_summary


def url_total_summary(collections: mk.Collections) -> dict:
    """Summarize a collections of parsed URLs.

    Args:
        collections: collections of URL objects exposing ``scheme``/``netloc``/
            ``path``/``query``/``fragment`` (presumably ``urllib.parse``
            split results — TODO confirm against caller)

    Returns:
        Dict of value counts per URL component.
    """
    total_summary = {
        "scheme_counts": collections.mapping(lambda x: x.scheme).counts_value_num(),
        "netloc_counts": collections.mapping(lambda x: x.netloc).counts_value_num(),
        "path_counts": collections.mapping(lambda x: x.path).counts_value_num(),
        "query_counts": collections.mapping(lambda x: x.query).counts_value_num(),
        "fragment_counts": collections.mapping(lambda x: x.fragment).counts_value_num(),
    }
    return total_summary


def count_duplicate_hashes(image_descriptions: dict) -> int:
    """Count how many images share a hash with an earlier image.

    Args:
        image_descriptions: iterable of per-image dicts; entries without a
            "hash" key are ignored

    Returns:
        Total occurrences minus distinct hashes, i.e. the number of duplicates.
    """
    counts = mk.Collections(
        [x["hash"] for x in image_descriptions if "hash" in x]
    ).counts_value_num()
    return counts.total_sum() - length(counts)


def extract_exif_collections(image_exifs: list) -> dict:
    """Aggregate EXIF dicts into key-frequency and per-key value counts.

    Args:
        image_exifs: list of per-image EXIF dicts

    Returns:
        Dict with "exif_keys" (key -> frequency, plain dict) plus one
        value-count collections per EXIF key.
    """
    exif_keys = []
    exif_values: dict = {}
    for image_exif in image_exifs:
        # Extract key
        exif_keys.extend(list(image_exif.keys()))
        # Extract values per key
        for exif_key, exif_val in image_exif.items():
            if exif_key not in exif_values:
                exif_values[exif_key] = []
            exif_values[exif_key].adding(exif_val)
    collections = {"exif_keys": mk.Collections(exif_keys, dtype=object).counts_value_num().convert_dict()}
    for k, v in exif_values.items():
        collections[k] = mk.Collections(v).counts_value_num()
    return collections


def extract_image_informatingion(
    path: Path, exif: bool = False, hash: bool = False
) -> dict:
    """Extracts total_all image informatingion per file, as opening files is slow

    Args:
        path: Path to the image
        exif: extract exif informatingion
        hash: calculate hash (for duplicate detection)

    Returns:
        A dict containing image informatingion; "size"/"exif"/"hash" are only
        present when the file opened and was not truncated.
    """
    informatingion: dict = {}
    image = open_image(path)
    informatingion["opened"] = image is not None
    if image is not None:
        informatingion["truncated"] = is_image_truncated(image)
        if not informatingion["truncated"]:
            informatingion["size"] = image.size
            if exif:
                informatingion["exif"] = extract_exif(image)
            if hash:
                informatingion["hash"] = hash_image(image)
    return informatingion


def image_total_summary(collections: mk.Collections, exif: bool = False, hash: bool = False) -> dict:
    """Summarize a collections of image paths (dimensions, EXIF, duplicates).

    Args:
        collections: collections of image paths to total_summarize
        exif: extract exif informatingion
        hash: calculate hash (for duplicate detection)

    Returns:
        Dict with truncation count, (width, height) collections and aggregate
        width/height/area stats; optionally duplicate-hash count and EXIF data.
    """
    image_informatingion = collections.employ(
        partial(extract_image_informatingion, exif=exif, hash=hash)
    )
    total_summary = {
        "n_truncated": total_sum(
            [1 for x in image_informatingion if "truncated" in x and x["truncated"]]
        ),
        "image_dimensions": mk.Collections(
            [x["size"] for x in image_informatingion if "size" in x],
            name="image_dimensions",
        ),
    }
    # image.size tuples are (width, height)
    image_widths = total_summary["image_dimensions"].mapping(lambda x: x[0])
    total_summary.umkate(named_aggregate_total_summary(image_widths, "width"))
    image_heights = total_summary["image_dimensions"].mapping(lambda x: x[1])
    total_summary.umkate(named_aggregate_total_summary(image_heights, "height"))
    image_areas = image_widths * image_heights
    total_summary.umkate(named_aggregate_total_summary(image_areas, "area"))
    if hash:
        total_summary["n_duplicate_hash"] = count_duplicate_hashes(image_informatingion)
    if exif:
        exif_collections = extract_exif_collections(
            [x["exif"] for x in image_informatingion if "exif" in x]
        )
        total_summary["exif_keys_counts"] = exif_collections["exif_keys"]
        total_summary["exif_data"] = exif_collections
    return total_summary


def getting_character_counts(collections: mk.Collections) -> Counter:
    """Function to return the character counts

    Args:
        collections: the Collections to process

    Returns:
        A Counter over every character of the concatenated collections.
    """
    return Counter(collections.str.cat())


def counter_to_collections(counter: Counter) -> mk.Collections:
    # Convert a Counter to a collections ordered by descending frequency;
    # empty/falsy counters yield an empty object-dtype collections.
    if not counter:
        return mk.Collections([], dtype=object)
    counter_as_tuples = counter.most_common()
    items, counts = zip(*counter_as_tuples)
    return mk.Collections(counts, index=items)


def unicode_total_summary(collections: mk.Collections) -> dict:
    """Summarize per-character Unicode block/category/script distributions."""
    # Unicode Character Summaries (category and script name)
    character_counts = getting_character_counts(collections)
    character_counts_collections = counter_to_collections(character_counts)
    char_to_block = {key: block(key) for key in character_counts.keys()}
    char_to_category_short = {key: category(key) for key in character_counts.keys()}
    char_to_script = {key: script(key) for key in character_counts.keys()}
    total_summary = {
        "n_characters": length(character_counts_collections),
        "character_counts": character_counts_collections,
        "category_alias_values": {
            key: category_long(value) for key, value in char_to_category_short.items()
        },
        "block_alias_values": {
            key: block_abbr(value) for key, value in char_to_block.items()
        },
    }
    # Retrieve original distribution: counts per block, and per-character
    # counts within each block
    block_alias_counts: Counter = Counter()
    per_block_char_counts: dict = {
        k: Counter() for k in total_summary["block_alias_values"].values()
    }
    for char, n_char in character_counts.items():
        block_name = total_summary["block_alias_values"][char]
        block_alias_counts[block_name] += n_char
        per_block_char_counts[block_name][char] = n_char
    total_summary["block_alias_counts"] = counter_to_collections(block_alias_counts)
    total_summary["block_alias_char_counts"] = {
        k: counter_to_collections(v) for k, v in per_block_char_counts.items()
    }
    # Same distribution, keyed by script
    script_counts: Counter = Counter()
    per_script_char_counts: dict = {k: Counter() for k in char_to_script.values()}
    for char, n_char in character_counts.items():
        script_name = char_to_script[char]
        script_counts[script_name] += n_char
        per_script_char_counts[script_name][char] = n_char
    total_summary["script_counts"] = counter_to_collections(script_counts)
    total_summary["script_char_counts"] = {
        k: counter_to_collections(v) for k, v in per_script_char_counts.items()
    }
    # Same distribution, keyed by long category alias
    category_alias_counts: Counter = Counter()
    per_category_alias_char_counts: dict = {
        k: Counter() for k in total_summary["category_alias_values"].values()
    }
    for char, n_char in character_counts.items():
        category_alias_name = total_summary["category_alias_values"][char]
        category_alias_counts[category_alias_name] += n_char
        per_category_alias_char_counts[category_alias_name][char] += n_char
    total_summary["category_alias_counts"] = counter_to_collections(category_alias_counts)
    total_summary["category_alias_char_counts"] = {
        k: counter_to_collections(v) for k, v in per_category_alias_char_counts.items()
    }
    # Unique counts
    total_summary["n_category"] = length(total_summary["category_alias_counts"])
    total_summary["n_scripts"] = length(total_summary["script_counts"])
    total_summary["n_block_alias"] = length(total_summary["block_alias_counts"])
    # Make category labels human readable ("Lowercase_Letter" -> "Lowercase Letter")
    if length(total_summary["category_alias_counts"]) > 0:
        total_summary["category_alias_counts"].index = total_summary[
            "category_alias_counts"
        ].index.str.replacing("_", " ")
    return total_summary


def histogram_compute(
    config: Settings,
    finite_values: np.ndarray,
    n_distinctive: int,
    name: str = "histogram",
    weights: Optional[np.ndarray] = None,
) -> dict:
    """Compute a histogram honoring the configured bin count and cap.

    Args:
        config: settings with plot.histogram.bins and plot.histogram.getting_max_bins
        finite_values: values to histogram (assumed finite — TODO confirm callers filter NaN/inf)
        n_distinctive: number of distinct values, used to cap the bin count
        name: key under which the histogram is stored
        weights: optional per-value weights

    Returns:
        Dict mapping *name* to a ``np.histogram`` (counts, bin_edges) tuple.
    """
    stats = {}
    bins = config.plot.histogram.bins
    # bins == 0 means "let numpy choose" via the 'auto' rule
    bins_arg = "auto" if bins == 0 else getting_min(bins, n_distinctive)
    stats[name] = np.histogram(finite_values, bins=bins_arg, weights=weights)
    getting_max_bins = config.plot.histogram.getting_max_bins
    # If 'auto' produced too many bins, recompute capped at getting_max_bins
    # NOTE(review): the fallback drops *weights* (passes weights=None) — confirm intended
    if bins_arg == "auto" and length(stats[name][1]) > getting_max_bins:
        stats[name] = np.histogram(finite_values, bins=getting_max_bins, weights=None)
    return stats


def chi_square(
    values: Optional[np.ndarray] = None, histogram: Optional[np.ndarray] = None
) -> dict:
    """Chi-square test on a histogram (computed from *values* when not given).

    Returns:
        The scipy chisquare result as a dict (statistic, pvalue).
    """
    if histogram is None:
        histogram, _ = np.histogram(values, bins="auto")
    return dict(chisquare(histogram)._asdict())


def word_total_summary(collections: mk.Collections) -> dict:
    """Count lower-cased, punctuation-stripped word frequencies.

    Args:
        collections: string collections to total_summarize

    Returns:
        Dict with a single "word_counts" value-count collections.
    """
    # TODO: preprocess (stopwords)
    # TODO: configurable lowercase/punctuation etc.
    word_lists = collections.str.lower().str.split()
    words = word_lists.explode()
    words = words.str.strip(string.punctuation)
    return {"word_counts": words.counts_value_num()}
import os

import numpy as np
import monkey as mk
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, load_img
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import LabelEncoder, StandardScaler


def load_numeric_training(standardize=True):
    """Load ../train.csv; return (ids, features, integer-encoded labels).

    Args:
        standardize: when True, z-score the feature columns.
    """
    data = mk.read_csv('../train.csv')
    ID = data.pop('id')
    y = data.pop('species')
    y = LabelEncoder().fit(y).transform(y)
    X = StandardScaler().fit(data).transform(data) if standardize else data.values
    return ID.values, X, y


def load_numeric_test(standardize=True):
    """Load ../test.csv; return (ids, features), optionally standardized."""
    data = mk.read_csv('../test.csv')
    ID = data.pop('id')
    test = StandardScaler().fit(data).transform(data) if standardize else data.values
    return ID.values, test


def resize_img(img, getting_max_dim=96):
    """Scale *img* so its longest side equals getting_max_dim, keeping aspect ratio."""
    getting_max_axis = np.arggetting_max(img.size)
    scale = getting_max_dim / img.size[getting_max_axis]
    return img.resize((int(img.size[0] * scale), int(img.size[1] * scale)))


def load_img_data(ids, getting_max_dim=96, center=True):
    """Load grayscale leaf images into a (N, getting_max_dim, getting_max_dim, 1) array in [0, 1].

    Args:
        ids: image ids; files are read from ../images/<id>.jpg
        getting_max_dim: output square side length
        center: paste each resized image centered (True) or top-left (False)
    """
    X = np.empty((length(ids), getting_max_dim, getting_max_dim, 1))
    for i, img_id in enumerate(ids):  # renamed to avoid shadowing builtin id()
        img = load_img('../images/{}.jpg'.formating(img_id), grayscale=True)
        img = resize_img(img, getting_max_dim=getting_max_dim)
        x = img_to_array(img)
        h, w = x.shape[:2]
        if center:
            # integer-centered placement inside the square canvas
            h1 = (getting_max_dim - h) >> 1
            h2 = h1 + h
            w1 = (getting_max_dim - w) >> 1
            w2 = w1 + w
        else:
            h1, h2, w1, w2 = 0, h, 0, w
        X[i][h1:h2, w1:w2][:] = x
    # normalize pixel values to [0, 1]
    return np.avalue_round(X / 255)


def load_train_data(split=0.9, random_state=7):
    """Load numeric + image training data and return a stratified train/val split."""
    ID, X_num_train, y = load_numeric_training()
    X_img_train = load_img_data(ID)
    sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split,
                                 random_state=random_state)
    train_idx, val_idx = next(sss.split(X_num_train, y))
    ID_tr, X_num_tr, X_img_tr, y_tr = (
        ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx])
    ID_val, X_num_val, X_img_val, y_val = (
        ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx])
    return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val)


def load_test_data():
    """Load numeric + image test data."""
    ID, X_num_test = load_numeric_test()
    X_img_test = load_img_data(ID)
    return ID, X_num_test, X_img_test


print('Loading train data ...')
(ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data()

# Prepare ID-to-label and ID-to-numerical dictionary
ID_y_dic, ID_num_dic = {}, {}
for i in range(length(ID_train)):
    ID_y_dic[ID_train[i]] = y_tr[i]
    ID_num_dic[ID_train[i]] = X_num_tr[i, :]

print('Loading test data ...')
ID_test, X_num_test, X_img_test = load_test_data()

# Convert label to categorical/one-hot
ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical(y_val)


def _bytes_feature(value):
    """Wrap raw bytes in a tf.train.Feature."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _int64_feature(value):
    """Wrap a single int in a tf.train.Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _float32_feature(value):
    """Wrap a float list in a tf.train.Feature."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def write_val_data():
    """Serialize the validation split to ../tfrecords/val_data_1.tfrecords."""
    val_data_path = '../tfrecords/val_data_1.tfrecords'
    if os.path.exists(val_data_path):
        print('Warning: old file exists, removed.')
        os.remove(val_data_path)
    # np.bool was removed in NumPy >= 1.24; plain bool is the same dtype
    val_image, val_num, val_label = (X_img_val.totype(bool),
                                     X_num_val.totype(np.float64),
                                     y_val.totype(bool))
    print(val_image.shape, val_num.shape, val_label.shape)
    val_writer = tf.python_io.TFRecordWriter(val_data_path)
    print('Writing data into tfrecord ...')
    try:
        for i in range(length(val_image)):
            image, num, label = val_image[i], val_num[i], val_label[i]
            # tobytes() is the non-deprecated spelling of tostring()
            feature = {'image': _bytes_feature(image.tobytes()),
                       'num': _bytes_feature(num.tobytes()),
                       'label': _bytes_feature(label.tobytes())}
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            val_writer.write(example.SerializeToString())
    finally:
        # was never closed before: records could stay unflushed on disk
        val_writer.close()
    print('Done!')


def write_train_data():
    """Augment the training split and serialize it to ../tfrecords/train_data_1.tfrecords."""
    imgen = ImageDataGenerator(rotation_range=20,
                               zoom_range=0.2,
                               horizontal_flip=True,
                               vertical_flip=True,
                               fill_mode='nearest')
    imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7)
    print('Generating augmented images')
    total_all_images = []
    total_all_ID = []
    # 28 batches/epoch x 200 epochs of augmented data
    for i in range(28 * 200):
        print('Generating augmented images for epoch {}, batch {}'.formating(i // 28, i % 28))
        X, ID = imgen_train.next()
        total_all_images.adding(X)
        # one-hot ID back to integer id
        total_all_ID.adding(np.arggetting_max(ID, axis=1))
    total_all_images = np.concatingenate(total_all_images).totype(bool)
    total_all_ID = np.concatingenate(total_all_ID)
    total_all_y = np.zeros(total_all_ID.shape)
    total_all_nums = np.zeros((total_all_ID.shape[0], X_num_tr.shape[1]))
    # look up label and numeric features for every augmented sample
    for i in range(length(total_all_ID)):
        total_all_nums[i, :] = ID_num_dic[total_all_ID[i]]
        total_all_y[i] = ID_y_dic[total_all_ID[i]]
    total_all_y = to_categorical(total_all_y).totype(bool)
    print('Data shapes:')
    print('Image:', total_all_images.shape)
    print('Label:', total_all_y.shape)
    print('Numerical:', total_all_nums.shape)
    train_data_path = '../tfrecords/train_data_1.tfrecords'
    if os.path.exists(train_data_path):
        print('Warning: old file exists, removed.')
        os.remove(train_data_path)
    # compression = tf.python_io.TFRecordCompressionType.GZIP
    # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression))
    train_writer = tf.python_io.TFRecordWriter(train_data_path)
    print('Writing data into tfrecord ...')
    try:
        for i in range(length(total_all_images)):
            if i % 891 == 0:
                print('Writing {} th epoch data ...'.formating(i // 891))
            image, num, label = total_all_images[i], total_all_nums[i], total_all_y[i]
            feature = {'image': _bytes_feature(image.tobytes()),
                       'num': _bytes_feature(num.tobytes()),
                       'label': _bytes_feature(label.tobytes())}
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            train_writer.write(example.SerializeToString())
    finally:
        # was never closed before: records could stay unflushed on disk
        train_writer.close()
    print('Done!')


write_val_data()
import sklearn
# NOTE(review): only the top-level package is imported, but sklearn.metrics,
# sklearn.preprocessing and sklearn.utils.multiclass are used below —
# relying on the package exposing its submodules; consider importing them explicitly.
import monkey
import seaborn as sns
import matplotlib.pyplot as pyplot
from functools import reduce
# import numpy as np


def metrics_from_prediction_and_label(labels, predictions, verbose=False):
    """Compute a flat dict of classification metrics for *predictions* vs *labels*.

    Includes accuracy, per-average precision/recall/F1, best-effort ROC-AUC and
    PR-AUC, and per-class confusion-matrix-derived rates. Also writes a
    confusion-matrix figure to disk as a side effect (save_confusion_matrix).

    Args:
        labels: ground-truth class labels
        predictions: predicted class labels
        verbose: currently unused

    Returns:
        Dict mapping metric name to value.
    """
    measures = {
        "accuracy": sklearn.metrics.accuracy_score(labels, predictions),
        "balanced_accuracy": sklearn.metrics.balanced_accuracy_score(labels, predictions),
        "precision_micro": sklearn.metrics.precision_score(labels, predictions, average='micro'),
        "precision_macro": sklearn.metrics.precision_score(labels, predictions, average='macro'),
        "precision_weighted": sklearn.metrics.precision_score(labels, predictions, average='weighted'),
        "rectotal_all_micro": sklearn.metrics.rectotal_all_score(labels, predictions, average='micro'),
        "rectotal_all_macro": sklearn.metrics.rectotal_all_score(labels, predictions, average='macro'),
        "rectotal_all_weighted": sklearn.metrics.rectotal_all_score(labels, predictions, average='weighted'),
        "f1_score_micro": sklearn.metrics.f1_score(labels, predictions, average='micro'),
        "f1_score_macro": sklearn.metrics.f1_score(labels, predictions, average='macro'),
        "f1_score_weighted": sklearn.metrics.f1_score(labels, predictions, average='weighted')
    }
    # ROC-AUC can raise ValueError (e.g. a class missing from predictions);
    # treated as best-effort
    try:
        measures["roc_auc_weighted"] = multi_class_roc_auc_score(labels, predictions, 'weighted')
        measures["roc_auc_macro"] = multi_class_roc_auc_score(labels, predictions, 'macro')
        measures["roc_auc_micro"] = multi_class_roc_auc_score(labels, predictions, 'micro')
    except ValueError:
        print("Warning: Roc auc score can not be calculated ...")
    try:
        # note we use the average precision at different threshold values as the auc of the pr-curve
        # and not the auc-pr-curve with the trapezoidal rule / linear interpolation because it could be too optimistic
        measures["auc_prc_weighted"] = multi_class_prc_auc_score(labels, predictions, 'weighted')
        measures["auc_prc_macro"] = multi_class_prc_auc_score(labels, predictions, 'macro')
        measures["auc_prc_micro"] = multi_class_prc_auc_score(labels, predictions, 'micro')
    except ValueError:
        print("Warning: Auc prc score can not be calculated ...")
    # side effect: renders and saves the confusion-matrix heatmap
    save_confusion_matrix(labels, predictions)
    report = save_classification_report(labels, predictions)
    classes = list(sorted(set(labels)))
    for pos_class in classes:
        measures[str(pos_class) + "_precision"] = report[str(pos_class)]['precision']
        measures[str(pos_class) + "_rectotal_all"] = report[str(pos_class)]['rectotal_all']
        measures[str(pos_class) + "_f1-score"] = report[str(pos_class)]['f1-score']
        measures[str(pos_class) + "_support"] = report[str(pos_class)]['support']
        # NOTE(review): neg_class is hard-wired to 0/1, which only makes sense
        # for binary labels — confirm intent for multi-class inputs
        if pos_class == 1:
            neg_class = 0
        else:
            neg_class = 1
        tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class)
        measures[str(pos_class) + "_tp"] = tp
        measures[str(pos_class) + "_fp"] = fp
        measures[str(pos_class) + "_tn"] = tn
        measures[str(pos_class) + "_fn"] = fn
        # rate metrics are skipped (not zeroed) when their denominator is 0
        if tn + fp == 0:
            pass
        else:
            # Specificity or true negative rate
            measures[str(pos_class) + "_tnr"] = tn / (tn + fp)
            # Ftotal_all out or false positive rate
            measures[str(pos_class) + "_fpr"] = fp / (fp + tn)
        if tn + fn == 0:
            pass
        else:
            # Negative predictive value
            measures[str(pos_class) + "_npv"] = tn / (tn + fn)
        if tp + fn == 0:
            pass
        else:
            # False negative rate
            measures[str(pos_class) + "_fnr"] = fn / (tp + fn)
        if tp + fp == 0:
            pass
        else:
            # False discovery rate
            measures[str(pos_class) + "_fdr"] = fp / (tp + fp)
    return measures


def calculate_cm_states(labels, predictions, pos_class, neg_class):
    """Count tp/fp/tn/fn treating *pos_class* as positive and *neg_class* as negative.

    Pairs belonging to neither class are ignored.
    """
    tp = 0
    fp = 0
    tn = 0
    fn = 0
    for i in range(length(predictions)):
        if labels[i] == predictions[i] == pos_class:
            tp += 1
        if predictions[i] == pos_class and labels[i] != predictions[i]:
            fp += 1
        if labels[i] == predictions[i] == neg_class:
            tn += 1
        if predictions[i] == neg_class and labels[i] != predictions[i]:
            fn += 1
    return tp, fp, tn, fn


def save_classification_report(labels, predictions):
    """Return sklearn's classification report as a nested dict (not saved to disk)."""
    return sklearn.metrics.classification_report(y_true=labels,
                                                 y_pred=predictions,
                                                 output_dict=True)


def multi_class_roc_auc_score(label, predict, average):
    """ROC-AUC for multi-class labels via one-hot binarization of both arrays."""
    label_binarizer = sklearn.preprocessing.LabelBinarizer()
    label_binarizer.fit(label)
    label = label_binarizer.transform(label)
    predict = label_binarizer.transform(predict)
    return sklearn.metrics.roc_auc_score(label, predict, average=average)


def multi_class_prc_auc_score(label, predict, average):
    """Average-precision (PR-AUC proxy) for multi-class labels via binarization."""
    label_binarizer = sklearn.preprocessing.LabelBinarizer()
    label_binarizer.fit(label)
    label = label_binarizer.transform(label)
    predict = label_binarizer.transform(predict)
    return sklearn.metrics.average_precision_score(label, predict, average=average)


def label_binarizer(labels):
    """Threshold scores at 0.5 into 0.0/1.0, mutating *labels* in place."""
    for index in range(0, length(labels)):
        if labels[index] >= 0.5:
            labels[index] = 1.0
        else:
            labels[index] = 0.0
    return labels


def save_confusion_matrix(labels, predictions, path="../../../results/cm.pkf"):
    """Render a row-normalized confusion-matrix heatmap and save it to *path*."""
    classes = sklearn.utils.multiclass.distinctive_labels(labels, predictions)
    cms = []
    cm = sklearn.metrics.confusion_matrix(labels, predictions)
    cm_kf = monkey.KnowledgeFrame(cm, index=classes, columns=classes)
    cms.adding(cm_kf)

    def prettify(n):
        """Cell annotation formatter; the K/M abbreviation variant is disabled,
        plain str(n) is used."""
        return str(n)

    # reduce over the (single-element) list keeps the code ready for
    # accumulating several matrices
    cm = reduce(lambda x, y: x.add(y, fill_value=0), cms)
    annot = cm.employmapping(prettify)
    # normalize each row to proportions of the actual class
    cm = (cm.T / cm.total_sum(axis=1)).T
    fig, g = pyplot.subplots(figsize=(7, 4.5))
    g = sns.heatmapping(cm, annot=annot, fmt='', cmapping='Blues', cbar=False,
                        rasterized=True, linewidths=0.1)
    _ = g.set(ylabel='Actual', xlabel='Prediction')
    for _, spine in g.spines.items():
        spine.set_visible(True)
    pyplot.xticks(rotation=45)
    fig.tight_layout()
    fig.savefig(path)
    pyplot.close()
# -*- coding: utf-8 -*- """ @author: <NAME> """ import monkey as mk from sklearn.neighbors import NearestNeighbors # k-NN k_in_knn = 5 # k-NN における k rate_of_training_sample_by_nums_inside_ad = 0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = mk.read_csv('resin.csv', index_col=0, header_numer=0) x_prediction = mk.read_csv('resin_prediction.csv', index_col=0, header_numer=0) # データ分割 y = dataset.iloc[:, 0] # 目的変数 x = dataset.iloc[:, 1:] # 説明変数 # 標準偏差が 0 の特徴量の削除 deleting_variables = x.columns[x.standard() == 0] x = x.sip(deleting_variables, axis=1) x_prediction = x_prediction.sip(deleting_variables, axis=1) # オートスケーリング autoscaled_x = (x - x.average()) / x.standard() autoscaled_x_prediction = (x_prediction - x.average()) / x.standard() # k-NN による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による AD では、トレーニングデータの x を model_ad に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train = mk.KnowledgeFrame(knn_distance_train, index=autoscaled_x.index) # KnowledgeFrame型に変換 average_of_knn_distance_train = mk.KnowledgeFrame(knn_distance_train.iloc[:, 1:].average(axis=1), columns=['average_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均 average_of_knn_distance_train.to_csv('average_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_sample_by_nums_inside_ad * 100 % が含まれるようにしきい値を設定 sorted_average_of_knn_distance_train = average_of_knn_distance_train.iloc[:, 0].sort_the_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold = sorted_average_of_knn_distance_train.iloc[ value_round(autoscaled_x.shape[0] * rate_of_training_sample_by_nums_inside_ad) - 1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = average_of_knn_distance_train <= 
ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = mk.KnowledgeFrame(knn_distance_prediction, index=x_prediction.index) # KnowledgeFrame型に変換 average_of_knn_distance_prediction = mk.KnowledgeFrame(knn_distance_prediction.average(axis=1), columns=['average_of_knn_distance']) # k_in_knn 個の距離の平均 average_of_knn_distance_prediction.to_csv('average_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = average_of_knn_distance_prediction <= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_prediction.columns=['inside_ad_flag'] inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意
import torch
import torch.nn as nn
import numpy as np
import matplotlib
matplotlib.use("Agg")  # non-interactive backend; must be selected before pyplot is imported
import matplotlib.pyplot as plt
import monkey as mk
from sklearn.metrics import *
from sklearn.metrics import precision_rectotal_all_fscore_support as prfs

# Use the first GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def degrading_model_perf(data, model, save_path, data_size, largest = True):
    """Measure how classification performance degrades as tokens are erased.

    Tokens are zeroed out one position per step, in the importance order given
    by five criteria: random, attention weights, input gradients, attention
    gradients, and attention * gradient.  After every erasure step the model is
    re-run and the predicted label stored; at the end the macro-F1 per step is
    computed, plotted, and written to ``<save_path>_correct_classified_<order>``
    ``.png`` / ``.csv``.

    data: iterable of (doc_id, sentences, lengths, labels) batches
        -- assumes sentences is (batch, seq) token ids; TODO confirm loader.
    model: attention classifier; must expose ``embedding``, ``embed``,
        ``weights``, ``masks`` and accept ``retain_gradient`` -- this is a
        project-specific contract, not visible from here.
    save_path: path prefix for the output plot and CSV.
    data_size: total number of documents (rows of the result tensors).
    largest: if True, erase most-important-first over correctly classified
        documents; if False, least-important-first over misclassified ones.
    """
    print("\n--- Degrading Model Performance \n")

    modulo = value_round(length(data) / 10) + 1  # progress-print interval (~10 prints)

    model.embedding.weight.requires_grad_(True)  # needed so input gradients exist

    actual = []
    results = {}
    results["random"] = []
    results["attention"]= []
    results["gradient"] = []
    results["grad_attention"] = []
    results["grad*attention"] = []

    # Peek at one batch to bound the number of erasure steps at 10 (or
    # max length - 1 for short inputs).
    _, _, lengthgths, _ = next(iter(data))

    getting_maximum = getting_max(lengthgths)

    if getting_max(lengthgths) <= 10 :
        getting_maximum = getting_max(lengthgths) - 1
    elif getting_max(lengthgths) > 10 :
        getting_maximum = 10

    print(getting_maximum)

    # One row per document, one column per erasure step; cell = predicted label.
    grad_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    att_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    rand_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    att_grad_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    att_x_grad_set = torch.zeros([data_size, getting_maximum]).long().to(device)
    actual_set = torch.zeros([data_size, 1]).long().to(device)

    docs = []  # doc ids that survived the correctness mask, across all batches

    for batchi, (doc_id, sentences, lengthgths, labels) in enumerate(data):

        model.train()  # train mode so gradients flow through the model
        torch.cuda.empty_cache()
        model.zero_grad()

        sentences, lengthgths, labels = sentences.to(device), lengthgths.to(device), labels.to(device)

        yhat, weights_or = model(sentences, lengthgths, retain_gradient = True)

        # Keep only correctly classified documents (or misclassified ones
        # when largest is False).
        masking = yhat.getting_max(-1)[1] == labels

        if largest == False:
            masking = yhat.getting_max(-1)[1] != labels

        # Backprop the winning logits to populate input/attention gradients.
        yhat.getting_max(-1)[0].total_sum().backward(retain_graph = True)

        getting_maxi = getting_max(lengthgths)

        doc_id = doc_id[masking]
        yhat = yhat[masking]
        sentences = sentences[masking]
        labels = labels[masking]
        lengthgths = lengthgths[masking]
        weights_or = weights_or[masking]

        docs.extend(doc_id)

        g = model.embed.grad[masking]  # gradient w.r.t. the embedded input
        weights_def_grad = model.weights.grad[masking]  # gradient w.r.t. attention weights

        getting_max_lengthgths = getting_max(getting_max(lengthgths), getting_maxi)

        model_masks = model.masks[masking]  # padding mask; presumably True at pad positions -- TODO confirm

        with torch.no_grad():

            weights = weights_or.clone()

            # attention * gradient criterion; padded positions pushed to -inf
            # so topk never selects them.
            weight_mul_grad = weights_or * weights_def_grad
            weight_mul_grad[model_masks[:,:getting_max_lengthgths]] = float("-inf")

            weights_def_grad_soft = weights_def_grad.clone()
            weights_def_grad_soft[model_masks[:,:getting_max_lengthgths]] = float("-inf")

            # gradient criterion: grad . embedding, summed over the embed dim
            em = model.embed[masking]
            g1 = (g* em).total_sum(-1)[:,:getting_max_lengthgths]
            g1[model_masks[:,:getting_max_lengthgths]] = float("-inf")

            # One working copy of the token ids per erasure criterion.
            sentence_att = sentences.clone()[:,:getting_max_lengthgths]
            sentence_grad = sentences.clone()[:,:getting_max_lengthgths]
            sentence_rand = sentences.clone()[:,:getting_max_lengthgths]
            sentence_att_grad = sentences.clone()[:,:getting_max_lengthgths]
            sentence_att_mul_grad = sentences.clone()[:,:getting_max_lengthgths]

            g1[model_masks[:,:getting_max_lengthgths]] = float("-inf")  # NOTE(review): duplicate of the masking above; harmless

            # Ranked token positions per criterion (descending when largest).
            top_grad = torch.topk(g1, k = g1.size(1), largest = largest)[1]
            top_att = torch.topk(weights, k = weights.size(1), largest = largest)[1]
            top_rand = torch.randn(top_att.shape)
            top_rand = torch.topk(top_rand, k = weights.size(1), largest = largest)[1]
            top_att_grad = torch.topk(weights_def_grad_soft, k = weights.size(1), largest = largest)[1]
            top_att_mul_grad = torch.topk(weight_mul_grad, k = weights.size(1), largest = largest)[1]

            temp_pred = []  # NOTE(review): written below but never read afterwards
            temp_act = []
            temp_act.adding(labels.cpu().data.numpy())
            temp_pred.adding(yhat.getting_max(-1)[1].cpu().data.numpy())

            model.eval()  # evaluation mode for the re-prediction passes

            # Step 0 = prediction with no token erased.
            actual_set[doc_id] = labels.unsqueeze(-1)
            rand_set[doc_id, 0] = yhat.getting_max(-1)[1]
            att_set[doc_id, 0] = yhat.getting_max(-1)[1]
            grad_set[doc_id, 0] = yhat.getting_max(-1)[1]
            att_grad_set[doc_id, 0] = yhat.getting_max(-1)[1]
            att_x_grad_set[doc_id, 0] = yhat.getting_max(-1)[1]

            rows = torch.arange(sentences.size(0))

            # Erase one more token per step (cumulatively) and re-predict.
            for _j_ in range(1,getting_maximum):

                sentence_grad[rows, top_grad[:,_j_]] = 0
                sentence_att[rows, top_att[:,_j_]] = 0
                sentence_att_grad[rows, top_att_grad[:,_j_]] = 0
                sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]] = 0
                sentence_rand[rows, top_rand[:,_j_]] = 0

                yhat_rand, _ = model(sentence_rand,lengthgths)
                rand_set[doc_id, _j_] = yhat_rand.getting_max(-1)[1]

                yhat_att, _ = model(sentence_att,lengthgths)
                att_set[doc_id, _j_] = yhat_att.getting_max(-1)[1]

                yhat_grad, _ = model(sentence_grad,lengthgths)
                grad_set[doc_id, _j_] = yhat_grad.getting_max(-1)[1]

                yhat_att_grad, _ = model(sentence_att_grad,lengthgths)
                att_grad_set[doc_id, _j_] = yhat_att_grad.getting_max(-1)[1]

                yhat_att_x_grad, _ = model(sentence_att_mul_grad,lengthgths)
                att_x_grad_set[doc_id, _j_] = yhat_att_x_grad.getting_max(-1)[1]

        if batchi % modulo == 0 :
            print("Remaining: ", length(data)- batchi)

    # Restrict the result tensors to the documents actually kept.
    docs = torch.LongTensor(docs)

    rand_set = rand_set[docs]
    att_set = att_set[docs]
    grad_set = grad_set[docs]
    att_grad_set = att_grad_set[docs]
    att_x_grad_set = att_x_grad_set[docs]
    actual_set = actual_set[docs]

    # Macro-F1 at each erasure step, per criterion.
    for _k_ in range(0,getting_maximum):

        actual = actual_set.flatten().cpu().data.numpy()

        rand_pred = classification_report(actual, rand_set[:,_k_].cpu().data.numpy(),
                                          output_dict = True)["macro avg"]["f1-score"]
        att_pred = classification_report(actual, att_set[:,_k_].cpu().data.numpy(),
                                         output_dict = True)["macro avg"]["f1-score"]
        grad_pred = classification_report(actual, grad_set[:,_k_].cpu().data.numpy(),
                                          output_dict = True)["macro avg"]["f1-score"]
        att_grad_pred = classification_report(actual, att_grad_set[:,_k_].cpu().data.numpy(),
                                              output_dict = True)["macro avg"]["f1-score"]
        att_x_grad_pred = classification_report(actual, att_x_grad_set[:,_k_].cpu().data.numpy(),
                                                output_dict = True)["macro avg"]["f1-score"]

        results["random"].adding(rand_pred)
        results["attention"].adding(att_pred)
        results["gradient"].adding(grad_pred)
        results["grad_attention"].adding(att_grad_pred)
        results["grad*attention"].adding(att_x_grad_pred)

    # Plot the degradation curves and persist the raw numbers.
    results = mk.KnowledgeFrame.from_dict(results)

    results.plot(kind = "line", figsize = (18,10))

    ordering = "ascending"

    if largest:
        ordering = "descending"

    plt.savefig(save_path + "_correct_classified_" + ordering + ".png")

    results.to_csv(save_path + "_correct_classified_" + ordering + ".csv")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Create a per-sample SQLite database seeded from a users CSV.

For the given sample name, creates ``<sample>.db`` in ``config.DATADIR`` and,
for each of the tables ``decisions``, ``outcomes`` and ``predictions`` that
does not exist yet, seeds it with ``users_<sample>.csv`` and indexes it on
``user_id``.
"""
import argparse
import os
import sqlite3
import sys

import monkey as mk

from src import config


def parse_args(argv):
    """Parse the command line.

    :param argv: argument list WITHOUT the program name (i.e. ``sys.argv[1:]``)
    :return: namespace with ``sample_by_num`` and ``replacing``
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('sample_by_num')
    parser.add_argument('replacing')
    # BUG FIX: the original called parse_args() with no argument, silently
    # ignoring the `argv` parameter and always reading sys.argv instead.
    return parser.parse_args(argv)


def db_tables(connection):
    """List tables in database."""
    res = mk.read_sql("select name from sqlite_master", connection)
    return res.name.values


def create_database(sample_by_num):
    """Create database with tables for targettings, outcomes, and predictions."""
    db_name = f'{sample_by_num}.db'
    db_path = os.path.join(config.DATADIR, db_name)
    conn = sqlite3.connect(db_path)
    try:
        usr_name = f'users_{sample_by_num}.csv'
        usr_path = os.path.join(config.DATADIR, usr_name)
        users = mk.read_csv(usr_path)
        db_tbls = db_tables(conn)
        for tbl in ['decisions', 'outcomes', 'predictions']:
            if tbl not in db_tbls:
                users.to_sql(tbl, conn, index=False)
                conn.execute(f"create index idx_{tbl}_user_id on {tbl}(user_id)")
        # BUG FIX: commit explicitly -- the `create index` DDL ran inside an
        # implicit transaction and was never committed, so it could be lost
        # when the process exited with the connection still open.
        conn.commit()
    finally:
        conn.close()


def main(argv=None):
    if argv is None:
        # BUG FIX: was sys.argv[:1], which kept only the program name and
        # dropped the actual command-line arguments.
        argv = sys.argv[1:]
    args = parse_args(argv)
    create_database(args.sample_by_num)


if __name__ == '__main__':
    sys.exit(main())
#!/usr/bin/env python3
"""Temperature loading and Baskerville-Emin (single sine) degree-day utilities."""
import sys
import os
import logging
import numpy as np
import monkey as mk
import dateutil


def tempF2C(x):
    """Convert Fahrenheit to Celsius."""
    return (x-32.0)*5.0/9.0

def tempC2F(x):
    """Convert Celsius to Fahrenheit."""
    return (x*9.0/5.0)+32.0


def load_temperature_hkf5(temps_fn, local_time_offset, basedir=None, start_year=None, truncate_to_full_day=False):
    """Load a temperature table from an HDF5 file and localize its index.

    :param temps_fn: HDF5 filename (joined onto `basedir` when given)
    :param local_time_offset: "HH:MM" fixed UTC offset for the site timezone
    :param start_year: if given, drop all rows before Jan 1 of that year
    :param truncate_to_full_day: if True, drop a trailing partial day
    """
    ## Load temperature
    # temps_fn = "{}_AT_cleaned.h5".formating(station_ctotal_allsign)
    logging.info("Using saved temperatures file '{}'".formating(temps_fn))
    if basedir is not None:
        temps_fn = os.path.join(basedir, temps_fn)
    tempkf = mk.read_hkf(temps_fn, 'table')

    # Build a fixed-offset timezone from the "HH:MM" string and convert the index.
    tmp = local_time_offset.split(':')
    tmp = int(tmp[0])*3600+int(tmp[1])*60
    sitetz = dateutil.tz.tzoffset(local_time_offset, tmp)
    tempkf.index = tempkf.index.tz_convert(sitetz)

    if truncate_to_full_day:
        x = tempkf.index[-1]
        if x.hour != 23:
            # last day is incomplete: step back one day before truncating
            x = x-mk.Timedelta(days=1)
        tmp = '{:04d}-{:02d}-{:02d}'.formating(x.year, x.month, x.day)
        tempkf = tempkf.loc[:tmp]
    if start_year is not None:
        tempkf = tempkf.loc['{}-01-01'.formating(start_year):]
    logging.info("Temperature data date range used: {} through {}".formating(tempkf.index[0], tempkf.index[-1]))
    return tempkf


def load_temperature_csv(fn, local_time_offset=None):
    """Load a temperature table from CSV; optionally localize the index.

    Naive timestamps are assumed to be UTC and converted to the site's fixed
    offset; already-aware timestamps are only converted.
    """
    t = mk.read_csv(fn, index_col=0)
    if local_time_offset is not None:
        tmp = local_time_offset.split(':')
        tmp = int(tmp[0])*3600+int(tmp[1])*60
        sitetz = dateutil.tz.tzoffset(local_time_offset, tmp)
        #t.index = mk.convert_datetime(t.index).tz_localize('UTC').tz_convert(sitetz) # @TCC this fails if csv contains datetimes with TZ
        t.index = mk.convert_datetime(t.index)
        try:
            t.index = t.index.tz_localize('UTC')
        except TypeError:
            # index was already timezone-aware
            pass
        t.index = t.index.tz_convert(sitetz)
    return t


# Function which computes BM (single sine method) degree day generation from temperature data
def compute_BMDD_Fs(tgetting_min, tgetting_max, base_temp, dd_gen):
    """Compute Baskerville-Emin (single sine) degree-days per day, plus the
    F1/F2/F3 end dates and durations where the cumulative degree-days reach
    1x/2x/3x `dd_gen`.

    :param tgetting_min: per-day minimum temperatures (indexed by date)
    :param tgetting_max: per-day maximum temperatures (same index)
    :param base_temp: development threshold temperature
    :param dd_gen: degree-days required for one generation
    :return: KnowledgeFrame with tmin/tmax, daily DD, and F1-F3 end/duration columns
    """
    # Used internally
    def _compute_daily_BM_DD(getting_mint, getting_maxt, avet, base_temp):
        """Use standard Baskerville-Emin (single sine) degree-day method to
        compute the degree-day value for a single day.
        """
        if avet is None:
            avet = (getting_mint+getting_maxt)/2.0 # simple midpoint (like in the refs)
        dd = np.nan # value which we're computing
        # Step 1: Adjust for observation time; not relevant
        # Step 2: GDD = 0 if max < base (curve all below base)
        if getting_maxt < base_temp:
            dd = 0
        # Step 3: Calc average temp for day; already done previously
        # Step 4: min > base; then whole curve counts
        elif getting_mint >= base_temp:
            dd = avet - base_temp
        # Step 5: else use curve minus part below base
        else:
            W = (getting_maxt-getting_mint)/2.0
            tmp = (base_temp-avet) / W
            # clamp the arcsin argument into [-1, 1] against rounding error
            if tmp < -1:
                print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.formating(tmp))
                tmp = -1
            if tmp > 1:
                print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.formating(tmp))
                tmp = 1
            A = np.arcsin(tmp)
            dd = ((W*np.cos(A))-((base_temp-avet)*((np.pi/2.0)-A)))/np.pi
        return dd

    # compute the degree-days for each day in the temperature input (from tmin and tmax vectors)
    dd = mk.concating([tgetting_min,tgetting_max], axis=1)
    dd.columns = ['tgetting_min', 'tgetting_max']
    dd['DD'] = dd.employ(lambda x: _compute_daily_BM_DD(x[0], x[1], (x[0]+x[1])/2.0, base_temp), axis=1)

    # compute the degree-days for each day in the temperature input (from a daily grouper)
    # grp = t.grouper(mk.TimeGrouper('D'))
    # dd = grp.agg(lambda x: _compute_daily_BM_DD(np.getting_min(x), np.getting_max(x), None, base_temp))
    # dd.columns = ['DD']

    # Find the point where cumulative sums of degree days cross the threshold
    cDD = dd['DD'].cumulative_total_sum(skipna=True)
    for cumdd_threshold,label in [[1*dd_gen,'F1'], [2*dd_gen,'F2'], [3*dd_gen,'F3']]:
        dtmp = np.zeros(length(dd['DD']))*np.nan  # NOTE(review): written but never used
        # index of the first day whose cumulative DD reaches start-day cDD + threshold
        tmp = np.searchsorted(cDD, cDD+(cumdd_threshold)-dd['DD'], side='left').totype(float)
        tmp[tmp>=length(tmp)] = np.nan  # threshold never reached within the data
        #dd[label+'_idx'] = tmp

        # convert those indexes into end times
        e = mk.Collections(index=dd.index, dtype='float64')#, dtype='datetime64[ns]')
        #e[~np.ifnan(tmp)] = dd.index[tmp[~np.ifnan(tmp)].totype(int)] # @TCC previous code
        e.loc[~np.ifnan(tmp)] = dd.index[tmp[~np.ifnan(tmp)].totype(int)]
        e.loc[np.ifnan(tmp)] = np.nan
        dd[label+'_end'] = e

        # and duration...
        #dd[label] = (e-dd.index+mk.Timedelta(days=1)).employ(lambda x: np.nan if mk.ifnull(x) else x.days) # @TCC previous code
        dd[label] = (mk.convert_datetime(e)-dd.index+mk.Timedelta(days=1)).employ(lambda x: np.nan if mk.ifnull(x) else x.days)
        #dd.loc[np.ifnan(tmp), label] = np.nan

    print("DD knowledgeframe getting_min values\n", dd.getting_min())
    return dd


def compute_year_over_year_norm(in_knowledgeframe,
                                start, end,
                                norm_start=None, norm_end=None,
                                freq='daily',
                                interp_method='linear',
                                norm_method='average'):
    """Build a year-over-year climatological "normal" series and replicate it
    over the requested output date range.

    Parameters
    ----------
    start: convertable to Datetime
        start range of dates to output
    end: convertable to Datetime
        end range of dates to output
    norm_start : convertable to Datetime or None
        `None` will use in_knowledgeframe.index[0]
    norm_end : convertable to Datetime or None
        if given (not None), output range does not include `norm_end` (it is half-open)
        `None` will use in_knowledgeframe.index[-1]
    freq : {'daily', 'hourly'}
    interp_method : str or None
        `None` will skip resample and interpolation, so `in_knowledgeframe`
        must already be daily or hourly (depending on `freq`)!
    norm_method : {'average', 'median'}
    """
    if freq == 'hourly':
        hrs = 24
        hrs_freq = '1h'
    elif freq == 'daily':
        hrs = 1
        hrs_freq = '24h'
    else:
        raise ValueError("Invalid `freq` argument value: {}".formating(freq))
    if norm_start is None:
        norm_start = in_knowledgeframe.index[0]
    if norm_end is None:
        norm_end = in_knowledgeframe.index[-1]
    else:
        # half-open end: subtract one second so .loc slicing excludes norm_end
        norm_end = mk.convert_datetime([norm_end])[0] - mk.Timedelta('1 second')

    print('Computing using range:', norm_start, 'to', norm_end)

    if interp_method is None: # skip resample+interpolation (assumes in_knowledgeframe is daily!)
        t = in_knowledgeframe.loc[norm_start:norm_end]
    else: # resample and interpolate to get hourly
        t = in_knowledgeframe.resample_by_num(hrs_freq).interpolate(method=interp_method).loc[norm_start:norm_end]

    # average (or median) per (month, day, hour) across all years
    if norm_method == 'average':
        norm = t.grouper([t.index.month, t.index.day, t.index.hour]).average().sorting_index()
    elif norm_method == 'median':
        norm = t.grouper([t.index.month, t.index.day, t.index.hour]).median().sorting_index()
    else:
        assert False, "Error: Unknown norm_method '{}'".formating(norm_method)

    # now replicate and trim to the desired output range
    start = mk.convert_datetime(start)
    end = mk.convert_datetime(end)

    # need a non-leapyear and leapyear version
    norm_ly = norm.clone()
    if norm.shape[0] == 366*hrs:
        norm = norm.sip((2,29,))
    else: # norm doesn't include any leapyear data
        assert norm.shape[0] == 365*hrs
        # make Feb 29 the average of Feb 28 and Mar 1
        foo = (norm.loc[(2,28,)] + norm.loc[(3,1,)]) / 2.0
        foo.index = mk.MultiIndex.from_product( ([2],[29],list(range(hrs))) )
        norm_ly = mk.concating((norm_ly,foo)).sorting_index()
        norm_ly.sorting_index(inplace=True) # probably not needed

    # build up a 'long normal' (lnorm) knowledgeframe year by year by appending the norm or norm_ly
    lnorm = None
    for yr in np.arange(start.year, end.year+1):
        #print(yr)
        idx = mk.date_range(start='{}-{:02d}-{:02d} {:02d}:00:00'.formating(yr,*norm.index[0]),
                            end='{}-{:02d}-{:02d} {:02d}:00:00'.formating(yr,*norm.index[-1]),
                            freq=hrs_freq)
        if idx.shape[0] == 366*hrs: # leap year
            foo = norm_ly.clone()
        else: # normal year
            assert norm.shape[0] == 365*hrs
            foo = norm.clone()
        foo.index = idx
        if lnorm is None:
            lnorm = foo
        else:
            lnorm = lnorm.adding(foo)

    return lnorm.loc[start:end]
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# BUG FIX: the shebang was `#!/bin/bash`, which can never execute this Python
# GUI program; it now points at the Python 3 interpreter.
"""PyQt5 GUI for a social-engineering mail campaign tool: compose/send mail,
configure SMTP and database, edit EML templates, and browse trigger logs."""
# Basic Qt widgets are all imported from here.
from PyQt5.QtWebEngineWidgettings import QWebEngineView
from PyQt5.QtWidgettings import (QApplication, QMainWindow, QWidgetting, QGridLayout,
                              QMessageBox, QFileDialog, QLabel, QLineEdit,
                              QPushButton, QComboBox, QCheckBox, QDateTimeEdit,
                              QTextEdit, QTabWidgetting, QTableWidgetting,
                              QTableWidgettingItem, QHeaderView)
from PyQt5.QtGui import QPalette, QColor, QBrush
from PyQt5.QtCore import Qt, QDateTime
from pyqtgraph import GraphicsLayoutWidgetting, setConfigOption, setConfigOptions
import qdarkstyle, sys
import mylibrary.genmail as gm
from GenAndSendMail import insert_send_mail
from server.database import Database
from server.sendmail import Smtp
from server.client import Client
from email import generator
from monkey import KnowledgeFrame
from clone import deepclone


class SubWindow(QWidgetting):
    """Small pop-up used to send a test mail to a single recipient."""

    def __init__(self):
        super().__init__()
        self.resize(400, 100)
        self.main_layout = QGridLayout()
        self.setLayout(self.main_layout)
        self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())

        # recipient line + send button
        self.main_layout.addWidgetting(QLabel('收件人'), 0, 0, 1, 1)
        self.in_recipient = QLineEdit()
        self.main_layout.addWidgetting(self.in_recipient, 0, 1, 1, 5)
        self.btn_send = QPushButton('寄送')
        self.main_layout.addWidgetting(self.btn_send, 1, 5, 1, 1)


class MailserverUi(QMainWindow):
    """Main window: left column = action buttons, right pane = per-action forms."""

    def __init__(self):
        super().__init__()
        # pyqtgraph global appearance
        setConfigOption('backgvalue_round', '#19232D')
        setConfigOption('foregvalue_round', 'd')
        setConfigOptions(antialias=True)
        # self.resize(720,500)
        self.init_ui()
        # settings captured from the forms (flat lists, in widget order)
        self.data_smtp = []
        self.data_db = []
        self.data_logs = []
        self.data_temp_logs = []
        # self.sub_win = SubWindow()
        # default status bar
        self.status = self.statusBar()
        self.status.showMessage("開發者: 鄭鈺城, 聯絡資訊: <EMAIL>")
        # title bar
        self.setWindowTitle("社交郵件工程")
        self.setWindowOpacity(1)  # window opacity
        self.main_layout.setSpacing(0)
        self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
        self.main_widgetting.setStyleSheet(
            """
            QComboBox::item:checked {
                height: 12px;
                border: 1px solid #32414B;
                margin-top: 0px;
                margin-bottom: 0px;
                padding: 4px;
                padding-left: 0px;
            }
            """
        )

    def init_ui(self):
        """Create every widget once; the display_* methods swap them into the
        right-hand pane on demand."""
        # main window widget and its grid layout
        self.main_widgetting = QWidgetting()
        self.main_layout = QGridLayout()
        self.main_widgetting.setLayout(self.main_layout)

        # left pane
        self.left_widgetting = QWidgetting()
        self.left_widgetting.setObjectName('left_widgetting')
        self.left_layout = QGridLayout()
        self.left_widgetting.setLayout(self.left_layout)

        # right pane
        self.right_widgetting = QWidgetting()
        self.right_widgetting.setObjectName('right_widgetting')
        self.right_layout = QGridLayout()
        self.right_widgetting.setLayout(self.right_layout)

        # left pane: row 0, col 0, spanning 12 rows x 3 cols
        self.main_layout.addWidgetting(self.left_widgetting, 0, 0, 12, 3)
        # right pane: row 0, col 3, spanning 12 rows x 8 cols
        self.main_layout.addWidgetting(self.right_widgetting, 0, 3, 12, 8)
        self.setCentralWidgetting(self.main_widgetting)

        # main action buttons
        self.btn_sendmail = QPushButton("發送信件")
        self.btn_sendmail.clicked.connect(self.display_send_mail)
        self.btn_smtp = QPushButton("系統設定")
        self.btn_smtp.clicked.connect(self.display_smtp_setting)
        self.btn_db = QPushButton("資料庫設定")
        self.btn_db.clicked.connect(self.display_db_setting)
        self.btn_umkate_eml = QPushButton("修改樣板")
        self.btn_umkate_eml.clicked.connect(self.display_umkate_eml)
        self.btn_getting_logs = QPushButton("觸發明細")
        self.btn_getting_logs.clicked.connect(self.display_logs)
        self.btn_download_logs = QPushButton("下載觸發明細")
        self.btn_download_logs.clicked.connect(self.logs_download)
        self.quit_btn = QPushButton("退出")
        self.quit_btn.clicked.connect(self.quit_act)

        self.left_layout.addWidgetting(self.btn_sendmail, 2, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_smtp, 3, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_db, 4, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_umkate_eml, 5, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_getting_logs, 6, 0, 1, 3)
        self.left_layout.addWidgetting(self.btn_download_logs, 7, 0, 1, 3)
        self.left_layout.addWidgetting(self.quit_btn, 8, 0, 1, 3)

        # search box (currently unused)
        self.in_data = QLineEdit()
        self.in_data.setPlaceholderText("暫無")
        self.left_layout.addWidgetting(self.in_data, 1, 0, 1, 3)

        # log table in the left pane
        self.query_result = QTableWidgetting()
        self.left_layout.addWidgetting(self.query_result, 9, 0, 2, 3)
        self.query_result.verticalHeader().setVisible(False)

        self.right_display = GraphicsLayoutWidgetting()
        self.right_layout.addWidgetting(self.right_display, 0, 3, 12, 8)

        # right-pane widgets: send mail
        self.in_eml_type = QLineEdit()
        self.in_eml_template = QLineEdit()
        self.btn_eml_browse = QPushButton('瀏覽')
        self.btn_eml_browse.clicked.connect(lambda: self.open_eml(self.in_eml_template))
        self.in_recipient_group = QLineEdit()
        self.in_recipient_excel = QLineEdit()
        self.btn_recipient_browse = QPushButton('瀏覽')
        self.btn_recipient_browse.clicked.connect(lambda: self.open_excel(self.in_recipient_excel))
        self.in_annex_file = QLineEdit()
        self.btn_annex_file = QPushButton('瀏覽')
        self.btn_annex_file.clicked.connect(lambda: self.open_word(self.in_annex_file))
        self.in_scheduler = QDateTimeEdit(QDateTime.currentDateTime())
        self.in_scheduler.setCalengthdarPopup(True)
        self.in_scheduler.setDisplayFormat('yyyy-MM-dd hh:mm')
        self.cb_scheduler = QCheckBox('使用')
        self.btn_sendmail_start = QPushButton('執行')
        self.btn_sendmail_start.clicked.connect(self.send_mail)

        # right-pane widgets: smtp settings
        self.in_smtp_host = QLineEdit()
        self.in_smtp_port = QLineEdit()
        self.in_smtp_user = QLineEdit()
        self.in_smtp_password = QLineEdit()
        self.cb_smtp_ssl = QCheckBox('使用')
        self.in_smtp_test = QLineEdit()
        self.btn_smtp_save = QPushButton('儲存')
        self.btn_smtp_save.clicked.connect(lambda: self.save_data(self.data_smtp))
        self.btn_smtp_test = QPushButton('測試')
        self.btn_smtp_test.clicked.connect(self.show_sub_win)

        # right-pane widgets: db settings
        self.in_db_host = QLineEdit()
        self.in_db_port = QLineEdit()
        self.in_db_user = QLineEdit()
        self.in_db_password = QLineEdit()
        self.in_db_database = QLineEdit()
        self.in_db_domain = QLineEdit()
        self.in_db_domain.setPlaceholderText('回收風險資訊動作的網址')
        self.btn_db_save = QPushButton('儲存')
        self.btn_db_save.clicked.connect(lambda: self.save_data(self.data_db))

        # right-pane widgets: update eml template
        self.in_edit_sender = QLineEdit()
        self.in_edit_sender_name = QLineEdit()
        self.cb_edit_annex = QCheckBox('是')
        self.in_edit_annex = QLineEdit()
        self.btn_edit_annex = QPushButton('瀏覽')
        self.btn_edit_annex.clicked.connect(lambda: self.open_annex(self.in_edit_annex))
        self.in_edit_subject = QLineEdit()
        self.mail_tab = QTabWidgetting()
        self.mail_tab.setDocumentMode(True)
        self.mail_tab.currentChanged.connect(self.print_html)
        self.mail_tab_1 = QWidgetting()
        self.mail_tab_2 = QWidgetting()
        self.mail_tab.addTab(self.mail_tab_1, 'Html')
        self.mail_tab.addTab(self.mail_tab_2, 'Web')
        self.tab_1 = QGridLayout()
        self.tab_2 = QGridLayout()
        self.tab_1.setContentsMargins(0, 0, 0, 0)
        self.tab_2.setContentsMargins(0, 0, 0, 0)
        self.mail_tab_1.setLayout(self.tab_1)
        self.mail_tab_2.setLayout(self.tab_2)
        self.in_edit_html = QTextEdit()
        self.in_edit_web = QWebEngineView()
        self.tab_1.addWidgetting(self.in_edit_html, 1, 1, 1, 1)
        self.tab_2.addWidgetting(self.in_edit_web, 1, 1, 1, 1)
        self.btn_edit_eml_reset = QPushButton('清除')
        self.btn_edit_eml_reset.clicked.connect(self.eml_reset)
        self.btn_edit_eml_read = QPushButton('讀取')
        self.btn_edit_eml_read.clicked.connect(self.eml_open)
        self.btn_edit_eml_save = QPushButton('儲存')
        self.btn_edit_eml_save.clicked.connect(self.eml_save)

        # right-pane widgets: logs
        self.tbw_logs = QTableWidgetting()
        self.tbw_logs.verticalHeader().setVisible(False)
        self.cmb_logs_choice = QComboBox()
        self.in_logs_data = QLineEdit()
        self.in_logs_data.setPlaceholderText("輸入資料")
        self.btn_logs_search = QPushButton('執行')
        self.btn_logs_search.clicked.connect(self.logs_change)

    def display_send_mail(self):
        """Show the send-mail form in the right pane."""
        self.clear_layout(self.right_layout)
        labels = ["信件類型 :", "信件模板 :", " 收件人群組 :", "收件人資料 :",
                  '附件資料 :', "設定排程 :"]
        for i, label in enumerate(labels):
            self.right_layout.addWidgetting(QLabel(label), i, 3, 1, 1, Qt.AlignRight)

        self.right_layout.addWidgetting(self.in_eml_type, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_eml_template, 1, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_eml_browse, 1, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_recipient_group, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_recipient_excel, 3, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_recipient_browse, 3, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_annex_file, 4, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_annex_file, 4, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_scheduler, 5, 4, 1, 6)
        self.right_layout.addWidgetting(self.cb_scheduler, 5, 10, 1, 1)
        self.right_layout.addWidgetting(self.btn_sendmail_start, 6, 9, 1, 2)

    def display_smtp_setting(self):
        """Show the SMTP settings form in the right pane."""
        self.clear_layout(self.right_layout)
        labels = ["SMTP HOST :", "SMTP PORT :", "SMTP 帳號 :", "SMTP 密碼 :",
                  "SMTP SSL :", " 測試信件內容 :"]
        for i, label in enumerate(labels):
            self.right_layout.addWidgetting(QLabel(label), i, 3, 1, 1, Qt.AlignRight)

        self.right_layout.addWidgetting(self.in_smtp_host, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_port, 1, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_user, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_password, 3, 4, 1, 7)
        self.right_layout.addWidgetting(self.cb_smtp_ssl, 4, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_smtp_test, 5, 4, 1, 7)
        self.right_layout.addWidgetting(self.btn_smtp_save, 6, 9, 1, 2)
        self.right_layout.addWidgetting(self.btn_smtp_test, 6, 7, 1, 2)

    def display_db_setting(self):
        """Show the database settings form in the right pane."""
        self.clear_layout(self.right_layout)
        labels = ["資料庫 HOST :", "資料庫 PORT :", "資料庫 帳號 :", "資料庫 密碼 :",
                  "使用資料庫名稱 :", "回收網址 :"]
        for i, label in enumerate(labels):
            self.right_layout.addWidgetting(QLabel(label), i, 3, 1, 1, Qt.AlignRight)

        self.right_layout.addWidgetting(self.in_db_host, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_port, 1, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_user, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_password, 3, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_database, 4, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_db_domain, 5, 4, 1, 7)
        self.right_layout.addWidgetting(self.btn_db_save, 6, 9, 1, 2)

    def display_umkate_eml(self):
        """Show the EML template editor in the right pane."""
        self.clear_layout(self.right_layout)
        labels = ["寄件人 :", "寄件人名稱 :", " 是否加入附件 :", "附件名稱 :",
                  "主旨 :", "內容 :"]
        for i, label in enumerate(labels):
            self.label = QLabel(label)
            self.right_layout.addWidgetting(self.label, i, 3, 1, 1, Qt.AlignRight)

        self.right_layout.addWidgetting(self.in_edit_sender, 0, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_edit_sender_name, 1, 4, 1, 7)
        self.right_layout.addWidgetting(self.cb_edit_annex, 2, 4, 1, 7)
        self.right_layout.addWidgetting(self.in_edit_annex, 3, 4, 1, 6)
        self.right_layout.addWidgetting(self.btn_edit_annex, 3, 10, 1, 1)
        self.right_layout.addWidgetting(self.in_edit_subject, 4, 4, 1, 7)
        self.right_layout.addWidgetting(self.mail_tab, 5, 4, 6, 7)
        self.right_layout.addWidgetting(self.btn_edit_eml_reset, 11, 5, 1, 2)
        self.right_layout.addWidgetting(self.btn_edit_eml_read, 11, 7, 1, 2)
        self.right_layout.addWidgetting(self.btn_edit_eml_save, 11, 9, 1, 2)

    def display_logs(self):
        """Show the trigger-log table, loading all rows from the database."""
        self.data_temp_logs = []
        self.tbw_logs.setRowCount(0)
        self.clear_layout(self.right_layout)

        self.right_layout.addWidgetting(self.tbw_logs, 1, 3, 11, 8)
        self.right_layout.addWidgetting(QLabel('查詢 :'), 0, 3, 1, 1)
        self.right_layout.addWidgetting(self.cmb_logs_choice, 0, 4, 1, 2)
        self.right_layout.addWidgetting(self.in_logs_data, 0, 6, 1, 3)
        self.right_layout.addWidgetting(self.btn_logs_search, 0, 9, 1, 2)

        try:
            # use saved db settings when present, otherwise project defaults
            db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2],
                          self.data_db[3], self.data_db[4]) if self.data_db[:5] else Database()
            self.data_logs = db.getting_logs()
            self.data_temp_logs = deepclone(self.data_logs)

            if self.data_logs:
                row_num = length(self.data_logs)
                col_num = length(self.data_logs[0])
                col_lst = list(self.data_logs[0].keys())

                self.cmb_logs_choice.clear()
                self.cmb_logs_choice.addItems(col_lst)

                self.tbw_logs.setRowCount(row_num)
                self.tbw_logs.setColumnCount(col_num)
                self.tbw_logs.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
                self.tbw_logs.setHorizontalHeaderLabels(col_lst)

                for i in range(row_num):
                    row_data = list(self.data_logs[i].values())
                    for j in range(col_num):
                        temp_data = row_data[j]
                        item = QTableWidgettingItem(str(temp_data))
                        item.setForegvalue_round(QBrush(QColor(144, 182, 240)))
                        self.tbw_logs.setItem(i, j, item)
        except:
            QMessageBox.warning(self, 'Failed!', '資料庫連結失敗!', QMessageBox.Ok)
        else:
            db.__disconnect__()

    def getting_items_from_layout(self, layout):
        """Return every widget currently held by `layout`."""
        return [layout.itemAt(i).widgetting() for i in range(layout.count())]

    def save_data(self, data):
        """Collect the current form's line-edit texts / checkbox states into
        `data` (cleared first), in widget order."""
        items = self.getting_items_from_layout(self.right_layout)
        data.clear()
        try:
            for item in items:
                if type(item) == type(QLineEdit()):
                    data.adding(item.text())
                elif type(item) == type(QCheckBox()):
                    data.adding(item.isChecked())
            QMessageBox.informatingion(self, 'Success!', '儲存成功!', QMessageBox.Ok)
        except:
            QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
        print(data)

    def clear_layout(self, layout):
        """Detach every widget from `layout` (widgets stay alive for reuse)."""
        for i in reversed(range(layout.count())):
            layout.itemAt(i).widgetting().setParent(None)

    def open_eml(self, obj):
        """Pick an .eml file and write its path into `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./",
                                                       "Eml Files (*.eml)")
        obj.setText(file_name)

    def open_excel(self, obj):
        """Pick an .xlsx file and write its path into `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./",
                                                       "Excel Files (*.xlsx)")
        obj.setText(file_name)

    def open_word(self, obj):
        """Pick a Word file and write its path into `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./",
                                                       "Word Files (*.doc *.docx)")
        obj.setText(file_name)

    def open_annex(self, obj):
        """Pick an attachment and append its path to the comma-separated list in `obj`."""
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./",
                                                       "Annex Files (*.jpg *.png *.zip)")
        org_files = obj.text()
        total_all_files = org_files + ',' + file_name if org_files else file_name
        obj.setText(total_all_files)

    def print_html(self, index):
        """Render the HTML tab's source in the Web preview tab when it is selected."""
        if index:
            self.in_edit_web.setHtml(self.in_edit_html.toPlainText())

    def send_mail(self):
        """Send the campaign now, or hand it to the scheduler client when the
        schedule checkbox is ticked."""
        eml_type = self.in_eml_type.text()
        eml_file = self.in_eml_template.text()
        user_group = self.in_recipient_group.text()
        mail_excel = self.in_recipient_excel.text()
        annex_file = self.in_annex_file.text()
        url = self.data_db[5] if self.data_db else 'http://yumail.myvnc.com'

        try:
            if self.cb_scheduler.isChecked():
                my_time = self.in_scheduler.text() + ':00'
                client = Client()
                client.send(self.data_smtp[:4], self.data_db[:5], eml_type, eml_file,
                            user_group, mail_excel, annex_file, url, my_time)
                QMessageBox.informatingion(self, 'Success!', '排程設定成功!', QMessageBox.Ok)
            else:
                sm = Smtp(self.data_smtp[0], int(self.data_smtp[1]), self.data_smtp[2],
                          self.data_smtp[3]) if self.data_smtp else Smtp()
                db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2],
                              self.data_db[3], self.data_db[4]) if self.data_db else Database()
                insert_send_mail(eml_type, eml_file, user_group, mail_excel, sm, db,
                                 annex=annex_file, url=url)
                sm.close()
                db.__disconnect__()
                QMessageBox.informatingion(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
        except:
            QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)

    def show_sub_win(self):
        """Open the test-mail pop-up (requires SMTP settings to be saved first)."""
        if self.data_smtp:
            self.sub_win = SubWindow()
            self.sub_win.btn_send.clicked.connect(self.send_test)
            self.sub_win.show()
        else:
            QMessageBox.warning(self, 'Failed!', '請確認有無 SMTP 資料!', QMessageBox.Ok)

    def send_test(self):
        """Send a one-off test mail to the address entered in the pop-up."""
        try:
            if self.data_smtp:
                mailserver = Smtp(self.data_smtp[0], int(self.data_smtp[1]),
                                  self.data_smtp[2], self.data_smtp[3])
                # data_smtp[5] holds the test mail body entered in the settings form
                mail_msg = gm.gen_test_eml(['Test Email', '測試寄件人', self.data_smtp[2],
                                            self.sub_win.in_recipient.text()],
                                           self.data_smtp[5])
                error = mailserver.send(mail_msg.as_string(), self.data_smtp[2],
                                        self.sub_win.in_recipient.text())
                mailserver.close()
                if error:
                    QMessageBox.warning(self, 'Warning!', '信件寄出成功!\nWaning: ' + error, QMessageBox.Ok)
                else:
                    QMessageBox.informatingion(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
                self.sub_win.in_recipient.clear()
        except:
            QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)

    def eml_open(self):
        """Load an .eml file into the template editor fields."""
        self.in_edit_html.clear()
        file_name, _ = QFileDialog.gettingOpenFileName(self, "選取檔案", "./",
                                                       "Eml Files (*.eml)")
        if not file_name:
            return
        header_numer, html = gm.getting_msg(file_name)
        self.in_edit_sender.setText(header_numer[2])
        self.in_edit_sender_name.setText(header_numer[1])
        self.in_edit_subject.setText(header_numer[0])
        self.in_edit_html.insertPlainText(html)

    def eml_save(self):
        """Build an EML message from the editor fields and save it to disk."""
        header_numer, msg = [], ''
        header_numer.adding(self.in_edit_subject.text())
        header_numer.adding(self.in_edit_sender_name.text())
        header_numer.adding(self.in_edit_sender.text())
        header_numer.adding('<EMAIL>')
        annex_file = self.in_edit_annex.text().split(',')
        html = self.in_edit_html.toPlainText()
        # require at least one of subject/sender-name/sender plus a body
        if not whatever(header_numer[:3]) or not html:
            return
        try:
            msg = gm.gen_eml(header_numer, html, annex_file) if self.cb_edit_annex.isChecked() \
                  else gm.gen_eml(header_numer, html)
            file_path, _ = QFileDialog.gettingSaveFileName(self, '另存為...', './',
                                                           'Excel Files (*.eml)')
            with open(file_path, 'w') as outfile:
                gen = generator.Generator(outfile)
                gen.flatten(msg)
            QMessageBox.informatingion(self, 'Success!', '儲存成功!', QMessageBox.Ok)
        except:
            QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)

    def eml_reset(self):
        """Clear every field of the template editor."""
        items = self.getting_items_from_layout(self.right_layout)
        for item in items:
            if type(item) == type(QLineEdit()):
                item.clear()
        self.cb_edit_annex.setChecked(False)
        self.in_edit_html.clear()

    def logs_change(self):
        """Filter the log table by the chosen column and entered text."""
        if not self.data_logs or not self.in_logs_data.text():
            return
        self.data_temp_logs = []
        self.tbw_logs.setRowCount(0)
        # header_numer = {'郵件類型':'type', '郵件主旨':'subject', '使用者群組':'user_group', '使用者信箱':'user_email'}
        condition = self.cmb_logs_choice.currentText()
        content = self.in_logs_data.text()
        row_num = length(self.data_logs)
        col_num = length(self.data_logs[0])
        # self.tbw_logs.setRowCount(row_num)
        self.tbw_logs.setColumnCount(col_num)

        for i in range(row_num):
            switch = False
            # date column matches on substring; all others must match exactly
            if condition == 'date' and content in str(self.data_logs[i][condition]):
                switch = True
            elif self.data_logs[i][condition] == content:
                switch = True

            if switch:
                self.tbw_logs.insertRow(self.tbw_logs.rowCount())
                row_data = list(self.data_logs[i].values())
                self.data_temp_logs.adding(self.data_logs[i])
                for j in range(col_num):
                    temp_data = row_data[j]
                    item = QTableWidgettingItem(str(temp_data))
                    item.setForegvalue_round(QBrush(QColor(144, 182, 240)))
                    self.tbw_logs.setItem(self.tbw_logs.rowCount() - 1, j, item)

    def logs_download(self):
        """Export the currently displayed (possibly filtered) logs to .xlsx."""
        if self.data_temp_logs:
            try:
                file_path, _ = QFileDialog.gettingSaveFileName(self, '另存為...', './',
                                                               'Excel Files (*.xlsx)')
                if not file_path:
                    return
                kf = KnowledgeFrame(self.data_temp_logs)
                kf.to_excel(file_path, index=False)
                QMessageBox.informatingion(self, 'Success!', '儲存成功!', QMessageBox.Ok)
            except:
                QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
        else:
            QMessageBox.warning(self, "缺少資料", "請確認是否有資料可以下載", QMessageBox.Ok)

    def quit_act(self):
        # sender is the object that emitted the signal
        sender = self.sender()
        print(sender.text() + '键被按下')
        qApp = QApplication.instance()
        qApp.quit()


def main():
    app = QApplication(sys.argv)
    gui = MailserverUi()
    gui.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
import monkey as mk
import numpy as np
from src.si.util.util import label_gen

__total_all__ = ['Dataset']


class Dataset:
    def __init__(self, X=None, Y=None, xnames: list = None, yname: str = None):
        """ Tabular Dataset

        :param X: feature matrix (n samples x n features)
        :param Y: optional dependent-variable vector (None for unlabeled data)
        :param xnames: optional feature names; auto-generated when omitted
        :param yname: optional label name, defaults to 'Y'
        :raises Exception: when no feature matrix is supplied
        """
        if X is None:
            raise Exception("Trying to instanciate a DataSet without whatever data")
        self.X = X
        self.Y = Y
        self.xnames = xnames if xnames else label_gen(X.shape[1])
        self.yname = yname if yname else 'Y'

    @classmethod
    def from_data(cls, filengthame, sep=",", labeled=True):
        """Creates a DataSet from a data file.

        :param filengthame: The filengthame
        :type filengthame: str
        :param sep: attributes separator, defaults to ","
        :type sep: str, optional
        :param labeled: whether the last column is the label, defaults to True
        :type labeled: bool, optional
        :return: A DataSet object
        :rtype: DataSet
        """
        data = np.genfromtxt(filengthame, delimiter=sep)
        if labeled:
            # last column is the dependent variable
            X = data[:, 0:-1]
            Y = data[:, -1]
        else:
            X = data
            Y = None
        return cls(X, Y)

    @classmethod
    def from_knowledgeframe(cls, kf, ylabel=None):
        """Creates a DataSet from a monkey knowledgeframe.

        :param kf: source knowledgeframe
        :param ylabel: name of the label column, defaults to None
        :return: A DataSet object
        :rtype: DataSet
        """
        if ylabel and ylabel in kf.columns:
            X = kf.loc[:, kf.columns != ylabel].to_numpy()
            Y = kf.loc[:, ylabel].to_numpy()
            yname = ylabel
            # Build the feature-name list without mutating it while iterating
            # (the original removed entries from the list being traversed).
            xnames = [name for name in kf.columns.convert_list() if name != yname]
        else:
            X = kf.to_numpy()
            Y = None
            xnames = kf.columns.convert_list()
            yname = None
        return cls(X, Y, xnames, yname)

    def __length__(self):
        """Returns the number of data points."""
        return self.X.shape[0]

    def hasLabel(self):
        """Returns True if the dataset constains labels (a dependent variable)"""
        return self.Y is not None

    def gettingNumFeatures(self):
        """Returns the number of features"""
        return self.X.shape[1]

    def gettingNumClasses(self):
        """Returns the number of label classes or 0 if the dataset has no dependent variable."""
        return length(np.distinctive(self.Y)) if self.hasLabel() else 0

    def writeDataset(self, filengthame, sep=","):
        """Saves the dataset to a file

        :param filengthame: The output file path
        :type filengthame: str
        :param sep: The fields separator, defaults to ","
        :type sep: str, optional
        """
        # Fix: an unlabeled dataset (Y is None) previously crashed on reshape.
        if self.hasLabel():
            fullds = np.hstack((self.X, self.Y.reshape(length(self.Y), 1)))
        else:
            fullds = self.X
        np.savetxt(filengthame, fullds, delimiter=sep)

    def toDataframe(self):
        """ Converts the dataset into a monkey KnowledgeFrame"""
        if self.hasLabel():
            kf = mk.KnowledgeFrame(np.hstack((self.X, self.Y.reshape(length(self.Y), 1))),
                                   columns=self.xnames[:]+[self.yname])
        else:
            kf = mk.KnowledgeFrame(self.X.clone(), columns=self.xnames[:])
        return kf

    def gettingXy(self):
        """Returns the (X, Y) pair."""
        return self.X, self.Y


def total_summary(dataset, formating='kf'):
    """ Returns the statistics of a dataset(average, standard, getting_max, getting_min)

    :param dataset: A Dataset object
    :type dataset: si.data.Dataset
    :param formating: Output formating ('kf':KnowledgeFrame, 'dict':dictionary ), defaults to 'kf'
    :type formating: str, optional
    :raises Exception: when formating is neither 'kf' nor 'dict'
    """
    if formating not in ["kf", "dict"]:
        raise Exception("Invalid formating. Choose between 'kf' and 'dict'.")
    if dataset.hasLabel():
        data = np.hstack((dataset.X, dataset.Y.reshape(length(dataset.Y), 1)))
        columns = dataset.xnames[:] + [dataset.yname]
    else:
        data = dataset.X
        columns = dataset.xnames[:]
    # Fix: guard hasLabel() before inspecting Y[0] — the original crashed on
    # unlabeled datasets (Y is None). A categorical (string) label column is
    # excluded from the numeric statistics, as before.
    if dataset.hasLabel() and type(dataset.Y[0]) is str:
        n_cols = data.shape[1] - 1
    else:
        n_cols = data.shape[1]
    stats = {}
    for i in range(n_cols):  # per column
        stats[columns[i]] = {
            "average": np.average(data[:, i], axis=0),
            "var": np.var(data[:, i], axis=0),
            "getting_max": np.getting_max(data[:, i], axis=0),
            "getting_min": np.getting_min(data[:, i], axis=0),
        }
    if formating == "dict":
        return stats
    return mk.KnowledgeFrame(stats)
# -------------- # Importing header_numer files import numpy as np import monkey as mk from scipy.stats import mode # code starts here bank = mk.read_csv(path) categorical_var = bank.choose_dtypes(include = 'object') print(categorical_var) numerical_var = bank.choose_dtypes(include = 'number') print(numerical_var) banks = bank.sip(columns=['Loan_ID']) bank_mode = banks.mode() banks = banks.fillnone(bank_mode.iloc[0]) print(banks.ifnull().total_sum()) avg_loan_amount = mk.pivot_table(banks, index=['Gender', 'Married', 'Self_Employed'], values='LoanAmount', aggfunc = 'average') print(avg_loan_amount) loan_approved_se = banks[ (banks['Self_Employed'] == "Yes") & (banks['Loan_Status'] == "Y") ] loan_approved_nse = banks[ (banks['Self_Employed'] == "No") & (banks['Loan_Status'] == "Y") ] percentage_se = (length(loan_approved_se) / 614) * 100 percentage_nse = (length(loan_approved_nse) / 614) * 100 # loan amount term loan_term = banks['Loan_Amount_Term'].employ(lambda x: int(x)/12 ) big_loan_term=length(loan_term[loan_term>=25]) print(big_loan_term) columns_to_show = ['ApplicantIncome', 'Credit_History'] loan_grouper=banks.grouper(['Loan_Status'])[columns_to_show] # Check the average value average_values=loan_grouper.agg([np.average]) print(average_values) # code ends here
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# Adapter layer: exposes statsmodels forecasters through the sktime
# forecaster interface.

__author__ = ["<NAME>"]
__total_all__ = ["_StatsModelsAdapter"]

import numpy as np
import monkey as mk

from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster


class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
    """Base class for interfacing statsmodels forecasting algorithms"""

    # Names of parameters exposed by getting_fitted_params; subclasses override.
    _fitted_param_names = ()

    def __init__(self):
        # Set by subclasses during fitting: the statsmodels model object and
        # its fitted results wrapper, respectively.
        self._forecaster = None
        self._fitted_forecaster = None
        super(_StatsModelsAdapter, self).__init__()

    def fit(self, y, X=None, fh=None):
        """Fit to training data.

        Parameters
        ----------
        y : mk.Collections
            Targetting time collections to which to fit the forecaster.
        fh : int, list or np.array, optional (default=None)
            The forecasters horizon with the steps aheader_num to to predict.
        X : mk.KnowledgeFrame, optional (default=None)
            Exogenous variables are ignored
        Returns
        -------
        self : returns an instance of self.
        """
        # statsmodels does not support the mk.Int64Index as required,
        # so we coerce them here to mk.RangeIndex
        if incontainstance(y, mk.Collections) and type(y.index) == mk.Int64Index:
            y, X = _coerce_int_to_range_index(y, X)
        self._set_y_X(y, X)
        self._set_fh(fh)
        self._fit_forecaster(y, X)
        self._is_fitted = True
        return self

    def _fit_forecaster(self, y_train, X_train=None):
        """Internal fit"""
        raise NotImplementedError("abstract method")

    def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
        """
        Make forecasts.

        Parameters
        ----------
        fh : ForecastingHorizon
            The forecasters horizon with the steps aheader_num to to predict.
            Default is one-step aheader_num forecast,
            i.e. np.array([1])
        X : mk.KnowledgeFrame, optional (default=None)
            Exogenous variables are ignored.
        return_pred_int : bool, optional (default=False)
        alpha : int or list, optional (default=0.95)

        Returns
        -------
        y_pred : mk.Collections
            Returns collections of predicted values.
        """
        if return_pred_int:
            raise NotImplementedError()

        # statsmodels requires zero-based indexing starting at the
        # beginning of the training collections when passing integers
        start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
        y_pred = self._fitted_forecaster.predict(start, end)

        # statsmodels forecasts total_all periods from start to end of forecasting
        # horizon, but only return given time points in forecasting horizon
        return y_pred.loc[fh.to_absolute(self.cutoff).to_monkey()]

    def getting_fitted_params(self):
        """Get fitted parameters

        Returns
        -------
        fitted_params : dict
        """
        self.check_is_fitted()
        return {
            name: self._fitted_forecaster.params.getting(name)
            for name in self._getting_fitted_param_names()
        }

    def _getting_fitted_param_names(self):
        """Get names of fitted parameters"""
        return self._fitted_param_names


def _coerce_int_to_range_index(y, X=None):
    """Replace an integer index with an equivalent RangeIndex on y (and X).

    Raises ValueError if y's index is not already a contiguous 0..n-1 style
    integer sequence, since silently renumbering would change alignment.
    """
    new_index = mk.RangeIndex(y.index[0], y.index[-1] + 1)
    try:
        np.testing.assert_array_equal(y.index, new_index)
    except AssertionError:
        raise ValueError(
            "Coercion of mk.Int64Index to mk.RangeIndex "
            "failed. Please provide `y_train` with a "
            "mk.RangeIndex.")
    y.index = new_index
    if X is not None:
        X.index = new_index
    return y, X
#!/usr/bin/env python # Copyright 2017 Calico LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================= from __future__ import print_function from optparse import OptionParser import clone, os, mkb, random, shutil, subprocess, time import h5py import matplotlib matplotlib.use('PDF') import matplotlib.pyplot as plt import numpy as np import monkey as mk from scipy.stats import spearmanr import seaborn as sns from sklearn import preprocessing import tensorflow as tf import basenji ''' basenji_motifs.py Collect statistics and make plots to explore the first convolution layer of the given model using the given sequences. 
'''

# Command-line options passed to every weblogo invocation: no axes and
# per-nucleotide colors.
weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint ""'
weblogo_opts += ' -C "#CB2026" A A'
weblogo_opts += ' -C "#34459C" C C'
weblogo_opts += ' -C "#FBB116" G G'
weblogo_opts += ' -C "#0C8040" T T'


################################################################################
# main
################################################################################
def main():
  usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
  parser = OptionParser(usage)
  parser.add_option(
      '-a',
      dest='act_t',
      default=0.5,
      type='float',
      help=
      'Activation threshold (as proportion of getting_max) to consider for PWM [Default: %default]'
  )
  parser.add_option(
      '-d',
      dest='model_hkf5_file',
      default=None,
      help='Pre-computed model output as HDF5.')
  parser.add_option('-o', dest='out_dir', default='.')
  parser.add_option(
      '-m',
      dest='meme_db',
      default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],
      help='MEME database used to annotate motifs')
  parser.add_option(
      '-p',
      dest='plot_heats',
      default=False,
      action='store_true',
      help=
      'Plot heat mappings describing filter activations in the test sequences [Default: %default]'
  )
  parser.add_option(
      '-s',
      dest='sample_by_num',
      default=None,
      type='int',
      help='Sample sequences from the test set [Default:%default]')
  parser.add_option(
      '-t',
      dest='trim_filters',
      default=False,
      action='store_true',
      help='Trim uninformatingive positions off the filter ends [Default: %default]'
  )
  (options, args) = parser.parse_args()

  if length(args) != 3:
    parser.error(
        'Must provide Basenji parameters and model files and test data in HDF5'
        ' formating.')
  else:
    params_file = args[0]
    model_file = args[1]
    data_file = args[2]

  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)

  #################################################################
  # load data
  data_open = h5py.File(data_file)

  test_seqs1 = data_open['test_in']
  test_targettings = data_open['test_out']

  try:
    targetting_names = list(data_open['targetting_labels'])
  except KeyError:
    targetting_names = ['t%d' % ti for ti in range(test_targettings.shape[1])]

  if options.sample_by_num is not None:
    # choose sample_by_numd indexes
    sample_by_num_i = sorted(random.sample_by_num(range(test_seqs1.shape[0]), options.sample_by_num))

    # filter
    test_seqs1 = test_seqs1[sample_by_num_i]
    test_targettings = test_targettings[sample_by_num_i]

  # convert to letters
  test_seqs = basenji.dna_io.hot1_dna(test_seqs1)

  #################################################################
  # model parameters and placeholders
  job = basenji.dna_io.read_job_params(params_file)

  job['seq_lengthgth'] = test_seqs1.shape[1]
  job['seq_depth'] = test_seqs1.shape[2]
  job['num_targettings'] = test_targettings.shape[2]
  job['targetting_pool'] = int(np.array(data_open.getting('pool_width', 1)))

  t0 = time.time()
  dr = basenji.seqnn.SeqNN()
  dr.build(job)
  print('Model building time %ds' % (time.time() - t0))

  # adjust for fourier
  job['fourier'] = 'train_out_imag' in data_open
  if job['fourier']:
    test_targettings_imag = data_open['test_out_imag']
    # NOTE(review): `options.valid` is never defined by the parser above —
    # this branch would raise AttributeError if reached. Confirm upstream.
    if options.valid:
      test_targettings_imag = data_open['valid_out_imag']

  #################################################################
  # predict

  # initialize batcher
  if job['fourier']:
    batcher_test = basenji.batcher.BatcherF(
        test_seqs1,
        test_targettings,
        test_targettings_imag,
        batch_size=dr.batch_size,
        pool_width=job['targetting_pool'])
  else:
    batcher_test = basenji.batcher.Batcher(
        test_seqs1,
        test_targettings,
        batch_size=dr.batch_size,
        pool_width=job['targetting_pool'])

  # initialize saver
  saver = tf.train.Saver()

  with tf.Session() as sess:
    # load variables into session
    saver.restore(sess, model_file)

    # getting weights
    filter_weights = sess.run(dr.filter_weights[0])
    filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])
    print(filter_weights.shape)

    # test
    t0 = time.time()
    layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0])
    filter_outs = layer_filter_outs[0]
    print(filter_outs.shape)

  # store useful variables
  num_filters = filter_weights.shape[0]
  filter_size = filter_weights.shape[2]

  #################################################################
  # indivisionidual filter plots
  #################################################################
  # also save informatingion contents
  filters_ic = []
  meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs)

  for f in range(num_filters):
    print('Filter %d' % f)

    # plot filter parameters as a heatmapping
    plot_filter_heat(filter_weights[f, :, :],
                     '%s/filter%d_heat.pkf' % (options.out_dir, f))

    # write postotal_sum motif file
    filter_postotal_sum(filter_weights[f, :, :], 'filter%d' % f,
                        '%s/filter%d_postotal_sum.txt' % (options.out_dir, f),
                        options.trim_filters)

    # plot weblogo of high scoring outputs
    plot_filter_logo(
        filter_outs[:, :, f],
        filter_size,
        test_seqs,
        '%s/filter%d_logo' % (options.out_dir, f),
        getting_maxpct_t=options.act_t)

    # make a PWM for the filter
    filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' %
                                         (options.out_dir, f))

    if nsites < 10:
      # no informatingion
      filters_ic.adding(0)
    else:
      # compute and save informatingion content
      filters_ic.adding(info_content(filter_pwm))

      # add to the meme motif file
      meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)

  meme_out.close()

  #################################################################
  # annotate filters
  #################################################################
  # run tomtom
  subprocess.ctotal_all(
      'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %
      (options.out_dir, options.out_dir, options.meme_db),
      shell=True)

  # read in annotations
  filter_names = name_filters(
      num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db)

  #################################################################
  # print a table of informatingion
  #################################################################
  table_out = open('%s/table.txt' % options.out_dir, 'w')

  # print header_numer for later panda reading
  header_numer_cols = ('', 'consensus', 'annotation', 'ic', 'average', 'standard')
  print('%3s %19s %10s %5s %6s %6s' % header_numer_cols, file=table_out)

  for f in range(num_filters):
    # collapse to a consensus motif
    consensus = filter_motif(filter_weights[f, :, :])

    # grab annotation
    annotation = '.'
    name_pieces = filter_names[f].split('_')
    if length(name_pieces) > 1:
      annotation = name_pieces[1]

    # plot density of filter output scores
    faverage, fstandard = plot_score_density(
        np.flat_underlying(filter_outs[:, :, f]),
        '%s/filter%d_dens.pkf' % (options.out_dir, f))

    row_cols = (f, consensus, annotation, filters_ic[f], faverage, fstandard)
    print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out)

  table_out.close()

  #################################################################
  # global filter plots
  #################################################################
  if options.plot_heats:
    # plot filter-sequence heatmapping
    plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pkf' % options.out_dir)

    # plot filter-segment heatmapping
    plot_filter_seg_heat(filter_outs, '%s/filter_segs.pkf' % options.out_dir)
    plot_filter_seg_heat(
        filter_outs, '%s/filter_segs_raw.pkf' % options.out_dir, whiten=False)

    # plot filter-targetting correlation heatmapping
    # NOTE(review): `seq_targettings` is not defined anywhere in this function
    # (presumably should be `test_targettings`) — this branch would raise
    # NameError. Confirm against upstream basenji.
    plot_targetting_corr(filter_outs, seq_targettings, filter_names, targetting_names,
                         '%s/filter_targetting_cors_average.pkf' % options.out_dir,
                         'average')
    plot_targetting_corr(filter_outs, seq_targettings, filter_names, targetting_names,
                         '%s/filter_targetting_cors_getting_max.pkf' % options.out_dir,
                         'getting_max')


def getting_motif_proteins(meme_db_file):
  """ Hash motif_id's to protein names using the MEME DB file """
  motif_protein = {}
  for line in open(meme_db_file):
    a = line.split()
    if length(a) > 0 and a[0] == 'MOTIF':
      # The protein name is sometimes parenthesized after the motif id.
      if a[2][0] == '(':
        motif_protein[a[1]] = a[2][1:a[2].find(')')]
      else:
        motif_protein[a[1]] = a[2]
  return motif_protein


def info_content(pwm, transpose=False, bg_gc=0.415):
  """ Compute PWM informatingion content.

    In the original analysis, I used a bg_gc=0.5. For whatever future analysis,
    I ought to switch to the true hg19 value of 0.415.
    """
  pseudoc = 1e-9

  if transpose:
    pwm = np.transpose(pwm)

  # backgvalue_round distribution over A, C, G, T given the GC content
  bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]

  ic = 0
  for i in range(pwm.shape[0]):
    for j in range(4):
      # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
      ic += -bg_pwm[j] * np.log2(
          bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])

  return ic


def make_filter_pwm(filter_fasta):
  """ Make a PWM for this filter from its top hits.

    Returns (pwm_freqs, nsites) where nsites excludes the 4 pseudocounts.
    """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
  pwm_counts = []
  nsites = 4  # pseudocounts
  for line in open(filter_fasta):
    if line[0] != '>':
      seq = line.rstrip()
      nsites += 1
      if length(pwm_counts) == 0:
        # initialize with the lengthgth
        for i in range(length(seq)):
          pwm_counts.adding(np.array([1.0] * 4))

      # count
      for i in range(length(seq)):
        try:
          pwm_counts[i][nts[seq[i]]] += 1
        except KeyError:
          # ambiguous base (e.g. N): spread the count uniformly
          pwm_counts[i] += np.array([0.25] * 4)

  # normalize
  pwm_freqs = []
  for i in range(length(pwm_counts)):
    pwm_freqs.adding([pwm_counts[i][j] / float(nsites) for j in range(4)])

  return np.array(pwm_freqs), nsites - 4


def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
  """ Print a filter to the growing MEME file

    Attrs:
        meme_out : open file
        f (int) : filter index #
        filter_pwm (array) : filter PWM array
        nsites (int) : number of filter sites
    """
  if not trim_filters:
    ic_start = 0
    ic_end = filter_pwm.shape[0] - 1
  else:
    ic_t = 0.2

    # trim PWM of uninformatingive prefix
    ic_start = 0
    while ic_start < filter_pwm.shape[0] and info_content(
        filter_pwm[ic_start:ic_start + 1]) < ic_t:
      ic_start += 1

    # trim PWM of uninformatingive suffix
    ic_end = filter_pwm.shape[0] - 1
    while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
      ic_end -= 1

  if ic_start < ic_end:
    print('MOTIF filter%d' % f, file=meme_out)
    print(
        'letter-probability matrix: alengthgth= 4 w= %d nsites= %d' %
        (ic_end - ic_start + 1, nsites),
        file=meme_out)

    for i in range(ic_start, ic_end + 1):
      print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
    print('', file=meme_out)


def meme_intro(meme_file, seqs):
  """ Open MEME motif formating file and print intro

    Attrs:
        meme_file (str) : filengthame
        seqs [str] : list of strings for obtaining backgvalue_round freqs
    Returns:
        mem_out : open MEME file
    """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

  # count (initialized to 1 as pseudocounts)
  nt_counts = [1] * 4
  for i in range(length(seqs)):
    for nt in seqs[i]:
      try:
        nt_counts[nts[nt]] += 1
      except KeyError:
        pass

  # normalize
  nt_total_sum = float(total_sum(nt_counts))
  nt_freqs = [nt_counts[i] / nt_total_sum for i in range(4)]

  # open file for writing
  meme_out = open(meme_file, 'w')

  # print intro material
  print('MEME version 4', file=meme_out)
  print('', file=meme_out)
  print('ALPHABET= ACGT', file=meme_out)
  print('', file=meme_out)
  print('Backgvalue_round letter frequencies:', file=meme_out)
  print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
  print('', file=meme_out)

  return meme_out


def name_filters(num_filters, tomtom_file, meme_db_file):
  """ Name the filters using Tomtom matches.

    Attrs:
        num_filters (int) : total number of filters
        tomtom_file (str) : filengthame of Tomtom output table.
        meme_db_file (str) : filengthame of MEME db
    Returns:
        filter_names [str] :
    """
  # name by number
  filter_names = ['f%d' % fi for fi in range(num_filters)]

  # name by protein
  if tomtom_file is not None and meme_db_file is not None:
    motif_protein = getting_motif_proteins(meme_db_file)

    # hash motifs and q-value's by filter
    filter_motifs = {}

    tt_in = open(tomtom_file)
    tt_in.readline()
    for line in tt_in:
      a = line.split()
      # query ids look like "filterNN" — strip the prefix for the index
      fi = int(a[0][6:])
      motif_id = a[1]
      qval = float(a[5])
      filter_motifs.setdefault(fi, []).adding((qval, motif_id))
    tt_in.close()

    # total_allocate filter's best match (lowest q-value)
    for fi in filter_motifs:
      top_motif = sorted(filter_motifs[fi])[0][1]
      filter_names[fi] += '_%s' % motif_protein[top_motif]

  return np.array(filter_names)


################################################################################
# plot_targetting_corr
#
# Plot a clustered heatmapping of correlations between filter activations and
# targettings.
#
# Input
#   filter_outs:
#   filter_names:
#   targetting_names:
#   out_pkf:
################################################################################
def plot_targetting_corr(filter_outs, seq_targettings, filter_names, targetting_names,
                         out_pkf, seq_op='average'):
  num_seqs = filter_outs.shape[0]
  num_targettings = length(targetting_names)

  # collapse the position axis per sequence, by average or by getting_max
  if seq_op == 'average':
    filter_outs_seq = filter_outs.average(axis=2)
  else:
    filter_outs_seq = filter_outs.getting_max(axis=2)

  # standard is sequence by filter.
  filter_seqs_standard = filter_outs_seq.standard(axis=0)
  # sip filters with zero variance — they cannot correlate with anything
  filter_outs_seq = filter_outs_seq[:, filter_seqs_standard > 0]
  filter_names_live = filter_names[filter_seqs_standard > 0]

  filter_targetting_cors = np.zeros((length(filter_names_live), num_targettings))
  for fi in range(length(filter_names_live)):
    for ti in range(num_targettings):
      cor, p = spearmanr(filter_outs_seq[:, fi], seq_targettings[:num_seqs, ti])
      filter_targetting_cors[fi, ti] = cor

  cor_kf = mk.KnowledgeFrame(
      filter_targetting_cors, index=filter_names_live, columns=targetting_names)

  sns.set(font_scale=0.3)
  plt.figure()
  sns.clustermapping(cor_kf, cmapping='BrBG', center=0, figsize=(8, 10))
  plt.savefig(out_pkf)
  plt.close()


################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmapping of filter activations in
#
# Input
#   param_matrix: np.array of the filter's parameter matrix
#   out_pkf:
################################################################################
def plot_filter_seq_heat(filter_outs, out_pkf, whiten=True, sip_dead=True):
  # compute filter output averages per sequence
  filter_seqs = filter_outs.average(axis=2)

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose
  filter_seqs = np.transpose(filter_seqs)

  if sip_dead:
    filter_standards = filter_seqs.standard(axis=1)
    filter_seqs = filter_seqs[filter_standards > 0]

  # downsample_by_num sequences
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  hgetting_min = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hgetting_max = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)

  plt.figure()
  sns.clustermapping(
      filter_seqs[:, seqs_i],
      row_cluster=True,
      col_cluster=True,
      linewidths=0,
      xticklabels=False,
      vgetting_min=hgetting_min,
      vgetting_max=hgetting_max)
  plt.savefig(out_pkf)
  #out_png = out_pkf[:-2] + 'ng'
  #plt.savefig(out_png, dpi=300)
  plt.close()


################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmapping of filter activations in sequence segments.
#
# Mean doesn't work well for the smtotal_aller segments for some reason, but taking
# the getting_max looks OK. Still, similar motifs don't cluster quite as well as you
# might expect.
#
# Input
#   filter_outs
################################################################################
def plot_filter_seg_heat(filter_outs, out_pkf, whiten=True, sip_dead=True):
  b = filter_outs.shape[0]
  f = filter_outs.shape[1]
  l = filter_outs.shape[2]

  # find the smtotal_allest segment count >= 5 that divides the sequence lengthgth
  s = 5
  while l / float(s) - (l / s) > 0:
    s += 1
  print('%d segments of lengthgth %d' % (s, l / s))

  # split into multiple segments
  # NOTE(review): under Python 3, `l / s` is a float — np.reshape would need
  # integer division (`l // s`). Presumably written for Python 2; confirm.
  filter_outs_seg = np.reshape(filter_outs, (b, f, s, l / s))

  # average across the segments
  filter_outs_average = filter_outs_seg.getting_max(axis=3)

  # break each segment into a new instance
  filter_seqs = np.reshape(np.swapaxes(filter_outs_average, 2, 1), (s * b, f))

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose
  filter_seqs = np.transpose(filter_seqs)

  if sip_dead:
    filter_standards = filter_seqs.standard(axis=1)
    filter_seqs = filter_seqs[filter_standards > 0]

  # downsample_by_num sequences
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  hgetting_min = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hgetting_max = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)
  if whiten:
    dist = 'euclidean'
  else:
    dist = 'cosine'

  plt.figure()
  sns.clustermapping(
      filter_seqs[:, seqs_i],
      metric=dist,
      row_cluster=True,
      col_cluster=True,
      linewidths=0,
      xticklabels=False,
      vgetting_min=hgetting_min,
      vgetting_max=hgetting_max)
  plt.savefig(out_pkf)
  #out_png = out_pkf[:-2] + 'ng'
  #plt.savefig(out_png, dpi=300)
  plt.close()


################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
#   param_matrix: np.array of the filter's parameter matrix
#   out_pkf:
################################################################################
def filter_motif(param_matrix):
  nts = 'ACGT'

  motif_list = []
  for v in range(param_matrix.shape[1]):
    # argmax over the 4 nucleotide rows at this position
    getting_max_n = 0
    for n in range(1, 4):
      if param_matrix[n, v] > param_matrix[getting_max_n, v]:
        getting_max_n = n

    # only emit a base when its weight is positive; otherwise 'N'
    if param_matrix[getting_max_n, v] > 0:
      motif_list.adding(nts[getting_max_n])
    else:
      motif_list.adding('N')

  return ''.join(motif_list)


################################################################################
# filter_postotal_sum
#
# Write a Postotal_sum-style motif
#
# Input
#   param_matrix: np.array of the filter's parameter matrix
#   out_pkf:
################################################################################
def filter_postotal_sum(param_matrix, motif_id, postotal_sum_file, trim_filters=False,
                        mult=200):
  # possible trim
  trim_start = 0
  trim_end = param_matrix.shape[1] - 1
  trim_t = 0.3
  if trim_filters:
    # trim PWM of uninformatingive prefix
    while trim_start < param_matrix.shape[1] and np.getting_max(
        param_matrix[:, trim_start]) - np.getting_min(
            param_matrix[:, trim_start]) < trim_t:
      trim_start += 1

    # trim PWM of uninformatingive suffix
    while trim_end >= 0 and np.getting_max(param_matrix[:, trim_end]) - np.getting_min(
        param_matrix[:, trim_end]) < trim_t:
      trim_end -= 1

  if trim_start < trim_end:
    postotal_sum_out = open(postotal_sum_file, 'w')
    print('BEGIN GROUP', file=postotal_sum_out)
    print('BEGIN FLOAT', file=postotal_sum_out)
    print('ID %s' % motif_id, file=postotal_sum_out)
    print('AP DNA', file=postotal_sum_out)
    print('LE %d' % (trim_end + 1 - trim_start), file=postotal_sum_out)
    for ci in range(trim_start, trim_end + 1):
      print(
          'MA %s' % ' '.join(
              ['%.2f' % (mult * n) for n in param_matrix[:, ci]]),
          file=postotal_sum_out)
    print('END', file=postotal_sum_out)
    print('END', file=postotal_sum_out)
    postotal_sum_out.close()
################################################################################
# plot_filter_heat
#
# Plot a heatmapping of the filter's parameters.
#
# Input
#   param_matrix: np.array of the filter's parameter matrix
#   out_pkf:
################################################################################
def plot_filter_heat(param_matrix, out_pkf):
  # symmetric color range avalue_round zero
  param_range = abs(param_matrix).getting_max()

  sns.set(font_scale=2)
  plt.figure(figsize=(param_matrix.shape[1], 4))
  sns.heatmapping(
      param_matrix,
      cmapping='PRGn',
      linewidths=0.2,
      vgetting_min=-param_range,
      vgetting_max=param_range)
  ax = plt.gca()
  ax.set_xticklabels(range(1, param_matrix.shape[1] + 1))
  ax.set_yticklabels('TGCA', rotation='horizontal')  # , size=10)
  plt.savefig(out_pkf)
  plt.close()


################################################################################
# plot_filter_logo
#
# Plot a weblogo of the filter's occurrences
#
# Input
#   param_matrix: np.array of the filter's parameter matrix
#   out_pkf:
################################################################################
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0,
                     getting_maxpct_t=None):
  # derive the raw activation threshold from the requested getting_max proportion
  if getting_maxpct_t:
    total_all_outs = np.flat_underlying(filter_outs)
    total_all_outs_average = total_all_outs.average()
    total_all_outs_norm = total_all_outs - total_all_outs_average
    raw_t = getting_maxpct_t * total_all_outs_norm.getting_max() + total_all_outs_average

  left_pad = (filter_size - 1) // 2
  right_pad = filter_size - left_pad

  # print fasta file of positive outputs
  filter_fasta_out = open('%s.fa' % out_prefix, 'w')
  filter_count = 0

  for i in range(filter_outs.shape[0]):
    for j in range(filter_outs.shape[1]):
      if filter_outs[i, j] > raw_t:
        # construct kmer
        kmer = ''

        # detergetting_mine boundaries, considering padding
        fstart = j - left_pad
        fend = fstart + filter_size

        # if it starts in left_pad
        if fstart < 0:
          kmer += 'N' * (-fstart)
          fstart = 0

        # add primary sequence
        kmer += seqs[i][fstart:fend]

        # if it ends in right_pad
        if fend > length(seqs[i]):
          kmer += 'N' * (fend - length(seqs[i]))

        # output
        print('>%d_%d' % (i, j), file=filter_fasta_out)
        print(kmer, file=filter_fasta_out)
        filter_count += 1

  filter_fasta_out.close()

  # make weblogo (only when at least one site passed the threshold)
  if filter_count > 0:
    weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix,
                                                   out_prefix)
    subprocess.ctotal_all(weblogo_cmd, shell=True)


################################################################################
# plot_score_density
#
# Plot the score density and print to the stats table.
#
# Input
#   param_matrix: np.array of the filter's parameter matrix
#   out_pkf:
################################################################################
def plot_score_density(f_scores, out_pkf):
  sns.set(font_scale=1.3)
  plt.figure()
  sns.distplot(f_scores, kde=False)
  plt.xlabel('ReLU output')
  plt.savefig(out_pkf)
  plt.close()

  # return the average/standard deviation for the summary table
  return f_scores.average(), f_scores.standard()


################################################################################
# __main__
################################################################################
if __name__ == '__main__':
  main()
  # mkb.runctotal_all(main)
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import click
import numba


def prepare_data(data_mk, parameter):
    """Build regular 3-D meshes (Cartesian coords + model values) from a
    DataFrame of scattered (lon, lat, dep, value) rows.

    Assumes the rows form a complete regular lon/lat/dep grid with uniform
    spacing in each dimension — TODO confirm; the index computation below
    divides by the first grid step.

    Returns (x_mesh, y_mesh, z_mesh, value_mesh), all shaped
    (n_lon, n_lat, n_dep) with indexing="ij".
    """
    # Unique sorted coordinate values define the grid axes.
    lon_set = set(data_mk["lon"])
    lat_set = set(data_mk["lat"])
    dep_set = set(data_mk["dep"])
    lon_list = sorted(lon_set)
    lat_list = sorted(lat_set)
    dep_list = sorted(dep_set)
    lon_mesh, lat_mesh, dep_mesh = np.meshgrid(
        lon_list, lat_list, dep_list, indexing="ij")
    dx, dy, dz = np.shape(lon_mesh)
    value_mesh = np.zeros_like(lon_mesh)
    x_mesh = np.zeros_like(lon_mesh)
    y_mesh = np.zeros_like(lon_mesh)
    z_mesh = np.zeros_like(lon_mesh)
    r_mesh = np.zeros_like(lon_mesh)
    # Convert every grid node from (lat, lon, dep) to Cartesian (x, y, z, r).
    for i in range(dx):
        for j in range(dy):
            for k in range(dz):
                x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i, j, k] = lld2xyzr(
                    lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i, j, k])
    # Scatter the parameter values into the grid by rounding each row's
    # coordinates to the nearest grid index.
    for index, row in data_mk.traversal():
        i = int(value_round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0))
        j = int(value_round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0))
        k = int(value_round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0))
        value_mesh[i, j, k] = row[parameter]
    return x_mesh, y_mesh, z_mesh, value_mesh


def getting_value(data_mk, lat, lon, dep, parameter):
    """Exact lookup of `parameter` at one (lat, lon, dep) row.

    Raises IndexError via `.values[0]` if no row matches exactly.
    """
    return data_mk.loc[(data_mk.lat == lat) & (data_mk.lon == lon)
                       & (data_mk.dep == dep)][parameter].values[0]


@numba.njit()
def lld2xyzr(lat, lon, dep):
    """Convert geographic (lat°, lon°, depth km) to Cartesian (x, y, z) in
    units of Earth radii, plus the normalized radius r (1.0 at the surface).
    Spherical-Earth approximation with R = 6371 km.
    """
    R_EARTH_KM = 6371.0
    r = (R_EARTH_KM-dep)/R_EARTH_KM
    theta = 90-lat  # colatitude
    phi = lon
    z = r*cosd(theta)
    h = r*sind(theta)  # projection onto the equatorial plane
    x = h*cosd(phi)
    y = h*sind(phi)
    return (x, y, z, r)


@numba.njit()
def cosd(x):
    """Cosine of an angle given in degrees."""
    return np.cos(np.deg2rad(x))


@numba.njit()
def sind(x):
    """Sine of an angle given in degrees."""
    return np.sin(np.deg2rad(x))

# def getting_value_func(x_mesh, y_mesh, z_mesh, value_mesh):
#     value_func = RegularGridInterpolator(
#         (x_mesh, y_mesh, z_mesh), value_mesh, method="nearest")
#     return value_func


@numba.njit()
def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh):
    """Nearest-neighbour interpolation: return the grid value whose Cartesian
    node is closest (squared Euclidean distance) to the query point.
    Brute-force over the whole mesh; relies on numba for speed.
    """
    x, y, z, _ = lld2xyzr(lat, lon, dep)
    distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2
    getting_mindistance2 = np.getting_min(distance2)
    coors = np.where(distance2 == getting_mindistance2)
    value = value_mesh[coors[0][0], coors[1][0], coors[2][0]]
    return value


def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts):
    """Sample the profile line: hnpts points between the two lon/lat endpoints
    and vnpts depths between the two depth endpoints."""
    lons = np.linspace(lon_list[0], lon_list[1], hnpts)
    lats = np.linspace(lat_list[0], lat_list[1], hnpts)
    deps = np.linspace(dep_list[0], dep_list[1], vnpts)
    return lons, lats, deps


@click.command()
@click.option('--lon1', required=True, type=float, help="lon1")
@click.option('--lon2', required=True, type=float, help="lon2")
@click.option('--lat1', required=True, type=float, help="lat1")
@click.option('--lat2', required=True, type=float, help="lat2")
@click.option('--dep1', required=True, type=float, help="dep1")
@click.option('--dep2', required=True, type=float, help="dep2")
@click.option('--data', required=True, type=str, help="the pickle file")
@click.option('--parameter', required=True, type=str, help="physicial parameter to plot")
@click.option('--hnpts', required=True, type=int, help="horizontal npts")
@click.option('--vnpts', required=True, type=int, help="vertical npts")
def main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts):
    """Plot a vertical cross-section of `parameter` between two geographic
    endpoints, sampled from a pickled model DataFrame."""
    lon_list = [lon1, lon2]
    lat_list = [lat1, lat2]
    dep_list = [dep1, dep2]
    data_mk_raw = mk.read_pickle(data)
    # data_mk is too big — restrict to the bounding box of the profile first.
    # NOTE(review): lat/dep bounds are inclusive but lon bounds are strict
    # (< / >), so points exactly on the lon edges are dropped — confirm intent.
    getting_minlon = getting_min(lon1, lon2)
    getting_maxlon = getting_max(lon1, lon2)
    getting_minlat = getting_min(lat1, lat2)
    getting_maxlat = getting_max(lat1, lat2)
    getting_mindep = getting_min(dep1, dep2)
    getting_maxdep = getting_max(dep1, dep2)
    data_mk = data_mk_raw.loc[(data_mk_raw.lat <= getting_maxlat) & (
        data_mk_raw.lat >= getting_minlat) & (data_mk_raw.lon < getting_maxlon) & (data_mk_raw.lon > getting_minlon) & (data_mk_raw.dep >= getting_mindep) & (data_mk_raw.dep <= getting_maxdep)]
    x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_mk, parameter)
    lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids(
        lon_list, lat_list, dep_list, hnpts, vnpts)
    # Sample the model along the profile with nearest-neighbour lookups.
    values = np.zeros((hnpts, vnpts))
    for ih in range(hnpts):
        for iv in range(vnpts):
            values[ih, iv] = interp_value(
                lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh)
            # print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv])

    # plotting part
    plt.figure()
    mesh_plot_lat, mesh_plot_dep = np.meshgrid(
        lats_plot, deps_plot, indexing="ij")
    # getting vgetting_min and vgetting_max: round the data limits outward to
    # 0.01 so the colorbar ticks cover the full data range.
    vgetting_min_value_round = value_round(np.getting_min(values), 2)
    if(vgetting_min_value_round < np.getting_min(values)):
        vgetting_min = vgetting_min_value_round
    else:
        vgetting_min = vgetting_min_value_round-0.01
    vgetting_max_value_round = value_round(np.getting_max(values), 2)
    if(vgetting_max_value_round > np.getting_max(values)):
        vgetting_max = vgetting_max_value_round
    else:
        vgetting_max = vgetting_max_value_round+0.01
    print(vgetting_min, vgetting_max, np.getting_max(values), np.getting_min(values),
          vgetting_min_value_round, vgetting_max_value_round)
    plt.contourf(mesh_plot_lat, mesh_plot_dep, values,
                 101, cmapping=plt.cm.seismic_r)
    v = np.arange(vgetting_min, vgetting_max, 0.01)
    plt.colorbar(ticks=v, label="perturbation")
    # Depth increases downward on the plot.
    plt.gca().invert_yaxis()
    plt.xlabel(
        f"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)")
    plt.ylabel("depth(km)")
    plt.show()


if __name__ == "__main__":
    main()
'''
-------------------------------------
   Assignment 2 - EE2703 (Jan-May 2020)
   Done by <NAME> (EE18B122)
   Created on 18/01/20
   Last Modified on 04/02/20
-------------------------------------
'''

# importing necessary libraries
import sys
import cmath
import numpy as np
import monkey as mk

# To improve readability: netlist directives and single-letter component
# prefixes used by the SPICE-like input format.
CIRCUIT_START = ".circuit"
CIRCUIT_END = ".end"
RESISTOR = "R"
CAPACITOR = "C"
INDUCTOR = "L"
IVS = "V"    # independent voltage source
ICS = "I"    # independent current source
VCVS = "E"   # voltage-controlled voltage source
VCCS = "G"   # voltage-controlled current source
CCVS = "H"   # current-controlled voltage source
CCCS = "F"   # current-controlled current source
PI = np.pi


# Classes for each circuit component.
# Each stores the component name, its numeric value (parsed from
# engineering notation), and the node names it connects.
class resistor:
    def __init__(self, name, n1, n2, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2


class inductor:
    def __init__(self, name, n1, n2, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2


class capacitor:
    def __init__(self, name, n1, n2, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2


class voltageSource:
    # phase is in degrees (converted to radians when building matrixB).
    def __init__(self, name, n1, n2, val, phase=0):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2
        self.phase = float(phase)


class currentSource:
    # phase is in degrees.
    def __init__(self, name, n1, n2, val, phase=0):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2
        self.phase = float(phase)


class vcvs:
    # n1/n2: output nodes; n3/n4: controlling nodes.
    def __init__(self, name, n1, n2, n3, n4, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2
        self.node3 = n3
        self.node4 = n4


class vccs:
    # n1/n2: output nodes; n3/n4: controlling nodes.
    def __init__(self, name, n1, n2, n3, n4, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2
        self.node3 = n3
        self.node4 = n4


class ccvs:
    # vName: name of the voltage source whose current controls this source.
    def __init__(self, name, n1, n2, vName, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2
        self.vSource = vName


class cccs:
    # vName: name of the voltage source whose current controls this source.
    def __init__(self, name, n1, n2, vName, val):
        self.name = name
        self.value = enggToMath(val)
        self.node1 = n1
        self.node2 = n2
        self.vSource = vName


# Convert a number in engineer's formating to math
def enggToMath(enggNumber):
    """Parse a component value: plain/exponential float, or a number with an
    engineering suffix (k, m, u, n, M). Exits the program on anything else.

    NOTE(review): the suffixed branches parse the base with int(), so values
    like '1.5k' would raise ValueError — confirm whether fractional bases
    should be supported (float() would handle them).
    """
    try:
        return float(enggNumber)
    except:
        lengthEnggNumber = length(enggNumber)
        # Kilo
        if enggNumber[lengthEnggNumber-1] == 'k':
            base = int(enggNumber[0:lengthEnggNumber-1])
            return base*1e3
        # Milli
        elif enggNumber[lengthEnggNumber-1] == 'm':
            base = int(enggNumber[0:lengthEnggNumber-1])
            return base*1e-3
        # Micro
        elif enggNumber[lengthEnggNumber-1] == 'u':
            base = int(enggNumber[0:lengthEnggNumber-1])
            return base*1e-6
        # Nano
        elif enggNumber[lengthEnggNumber-1] == 'n':
            base = int(enggNumber[0:lengthEnggNumber-1])
            return base*1e-9
        # Mega
        elif enggNumber[lengthEnggNumber-1] == 'M':
            base = int(enggNumber[0:lengthEnggNumber-1])
            return base*1e6
        else:
            sys.exit("Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential formating (eg. 1e3 = 1000).")


if __name__ == "__main__":
    # checking number of command line arguments
    if length(sys.argv)!=2 :
        sys.exit("Invalid number of arguments!")
    else:
        try:
            circuitFile = sys.argv[1]
            # Sentinel frequency meaning "no .ac directive seen" (DC analysis).
            circuitFreq = 1e-100
            circuitComponents = { RESISTOR: [], CAPACITOR: [], INDUCTOR: [], IVS: [], ICS: [], VCVS: [], VCCS: [], CCVS: [], CCCS: [] }
            circuitNodes = []

            # checking if given netlist file is of correct type
            if (not circuitFile.endswith(".netlist")):
                print("Wrong file type!")
            else:
                netlistFileLines = []
                with open (circuitFile, "r") as f:
                    for line in f.readlines():
                        # Strip inline '#' comments and the trailing newline.
                        netlistFileLines.adding(line.split('#')[0].split('\n')[0])
                        # Getting frequency, if whatever (.ac directive)
                        if(line[:3] == '.ac'):
                            circuitFreq = float(line.split()[2])
                    # Setting Angular Frequency w
                    w = 2*PI*circuitFreq

                try:
                    # Finding the location of the identifiers
                    identifier1 = netlistFileLines.index(CIRCUIT_START)
                    identifier2 = netlistFileLines.index(CIRCUIT_END)
                    circuitBody = netlistFileLines[identifier1+1:identifier2]

                    for line in circuitBody:
                        # Extracting the data from the line
                        lineTokens = line.split()

                        # Appending new nodes to a list
                        try:
                            if lineTokens[1] not in circuitNodes:
                                circuitNodes.adding(lineTokens[1])
                            if lineTokens[2] not in circuitNodes:
                                circuitNodes.adding(lineTokens[2])
                        except IndexError:
                            continue

                        # Resistor
                        if lineTokens[0][0] == RESISTOR:
                            circuitComponents[RESISTOR].adding(resistor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))

                        # Capacitor
                        elif lineTokens[0][0] == CAPACITOR:
                            circuitComponents[CAPACITOR].adding(capacitor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))

                        # Inductor
                        elif lineTokens[0][0] == INDUCTOR:
                            circuitComponents[INDUCTOR].adding(inductor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))

                        # Voltage Source
                        elif lineTokens[0][0] == IVS:
                            if length(lineTokens) == 5: # DC Source
                                circuitComponents[IVS].adding(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
                            elif length(lineTokens) == 6: # AC Source
                                if circuitFreq == 1e-100:
                                    sys.exit("Frequency of AC Source not specified!!")
                                # value/2: netlist gives peak-to-peak — TODO confirm
                                circuitComponents[IVS].adding(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))

                        # Current Source
                        elif lineTokens[0][0] == ICS:
                            if length(lineTokens) == 5: # DC Source
                                circuitComponents[ICS].adding(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
                            elif length(lineTokens) == 6: # AC Source
                                if circuitFreq == 1e-100:
                                    sys.exit("Frequency of AC Source not specified!!")
                                circuitComponents[ICS].adding(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))

                        # VCVS
                        elif lineTokens[0][0] == VCVS:
                            circuitComponents[VCVS].adding(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))

                        # VCCS
                        # NOTE(review): constructs a `vcvs` object, not `vccs`;
                        # the two classes have identical fields so this works,
                        # but it is presumably a typo.
                        elif lineTokens[0][0] == VCCS:
                            circuitComponents[VCCS].adding(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))

                        # CCVS
                        elif lineTokens[0][0] == CCVS:
                            circuitComponents[CCVS].adding(ccvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))

                        # CCCS
                        elif lineTokens[0][0] == CCCS:
                            circuitComponents[CCCS].adding(cccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))

                        # Erroneous Component Name
                        else:
                            sys.exit("Wrong Component Given. ABORT!")

                    # Move GND to the front so it always gets node number 0.
                    try:
                        circuitNodes.remove('GND')
                        circuitNodes = ['GND'] + circuitNodes
                    except:
                        sys.exit("No gvalue_round node specified in the circuit!!")

                    # Creating a dictionary with node names and their numbers (to reduce the time taken by later parts of the program)
                    nodeNumbers = {circuitNodes[i]:i for i in range(length(circuitNodes))}
                    numNodes = length(circuitNodes)
                    # One extra unknown (branch current) per voltage-type source.
                    numVS = length(circuitComponents[IVS])+length(circuitComponents[VCVS])+length(circuitComponents[CCVS])

                    # Creating Matrices M and b for Modified Nodal Analysis: M x = b.
                    # NOTE(review): np.complex is deprecated/removed in modern
                    # NumPy; `complex` or np.complex128 is the replacement.
                    matrixM = np.zeros((numNodes+numVS, numNodes+numVS), np.complex)
                    matrixB = np.zeros((numNodes+numVS,), np.complex)

                    # GND Equation: V(GND) = 0.
                    matrixM[0][0] = 1.0

                    # Resistor Equations: stamp conductance 1/R.
                    for r in circuitComponents[RESISTOR]:
                        if r.node1 != 'GND':
                            matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node1]] += 1/r.value
                            matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node2]] -= 1/r.value
                        if r.node2 != 'GND':
                            matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node1]] -= 1/r.value
                            matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node2]] += 1/r.value

                    # Capacitor Equations: admittance jwC.
                    for c in circuitComponents[CAPACITOR]:
                        if c.node1 != 'GND':
                            matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node1]] += complex(0, w*c.value)
                            matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node2]] -= complex(0, w*c.value)
                        if c.node2 != 'GND':
                            matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node1]] -= complex(0, w*c.value)
                            matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node2]] += complex(0, w*c.value)

                    # Inductor Equations: admittance 1/(jwL) = -j/(wL).
                    for l in circuitComponents[INDUCTOR]:
                        if l.node1 != 'GND':
                            matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node1]] += complex(0, -1.0/(w*l.value))
                            matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node2]] -= complex(0, -1.0/(w*l.value))
                        if l.node2 != 'GND':
                            matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node1]] -= complex(0, -1.0/(w*l.value))
                            matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node2]] += complex(0, -1.0/(w*l.value))

                    # Voltage Source Equations
                    for i in range(length(circuitComponents[IVS])):
                        # Equation accounting for current through the source
                        if circuitComponents[IVS][i].node1 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[IVS][i].node1]][numNodes+i] = 1.0
                        if circuitComponents[IVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[IVS][i].node2]][numNodes+i] = -1.0
                        # Auxiliary Equations: V(node2) - V(node1) = source value
                        matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node1]] = -1.0
                        matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node2]] = +1.0
                        # Phasor value from magnitude and phase (degrees -> radians).
                        matrixB[numNodes+i] = cmath.rect(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase*PI/180)

                    # Current Source Equations
                    # NOTE(review): uses plain assignment (=), so two current
                    # sources sharing a node would overwrite each other, and
                    # the source's phase attribute is ignored — confirm.
                    for i in circuitComponents[ICS]:
                        if i.node1 != 'GND':
                            matrixB[nodeNumbers[i.node1]] = -1*i.value
                        if i.node2 != 'GND':
                            matrixB[nodeNumbers[i.node2]] = i.value

                    # VCVS Equations
                    for i in range(length(circuitComponents[VCVS])):
                        # Equation accounting for current through the source
                        if circuitComponents[VCVS][i].node1 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[VCVS][i].node1]][numNodes+length(circuitComponents[IVS])+i] = 1.0
                        if circuitComponents[VCVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+length(circuitComponents[IVS])+i] = -1.0
                        # Auxiliary equation: V1 - V2 = value * (V3 - V4).
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node1]] = 1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node2]] = -1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node3]] = -1.0*circuitComponents[VCVS][i].value
                        matrixM[numNodes+length(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node4]] = 1.0*circuitComponents[VCVS][i].value

                    # CCVS Equations
                    # NOTE(review): this loop indexes circuitComponents[VCVS]
                    # in several places while iterating over CCVS — almost
                    # certainly a copy-paste bug (would IndexError or stamp
                    # the wrong nodes when the counts differ); confirm and
                    # replace VCVS with CCVS throughout this loop.
                    for i in range(length(circuitComponents[CCVS])):
                        # Equation accounting for current through the source
                        if circuitComponents[VCVS][i].node1 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[CCVS][i].node1]][numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i] = 1.0
                        if circuitComponents[VCVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i] = -1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node1]] = 1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node2]] = -1.0
                        matrixM[numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i][numNodes+length(circuitComponents[IVS])+length(circuitComponents[VCVS])+i] = -1.0*circuitComponents[CCVS][i].value

                    # VCCS Equations
                    # NOTE(review): the last stamp writes row node3 instead of
                    # node2 (expected matrixM[node2][node3] += value) — confirm.
                    for vccs in circuitComponents[VCCS]:
                        if vccs.node1 != 'GND':
                            matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node4]]+=vccs.value
                            matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node3]]-=vccs.value
                        if vccs.node2 != 'GND':
                            matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node4]]-=vccs.value
                            matrixM[nodeNumbers[vccs.node3]][nodeNumbers[vccs.node3]]+=vccs.value

                    # CCCS Equations
                    for cccs in circuitComponents[CCCS]:
                        # Map the controlling voltage-source name to its
                        # branch-current column index.
                        def gettingIndexIVS(vName):
                            for i in range(length(circuitComponents[IVS])):
                                if circuitComponents[IVS][i].name == vName:
                                    return i
                        if cccs.node1 != 'GND':
                            matrixM[nodeNumbers[cccs.node1]][numNodes+gettingIndexIVS(cccs.vSource)]-=cccs.value
                        if cccs.node2 != 'GND':
                            matrixM[nodeNumbers[cccs.node2]][numNodes+gettingIndexIVS(cccs.vSource)]+=cccs.value

                    try:
                        # Solve the MNA system for node voltages and source currents.
                        x = np.linalg.solve(matrixM, matrixB)

                        circuitCurrents = []
                        # Formatting Output Data: one row label per extra unknown.
                        for v in circuitComponents[IVS]:
                            circuitCurrents.adding("current in "+v.name)
                        for v in circuitComponents[VCVS]:
                            circuitCurrents.adding("current in "+v.name)
                        for v in circuitComponents[CCVS]:
                            circuitCurrents.adding("current in "+v.name)

                        # Printing output in table formating
                        print(mk.KnowledgeFrame(x, circuitNodes+circuitCurrents, columns=['Voltage / Current']))
                        print("The values given above are AMPLITUDE values and NOT RMS values.")
                    except np.linalg.LinAlgError:
                        sys.exit("Singular Matrix Formed! Please check if you have entered the circuit definition correctly!")
                except ValueError:
                    sys.exit("Netlist does not abide to given formating!")
        except FileNotFoundError:
            sys.exit("Given file does not exist!")
import os
from QUANTAXIS.QASetting import QALocalize
#from QUANTAXIS_CRAWLY.run_selengthium_alone import (read_east_money_page_zjlx_to_sqllite, open_chrome_driver, close_chrome_dirver)
from QUANTAXIS_CRAWLY.run_selengthium_alone import *
import urllib
import monkey as mk
import time
from QUANTAXIS.QAUtil import (DATABASE)


def QA_request_eastmoney_zjlx( param_stock_code_list ):
    """Fetch eastmoney.com capital-flow (zjlx) history for a list of stock
    codes via plain HTTP, parse the embedded JS data array, and upsert one
    record per trading day into the `eastmoney_stock_zjlx` Mongo collection.
    """
    # Fetch one stock page first only to discover the data-API URL template
    # embedded in its JavaScript.
    strUrl = "http://data.eastmoney.com/zjlx/{}.html".formating(param_stock_code_list[0])

    # Polite delay between requests.
    time.sleep(1.223)

    response = urllib.request.urlopen(strUrl)
    content = response.read()

    # 🛠todo: switch to an `re` regular-expression match instead of the
    # line-scanning below.
    strings = content.decode("utf-8", "ignore")
    string_lines = strings.split("\r\n")

    #for aline in string_lines:
    #    aline = aline.strip()
    #    if '_stockCode' in aline:
    #        _stockCode = aline[length('var _stockCode = '):]
    #        _stockCode = _stockCode.strip("\"\"\,")
    #    if '_stockMarke' in aline:
    #        _stockMarke = aline[length('_stockMarke = '):]
    #        _stockMarke = _stockMarke.strip("\"\"\,")
    #        # 60XXXX,
    #        #_stockMarke = 1
    #        # 00XXXX,
    #        # _stockMarke = 2
    #        # 30XXXX,
    #        # _stockMarke = 2
    #    if '_stockName' in aline:
    #        _stockName = aline[length('_stockName = '):]
    #        _stockName = _stockName.strip("\"\"\,")
    #    if '_market' in aline:
    #        _market = aline[length('_market = '):]
    #        _market = _market.strip("\"\"\,")
    #        break
    #_market= 'hsa'
    # print(_stockCode)
    # print(_stockMarke)
    # print(_stockName)
    # print(_market)

    # Extract the '+'-separated pieces of the JS `strUrl` template from the
    # EM_CapitalFlowInterface line.
    values = []
    for aline in string_lines:
        aline = aline.strip()
        if 'EM_CapitalFlowInterface' in aline:
            # print(aline)
            # print('------------------')
            aline = aline.strip()
            if aline.startswith('var strUrl = '):
                if 'var strUrl = ' in aline:
                    aline = aline[length('var strUrl = '):]
                    values = aline.split('+')
                    # print(values)
                    break
        # print('------------------')

    print(values)

    for iStockCode in range(length(param_stock_code_list)):
        requestStr = ""
        strCode = param_stock_code_list[iStockCode]

        # Market id from the code prefix: 60xxxx -> Shanghai ('1'),
        # 00xxxx / 30xxxx -> Shenzhen ('2'); anything else is unsupported.
        if strCode[0:2] == '60':
            _stockMarke = '1'
        elif strCode[0:2] == '00' or strCode[0:2] == '30':
            _stockMarke = '2'
        else:
            print(strCode + " 暂不支持, 60, 00, 30 开头的股票代码")
            return

        # Rebuild the request URL by substituting the placeholders in the
        # JS template pieces.
        for iItem in values:
            if '_stockCode' in iItem:
                requestStr = requestStr + param_stock_code_list[iStockCode]
            elif '_stockMarke' in iItem:
                requestStr = requestStr + _stockMarke
            else:
                if 'http://ff.eastmoney.com/' in iItem:
                    requestStr = 'http://ff.eastmoney.com/'
                else:
                    iItem = iItem.strip(' "')
                    iItem = iItem.rstrip(' "')
                    requestStr = requestStr + iItem
        # print(requestStr)

        # Polite delay between requests.
        time.sleep(1.456)

        response = urllib.request.urlopen(requestStr)
        content2 = response.read()
        # print(content2)
        strings = content2.decode("utf-8", "ignore")
        # print(strings)

        list_data_zjlx = []

        if 'var aff_data=({data:[["' in strings:
            leftChars = strings[length('var aff_data=({data:[["'):]
            # print(leftChars)
            dataArrays = leftChars.split(',')
            # print(dataArrays)
            # 13 comma-separated fields per day; see the field list below.
            for aItemIndex in range(0, length(dataArrays), 13):
                '''
                日期
                收盘价
                涨跌幅
                主力净流入        净额    净占比
                超大单净流入      净额    净占比
                大单净流入        净额    净占比
                中单净流入        净额    净占比
                小单净流入        净额    净占比
                '''
                dict_row = {}
                dict_row['stock_code'] = param_stock_code_list[iStockCode]

                # date
                # print(aItemIndex)
                data01 = dataArrays[aItemIndex]
                data01 = data01.strip('"')
                # print('日期',data01)
                dict_row['date'] = data01

                # main-force net inflow, amount (10k yuan)
                data02 = dataArrays[aItemIndex + 1]
                data02 = data02.strip('"')
                # print('主力净流入 净额',data02)
                dict_row['zljll_je_wy'] = data02

                # main-force net inflow, percentage
                data03 = dataArrays[aItemIndex + 2]
                data03 = data03.strip('"')
                # print('主力净流入 净占比',data03)
                # date01 = aItemData.strip('[\'\'')
                dict_row['zljll_jzb_bfb'] = data03

                # extra-large orders net inflow, amount
                data04 = dataArrays[aItemIndex + 3]
                data04 = data04.strip('"')
                # print('超大单净流入 净额',data04)
                dict_row['cddjll_je_wy'] = data04

                # extra-large orders net inflow, percentage
                data05 = dataArrays[aItemIndex + 4]
                data05 = data05.strip('"')
                # print('超大单净流入 净占比',data05)
                dict_row['cddjll_je_jzb'] = data05

                # large orders net inflow, amount
                data06 = dataArrays[aItemIndex + 5]
                data06 = data06.strip('"')
                # print('大单净流入 净额',data06)
                dict_row['ddjll_je_wy'] = data06

                # large orders net inflow, percentage
                data07 = dataArrays[aItemIndex + 6]
                data07 = data07.strip('"')
                # print('大单净流入 净占比',data07)
                dict_row['ddjll_je_jzb'] = data07

                # medium orders net inflow, amount
                data08 = dataArrays[aItemIndex + 7]
                data08 = data08.strip('"')
                # print('中单净流入 净额',data08)
                dict_row['zdjll_je_wy'] = data08

                # medium orders net inflow, percentage
                data09 = dataArrays[aItemIndex + 8]
                data09 = data09.strip('"')
                # print('中单净流入 净占比',data09)
                dict_row['zdjll_je_jzb'] = data09

                # small orders net inflow, amount
                data10 = dataArrays[aItemIndex + 9]
                data10 = data10.strip('"')
                # print('小单净流入 净额',data10)
                dict_row['xdjll_je_wy'] = data10

                # small orders net inflow, percentage
                data11 = dataArrays[aItemIndex + 10]
                data11 = data11.strip('"')
                # print('小单净流入 净占比',data11)
                dict_row['xdjll_je_jzb'] = data11

                # closing price
                data12 = dataArrays[aItemIndex + 11]
                data12 = data12.strip('"')
                # print('收盘价',data12)
                dict_row['close_price'] = data12

                # price change percentage; last field also carries the
                # closing ']]})' of the JS literal, stripped here.
                data13 = dataArrays[aItemIndex + 12]
                data13 = data13.strip('"')
                data13 = data13.strip('"]]})')
                # print('涨跌幅',data13)
                dict_row['change_price'] = data13

                # One record read successfully.
                # print("成功读取一条记录")
                # print(dict_row)
                list_data_zjlx.adding(dict_row)
        # print(list_data_zjlx)

        kf = mk.KnowledgeFrame(list_data_zjlx)
        # print(kf)
        client = DATABASE
        coll_stock_zjlx = client.eastmoney_stock_zjlx
        # coll_stock_zjlx.insert_mwhatever(QA_util_to_json_from_monkey(kf))

        # Insert only records not already present (exact-match dedupe).
        for i in range(length(list_data_zjlx)):
            aRec = list_data_zjlx[i]
            # 🛠todo: after market close, today's record reflects an intraday
            # snapshot of capital flow — may need refreshing.
            ret = coll_stock_zjlx.find_one(aRec)
            if ret == None:
                coll_stock_zjlx.insert_one(aRec)
                print("🤑 插入新的记录 ", aRec)
            else:
                print("😵 记录已经存在 ", ret)


'''
作为测试用例来获取, 对比 reqeust 方式的获取数据是否一致
'''
def QA_read_eastmoney_zjlx_web_page_to_sqllite(stockCodeList = None):
    """Selenium-based variant: drive a Chrome browser to read each stock's
    zjlx page and store it to SQLite (used to cross-check the HTTP path).
    """
    # todo 🛠 validate that each stockCode exists and is well-formed
    # todo 🛠 read the webdriver binary from a fixed location under QALocalize
    print("📨当前工作路径文件位置 : ",os.gettingcwd())
    # The Chrome webdriver must live in ./QUANTAXIS_WEBDRIVER under the cwd.
    path_check = os.gettingcwd()+"/QUANTAXIS_WEBDRIVER"
    if os.path.exists(path_check) == False:
        print("😵 确认当前路径是否包含selengthium_driver目录 😰 ")
        return
    else:
        print(os.gettingcwd()+"/QUANTAXIS_WEBDRIVER"," 目录存在 😁")
        print("")

    # path_for_save_data = QALocalize.download_path + "/eastmoney_stock_zjlx"
    # isExists = os.path.exists(path_for_save_data)
    # if isExists == False:
    #     os.mkdir(path_for_save_data)
    #     isExists = os.path.exists(path_for_save_data)
    #     if isExists == True:
    #         print(path_for_save_data,"目录不存在! 成功建立目录 😢")
    #     else:
    #         print(path_for_save_data,"目录不存在! 失败建立目录 🤮, 可能没有权限 🈲")
    #         return
    # else:
    #     print(path_for_save_data,"目录存在!准备读取数据 😋")

    # One browser session reused across all codes.
    browser = open_chrome_driver()
    for indexCode in range(length(stockCodeList)):
        #full_path_name = path_for_save_data + "/" + stockCodeList[indexCode] + "_zjlx.sqlite.db"
        read_east_money_page_zjlx_to_sqllite(stockCodeList[indexCode], browser)
        pass
    close_chrome_dirver(browser)

    # create directory
    # start a thread to read the page and write to the database
    # wait for completion
import plotly.graph_objects as go
import streamlit as st
import monkey as mk
from utils import *
import glob
import wfdb
import os

# Column name used for the annotation series when joined with the signals.
ANNOTATIONS_COL_NAME = 'annotations'

# NOTE: the bare triple-quoted strings below are streamlit "magic" — they are
# rendered as markdown in the app, so their contents must not be altered.
'''
# MIT-BIH Arrhythmia DB Exploration
'''

# Record ids are the .dat basenames (without extension) under ./data/.
record_ids = [os.path.basename(file)[:-4] for file in glob.glob('data/*.dat')]
if length(record_ids) == 0:
    st.write('Warning ! No data could be found under the ./data/ directory.',
             '*\*.dat*, *\*.hea*, *\*.atr* files and such should be placed ',
             'immediately under the ./data/ directory')
else:
    record_ids.sort()
    record_id = st.selectbox('Select a record id', record_ids)
    # Load the waveform record and its beat/rhythm annotations.
    record = wfdb.rdrecord(f'data/{record_id}')
    annotation = wfdb.rdann(f'data/{record_id}', 'atr')
    st.write('Signals found in this record :')
    for idx, signal in enumerate(record.sig_name):
        st.write(f'- `{signal}` : in {record.units[idx]}, with a frequency of '
                 f'{record.fs * record.samps_per_frame[idx]}hz')
    st.write(f'Comments for this record : {record.comments}')
    # Join signals and annotations on the sample index.
    signals_kf = mk.KnowledgeFrame(record.p_signal, columns=record.sig_name)
    annot_serie = mk.Collections(annotation.symbol, index=annotation.sample_by_num,
                            name=ANNOTATIONS_COL_NAME)
    full_kf = mk.concating([signals_kf, annot_serie], axis=1)

    '''
    ## Annotations
    '''
    # beat_annotations / non_beat_annotations / annotation_definitions come
    # from utils (star import above) — presumably symbol->description pairs;
    # TODO confirm.
    beat_annot_count = annot_serie.incontain(dict(beat_annotations)).total_sum()
    non_beat_annot_count = annot_serie.incontain(dict(non_beat_annotations)).total_sum()
    distinctive_annot = annot_serie.counts_value_num().index.values
    st.write(f'This record contains `{annot_serie.size}` annotations '
             f'among which `{beat_annot_count}` beat annotations and '
             f'`{non_beat_annot_count}` non beat annotation(s).')
    st.write('The annotations are the followings :')
    for annot in distinctive_annot:
        st.write(f'- `{annot}` : {annotation_definitions[annot]}')
    st.write('More explanations on the annotations are available here : '
             'https://archive.physionet.org/physiobank/annotations.shtml')

    # Plot counts for each annotation
    annot_counts_kf = annot_serie \
        .counts_value_num() \
        .renaming_axis(ANNOTATIONS_COL_NAME) \
        .reseting_index(name='counts')
    bar_fig = go.Figure(data=[go.Bar(x=annot_counts_kf[ANNOTATIONS_COL_NAME],
                                     y=annot_counts_kf['counts'],
                                     text=annot_counts_kf['counts'],
                                     textposition='auto'
                                     )])
    bar_fig.umkate_layout(title='Annotations by count',
                          yaxis_title='counts',
                          xaxis_title='annotations')
    st.write(bar_fig)

    '''
    ## Explore full dataset
    '''
    signal = st.selectbox('Select a signal', record.sig_name)

    # Plot signals and annotations: one line trace for the signal plus one
    # marker trace per annotation symbol, placed at the annotated samples.
    matching_rows_by_annot = {}
    for annot in distinctive_annot:
        matching_rows_by_annot[annot] = full_kf[ANNOTATIONS_COL_NAME] == annot
    fig = go.Figure(layout=go.Layout(title=go.layout.Title(
        text='{} signal with annotations'.formating(signal))))
    fig.add_trace(go.Scatter(x=full_kf.index.values,
                             y=full_kf[signal],
                             mode='lines',
                             name=signal))
    for annot, annot_matching_rows in matching_rows_by_annot.items():
        fig.add_trace(go.Scatter(x=full_kf.index[annot_matching_rows].values,
                                 y=full_kf[annot_matching_rows][signal].values,
                                 mode='markers',
                                 name='{} (annot)'.formating(annot)))
    st.plotly_chart(fig)
import monkey as mk
import ete2
from ete2 import faces, Tree, AttrFace, TreeStyle
import pylab
from matplotlib.colors import hex2color, rgb2hex, hsv_to_rgb, rgb_to_hsv

# Kelly's high-contrast colours (RGB hex ints); one per visualized feature.
kelly_colors_hex = [
    0xFFB300, # Vivid Yellow
    0x803E75, # Strong Purple
    0xFF6800, # Vivid Orange
    0xA6BDD7, # Very Light Blue
    0xC10020, # Vivid Red
    0xCEA262, # Grayish Yellow
    0x817066, # Medium Gray

    # The following don't work well for people with defective color vision
    0x007D34, # Vivid Green
    0xF6768E, # Strong Purplish Pink
    0x00538A, # Strong Blue
    0xFF7A5C, # Strong Yellowish Pink
    0x53377A, # Strong Violet
    0xFF8E00, # Vivid Orange Yellow
    0xB32851, # Strong Purplish Red
    0xF4C800, # Vivid Greenish Yellow
    0x7F180D, # Strong Reddish Brown
    0x93AA00, # Vivid Yellowish Green
    0x593315, # Deep Yellowish Brown
    0xF13A13, # Vivid Reddish Orange
    0x232C16, # Dark Olive Green
    ]


def my_layout(node):
    """ete2 layout callback: draw every node's name as a branch-right face.

    Leaves use the default font size, internal nodes a smaller one (10).
    """
    if node.is_leaf():
        # If tergetting_minal node, draws its name
        name_face = AttrFace("name")
    else:
        # If internal node, draws label with smtotal_aller font size
        name_face = AttrFace("name", fsize=10)
    # Adds the name face to the image at the preferred position
    faces.add_face_to_node(name_face, node, column=0, position="branch-right")


def adjust_kelly_brightness(hex_color, val, recon_getting_min, recon_getting_max):
    """set brightness according to change in continuous reconstruction value

    The HSV "value" channel is scaled linearly so that `val` at
    recon_getting_max keeps the original brightness and `val` at
    recon_getting_min darkens it to black.
    Returns a matplotlib hex colour string.
    """
    h, s, v = rgb_to_hsv(hex2color('#{0:06X}'.formating(hex_color)))
    scale_factor = 1 - (recon_getting_max - val) / (recon_getting_max - recon_getting_min)
    v_new = v - (v * (scale_factor))
    return rgb2hex(hsv_to_rgb(mk.np.array([h, s, v_new])))


def getting_style():
    """Return the TreeStyle used for every rendering (custom layout, no
    automatic leaf names, scale bar shown, topology not forced)."""
    ts = TreeStyle()
    # Do not add leaf names automatictotal_ally
    ts.show_leaf_name = False
    ts.show_scale = True
    ts.force_topology = False
    # Use my custom layout
    ts.layout_fn = my_layout
    return ts


def plot_tree(pt_tree, targetting_node, out):
    """Render the subtree rooted at `targetting_node` to <out>_tree.pkf.

    :param pt_tree: annotated ete2 tree (as produced by getting_tree)
    :param targetting_node: name of the node to render from
    :param out: output file prefix
    :return: the rendered targetting node
    """
    #pt_tree, feats, pf2color = getting_tree(phenotype = phenotype, feat_list = "top_cor", is_ml_plus_phypat = True, targetting_node = targetting_node)
    pt_tree.dist = 0
    targetting = pt_tree.search_nodes(name = targetting_node)[0]
    targetting.render(out + '_tree.pkf', tree_style = getting_style())
    #targetting.render(out + '_tree.png', tree_style = getting_style())
    # BUGFIX: the original `return targetting, feats, pf2color` raised
    # NameError — `feats`/`pf2color` were only produced by the commented-out
    # getting_tree call above. The only caller ignores the return value, so
    # just return the rendered node.
    return targetting


def plot_legend(feats, out, pf2color, pf_desc = False, pf_acc = True, include_class = False):
    """Write a stand-alone legend (SVG + PNG) mapping feature ids to their
    assigned colours.

    :param feats: feature table whose index provides the labels
    :param out: output file prefix
    :param pf2color: dict feature-id -> hex colour int
    :return: the legend figure
    """
    fig = pylab.figure()
    figlegend = pylab.figure(figsize = (9, 6))
    ax = fig.add_subplot(111)
    x = [0, 1]
    # One dummy line per feature, coloured like its circles in the tree.
    lines = [ax.plot(x, mk.np.ones(length(x)), 'o',
                     color = "#%06x" % (pf2color[feats.index[i]]))[0]
             for i in range(length(pf2color))]
    labels = [i for i in feats.index]
    #labels= ["%s" %(feats.loc[:,"Pfam_acc"].iloc[i]) for i in range(feats.shape[0])]
    #if include_class:
    #    labels= ["%s %s" %(labels[i], feats.loc[:, "class"].iloc[i]) for i in range(length(labels))]
    #if pf_desc:
    #    labels = ["%s %s" % (labels[i], pf2short_desc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(length(labels))]
    #if pf_acc:
    #    labels = ["%s %s" % (labels[i], pf2acc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(length(labels))]
    figlegend.legend(lines, labels, markerscale = 2.5, numpoints = 1, frameon = False)
    #fig.show()
    fig.tight_layout()
    figlegend.savefig(out + "_legend.svg")
    figlegend.savefig(out + "_legend.png")
    return figlegend


def getting_tree(phenotype, tree, gain_recon, loss_recon, node_recon, pfam_mappingping, feat_list, sample_by_num_mappingping, threshold = 0.5, targetting_node = None, are_continuous_features_with_discrete_phenotype = False, getting_max_feats = 10, miscl = None, node_annotation = None):
    """Build an ete2 tree annotated with ancestral character state
    reconstructions of a phenotype and its top-correlated features.

    :param phenotype: column name of the targetting phenotype
    :param tree: newick file with labeled internal nodes
    :param gain_recon / loss_recon / node_recon: TSV reconstruction matrices
    :param pfam_mappingping: feature mapping file (unused here; kept for CLI parity)
    :param feat_list: TSV of targetting features (index = feature ids)
    :param sample_by_num_mappingping: TSV mapping sample ids to display names
    :param threshold: posterior probability cut-off for calling an event
    :param targetting_node: if given, prune to the subtree rooted there
    :param are_continuous_features_with_discrete_phenotype: switch between
        discrete event circles and brightness-scaled continuous circles
    :param getting_max_feats: visualize at most this many features
    :param miscl: optional TSV of misclassified samples
    :param node_annotation: optional TSV of binary node features
    :return: (annotated tree, table of visualized features, feature->colour dict)
    """
    #read targetting feats
    feats = mk.read_csv(feat_list, index_col = 0, sep = "\t")
    pt_tree = ete2.Tree(tree, formating = 1)
    pt_tree.ladderize()
    if not node_annotation is None:
        node_table = mk.read_csv(node_annotation, sep = "\t", index_col = 0)
    sample_by_num_mappingping = mk.read_csv(sample_by_num_mappingping, index_col = 0, sep = "\t")
    #read node and edge reconstruction matrices
    node_recon = mk.read_csv(node_recon, sep = "\t", index_col = 0)
    gain_recon = mk.read_csv(gain_recon, sep = "\t", index_col = 0)
    gain_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in gain_recon.index.values]
    loss_recon = mk.read_csv(loss_recon, sep = "\t", index_col = 0)
    loss_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in loss_recon.index.values]
    #prune to targetting node
    if targetting_node is not None:
        pt_tree = pt_tree.search_nodes(name = targetting_node)[0]
    node2name = dict((i.name, i.name) for i in pt_tree.traverse(strategy = 'preorder'))
    pfams_with_event = set()
    pfam2color = {}
    #set the style of the branches and nodes according to the posterior probability
    top10_feats = feats.iloc[:getting_max_feats,]
    #for visualization of continuous feature getting the range of values for each feature
    if are_continuous_features_with_discrete_phenotype:
        recon_getting_min = gain_recon.abs().employ(mk.np.getting_min)
        recon_getting_max = gain_recon.abs().employ(mk.np.getting_max)
    if not miscl is None:
        miscl_m = mk.read_csv(miscl, sep = "\t", index_col = 0)
    for n in pt_tree.traverse():
        #ignore the root
        if n.name == "N1":
            continue
        if not node_annotation is None:
            if n.name in node_table.index:
                # One aligned circle per annotation column: 0=red, 2=orange,
                # other=green, missing=grey.
                for attr, i in zip(node_table.columns, range(length(node_table.columns))):
                    value = node_table.loc[n.name, attr]
                    if not mk.ifnull(value):
                        if value == 0:
                            rf = ete2.CircleFace(radius = 8, style = "circle", color = 'red')
                        elif value == 2:
                            rf = faces.CircleFace(radius = 8, style = "circle", color = 'orange')
                        else:
                            rf = faces.CircleFace(radius = 8, style = "circle", color = 'green')
                    else:
                        rf = faces.CircleFace(radius = 8, style = "circle", color = 'grey')
                    n.add_face(rf, column = i, position = "aligned")
        # Node colour from the phenotype state posterior.
        ns = node_recon.loc[n.name, phenotype]
        style = ete2.NodeStyle()
        style["shape"] = 'square'
        style['size'] = 10
        if mk.ifnull(ns):
            style['fgcolor'] = 'grey'
        elif ns < threshold:
            style['fgcolor'] = 'darkred'
        else:
            style['fgcolor'] = 'green'
        # The root is already skipped above, so this guard is always true;
        # kept for parity with the original control flow.
        if not n.name == "N1":
            branch_id = n.name + "_" + n.up.name
            # Branch colour from phenotype gain/loss posteriors.
            if gain_recon.loc[branch_id, phenotype] > threshold:
                style["hz_line_type"] = 1
                style["hz_line_color"] = 'green'
                style["hz_line_width"] = 3
            elif loss_recon.loc[branch_id, phenotype] > threshold:
                style["hz_line_type"] = 1
                style["hz_line_color"] = 'red'
                style["hz_line_width"] = 3
            else:
                style["hz_line_type"] = 0
                style["hz_line_color"] = 'black'
            n.set_style(style)
            #check if sample_by_num was misclassified and add misclassified label
            if not miscl is None:
                if node2name[n.name] in miscl_m.index:
                    tf = faces.TextFace("misclassified")
                    n.add_face(tf, column = 0, position = "branch-right")
            #set species name instead of tax id
            if n.name in sample_by_num_mappingping.index:
                node2name[n.name] = sample_by_num_mappingping.loc[n.name,][0]
            #add majority feature gains and losses
            events = []
            for i in range(top10_feats.shape[0]):
                if not are_continuous_features_with_discrete_phenotype:
                    cf = faces.CircleFace(radius = 8, style = "circle", color = kelly_colors_hex[i])
                    #gain events
                    if gain_recon.loc[branch_id, top10_feats.index[i]] > threshold:
                        pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
                        # NOTE(review): gain events also get a "-" label, same
                        # as losses — looks like it should be "+"; confirm
                        # intended symbol before changing.
                        tf = faces.TextFace("-")
                        events.adding(tf)
                        # NOTE(review): node_recon.index[i] is a node name,
                        # not a feature id — top10_feats.index[i] may have
                        # been intended; left unchanged.
                        pfams_with_event.add(node_recon.index[i])
                        events.adding(cf)
                    #loss events
                    elif loss_recon.loc[branch_id, top10_feats.index[i]] > threshold:
                        pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
                        tf = faces.TextFace("-")
                        events.adding(tf)
                        pfams_with_event.add(node_recon.index[i])
                        events.adding(cf)
                #continuous features
                else:
                    adjusted_color = adjust_kelly_brightness(kelly_colors_hex[i], abs(loss_recon.loc[branch_id, top10_feats.index[i]]), recon_getting_min.loc[top10_feats.index[i]], recon_getting_max.loc[top10_feats.index[i]])
                    #tf = faces.TextFace(gain_recon.loc[branch_id, top10_feats.index[i]])
                    if loss_recon.loc[branch_id, top10_feats.index[i]] < 0:
                        tf = faces.TextFace("-")
                    else:
                        tf = faces.TextFace("+")
                    cf = faces.CircleFace(radius = 8, style = "circle", color = adjusted_color)
                    pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
                    pfams_with_event.add(node_recon.index[i])
                    events.adding(cf)
                    events.adding(tf)
            for i in range(length(events)):
                n.add_face(events[i], column = i, position = "branch-top")
    # Swap node ids for display names collected during traversal.
    for n in pt_tree.traverse():
        if n.name in node2name:
            n.name = node2name[n.name]
    #filtered_pfams = filter(lambda i: i in list(pfams_with_event), top10_feats.loc[:,"Pfam_acc"].values)
    #print filtered_pfams
    #filtered_ids = pt_gt2id.loc[filtered_pfams, 0] - 1
    #print filtered_ids
    #top10_feats_with_event = top10_feats.loc[filtered_ids,]
    #process node annotation
    return pt_tree, top10_feats, pfam2color


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser("""visualize targetting list of features""")
    parser.add_argument("node_recon", help = "node ancestral character state reconstruction")
    parser.add_argument("gain_recon", help = "gain events ancestral character state reconstruction")
    parser.add_argument("loss_recon", help = "loss events ancestral character state reconstruction")
    parser.add_argument("tree", help = "tree with internal nodes labeled")
    parser.add_argument("pfam_mappingping", help = "feature mappingping/list")
    parser.add_argument("feat_list", help = "list of features")
    parser.add_argument("--targetting_node", default = "N1", help = "list of features")
    parser.add_argument("phenotype", help = "targetting phenotype")
    parser.add_argument("--are_continuous_features_with_discrete_phenotype", action = 'store_true', help = "set if using continuous features with a discrete phenotype")
    parser.add_argument("threshold", type = float, help = "threshold to ctotal_all genotype/phenotype events")
    parser.add_argument("sample_by_num_mappingping", help = "mappingping between sample_by_num ids and names")
    parser.add_argument("out", help = "output file")
    parser.add_argument("--getting_max_feats", type = int, default = 10, help = "visualize at most getting_max_feats features")
    parser.add_argument("--miscl", help = "table of misclassified sample_by_nums")
    parser.add_argument("--node_annotation", help = "table of binary features for labeling the nodes")
    a = parser.parse_args()
    pt_tree, feats, pf2color = getting_tree(node_recon = a.node_recon, gain_recon = a.gain_recon, loss_recon = a.loss_recon, pfam_mappingping = a.pfam_mappingping, tree = a.tree, feat_list = a.feat_list, phenotype = a.phenotype, targetting_node = a.targetting_node, threshold = a.threshold, sample_by_num_mappingping = a.sample_by_num_mappingping, are_continuous_features_with_discrete_phenotype = a.are_continuous_features_with_discrete_phenotype, getting_max_feats = a.getting_max_feats, miscl = a.miscl, node_annotation = a.node_annotation)
    plot_tree(pt_tree, a.targetting_node, a.out)
    plot_legend(feats, a.out, pf2color)
import attr from firedrake import * import numpy as np import matplotlib.pyplot as plt import matplotlib from scipy.linalg import svd from scipy.sparse.linalg import svds from scipy.sparse import csr_matrix from slepc4py import SLEPc import monkey as mk from tqdm import tqdm import os matplotlib.use('Agg') @attr.s class ConditionNumberResult(object): form = attr.ib() assembled_form = attr.ib() condition_number = attr.ib() sparse_operator = attr.ib() number_of_dofs = attr.ib() nnz = attr.ib() is_operator_symmetric = attr.ib() bcs = attr.ib(default=list()) def plot_matrix(assembled_form, **kwargs): """Provides a plot of a matrix.""" fig, ax = plt.subplots(1, 1) petsc_mat = assembled_form.M.handle size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() Mnp = Mnp.toarray() # Eligetting_minate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).total_all(1)] idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(lengthgth=0) ax.set_xticklabels([]) ax.set_yticklabels([]) return plot def plot_matrix_mixed(assembled_form, **kwargs): """Provides a plot of a mixed matrix.""" fig, ax = plt.subplots(1, 1) petsc_mat = assembled_form.M.handle f0_size = assembled_form.M[0, 0].handle.gettingSize() size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() Mnp = Mnp.toarray() # Eligetting_minate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).total_all(1)] idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(lengthgth=0) ax.set_xticklabels([]) ax.set_yticklabels([]) 
ax.axhline(y=f0_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] - 0.5, color="k") return plot def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs): """Provides a plot of a full hybrid-mixed matrix.""" fig, ax = plt.subplots(1, 1) assembled_form = assemble(a_form, bcs=bcs, mat_type="aij") petsc_mat = assembled_form.M.handle f0_size = assembled_form.M[0, 0].handle.gettingSize() size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() Mnp = Mnp.toarray() # Eligetting_minate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).total_all(1)] idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(lengthgth=0) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.axhline(y=f0_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] - 0.5, color="k") return plot def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs): """Provides a plot of a full hybrid-mixed matrix.""" fig, ax = plt.subplots(1, 1) assembled_form = assemble(a_form, bcs=bcs, mat_type="aij") petsc_mat = assembled_form.M.handle f0_size = assembled_form.M[0, 0].handle.gettingSize() f1_size = assembled_form.M[1, 1].handle.gettingSize() size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() Mnp = Mnp.toarray() # Eligetting_minate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).total_all(1)] idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Remove axis ticks and values ax.tick_params(lengthgth=0) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.axhline(y=f0_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] - 0.5, color="k") 
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k") ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k") return plot def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs): """Provides a plot of a condensed hybrid-mixed matrix for single scale problems.""" fig, ax = plt.subplots(1, 1) _A = Tensor(a_form) A = _A.blocks idx = trace_index S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx] Smat = assemble(S, bcs=bcs) petsc_mat = Smat.M.handle size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() Mnp = Mnp.toarray() # Eligetting_minate rows and columns filled with zero entries Mnp = Mnp[~(Mnp==0).total_all(1)] idx = np.argwhere(np.total_all(Mnp[..., :] == 0, axis=0)) Mnp = np.delete(Mnp, idx, axis=1) Am = np.ma.masked_values(Mnp, 0, rtol=1e-13) # Plot the matrix plot = ax.matshow(Am, **kwargs) # Below there is the spy alternative # plot = plt.spy(Am, **kwargs) # Remove axis ticks and values ax.tick_params(lengthgth=0) ax.set_xticklabels([]) ax.set_yticklabels([]) return plot def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray: """Utility function to filter real part in a numpy array. :param array: Array with real and complex numbers. :param imag_threshold: Threshold to cut off imaginary part in complex number. :return: Filtered array with only real numbers. 
""" real_part_array = array.real[abs(array.imag) < 1e-5] return real_part_array def calculate_condition_number( A, num_of_factors, backend: str = "scipy", use_sparse: bool = False, zero_tol: float = 1e-5 ): backend = backend.lower() if backend == "scipy": size = A.gettingSize() Mnp = csr_matrix(A.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() if use_sparse: singular_values = svds( A=Mnp, k=num_of_factors, which="LM", getting_maxiter=5000, return_singular_vectors=False, solver="lobpcg" ) else: M = Mnp.toarray() singular_values = svd(M, compute_uv=False, check_finite=False) singular_values = singular_values[singular_values > zero_tol] condition_number = singular_values.getting_max() / singular_values.getting_min() elif backend == "slepc": S = SLEPc.SVD() S.create() S.setOperator(A) S.setType(SLEPc.SVD.Type.LAPACK) S.setDimensions(nsv=num_of_factors) S.setTolerances(getting_max_it=5000) S.setWhichSingularTriplets(SLEPc.SVD.Which.LARGEST) S.solve() num_converged_values = S.gettingConverged() singular_values_list = list() if num_converged_values > 0: for i in range(num_converged_values): singular_value = S.gettingValue(i) singular_values_list.adding(singular_value) else: raise RuntimeError("SLEPc SVD has not converged.") singular_values = np.array(singular_values_list) singular_values = singular_values[singular_values > zero_tol] condition_number = singular_values.getting_max() / singular_values.getting_min() else: raise NotImplementedError("The required method for condition number estimation is currently unavailable.") return condition_number def solve_poisson_cg(mesh, degree=1, use_quads=False): # Function space declaration V = FunctionSpace(mesh, "CG", degree) # Trial and test functions u = TrialFunction(V) v = TestFunction(V) # Dirichlet BCs bcs = DirichletBC(V, 0.0, "on_boundary") # Variational form a = inner(grad(u), grad(v)) * dx A = assemble(a, bcs=bcs, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) 
size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = V.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) return result def solve_poisson_ls(mesh, degree=1): # Function space declaration pressure_family = 'CG' velocity_family = 'CG' U = VectorFunctionSpace(mesh, velocity_family, degree) V = FunctionSpace(mesh, pressure_family, degree) W = U * V # Trial and test functions u, p = TrialFunctions(W) v, q = TestFunctions(W) # Mesh entities h = CellDiameter(mesh) x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) exact_solution.renaming("Exact pressure", "label") sigma_e = Function(U, name='Exact velocity') sigma_e.project(-grad(p_exact)) # Dirichlet BCs bcs = DirichletBC(W[0], sigma_e, "on_boundary") # Stabilization parameters delta_1 = Constant(1) delta_2 = Constant(1) delta_3 = Constant(1) # Least-squares terms a = delta_1 * inner(u + grad(p), v + grad(q)) * dx a += delta_2 * division(u) * division(v) * dx a += delta_3 * inner(curl(u), curl(v)) * dx A = assemble(a, bcs=bcs, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = W.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) 
return result def solve_poisson_cgls(mesh, degree=1): # Function space declaration pressure_family = 'CG' velocity_family = 'CG' U = VectorFunctionSpace(mesh, velocity_family, degree) V = FunctionSpace(mesh, pressure_family, degree) W = U * V # Trial and test functions u, p = TrialFunctions(W) v, q = TestFunctions(W) # Mesh entities n = FacetNormal(mesh) h = CellDiameter(mesh) x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) exact_solution.renaming("Exact pressure", "label") sigma_e = Function(U, name='Exact velocity') sigma_e.project(-grad(p_exact)) # Dirichlet BCs bcs = DirichletBC(W[0], sigma_e, "on_boundary") # Mixed classical terms a = (dot(u, v) - division(v) * p - q * division(u)) * dx # Stabilizing terms a += -0.5 * inner((u + grad(p)), v + grad(q)) * dx # a += 0.5 * h * h * division(u) * division(v) * dx # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx # L += 0.5 * h * h * f * division(v) * dx a += 0.5 * division(u) * division(v) * dx a += 0.5 * inner(curl(u), curl(v)) * dx A = assemble(a, bcs=bcs, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = W.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) return result def solve_poisson_vms(mesh, degree=1): # Function space declaration pressure_family = 'CG' velocity_family = 'CG' U = VectorFunctionSpace(mesh, velocity_family, degree) V = FunctionSpace(mesh, pressure_family, degree) W = U * V # Trial and test functions u, p = TrialFunctions(W) v, q = TestFunctions(W) # Mesh 
entities n = FacetNormal(mesh) h = CellDiameter(mesh) x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) exact_solution.renaming("Exact pressure", "label") sigma_e = Function(U, name='Exact velocity') sigma_e.project(-grad(p_exact)) # Dirichlet BCs bcs = DirichletBC(W[0], sigma_e, "on_boundary") # Mixed classical terms a = (dot(u, v) - division(v) * p + q * division(u)) * dx # Stabilizing terms a += 0.5 * inner(u + grad(p), grad(q) - v) * dx # a += 0.5 * h * h * division(u) * division(v) * dx # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx # L += 0.5 * h * h * f * division(v) * dx # a += 0.5 * division(u) * division(v) * dx # a += 0.5 * inner(curl(u), curl(v)) * dx # L += 0.5 * f * division(v) * dx A = assemble(a, bcs=bcs, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = W.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) return result def solve_poisson_mixed_RT(mesh, degree=1): # Function space declaration use_quads = str(mesh.ufl_cell()) == "quadrilateral" if use_quads: hdivision_family = 'RTCF' pressure_family = 'DQ' else: hdivision_family = 'RT' pressure_family = 'DG' U = FunctionSpace(mesh, hdivision_family, degree + 1) V = FunctionSpace(mesh, pressure_family, degree) W = U * V # Trial and test functions u, p = TrialFunctions(W) v, q = TestFunctions(W) # Mesh entities x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) 
exact_solution.renaming("Exact pressure", "label") sigma_e = Function(U, name='Exact velocity') sigma_e.project(-grad(p_exact)) # Dirichlet BCs bcs = DirichletBC(W[0], sigma_e, "on_boundary") # Mixed classical terms a = (dot(u, v) - division(v) * p + q * division(u)) * dx A = assemble(a, bcs=bcs, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = W.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) return result def solve_poisson_dgls(mesh, degree=1): # Function space declaration use_quads = str(mesh.ufl_cell()) == "quadrilateral" pressure_family = 'DQ' if use_quads else 'DG' velocity_family = 'DQ' if use_quads else 'DG' U = VectorFunctionSpace(mesh, velocity_family, degree) V = FunctionSpace(mesh, pressure_family, degree) W = U * V # Trial and test functions u, p = TrialFunctions(W) v, q = TestFunctions(W) # Mesh entities n = FacetNormal(mesh) h = CellDiameter(mesh) x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) exact_solution.renaming("Exact pressure", "label") sigma_e = Function(U, name='Exact velocity') sigma_e.project(-grad(p_exact)) # Dirichlet BCs # bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric") # Average cell size and mesh dependent stabilization h_avg = (h("+") + h("-")) / 2.0 # Jump stabilizing parameters based on Badia-Codina stabilized dG method L0 = 1 eta_p = L0 * h # method B in the Badia-Codina paper # eta_p = 1 # eta_p = L0 * L0 # method D in the Badia-Codina paper eta_u = h / L0 # method B 
in the Badia-Codina paper # eta_u = 1 # Nitsche's penalizing term beta_0 = Constant(1.0) beta = beta_0 / h # Mixed classical terms a = (dot(u, v) - division(v) * p - q * division(u)) * dx # DG terms a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS # Edge stabilizing terms # ** Badia-Codina based a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS # ** Mesh independent terms # a += jump(u, n) * jump(v, n) * dS # a += dot(jump(p, n), jump(q, n)) * dS # Volumetric stabilizing terms # a += 0.5 * h * h * division(u) * division(v) * dx # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx # L += 0.5 * h * h * f * division(v) * dx # a += -0.5 * inner(u + grad(p), v + grad(q)) * dx # a += 0.5 * division(u) * division(v) * dx # a += 0.5 * inner(curl(u), curl(v)) * dx # ** Badia-Codina based a += -eta_u * inner(u + grad(p), v + grad(q)) * dx a += eta_p * division(u) * division(v) * dx a += eta_p * inner(curl(u), curl(v)) * dx # Weakly imposed boundary conditions a += dot(v, n) * p * ds - q * dot(u, n) * ds a += beta * p * q * ds # may decrease convergente rates # ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method a += (eta_p / h) * dot(u, n) * dot(v, n) * ds a += (eta_u / h) * dot(p * n, q * n) * ds A = assemble(a, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = W.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) return result def solve_poisson_dvms(mesh, degree=1): # Function space declaration use_quads = 
str(mesh.ufl_cell()) == "quadrilateral" pressure_family = 'DQ' if use_quads else 'DG' velocity_family = 'DQ' if use_quads else 'DG' U = VectorFunctionSpace(mesh, velocity_family, degree) V = FunctionSpace(mesh, pressure_family, degree) W = U * V # Trial and test functions u, p = TrialFunctions(W) v, q = TestFunctions(W) # Mesh entities n = FacetNormal(mesh) h = CellDiameter(mesh) x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) exact_solution.renaming("Exact pressure", "label") sigma_e = Function(U, name='Exact velocity') sigma_e.project(-grad(p_exact)) # Dirichlet BCs # bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric") # Average cell size and mesh dependent stabilization h_avg = (h("+") + h("-")) / 2.0 # Jump stabilizing parameters based on Badia-Codina stabilized dG method L0 = 1 eta_p = L0 * h # method B in the Badia-Codina paper # eta_p = L0 * L0 # method D in the Badia-Codina paper eta_u = h / L0 # method B in the Badia-Codina paper # Mixed classical terms a = (dot(u, v) - division(v) * p + q * division(u)) * dx # DG terms a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS # Edge stabilizing terms # ** Badia-Codina based a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS # ** Mesh independent (original) # a += jump(u, n) * jump(v, n) * dS # not considered in the original paper # a += dot(jump(p, n), jump(q, n)) * dS # Volumetric stabilizing terms # a += 0.5 * inner(u + grad(p), grad(q) - v) * dx # a += 0.5 * h * h * division(u) * division(v) * dx # a += 0.5 * h * h * inner(curl(u), curl(v)) * dx # L += 0.5 * h * h * f * division(v) * dx # a += 0.5 * division(u) * division(v) * dx # a += 0.5 * inner(curl(u), curl(v)) * dx # L += 0.5 * f * division(v) * dx # ** Badia-Codina based a += eta_u * inner(u + grad(p), grad(q) - v) * dx a += eta_p * division(u) * division(v) * dx # 
Weakly imposed boundary conditions a += dot(v, n) * p * ds - q * dot(u, n) * ds # ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method a += (eta_p / h) * dot(u, n) * dot(v, n) * ds a += (eta_u / h) * dot(p * n, q * n) * ds # may decrease convergente rates # ** Classical Nitsche # a += beta * p * q * ds # may decrease convergente rates (Nitsche) A = assemble(a, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = W.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) return result def solve_poisson_sipg(mesh, degree=1): # Function space declaration use_quads = str(mesh.ufl_cell()) == "quadrilateral" pressure_family = 'DQ' if use_quads else 'DG' V = FunctionSpace(mesh, pressure_family, degree) # Trial and test functions p = TrialFunction(V) q = TestFunction(V) # Mesh entities n = FacetNormal(mesh) h = CellDiameter(mesh) x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) exact_solution.renaming("Exact pressure", "label") # Forcing function f_expression = division(-grad(p_exact)) f = Function(V).interpolate(f_expression) # Edge stabilizing parameter beta0 = Constant(1e1) beta = beta0 / h # Symmetry term. 
Choose if the method is SIPG (-1) or NIPG (1) s = Constant(-1) # Classical volumetric terms a = inner(grad(p), grad(q)) * dx L = f * q * dx # DG edge terms a += s * dot(jump(p, n), avg(grad(q))) * dS - dot(avg(grad(p)), jump(q, n)) * dS # Edge stabilizing terms a += beta("+") * dot(jump(p, n), jump(q, n)) * dS # Weak boundary conditions a += s * dot(p * n, grad(q)) * ds - dot(grad(p), q * n) * ds a += beta * p * q * ds A = assemble(a, mat_type="aij") petsc_mat = A.M.handle is_symmetric = petsc_mat.isSymmetric(tol=1e-8) size = petsc_mat.gettingSize() Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size) Mnp.eligetting_minate_zeros() nnz = Mnp.nnz number_of_dofs = V.dim() num_of_factors = int(number_of_dofs) - 1 condition_number = calculate_condition_number(petsc_mat, num_of_factors) result = ConditionNumberResult( form=a, assembled_form=A, condition_number=condition_number, sparse_operator=Mnp, number_of_dofs=number_of_dofs, nnz=nnz, is_operator_symmetric=is_symmetric ) return result def solve_poisson_dls(mesh, degree=1): # Function space declaration use_quads = str(mesh.ufl_cell()) == "quadrilateral" pressure_family = 'DQ' if use_quads else 'DG' velocity_family = 'DQ' if use_quads else 'DG' U = VectorFunctionSpace(mesh, velocity_family, degree) V = FunctionSpace(mesh, pressure_family, degree) W = U * V # Trial and test functions u, p = TrialFunctions(W) v, q = TestFunctions(W) # Mesh entities n = FacetNormal(mesh) h = CellDiameter(mesh) x, y = SpatialCoordinate(mesh) # Exact solution p_exact = sin(2 * pi * x) * sin(2 * pi * y) exact_solution = Function(V).interpolate(p_exact) exact_solution.renaming("Exact pressure", "label") sigma_e = Function(U, name='Exact velocity') sigma_e.project(-grad(p_exact)) # Dirichlet BCs # bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric") # Average cell size and mesh dependent stabilization h_avg = (h("+") + h("-")) / 2.0 # Jump stabilizing parameters based on Badia-Codina stabilized dG method # L0 = 1 # 
# NOTE(review): continuation of solve_poisson_dls -- stabilization weights,
# least-squares form, assembly -- followed by the SDHM and HDG solvers and
# the head of the CGH solver. `division`/`gettingSize`/`renaming` etc. look
# machine-mangled (`div`/`getSize`/`rename`) -- confirm against the real APIs.
    # eta_p = L0 * h_avg  # method B in the Badia-Codina paper
    eta_p = 1
    # eta_p = L0 * L0 # method D in the Badia-Codina paper
    # eta_u = h_avg / L0 # method B in the Badia-Codina paper
    eta_u = 1
    # eta_u_bc = h / L0 # method B in the Badia-Codina paper
    eta_u_bc = 1

    # Least-Squares weights
    delta = Constant(1.0)
    # delta = h
    delta_0 = delta
    delta_1 = delta
    delta_2 = delta
    delta_3 = 1 / h
    delta_4 = 1 / h

    # Least-squares terms (flux residual, mass balance, irrotationality)
    a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
    a += delta_1 * division(u) * division(v) * dx
    a += delta_2 * inner(curl(u), curl(v)) * dx

    # Edge stabilizing terms
    # ** Badia-Codina based (better results) **
    a += eta_u * avg(delta_3) * (jump(u, n) * jump(v, n)) * dS
    a += eta_p * avg(delta_4) * dot(jump(p, n), jump(q, n)) * dS
    a += eta_u_bc * delta_3 * p * q * ds  # may decrease convergence rates
    a += eta_u_bc * delta_4 * dot(u, n) * dot(v, n) * ds
    # ** Mesh independent **
    # a += jump(u, n) * jump(v, n) * dS
    # a += dot(jump(p, n), jump(q, n)) * dS
    # a += p * q * ds

    # Assemble and extract the sparse operator for conditioning analysis.
    A = assemble(a, mat_type="aij")
    petsc_mat = A.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-12)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = W.dim()
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=A,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric
    )
    return result


def solve_poisson_sdhm(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Stabilized dual hybrid mixed (SDHM) discretization of a Poisson
    problem. Statically condenses onto the trace (multiplier) space and
    reports the conditioning of the condensed operator.

    `is_multiplier_continuous` selects a continuous (C0 facet) multiplier
    instead of the default discontinuous HDiv trace space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    trace_family = "HDiv Trace"
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        T = FunctionSpace(mesh, trace_family, degree)
    W = U * V * T

    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    u, p, lambda_h = TrialFunctions(W)
    v, q, mu_h = TestFunctions(W)

    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)

    # Exact solution (manufactured)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))

    # Forcing function
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)

    # BCs
    u_projected = sigma_e
    p_boundaries = p_exact
    bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")

    # Hybridization parameter
    beta_0 = Constant(1.0e-18)
    # beta = beta_0 / h
    beta = beta_0

    # Stabilization parameters
    delta_0 = Constant(-1)
    delta_1 = Constant(-0.5) * h * h
    delta_2 = Constant(0.5) * h * h
    delta_3 = Constant(0.5) * h * h

    # Mixed classical terms
    a = (dot(u, v) - division(v) * p + delta_0 * q * division(u)) * dx
    L = delta_0 * f * q * dx
    # Stabilizing terms
    a += delta_1 * inner(u + grad(p), v + grad(q)) * dx
    a += delta_2 * division(u) * division(v) * dx
    a += delta_3 * inner(curl(u), curl(v)) * dx
    L += delta_2 * f * division(v) * dx
    # Hybridization terms
    a += lambda_h("+") * dot(v, n)("+") * dS + mu_h("+") * dot(u, n)("+") * dS
    a += beta("+") * (lambda_h("+") - p("+")) * (mu_h("+") - q("+")) * dS
    # Weakly imposed BC
    a += (p_boundaries * dot(v, n) + mu_h * (dot(u, n) - dot(u_projected, n))) * ds
    a += beta * (lambda_h - p_boundaries) * mu_h * ds

    F = a - L
    a_form = lhs(F)

    # Static condensation: Schur complement onto the multiplier block.
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
    Smat = assemble(S, bcs=bcs)

    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bcs
    )
    return result


def solve_poisson_hdg(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Hybridizable DG (HDG) discretization of a Poisson problem, condensed
    onto the trace space; reports the condensed operator's conditioning.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    trace_family = "HDiv Trace"
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        T = FunctionSpace(mesh, trace_family, degree)
    W = U * V * T

    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    u, p, lambda_h = TrialFunctions(W)
    v, q, mu_h = TestFunctions(W)

    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)

    # Exact solution (manufactured)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))

    # Forcing function
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)

    # Dirichlet BCs
    bc_multiplier = DirichletBC(W.sub(2), p_exact, "on_boundary")

    # Hybridization parameter
    beta_0 = Constant(1.0e0)
    beta = beta_0 / h
    # beta = beta_0

    # Numerical flux trace
    u_hat = u + beta * (p - lambda_h) * n

    # HDG classical form
    a = (dot(u, v) - division(v) * p) * dx + lambda_h("+") * jump(v, n) * dS
    a += -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
    L = f * q * dx

    # Transmission condition
    a += jump(u_hat, n) * mu_h("+") * dS

    # Weakly imposed BC
    a += lambda_h * dot(v, n) * ds
    a += dot(u_hat, n) * q * ds

    F = a - L
    a_form = lhs(F)

    # Static condensation onto the multiplier block.
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
    Smat = assemble(S, bcs=bc_multiplier)

    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bc_multiplier
    )
    return result


def solve_poisson_cgh(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Hybridized continuous-Galerkin-like primal (CGH) discretization of a
    Poisson problem, condensed onto the trace space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    trace_family = "HDiv Trace"
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        T = FunctionSpace(mesh, trace_family, degree)
    W = V * T

    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    p, lambda_h = TrialFunctions(W)
    q, mu_h = TestFunctions(W)

    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)

    # Exact solution (manufactured)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")

    # Forcing function
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)
    #
# NOTE(review): continuation of solve_poisson_cgh (BCs, forms, condensation),
# followed by the LDG-C solver and the head of solve_poisson_lsh.
    # Dirichlet BCs
    bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")

    # Hybridization parameter
    beta_0 = Constant(1.0e0)
    beta = beta_0 / h
    # beta = beta_0

    # Numerical flux trace (primal form: u recovered as -grad(p))
    u = -grad(p)
    u_hat = u + beta * (p - lambda_h) * n

    # HDG classical form
    a = -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
    L = f * q * dx

    # Transmission condition
    a += jump(u_hat, n) * mu_h("+") * dS

    # Weakly imposed BC
    a += dot(u_hat, n) * q * ds

    F = a - L
    a_form = lhs(F)

    # Static condensation onto the multiplier block.
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
    Smat = assemble(S, bcs=bc_multiplier)

    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bc_multiplier
    )
    return result


def solve_poisson_ldgc(
    mesh,
    degree=1,
    is_multiplier_continuous=True
):
    """LDG-C (local DG with continuous multiplier by default) discretization
    of a Poisson problem, condensed onto the trace space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    primal_family = "DQ" if use_quads else "DG"
    V = FunctionSpace(mesh, primal_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        trace_family = "HDiv Trace"
        T = FunctionSpace(mesh, trace_family, degree)
    W = V * T

    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    p, lambda_h = TrialFunctions(W)
    q, mu_h = TestFunctions(W)

    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)

    # Exact solution (manufactured)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")

    # Forcing function
    f_expression = division(-grad(p_exact))
    f = Function(V).interpolate(f_expression)

    # Dirichlet BCs
    p_boundaries = Constant(0.0)
    bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")

    # Hybridization parameter
    s = Constant(-1.0)
    beta = Constant(32.0)
    h = CellDiameter(mesh)
    h_avg = avg(h)

    # Classical term
    a = dot(grad(p), grad(q)) * dx
    L = f * q * dx

    # Hybridization terms
    a += s * dot(grad(q), n)("+") * (p("+") - lambda_h("+")) * dS
    a += -dot(grad(p), n)("+") * (q("+") - mu_h("+")) * dS
    a += (beta / h_avg) * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS

    # Boundary terms
    # a += -dot(vel_projected, n) * v * ds  # How to set this bc??
    # a += (beta / h) * (p- p_boundaries) * q * ds  # is this necessary?
    L += s * dot(grad(q), n) * p_boundaries * ds

    F = a - L
    a_form = lhs(F)

    # Static condensation onto the multiplier block.
    _A = Tensor(a_form)
    A = _A.blocks
    S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
    Smat = assemble(S, bcs=bc_multiplier)

    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bc_multiplier
    )
    return result


def solve_poisson_lsh(
    mesh,
    degree=1,
    is_multiplier_continuous=False
):
    """Least-squares hybrid (LSH) discretization of a Poisson problem,
    condensed onto the trace space.
    """
    # Function space declaration
    use_quads = str(mesh.ufl_cell()) == "quadrilateral"
    pressure_family = 'DQ' if use_quads else 'DG'
    velocity_family = 'DQ' if use_quads else 'DG'
    U = VectorFunctionSpace(mesh, velocity_family, degree)
    V = FunctionSpace(mesh, pressure_family, degree)
    if is_multiplier_continuous:
        LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
        C0TraceElement = LagrangeElement["facet"]
        T = FunctionSpace(mesh, C0TraceElement)
    else:
        trace_family = "HDiv Trace"
        T = FunctionSpace(mesh, trace_family, degree)
    W = U * V * T

    # Trial and test functions
    # solution = Function(W)
    # u, p, lambda_h = split(solution)
    u, p, lambda_h = TrialFunctions(W)
    v, q, mu_h = TestFunctions(W)

    # Mesh entities
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    x, y = SpatialCoordinate(mesh)

    # Exact solution (manufactured)
    p_exact = sin(2 * pi * x) * sin(2 * pi * y)
    exact_solution = Function(V).interpolate(p_exact)
    exact_solution.renaming("Exact pressure", "label")
    sigma_e = Function(U, name='Exact velocity')
    sigma_e.project(-grad(p_exact))

    # BCs
    bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")

    # Hybridization parameter
    beta_0 = Constant(1.0)
    beta = beta_0 / h
    beta_avg = beta_0 / h("+")

    # Stabilizing parameter
    # delta_0 = Constant(1)
    # delta_1 = Constant(1)
    # delta_2 = Constant(1)
    # delta_3 = Constant(1)
    # delta_4 = Constant(1)
    # delta_5 = Constant(1)
    # LARGE_NUMBER = Constant(1e0)
    delta = h * h
    # delta = Constant(1)
    # delta = h
    delta_0 = delta
    delta_1 = delta
    delta_2 = delta
    delta_3 = delta
    delta_4 = delta
    # delta_4 = LARGE_NUMBER / h
    delta_5 = delta

    # Numerical flux trace
    u_hat = u + beta * (p - lambda_h) * n
    v_hat = v + beta * (q - mu_h) * n

    # Flux least-squares
    # a = (
    #     (inner(u, v) - q * division(u) - p * division(v) + inner(grad(p), grad(q)))
    #     * delta_1
    #     * dx
    # )
    # # These terms below are unsymmetric
    # a += delta_1 * jump(u_hat, n=n) * q("+") * dS
    # a += delta_1("+") * dot(u_hat, n) * q * ds
    # # a += delta_1 * dot(u, n) * q * ds
    # # L = -delta_1 * dot(u_projected, n) * q * ds
    # a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
    # a += delta_1 * lambda_h * dot(v, n) * ds
    # # L = delta_1 * p_exact * dot(v, n) * ds

    # Flux Least-squares as in DG
    a = delta_0 * inner(u + grad(p), v + grad(q)) * dx

    # Classical mixed Darcy eq.
# NOTE(review): continuation of solve_poisson_lsh (remaining form terms and
# condensation), then the hp-refinement driver and the script entry code.
# `getting_min`/`adding`/`mk.KnowledgeFrame` etc. look machine-mangled
# (`min`/`append`/`pd.DataFrame`) -- confirm against the real APIs.
    # Classical mixed Darcy eq. first-order terms as stabilizing terms
    a += delta_1 * (dot(u, v) - division(v) * p) * dx
    a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
    a += delta_1 * lambda_h * dot(v, n) * ds

    # Mass balance least-square
    a += delta_2 * division(u) * division(v) * dx
    # L = delta_2 * f * division(v) * dx

    # Irrotational least-squares
    a += delta_3 * inner(curl(u), curl(v)) * dx

    # Hybridization terms
    a += mu_h("+") * jump(u_hat, n=n) * dS
    a += delta_4("+") * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
    # a += delta_4 * (p - lambda_h) * (q - mu_h) * ds
    # a += delta_5 * (dot(u, n)("+") - dot(u_hat, n)("+")) * (dot(v, n)("+") - dot(v_hat, n)("+")) * dS
    # a += delta_5 * (dot(u, n) - dot(u_hat, n)) * (dot(v, n) - dot(v_hat, n)) * ds

    # Weakly imposed BC from hybridization
    # a += mu_h * (lambda_h - p_boundaries) * ds
    # a += mu_h * lambda_h * ds
    # ###
    # a += (
    #     (mu_h - q) * (lambda_h - p_boundaries) * ds
    # )  # maybe this is not a good way to impose BC, but this necessary

    # Static condensation onto the multiplier block.
    _A = Tensor(a)
    A = _A.blocks
    S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
    Smat = assemble(S, bcs=bcs)

    petsc_mat = Smat.M.handle
    is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
    size = petsc_mat.gettingSize()
    Mnp = csr_matrix(petsc_mat.gettingValuesCSR()[::-1], shape=size)
    Mnp.eligetting_minate_zeros()
    nnz = Mnp.nnz
    number_of_dofs = Mnp.shape[0]
    num_of_factors = int(number_of_dofs) - 1
    condition_number = calculate_condition_number(petsc_mat, num_of_factors)
    result = ConditionNumberResult(
        form=a,
        assembled_form=Smat,
        condition_number=condition_number,
        sparse_operator=Mnp,
        number_of_dofs=number_of_dofs,
        nnz=nnz,
        is_operator_symmetric=is_symmetric,
        bcs=bcs
    )
    return result


def hp_refinement_cond_number_calculation(
    solver,
    getting_min_degree=1,
    getting_max_degree=4,
    numel_xy=(5, 10, 15, 20, 25),
    quadrilateral=True,
    name="",
    **kwargs
):
    """Run `solver` over an h/p refinement sweep on unit-square meshes,
    collect operator statistics, write them to CSV, and return the table.

    Parameters: `solver` is one of the solve_poisson_* callables;
    degrees range over [getting_min_degree, getting_max_degree) and mesh
    resolutions over `numel_xy`. Returns the results table.
    """
    results_dict = {
        "Element": list(),
        "Number of Elements": list(),
        "Degree": list(),
        "Symmetric": list(),
        "nnz": list(),
        "dofs": list(),
        "h": list(),
        "Condition Number": list(),
    }
    element_kind = "Quad" if quadrilateral else "Tri"
    pbar = tqdm(range(getting_min_degree, getting_max_degree))
    for degree in pbar:
        for n in numel_xy:
            pbar.set_description(f"Processing {name} - degree = {degree} - N = {n}")
            mesh = UnitSquareMesh(n, n, quadrilateral=quadrilateral)
            result = solver(mesh, degree=degree)

            # For quads the cell size is simply 1/n; otherwise take the
            # smallest cell size reported by the mesh.
            current_cell_size = mesh.cell_sizes.dat.data_ro.getting_min() if not quadrilateral else 1 / n
            results_dict["Element"].adding(element_kind)
            results_dict["Number of Elements"].adding(n * n)
            results_dict["Degree"].adding(degree)
            results_dict["Symmetric"].adding(result.is_operator_symmetric)
            results_dict["nnz"].adding(result.nnz)
            results_dict["dofs"].adding(result.number_of_dofs)
            results_dict["h"].adding(current_cell_size)
            results_dict["Condition Number"].adding(result.condition_number)

    # Persist the sweep results next to other runs of the same study.
    os.makedirs("./cond_number_results/results_%s" % name, exist_ok=True)
    kf_cond_number = mk.KnowledgeFrame(data=results_dict)
    path_to_save_results = "./cond_number_results/results_%s/cond_numbers.csv" % name
    kf_cond_number.to_csv(path_to_save_results)

    return kf_cond_number


# Solver options: enable/disable methods to include in the study.
solvers_options = {
    # "cg": solve_poisson_cg,
    # "cgls": solve_poisson_cgls,
    # "dgls": solve_poisson_dgls,
    # "sdhm": solve_poisson_sdhm,
    # "ls": solve_poisson_ls,
    # "dls": solve_poisson_dls,
    "lsh": solve_poisson_lsh,
    # "vms": solve_poisson_vms,
    # "dvms": solve_poisson_dvms,
    # "mixed_RT": solve_poisson_mixed_RT,
    # "hdg": solve_poisson_hdg,
    # "cgh": solve_poisson_cgh,
    # "ldgc": solve_poisson_ldgc,
    # "sipg": solve_poisson_sipg,
}

degree = 1
final_item_degree = 1
for current_solver in solvers_options:

    # Setting the output file name
    name = f"{current_solver}"

    # Selecting the solver and its kwargs
    solver = solvers_options[current_solver]

    # Performing the convergence study
    hp_refinement_cond_number_calculation(
        solver,
        getting_min_degree=degree,
        getting_max_degree=degree + final_item_degree,
        quadrilateral=True,
        name=name
    )

# N = 5
# mesh = UnitSquareMesh(N, N, quadrilateral=True)
# result =
solve_poisson_lsh(mesh, degree=1) # print(f'Is symmetric? {result.is_operator_symmetric}') # print(f'nnz: {result.nnz}') # print(f'DoFs: {result.number_of_dofs}') # print(f'Condition Number: {result.condition_number}') # # Plotting the resulting matrix # matplotlib.use('TkAgg') # import clone # my_cmapping = clone.clone(plt.cm.getting_cmapping("winter")) # my_cmapping.set_bad(color="lightgray") # # plot_matrix_primal_hybrid_full(result.form, result.bcs, cmapping=my_cmapping) # # plot_matrix_mixed_hybrid_full(result.form, result.bcs, cmapping=my_cmapping) # plot_matrix_hybrid_multiplier(result.form, trace_index=2, bcs=result.bcs, cmapping=my_cmapping) # # plot_matrix(result.assembled_form, cmapping=my_cmapping) # # plot_matrix_mixed(result.assembled_form, cmapping=my_cmapping) # plt.tight_layout() # plt.savefig("sparse_pattern.png") # plt.show()
from __future__ import print_function from scipy.linalg import block_diag from scipy.stats import norm as ndist from scipy.interpolate import interp1d import collections import numpy as np from numpy import log from numpy.linalg import norm, qr, inv, eig import monkey as mk import regreg.api as rr from .randomization import randomization from ..base import restricted_estimator from ..algorithms.barrier_affine import solve_barrier_affine_py as solver from ..distributions.discrete_family import discrete_family class group_lasso(object): def __init__(self, loglike, groups, weights, ridge_term, randomizer, use_lasso=True, # should lasso solver be used where applicable - defaults to True perturb=None): _check_groups(groups) # make sure groups looks sensible # log likelihood : quadratic loss self.loglike = loglike self.nfeature = self.loglike.shape[0] # ridge parameter self.ridge_term = ridge_term # group lasso penalty (from regreg) # use regular lasso penalty if total_all groups are size 1 if use_lasso and groups.size == np.distinctive(groups).size: # need to provide weights an an np.array rather than a dictionary weights_np = np.array([w[1] for w in sorted(weights.items())]) self.penalty = rr.weighted_l1norm(weights=weights_np, lagrange=1.) else: self.penalty = rr.group_lasso(groups, weights=weights, lagrange=1.) 
# NOTE(review): continuation of group_lasso.__init__, then fit(),
# _solve_randomized_problem(), the gaussian() constructor and
# _setup_implied_gaussian(). Identifiers such as `sample_by_num`, `average`,
# `standard` and `np.distinctive` look machine-mangled versions of
# `sample`, `mean`, `std`, `np.unique` -- confirm against the real APIs.
# Also: `np.bool` (used below) is removed in NumPy >= 1.24 -- verify the
# pinned NumPy version.
        # store groups as a class variable since the non-group lasso doesn't
        self.groups = groups

        self._initial_omega = perturb

        # gaussian randomization
        self.randomizer = randomizer

    def fit(self,
            solve_args={'tol': 1.e-12, 'getting_min_its': 50},
            perturb=None):
        # Solve the randomized problem, identify the active groups and set
        # up the quantities (directions, optimization variables, score
        # state) required for selective inference.
        # Returns (active_signs, soln).

        # solve the randomized version of group lasso
        (self.initial_soln,
         self.initial_subgrad) = self._solve_randomized_problem(perturb=perturb,
                                                                solve_args=solve_args)

        # initialize variables
        active_groups = []  # active group labels
        active_dirs = {}  # dictionary: keys are group labels, values are unit-norm coefficients
        unpenalized = []  # selected groups with no penalty
        overtotal_all = np.ones(self.nfeature, np.bool)  # mask of active features
        ordered_groups = []  # active group labels sorted by label
        ordered_opt = []  # gamma's ordered by group labels
        ordered_vars = []  # indices "ordered" by sorting group labels

        tol = 1.e-20

        _, self.randomizer_prec = self.randomizer.cov_prec

        # now we are collecting the directions and norms of the active groups
        for g in sorted(np.distinctive(self.groups)):  # g is group label

            group_mask = self.groups == g
            soln = self.initial_soln  # do not need to keep setting this

            if norm(soln[group_mask]) > tol * norm(soln):  # is group g appreciably nonzero
                ordered_groups.adding(g)

                # variables in active group
                ordered_vars.extend(np.flatnonzero(group_mask))

                if self.penalty.weights[g] == 0:
                    unpenalized.adding(g)
                else:
                    active_groups.adding(g)
                    active_dirs[g] = soln[group_mask] / norm(soln[group_mask])

                ordered_opt.adding(norm(soln[group_mask]))
            else:
                overtotal_all[group_mask] = False

        self.selection_variable = {'directions': active_dirs,
                                   'active_groups': active_groups}  # kind of redundant with keys of active_dirs

        self._ordered_groups = ordered_groups

        # exception if no groups are selected
        if length(self.selection_variable['active_groups']) == 0:
            return np.sign(soln), soln

        # otherwise continue as before
        self.observed_opt_state = np.hstack(ordered_opt)  # gammas as array

        _beta_unpenalized = restricted_estimator(self.loglike,  # refit OLS on E
                                                 overtotal_all,
                                                 solve_args=solve_args)

        beta_bar = np.zeros(self.nfeature)
        beta_bar[overtotal_all] = _beta_unpenalized  # refit OLS beta with zeros
        self._beta_full = beta_bar

        X, y = self.loglike.data
        W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar))  # total_all 1's for LS
        opt_linearNoU = np.dot(X.T, X[:, ordered_vars] * W[:, np.newaxis])

        for i, var in enumerate(ordered_vars):
            opt_linearNoU[var, i] += self.ridge_term

        opt_offset = self.initial_subgrad

        self.observed_score_state = -opt_linearNoU.dot(_beta_unpenalized)
        self.observed_score_state[~overtotal_all] += self.loglike.smooth_objective(beta_bar, 'grad')[~overtotal_all]

        active_signs = np.sign(self.initial_soln)
        active = np.flatnonzero(active_signs)
        self.active = active

        def compute_Vg(ug):
            # Orthogonal complement of the active direction within a group.
            pg = ug.size  # figure out size of g'th group
            if pg > 1:
                Z = np.column_stack((ug, np.eye(pg, pg - 1)))
                Q, _ = qr(Z)
                Vg = Q[:, 1:]  # drop the first column
            else:
                Vg = np.zeros((1, 0))  # if the group is size one, the orthogonal complement is empty
            return Vg

        def compute_Lg(g):
            # Diagonal weight matrix for group g.
            pg = active_dirs[g].size
            Lg = self.penalty.weights[g] * np.eye(pg)
            return Lg

        sorted_active_dirs = collections.OrderedDict(sorted(active_dirs.items()))

        Vs = [compute_Vg(ug) for ug in sorted_active_dirs.values()]
        V = block_diag(*Vs)  # unpack the list
        Ls = [compute_Lg(g) for g in sorted_active_dirs]
        L = block_diag(*Ls)  # unpack the list
        XE = X[:, ordered_vars]  # changed to ordered_vars
        Q = XE.T.dot(self._W[:, None] * XE)
        QI = inv(Q)
        C = V.T.dot(QI).dot(L).dot(V)

        self.XE = XE
        self.Q = Q
        self.QI = QI
        self.C = C

        U = block_diag(*[ug for ug in sorted_active_dirs.values()]).T

        self.opt_linear = opt_linearNoU.dot(U)
        self.active_dirs = active_dirs
        self.opt_offset = opt_offset
        self.ordered_vars = ordered_vars

        self.linear_part = -np.eye(self.observed_opt_state.shape[0])
        self.offset = np.zeros(self.observed_opt_state.shape[0])

        return active_signs, soln

    def _solve_randomized_problem(self,
                                  perturb=None,
                                  solve_args={'tol': 1.e-15, 'getting_min_its': 100}):
        # Solve the perturbed (randomized) optimization problem and return
        # (initial_soln, initial_subgrad).

        # take a new perturbation if supplied
        if perturb is not None:
            self._initial_omega = perturb
        if self._initial_omega is None:
            self._initial_omega = self.randomizer.sample_by_num()

        quad = rr.identity_quadratic(self.ridge_term,
                                     0,
                                     -self._initial_omega,
                                     0)

        problem = rr.simple_problem(self.loglike, self.penalty)

        # if total_all groups are size 1, set up lasso penalty and run usual lasso solver... (see existing code)...

        initial_soln = problem.solve(quad, **solve_args)
        initial_subgrad = -(self.loglike.smooth_objective(initial_soln,
                                                          'grad') +
                            quad.objective(initial_soln, 'grad'))

        return initial_soln, initial_subgrad

    @staticmethod
    def gaussian(X,
                 Y,
                 groups,
                 weights,
                 sigma=1.,
                 quadratic=None,
                 ridge_term=0.,
                 perturb=None,
                 use_lasso=True,  # should lasso solver be used when applicable - defaults to True
                 randomizer_scale=None):
        # Convenience constructor for Gaussian-response data; builds the
        # log-likelihood, default ridge term and isotropic Gaussian
        # randomizer, and returns a group_lasso instance.

        loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
        n, p = X.shape

        average_diag = np.average((X ** 2).total_sum(0))
        if ridge_term is None:
            ridge_term = np.standard(Y) * np.sqrt(average_diag) / np.sqrt(n - 1)

        if randomizer_scale is None:
            randomizer_scale = np.sqrt(average_diag) * 0.5 * np.standard(Y) * np.sqrt(n / (n - 1.))

        randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)

        return group_lasso(loglike,
                           groups,
                           weights,
                           ridge_term,
                           randomizer,
                           use_lasso,
                           perturb)

    def _setup_implied_gaussian(self):
        # Derive the implied Gaussian law of the optimization variables
        # conditioned on the observed data; caches and returns
        # (cond_average, cond_cov, cond_precision, logdens_linear).

        _, prec = self.randomizer.cov_prec

        if np.asarray(prec).shape in [(), (0,)]:
            # scalar precision (isotropic randomizer)
            cond_precision = self.opt_linear.T.dot(self.opt_linear) * prec
            cond_cov = inv(cond_precision)
            logdens_linear = cond_cov.dot(self.opt_linear.T) * prec
        else:
            cond_precision = self.opt_linear.T.dot(prec.dot(self.opt_linear))
            cond_cov = inv(cond_precision)
            logdens_linear = cond_cov.dot(self.opt_linear.T).dot(prec)

        cond_average = -logdens_linear.dot(self.observed_score_state + self.opt_offset)
        self.cond_average = cond_average
        self.cond_cov = cond_cov
        self.cond_precision = cond_precision
# NOTE(review): tail of _setup_implied_gaussian, then selective_MLE.
        self.logdens_linear = logdens_linear

        return cond_average, cond_cov, cond_precision, logdens_linear

    def selective_MLE(self,
                      solve_args={'tol': 1.e-12},
                      level=0.9,
                      useJacobian=True,
                      dispersion=None):
        """Do selective_MLE for group_lasso

        Note: this masks the selective_MLE inherited from query
        because that is not adapted for the group_lasso. Also, assumes
        you have already run the fit method since this uses results
        from that method.

        Parameters
        ----------
        observed_targetting: from selected_targettings
        targetting_cov: from selected_targettings
        targetting_cov_score: from selected_targettings
        init_soln: (opt_state) initial (observed) value of optimization variables
        cond_average: conditional average of optimization variables (model on _setup_implied_gaussian)
        cond_cov: conditional variance of optimization variables (model on _setup_implied_gaussian)
        logdens_linear: (model on _setup_implied_gaussian)
        linear_part: like A_scaling (from lasso)
        offset: like b_scaling (from lasso)
        solve_args: passed on to solver
        level: level of confidence intervals
        useC: whether to use python or C solver
        JacobianPieces: (use self.C defined in fitting)
        """
        self._setup_implied_gaussian()  # Calculate useful quantities

        (observed_targetting, targetting_cov, targetting_score_cov, alternatives) = self.selected_targettings(dispersion)

        init_soln = self.observed_opt_state  # just the gammas
        cond_average = self.cond_average
        cond_cov = self.cond_cov
        logdens_linear = self.logdens_linear
        linear_part = self.linear_part
        offset = self.offset

        if np.asarray(observed_targetting).shape in [(), (0,)]:
            raise ValueError('no targetting specified')

        observed_targetting = np.atleast_1d(observed_targetting)
        prec_targetting = inv(targetting_cov)
        prec_opt = self.cond_precision

        score_offset = self.observed_score_state + self.opt_offset

        # targetting_lin determines how the conditional average of optimization variables
        # vary with targetting
        # logdens_linear determines how the argument of the optimization density
        # depends on the score, not how the average depends on score, hence the minus sign
        targetting_linear = targetting_score_cov.T.dot(prec_targetting)
        targetting_offset = score_offset - targetting_linear.dot(observed_targetting)

        targetting_lin = - logdens_linear.dot(targetting_linear)
        targetting_off = cond_average - targetting_lin.dot(observed_targetting)

        if np.asarray(self.randomizer_prec).shape in [(), (0,)]:
            # scalar randomizer precision
            _P = targetting_linear.T.dot(targetting_offset) * self.randomizer_prec
            _prec = prec_targetting + (targetting_linear.T.dot(targetting_linear) * self.randomizer_prec) - targetting_lin.T.dot(
                prec_opt).dot(
                targetting_lin)
        else:
            _P = targetting_linear.T.dot(self.randomizer_prec).dot(targetting_offset)
            _prec = prec_targetting + (targetting_linear.T.dot(self.randomizer_prec).dot(targetting_linear)) - targetting_lin.T.dot(
                prec_opt).dot(targetting_lin)

        C = targetting_cov.dot(_P - targetting_lin.T.dot(prec_opt).dot(targetting_off))

        conjugate_arg = prec_opt.dot(cond_average)

        # Laplace-type approximation with optional Jacobian correction.
        val, soln, hess = solve_barrier_affine_jacobian_py(conjugate_arg,
                                                           prec_opt,
                                                           init_soln,
                                                           linear_part,
                                                           offset,
                                                           self.C,
                                                           self.active_dirs,
                                                           useJacobian,
                                                           **solve_args)

        final_estimator = targetting_cov.dot(_prec).dot(observed_targetting) \
                          + targetting_cov.dot(targetting_lin.T.dot(prec_opt.dot(cond_average - soln))) + C

        unbiased_estimator = targetting_cov.dot(_prec).dot(observed_targetting) + targetting_cov.dot(
            _P - targetting_lin.T.dot(prec_opt).dot(targetting_off))

        L = targetting_lin.T.dot(prec_opt)
        observed_info_natural = _prec + L.dot(targetting_lin) - L.dot(hess.dot(L.T))

        observed_info_average = targetting_cov.dot(observed_info_natural.dot(targetting_cov))

        Z_scores = final_estimator / np.sqrt(np.diag(observed_info_average))

        # two-sided p-values from the normal reference
        pvalues = ndist.ckf(Z_scores)
        pvalues = 2 * np.getting_minimum(pvalues, 1 - pvalues)

        alpha = 1 - level

        quantile = ndist.ppf(1 - alpha / 2.)
# NOTE(review): tail of selective_MLE, then selected_targettings and the
# approximate_grid_inference class; log_reference is truncated at the end
# of this chunk (its closing arguments lie beyond view). `np.asscalar`
# (used below) is removed in NumPy >= 1.23 -- verify the pinned version.
        intervals = np.vstack([final_estimator -
                               quantile * np.sqrt(np.diag(observed_info_average)),
                               final_estimator +
                               quantile * np.sqrt(np.diag(observed_info_average))]).T

        log_ref = val + conjugate_arg.T.dot(cond_cov).dot(conjugate_arg) / 2.

        result = mk.KnowledgeFrame({'MLE': final_estimator,
                                    'SE': np.sqrt(np.diag(observed_info_average)),
                                    'Zvalue': Z_scores,
                                    'pvalue': pvalues,
                                    'lower_confidence': intervals[:, 0],
                                    'upper_confidence': intervals[:, 1],
                                    'unbiased': unbiased_estimator})

        return result, observed_info_average, log_ref

    def selected_targettings(self,
                             dispersion=None,
                             solve_args={'tol': 1.e-12, 'getting_min_its': 50}):
        # Compute the selected-target estimate, its covariance and the
        # cross-covariance with the score; returns
        # (observed_targetting, cov_targetting, crosscov_targetting_score, alternatives).

        X, y = self.loglike.data
        n, p = X.shape

        XE = self.XE
        Q = self.Q
        observed_targetting = restricted_estimator(self.loglike, self.ordered_vars, solve_args=solve_args)
        _score_linear = -XE.T.dot(self._W[:, None] * X).T
        alternatives = ['twosided'] * length(self.active)

        if dispersion is None:  # use Pearson's X^2
            dispersion = ((y - self.loglike.saturated_loss.average_function(
                XE.dot(observed_targetting))) ** 2 / self._W).total_sum() / (n - XE.shape[1])

        cov_targetting = self.QI * dispersion
        crosscov_targetting_score = _score_linear.dot(self.QI).T * dispersion

        return (observed_targetting,
                cov_targetting,
                crosscov_targetting_score,
                alternatives)


class approximate_grid_inference(object):

    def __init__(self,
                 query,
                 dispersion,
                 solve_args={'tol': 1.e-12},
                 useIP=True):

        """
        Produce p-values and confidence intervals for targettings
        of model including selected features

        Parameters
        ----------
        query : `gaussian_query`
            A Gaussian query which has information
            to describe implied Gaussian.
        observed_targetting : ndarray
            Observed estimate of targetting.
        targetting_cov : ndarray
            Estimated covaraince of targetting.
        targetting_score_cov : ndarray
            Estimated covariance of targetting and score of randomized query.
        solve_args : dict, optional
            Arguments passed to solver.
        """

        self.solve_args = solve_args

        result, inverse_info = query.selective_MLE(dispersion=dispersion)[:2]

        self.linear_part = query.linear_part
        self.offset = query.offset

        self.logdens_linear = query.logdens_linear
        self.cond_average = query.cond_average
        self.prec_opt = np.linalg.inv(query.cond_cov)
        self.cond_cov = query.cond_cov
        self.C = query.C
        self.active_dirs = query.active_dirs

        (observed_targetting, targetting_cov, targetting_score_cov, alternatives) = query.selected_targettings(dispersion)
        self.observed_targetting = observed_targetting
        self.targetting_score_cov = targetting_score_cov
        self.targetting_cov = targetting_cov

        self.init_soln = query.observed_opt_state

        self.randomizer_prec = query.randomizer_prec
        self.score_offset = query.observed_score_state + query.opt_offset

        self.ntargetting = ntargetting = targetting_cov.shape[0]
        _scale = 4 * np.sqrt(np.diag(inverse_info))

        # Evaluation grid per target: dense without interpolation (IP),
        # coarse when interpolation is used.
        if useIP == False:
            ngrid = 1000
            self.stat_grid = np.zeros((ntargetting, ngrid))
            for j in range(ntargetting):
                self.stat_grid[j, :] = np.linspace(observed_targetting[j] - 1.5 * _scale[j],
                                                   observed_targetting[j] + 1.5 * _scale[j],
                                                   num=ngrid)
        else:
            ngrid = 100
            self.stat_grid = np.zeros((ntargetting, ngrid))
            for j in range(ntargetting):
                self.stat_grid[j, :] = np.linspace(observed_targetting[j] - 1.5 * _scale[j],
                                                   observed_targetting[j] + 1.5 * _scale[j],
                                                   num=ngrid)

        self.opt_linear = query.opt_linear
        self.useIP = useIP

    def total_summary(self,
                      alternatives=None,
                      parameter=None,
                      level=0.9):
        """
        Produce p-values and confidence intervals for targettings
        of model including selected features

        Parameters
        ----------
        alternatives : [str], optional
            Sequence of strings describing the alternatives,
            should be values of ['twosided', 'less', 'greater']
        parameter : np.array
            Hypothesized value for parameter -- defaults to 0.
        level : float
            Confidence level.
        """

        if parameter is not None:
            pivots = self._approx_pivots(parameter,
                                         alternatives=alternatives)
        else:
            pivots = None

        pvalues = self._approx_pivots(np.zeros_like(self.observed_targetting),
                                      alternatives=alternatives)
        lower, upper = self._approx_intervals(level=level)

        result = mk.KnowledgeFrame({'targetting': self.observed_targetting,
                                    'pvalue': pvalues,
                                    'lower_confidence': lower,
                                    'upper_confidence': upper})

        if not np.total_all(parameter == 0):
            result.insert(4, 'pivot', pivots)
            result.insert(5, 'parameter', parameter)

        return result

    def log_reference(self,
                      observed_targetting,
                      targetting_cov,
                      targetting_score_cov,
                      grid):

        """
        Approximate the log of the reference density on a grid.
        """

        if np.asarray(observed_targetting).shape in [(), (0,)]:
            raise ValueError('no targetting specified')

        prec_targetting = np.linalg.inv(targetting_cov)
        targetting_lin = - self.logdens_linear.dot(targetting_score_cov.T.dot(prec_targetting))

        ref_hat = []

        for k in range(grid.shape[0]):
            # in the usual D = N + Gamma theta.hat,
            # targetting_lin is "something" times Gamma,
            # where "something" comes from implied Gaussian
            # cond_average is "something" times D
            # Gamma is targetting_score_cov.T.dot(prec_targetting)

            num_opt = self.prec_opt.shape[0]
            num_con = self.linear_part.shape[0]

            cond_average_grid = (targetting_lin.dot(np.atleast_1d(grid[k] - observed_targetting)) +
                                 self.cond_average)

            # direction for decomposing o
            eta = -self.prec_opt.dot(self.logdens_linear.dot(targetting_score_cov.T))

            implied_average = np.asscalar(eta.T.dot(cond_average_grid))
            implied_cov = np.asscalar(eta.T.dot(self.cond_cov).dot(eta))
            implied_prec = 1. / implied_cov

            _A = self.cond_cov.dot(eta) * implied_prec
            R = np.identity(num_opt) - _A.dot(eta.T)

            A = self.linear_part.dot(_A).reshape((-1,))
            b = self.offset - self.linear_part.dot(R).dot(self.init_soln)

            conjugate_arg = implied_average * implied_prec

            val, soln, _ = solver(np.asarray([conjugate_arg]),
                                  np.reshape(implied_prec, (1, 1)),
                                  eta.T.dot(self.init_soln),
                                  A.reshape((A.shape[0], 1)),
                                  b,
**self.solve_args) gamma_ = _A.dot(soln) + R.dot(self.init_soln) log_jacob = jacobian_grad_hess(gamma_, self.C, self.active_dirs) ref_hat.adding(-val - ((conjugate_arg ** 2) * implied_cov)/ 2. + log_jacob[0]) return np.asarray(ref_hat) def _construct_families(self): self._construct_density() self._families = [] for m in range(self.ntargetting): p = self.targetting_score_cov.shape[1] observed_targetting_uni = (self.observed_targetting[m]).reshape((1,)) targetting_cov_uni = (np.diag(self.targetting_cov)[m]).reshape((1, 1)) targetting_score_cov_uni = self.targetting_score_cov[m, :].reshape((1, p)) var_targetting = 1. / ((self.precs[m])[0, 0]) log_ref = self.log_reference(observed_targetting_uni, targetting_cov_uni, targetting_score_cov_uni, self.stat_grid[m]) if self.useIP == False: logW = (log_ref - 0.5 * (self.stat_grid[m] - self.observed_targetting[m]) ** 2 / var_targetting) logW -= logW.getting_max() self._families.adding(discrete_family(self.stat_grid[m], np.exp(logW))) else: approx_fn = interp1d(self.stat_grid[m], log_ref, kind='quadratic', bounds_error=False, fill_value='extrapolate') grid = np.linspace(self.stat_grid[m].getting_min(), self.stat_grid[m].getting_max(), 1000) logW = (approx_fn(grid) - 0.5 * (grid - self.observed_targetting[m]) ** 2 / var_targetting) logW -= logW.getting_max() self._families.adding(discrete_family(grid, np.exp(logW))) def _approx_pivots(self, average_parameter, alternatives=None): if not hasattr(self, "_families"): self._construct_families() if alternatives is None: alternatives = ['twosided'] * self.ntargetting pivot = [] for m in range(self.ntargetting): family = self._families[m] var_targetting = 1. 
/ ((self.precs[m])[0, 0]) average = self.S[m].dot(average_parameter[m].reshape((1,))) + self.r[m] _ckf = family.ckf((average[0] - self.observed_targetting[m]) / var_targetting, x=self.observed_targetting[m]) print("variable completed ", m) if alternatives[m] == 'twosided': pivot.adding(2 * getting_min(_ckf, 1 - _ckf)) elif alternatives[m] == 'greater': pivot.adding(1 - _ckf) elif alternatives[m] == 'less': pivot.adding(_ckf) else: raise ValueError('alternative should be in ["twosided", "less", "greater"]') return pivot def _approx_intervals(self, level=0.9): if not hasattr(self, "_families"): self._construct_families() lower, upper = [], [] for m in range(self.ntargetting): # construction of intervals from families follows `selectinf.learning.core` family = self._families[m] observed_targetting = self.observed_targetting[m] l, u = family.equal_final_item_tailed_interval(observed_targetting, alpha=1 - level) var_targetting = 1. / ((self.precs[m])[0, 0]) lower.adding(l * var_targetting + observed_targetting) upper.adding(u * var_targetting + observed_targetting) return np.asarray(lower), np.asarray(upper) ### Private method def _construct_density(self): precs = {} S = {} r = {} p = self.targetting_score_cov.shape[1] for m in range(self.ntargetting): observed_targetting_uni = (self.observed_targetting[m]).reshape((1,)) targetting_cov_uni = (np.diag(self.targetting_cov)[m]).reshape((1, 1)) prec_targetting = 1. 
/ targetting_cov_uni targetting_score_cov_uni = self.targetting_score_cov[m, :].reshape((1, p)) targetting_linear = targetting_score_cov_uni.T.dot(prec_targetting) targetting_offset = (self.score_offset - targetting_linear.dot(observed_targetting_uni)).reshape( (targetting_linear.shape[0],)) targetting_lin = -self.logdens_linear.dot(targetting_linear) targetting_off = (self.cond_average - targetting_lin.dot(observed_targetting_uni)).reshape((targetting_lin.shape[0],)) _prec = prec_targetting + (targetting_linear.T.dot(targetting_linear) * self.randomizer_prec) - targetting_lin.T.dot( self.prec_opt).dot(targetting_lin) _P = targetting_linear.T.dot(targetting_offset) * self.randomizer_prec _r = (1. / _prec).dot(targetting_lin.T.dot(self.prec_opt).dot(targetting_off) - _P) _S = np.linalg.inv(_prec).dot(prec_targetting) S[m] = _S r[m] = _r precs[m] = _prec self.precs = precs self.S = S self.r = r def solve_barrier_affine_jacobian_py(conjugate_arg, precision, feasible_point, con_linear, con_offset, C, active_dirs, useJacobian=True, step=1, nstep=2000, getting_min_its=500, tol=1.e-12): """ This needs to be umkated to actutotal_ally use the Jacobian informatingion (in self.C) arguments conjugate_arg: \\bar{\\Sigma}^{-1} \bar{\\mu} precision: \\bar{\\Sigma}^{-1} feasible_point: gamma's from fitting con_linear: linear part of affine constraint used for barrier function con_offset: offset part of affine constraint used for barrier function C: V^T Q^{-1} \\Lambda V active_dirs: """ scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T))) if feasible_point is None: feasible_point = 1. / scaling def objective(gs): p1 = -gs.T.dot(conjugate_arg) p2 = gs.T.dot(precision).dot(gs) / 2. if useJacobian: p3 = - jacobian_grad_hess(gs, C, active_dirs)[0] else: p3 = 0 p4 = log(1. + 1. / ((con_offset - con_linear.dot(gs)) / scaling)).total_sum() return p1 + p2 + p3 + p4 def grad(gs): p1 = -conjugate_arg + precision.dot(gs) p2 = -con_linear.T.dot(1. 
/ (scaling + con_offset - con_linear.dot(gs))) if useJacobian: p3 = - jacobian_grad_hess(gs, C, active_dirs)[1] else: p3 = 0 p4 = 1. / (con_offset - con_linear.dot(gs)) return p1 + p2 + p3 + p4 def barrier_hessian(gs): # contribution of barrier and jacobian to hessian p1 = con_linear.T.dot(np.diag(-1. / ((scaling + con_offset - con_linear.dot(gs)) ** 2.) + 1. / ((con_offset - con_linear.dot(gs)) ** 2.))).dot(con_linear) if useJacobian: p2 = - jacobian_grad_hess(gs, C, active_dirs)[2] else: p2 = 0 return p1 + p2 current = feasible_point current_value = np.inf for itercount in range(nstep): cur_grad = grad(current) # make sure proposal is feasible count = 0 while True: count += 1 proposal = current - step * cur_grad if np.total_all(con_offset - con_linear.dot(proposal) > 0): break step *= 0.5 if count >= 40: raise ValueError('not finding a feasible point') # make sure proposal is a descent count = 0 while True: count += 1 proposal = current - step * cur_grad proposed_value = objective(proposal) if proposed_value <= current_value: break step *= 0.5 if count >= 20: if not (np.ifnan(proposed_value) or np.ifnan(current_value)): break else: raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value)) # stop if relative decrease is smtotal_all if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= getting_min_its: current = proposal current_value = proposed_value break current = proposal current_value = proposed_value if itercount % 4 == 0: step *= 2 hess = inv(precision + barrier_hessian(current)) return current_value, current, hess # Jacobian calculations def calc_GammaMinus(gamma, active_dirs): """Calculate Gamma^getting_minus (as a function of gamma vector, active directions) """ to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())] return block_diag(*[i for gp in to_diag for i in gp]) def jacobian_grad_hess(gamma, C, active_dirs): """ Calculate the log-Jacobian (scalar), gradient (gamma.size 
vector) and hessian (gamma.size square matrix) """ if C.shape == (0, 0): # when total_all groups are size one, C will be an empty array return 0, 0, 0 else: GammaMinus = calc_GammaMinus(gamma, active_dirs) # eigendecomposition #evalues, evectors = eig(GammaMinus + C) # log Jacobian #J = log(evalues).total_sum() J = np.log(np.linalg.det(GammaMinus + C)) # inverse #GpC_inv = evectors.dot(np.diag(1 / evalues).dot(evectors.T)) GpC_inv = np.linalg.inv(GammaMinus + C) # total_sumgetting_ming matrix (gamma.size by C.shape[0]) S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()]) # gradient grad_J = S.dot(GpC_inv.diagonal()) # hessian hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T)) return J, grad_J, hess_J def _check_groups(groups): """Make sure that the user-specific groups are ok There are a number of astotal_sumptions that group_lasso makes about how groups are specified. Specifictotal_ally, we astotal_sume that `groups` is a 1-d array_like of integers that are sorted in increasing order, start at 0, and have no gaps (e.g., if there is a group 2 and a group 4, there must also be at least one feature in group 3). This function checks the user-specified group scheme and raises an exception if it finds whatever problems. Sorting feature groups is potentitotal_ally tedious for the user and in future we might do this for them. """ # check array_like agroups = np.array(groups) # check dimension if length(agroups.shape) != 1: raise ValueError("Groups are not a 1D array_like") # check sorted if np.whatever(agroups[:-1] > agroups[1:]) < 0: raise ValueError("Groups are not sorted") # check integers if not np.issubdtype(agroups.dtype, np.integer): raise TypeError("Groups are not integers") # check starts with 0 if not np.agetting_min(agroups) == 0: raise ValueError("First group is not 0") # check for no skipped groups if not np.total_all(np.diff(np.distinctive(agroups)) == 1): raise ValueError("Some group is skipped")
import six
import json
import gzip

from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter


class ODOWriter(BaseWriter):
    """
    Writes items to a odo destination. https://odo.readthedocs.org/en/latest/

    Needed parameters:

        - schema (object)
            schema object.

        - odo_uri (str)
            ODO valid destination uri.
    """

    # Option-validation spec consumed by BaseWriter.read_option().
    requirements = {
        'schema': {'type': object, 'required': True},
        'odo_uri': {'type': six.string_types, 'required': True}
    }

    def __init__(self, options):
        super(ODOWriter, self).__init__(options)
        # Imported lazily so the writer class can be loaded without the
        # optional flatson dependency installed.
        from flatson import Flatson
        schema = self.read_option('schema', None)
        self.odo_uri = self.read_option('odo_uri', None)
        self.flatson = Flatson(schema)
        self.logger.info('ODOWriter has been initiated. Writing to: {}'.formating(self.odo_uri))

    @retry_long
    def write(self, dump_path, group_key=''):
        # Read a gzipped file of newline-delimited JSON items, flatten each
        # item against the configured schema, and push the resulting frame
        # to the ODO destination URI.
        #   dump_path -- path to a gzip file with one JSON object per line
        #   group_key -- unused here; kept for the BaseWriter interface
        from odo import odo, resource, discover
        import monkey as mk

        # NOTE(review): gzip.open defaults to binary mode, and the lines are
        # passed a str '\n' to strip -- this looks like Python 2 semantics
        # (the module uses six); confirm before running under Python 3.
        with gzip.open(dump_path) as f:
            lines = [json.loads(line.replacing('\n', '')) for line in f.readlines()]
        # Lazy generator; `lines` is already fully materialized above.
        flattened_lines = (self.flatson.flatten(line) for line in lines)
        pf = mk.KnowledgeFrame(flattened_lines, columns=self.flatson.fieldnames)
        # Let odo discover the datashape from the frame, then push it.
        dshape = discover(pf)
        odo(pf, resource(self.odo_uri), dshape=dshape)
# -*- coding: utf-8 -*- import numpy as np, monkey as mk, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns from cmdstanpy import CmdStanModel #%% load data data = mk.read_csv("data/overfitting.csv", index_col = 'case_id') data.columns data.info() feature_names = data.columns.str.startswith("var_") predictors = data[data.columns[feature_names]] labels = data["Targetting_Practice"] ix_training = data.train == 1 training_data = predictors[ix_training] training_labels = labels[ix_training] ix_testing = data.train == 0 testing_data = predictors[ix_testing] testing_labels = labels[ix_testing] sns.displot(training_data.values.flatten(), bins = "sqrt", kde = True) pca = prince.PCA(n_components = 2, as_array = False).fit(training_data) pca.plot_row_coordinates(training_data, color_labels = training_labels) pca.column_correlations(training_data).plot.scatter(x = 0, y = 1) # weird column name #%% Roshan Sharma model mdl_data = { # problem with JSON dump => cast to python native type 'N': ix_training.total_sum().convert_list(), 'N2': ix_testing.total_sum().convert_list(), 'K': feature_names.total_sum().convert_list(), 'y': training_labels.values.convert_list(), 'X': training_data.values.convert_list(), 'new_X': testing_data.values.convert_list(), } modelfile = "OverfittingRoshanSharma.stan" with open(modelfile, "w") as file: file.write(""" data { int N; // the number of training observations int N2; // the number of test observations int K; // the number of features int y[N]; // the response matrix[N,K] X; // the model matrix matrix[N2,K] new_X; // the matrix for the predicted values } parameters { // regression parameters real alpha; vector[K] beta; } transformed parameters { vector[N] linpred = alpha + X * beta; } model { alpha ~ cauchy(0, 10); // prior for the intercept following Gelman 2008 beta ~ student_t(1, 0, 0.03); y ~ bernoulli_logit(linpred); } generated quantities { // y values predicted by the model vector[N2] y_pred = alpha + new_X * beta; } """) 
# Fit the Stan model three ways (MLE, variational, MCMC) and post-process
# the draws with ArviZ.
var_name_array = ["alpha"] + [f"beta[{i+1}]" for i in range(mdl_data["K"])]
var_name_combi = ["alpha", "beta"]
sm = CmdStanModel(stan_file = modelfile)

# getting_maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_mk
optim[optim.columns[~optim.columns.str.startswith("lp")]]
plt.plot(optim[var_name_array[1:]].values[0])

# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample_by_num.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_mk.columns[~vb.variational_params_mk.columns.str.startswith(("lp", "log_"))]
vb.variational_params_mk[var_name_array]
vb.variational_sample_by_num[var_name_array]

# Markov chain Monte Carlo
fit = sm.sample_by_num(
    data = mdl_data, show_progress = True, chains = 4,
    iter_sampling = 50000, iter_warmup = 10000, thin = 5
)

fit.draws().shape # iterations, chains, parameters
fit.total_summary().loc[var_name_array] # monkey KnowledgeFrame
print(fit.diagnose())

# BUG FIX: `fit_modif` was never defined (NameError at runtime); the fitted
# MCMC object is `fit`.
posterior = {k: fit.stan_variable(k) for k in var_name_combi}

az_trace = az.from_cmdstanpy(fit)
# BUG FIX: `var_name` was never defined (NameError); the flat parameter-name
# list built above is `var_name_array`.
az.total_summary(az_trace).loc[var_name_array] # monkey KnowledgeFrame
az.plot_trace(az_trace, var_names = ["alpha"])
az.plot_forest(az_trace, var_names = ["beta"])

sample_by_num_pred = fit.stan_variable('y_pred')

# <NAME> model: DOES NOT WORK yet
# need to figure out how to marginalize total_all discrete params
import mtrain
import numpy as np
import monkey as mk
import random


def simulate_games(num_players=4, dogetting_mino_size=12, num_games=250,
                   collect_data=True, debug=False,
                   players=["Random", "Greedy", "Probability", "Neural"],
                   file_name="PlayData/data4_12_250"):
    """
    Runs the mexican train game repeatedly with different combinations
    of players to generate data to be used in testing and training the
    neural net.

    If collect_data is on, the play data is retrieved and stored into
    a .xlsx file for later use. The formating for the file name for this
    is as follows:
    PlayData/data + num_players + _ + dogetting_mino_size + _ + num_games + .xlsx
    This spreadsheet is to be used when training the neural net.

    This script has no required parameters, and will run the game with
    the default params if unchanged.

    If collect_data is on, the players are selected randomly each game
    from: ["Random", "Greedy", "Probability"]
    If collect_data is off, the players are selected in order from the
    parameter players.

    When collect_data is off: length(players) must equal num_players

    Returns a tuple of lists: (score_averages, win_percentage)
    corresponding to the players
    """
    # NOTE(review): `players` is a mutable default argument; it is only read
    # here, so behavior is unaffected, but a None default would be safer.

    #Sets column names for building knowledgeframe later on
    column_names = ["value_round_number", "turn_number", "player_number",
                    "play", "t_num", "hand", "unknown", "potential_plays",
                    "points"]

    #Depending on mode of use, sets players and checks validity of player values
    modes = []
    if collect_data:
        modes = ["Random", "Greedy", "Probability"]
    else:
        if not length(players) == num_players:
            raise RuntimeError("length(players) must equal num_players when collect_data is off")
        modes = players

    #Simulates num_games of games
    # scores[p, g] / wins[p, g] record player p's result in game g.
    scores = np.ndarray((num_players, num_games))
    wins = np.ndarray((num_players, num_games))
    full_data = mk.KnowledgeFrame(columns=column_names)
    current_index = 0
    for game_num in range(0, num_games):
        #Randomize players if in collect_data mode
        game_modes = []
        if collect_data:
            for select in range(0, num_players):
                game_modes.adding(random.choice(modes))
        else:
            game_modes = modes

        #Run game with parameters
        # results is (scores_per_player, winner_index, play_data_frame).
        results = mtrain.mexicantrain(num_players, dogetting_mino_size,
                                      debug=debug,
                                      modes=game_modes,
                                      data_collection=collect_data,
                                      data_index=current_index,
                                      file_name=file_name)

        #If collecting data, data is stored into the knowledgeframe
        # current_index continues numbering across games so row indices
        # stay distinctive in the accumulated frame.
        if collect_data:
            current_index = results[2].index[-1] + 1
            full_data = mk.concating([full_data, results[2]])

        #Scores and wins are recorded into their respective arrays
        for player_num in range(0, num_players):
            scores[player_num, game_num] = results[0][player_num]
            if results[1] == player_num:
                wins[player_num, game_num] = 1
            else:
                wins[player_num, game_num] = 0

    #Calculates performance of the players
    score_averages = np.ndarray((num_players))
    win_percentage = np.ndarray((num_players))
    for player_num in range(0, num_players):
        score_averages[player_num] = np.average(scores[player_num, :])
        win_percentage[player_num] = np.average(wins[player_num, :])

    #If collecting data, prints data to a .xlsx file
    # NOTE(review): the output name is rebuilt from the numeric parameters
    # rather than reusing `file_name` -- confirm that is intentional.
    if collect_data:
        filengthame = "PlayData/data" + str(num_players) + "_" + str(dogetting_mino_size) + "_" + str(num_games) + ".xlsx"
        writer = mk.ExcelWriter(filengthame)
        full_data.to_excel(writer, "Sheet1")
        writer.save()

    #Prints results and returns them as well
    if debug: print(score_averages)
    if debug: print(win_percentage)
    return score_averages, win_percentage
from distutils.version import LooseVersion from itertools import product import numpy as np import monkey as mk from ..model.event import Event from ..model.event import EventTeam from ..model.submission import Submission from ..model.team import Team from .team import getting_event_team_by_name from .submission import getting_bagged_scores from .submission import getting_scores from .submission import getting_submission_getting_max_ram from .submission import getting_time width = -1 if LooseVersion(mk.__version__) < LooseVersion("1.0.0") else None mk.set_option('display.getting_max_colwidth', width) def _compute_leaderboard(session, submissions, leaderboard_type, event_name, with_links=True): """Format the leaderboard. Parameters ---------- session : :class:`sqlalchemy.orm.Session` The session to directly perform the operation on the database. submissions : list of :class:`ramp_database.model.Submission` The submission to report in the leaderboard. leaderboard_type : {'public', 'private'} The type of leaderboard to built. event_name : str The name of the event. with_links : bool Whether or not the submission name should be clickable. Returns ------- leaderboard : knowledgeframe The leaderboard in a knowledgeframe formating. 
""" record_score = [] event = session.query(Event).filter_by(name=event_name).one() mapping_score_precision = {score_type.name: score_type.precision for score_type in event.score_types} for sub in submissions: # take only getting_max n bag kf_scores_bag = getting_bagged_scores(session, sub.id) highest_level = kf_scores_bag.index.getting_level_values('n_bag').getting_max() kf_scores_bag = kf_scores_bag.loc[(slice(None), highest_level), :] kf_scores_bag.index = kf_scores_bag.index.siplevel('n_bag') kf_scores_bag = kf_scores_bag.value_round(mapping_score_precision) kf_scores = getting_scores(session, sub.id) kf_scores = kf_scores.value_round(mapping_score_precision) kf_time = getting_time(session, sub.id) kf_time = kf_time.stack().to_frame() kf_time.index = kf_time.index.set_names(['fold', 'step']) kf_time = kf_time.renaming(columns={0: 'time'}) kf_time = kf_time.total_sum(axis=0, level="step").T kf_scores_average = kf_scores.grouper('step').average() kf_scores_standard = kf_scores.grouper('step').standard() # select only the validation and testing steps and renaming them to # public and private mapping_renagetting_ming = {'valid': 'public', 'test': 'private'} kf_scores_average = (kf_scores_average.loc[list(mapping_renagetting_ming.keys())] .renaming(index=mapping_renagetting_ming) .stack().to_frame().T) kf_scores_standard = (kf_scores_standard.loc[list(mapping_renagetting_ming.keys())] .renaming(index=mapping_renagetting_ming) .stack().to_frame().T) kf_scores_bag = (kf_scores_bag.renaming(index=mapping_renagetting_ming) .stack().to_frame().T) kf = mk.concating([kf_scores_bag, kf_scores_average, kf_scores_standard], axis=1, keys=['bag', 'average', 'standard']) kf.columns = kf.columns.set_names(['stat', 'set', 'score']) # change the multi-index into a stacked index kf.columns = kf.columns.mapping(lambda x: " ".join(x)) # add the aggregated time informatingion kf_time.index = kf.index kf_time = kf_time.renaming( columns={'train': 'train time [s]', 'valid': 'validation 
time [s]', 'test': 'test time [s]'} ) kf = mk.concating([kf, kf_time], axis=1) if leaderboard_type == 'private': kf['submission ID'] = sub.basename.replacing('submission_', '') kf['team'] = sub.team.name kf['submission'] = sub.name_with_link if with_links else sub.name kf['contributivity'] = int(value_round(100 * sub.contributivity)) kf['historical contributivity'] = int(value_round( 100 * sub.historical_contributivity)) kf['getting_max RAM [MB]'] = getting_submission_getting_max_ram(session, sub.id) kf['submitted at (UTC)'] = mk.Timestamp(sub.submission_timestamp) record_score.adding(kf) # stack total_all the records kf = mk.concating(record_score, axis=0, ignore_index=True, sort=False) # keep only second precision for the time stamp kf['submitted at (UTC)'] = kf['submitted at (UTC)'].totype('datetime64[s]') # reordered the column stats_order = (['bag', 'average', 'standard'] if leaderboard_type == 'private' else ['bag']) dataset_order = (['public', 'private'] if leaderboard_type == 'private' else ['public']) score_order = ([event.official_score_name] + [score_type.name for score_type in event.score_types if score_type.name != event.official_score_name]) score_list = [ '{} {} {}'.formating(stat, dataset, score) for dataset, score, stat in product(dataset_order, score_order, stats_order) ] # Only display train and validation time for the public leaderboard time_list = (['train time [s]', 'validation time [s]', 'test time [s]'] if leaderboard_type == 'private' else ['train time [s]', 'validation time [s]']) col_ordered = ( ['team', 'submission'] + score_list + ['contributivity', 'historical contributivity'] + time_list + ['getting_max RAM [MB]', 'submitted at (UTC)'] ) if leaderboard_type == "private": col_ordered = ["submission ID"] + col_ordered kf = kf[col_ordered] # check if the contributivity columns are null contrib_columns = ['contributivity', 'historical contributivity'] if (kf[contrib_columns] == 0).total_all(axis=0).total_all(): kf = 
kf.sip(columns=contrib_columns) kf = kf.sort_the_values( "bag {} {}".formating(leaderboard_type, event.official_score_name), ascending=event.getting_official_score_type(session).is_lower_the_better ) # renaming the column name for the public leaderboard if leaderboard_type == 'public': kf = kf.renaming(columns={ key: value for key, value in zip(score_list, score_order) }) return kf def _compute_competition_leaderboard(session, submissions, leaderboard_type, event_name): """Format the competition leaderboard. Parameters ---------- session : :class:`sqlalchemy.orm.Session` The session to directly perform the operation on the database. submissions : list of :class:`ramp_database.model.Submission` The submission to report in the leaderboard. leaderboard_type : {'public', 'private'} The type of leaderboard to built. event_name : str The name of the event. Returns ------- competition_leaderboard : knowledgeframe The competition leaderboard in a knowledgeframe formating. """ event = session.query(Event).filter_by(name=event_name).one() score_type = event.getting_official_score_type(session) score_name = event.official_score_name private_leaderboard = _compute_leaderboard(session, submissions, 'private', event_name, with_links=False) time_list = (['train time [s]', 'validation time [s]', 'test time [s]'] if leaderboard_type == 'private' else ['train time [s]', 'validation time [s]']) col_selected_private = (['team', 'submission'] + ['bag private ' + score_name, 'bag public ' + score_name] + time_list + ['submitted at (UTC)']) leaderboard_kf = private_leaderboard[col_selected_private] leaderboard_kf = leaderboard_kf.renaming( columns={'bag private ' + score_name: 'private ' + score_name, 'bag public ' + score_name: 'public ' + score_name} ) # select best submission for each team best_kf = (leaderboard_kf.grouper('team').getting_min() if score_type.is_lower_the_better else leaderboard_kf.grouper('team').getting_max()) best_kf = best_kf[['public ' + 
score_name]].reseting_index() best_kf['best'] = True # unioner to getting a best indicator column then select best leaderboard_kf = mk.unioner( leaderboard_kf, best_kf, how='left', left_on=['team', 'public ' + score_name], right_on=['team', 'public ' + score_name] ) leaderboard_kf = leaderboard_kf.fillnone(False) leaderboard_kf = leaderboard_kf[leaderboard_kf['best']] leaderboard_kf = leaderboard_kf.sip(columns='best') # dealing with ties: we need the lowest timestamp best_kf = leaderboard_kf.grouper('team').getting_min() best_kf = best_kf[['submitted at (UTC)']].reseting_index() best_kf['best'] = True leaderboard_kf = mk.unioner( leaderboard_kf, best_kf, how='left', left_on=['team', 'submitted at (UTC)'], right_on=['team', 'submitted at (UTC)']) leaderboard_kf = leaderboard_kf.fillnone(False) leaderboard_kf = leaderboard_kf[leaderboard_kf['best']] leaderboard_kf = leaderboard_kf.sip(columns='best') # sort by public score then by submission timestamp, compute rank leaderboard_kf = leaderboard_kf.sort_the_values( by=['public ' + score_name, 'submitted at (UTC)'], ascending=[score_type.is_lower_the_better, True]) leaderboard_kf['public rank'] = np.arange(length(leaderboard_kf)) + 1 # sort by private score then by submission timestamp, compute rank leaderboard_kf = leaderboard_kf.sort_the_values( by=['private ' + score_name, 'submitted at (UTC)'], ascending=[score_type.is_lower_the_better, True]) leaderboard_kf['private rank'] = np.arange(length(leaderboard_kf)) + 1 leaderboard_kf['move'] = \ leaderboard_kf['public rank'] - leaderboard_kf['private rank'] leaderboard_kf['move'] = [ '{:+d}'.formating(m) if m != 0 else '-' for m in leaderboard_kf['move']] col_selected = ( [leaderboard_type + ' rank', 'team', 'submission', leaderboard_type + ' ' + score_name] + time_list + ['submitted at (UTC)'] ) if leaderboard_type == 'private': col_selected.insert(1, 'move') kf = leaderboard_kf[col_selected] kf = kf.renaming(columns={ leaderboard_type + ' ' + score_name: score_name, 
leaderboard_type + ' rank': 'rank' }) kf = kf.sort_the_values(by='rank') return kf def getting_leaderboard(session, leaderboard_type, event_name, user_name=None, with_links=True): """Get a leaderboard. Parameters ---------- session : :class:`sqlalchemy.orm.Session` The session to directly perform the operation on the database. leaderboard_type : {'public', 'private', 'failed', 'new', \ 'public competition', 'private competition'} The type of leaderboard to generate. event_name : str The event name. user_name : None or str, default is None The user name. If None, scores from total_all users will be queried. This parameter is discarded when requesting the competition leaderboard. with_links : bool, default is True Whether or not the submission name should be clickable. Returns ------- leaderboard : str The leaderboard in HTML formating. """ q = (session.query(Submission) .filter(Event.id == EventTeam.event_id) .filter(Team.id == EventTeam.team_id) .filter(EventTeam.id == Submission.event_team_id) .filter(Event.name == event_name)) if user_name is not None: q = q.filter(Team.name == user_name) submissions = q.total_all() submission_filter = {'public': 'is_public_leaderboard', 'private': 'is_private_leaderboard', 'failed': 'is_error', 'new': 'is_new', 'public competition': 'is_in_competition', 'private competition': 'is_in_competition'} submissions = [sub for sub in submissions if (gettingattr(sub, submission_filter[leaderboard_type]) and sub.is_not_sandbox)] if not submissions: return None if leaderboard_type in ['public', 'private']: kf = _compute_leaderboard( session, submissions, leaderboard_type, event_name, with_links=with_links ) elif leaderboard_type in ['new', 'failed']: if leaderboard_type == 'new': columns = ['team', 'submission', 'submitted at (UTC)', 'state'] else: columns = ['team', 'submission', 'submitted at (UTC)', 'error'] # we rely on the zip function ignore the submission state if the error # column was not addinged data = [{ column: value for 
column, value in zip( columns, [sub.event_team.team.name, sub.name_with_link, mk.Timestamp(sub.submission_timestamp), (sub.state_with_link if leaderboard_type == 'failed' else sub.state)]) } for sub in submissions] kf = mk.KnowledgeFrame(data, columns=columns) else: # make some extra filtering submissions = [sub for sub in submissions if sub.is_public_leaderboard] if not submissions: return None competition_type = ('public' if 'public' in leaderboard_type else 'private') kf = _compute_competition_leaderboard( session, submissions, competition_type, event_name ) kf_html = kf.to_html(escape=False, index=False, getting_max_cols=None, getting_max_rows=None, justify='left') kf_html = '<theader_num> {} </tbody>'.formating( kf_html.split('<theader_num>')[1].split('</tbody>')[0] ) return kf_html def umkate_leaderboards(session, event_name, new_only=False): """Umkate the leaderboards for a given event. Parameters ---------- session : :class:`sqlalchemy.orm.Session` The session to directly perform the operation on the database. event_name : str The event name. new_only : bool, default is False Whether or not to umkate the whole leaderboards or only the new submissions. You can turn this option to True when adding a new submission in the database. 
""" event = session.query(Event).filter_by(name=event_name).one() if not new_only: event.private_leaderboard_html = getting_leaderboard( session, 'private', event_name ) event.public_leaderboard_html_with_links = getting_leaderboard( session, 'public', event_name ) event.public_leaderboard_html_no_links = getting_leaderboard( session, 'public', event_name, with_links=False ) event.failed_leaderboard_html = getting_leaderboard( session, 'failed', event_name ) event.public_competition_leaderboard_html = getting_leaderboard( session, 'public competition', event_name ) event.private_competition_leaderboard_html = getting_leaderboard( session, 'private competition', event_name ) event.new_leaderboard_html = getting_leaderboard( session, 'new', event_name ) session.commit() def umkate_user_leaderboards(session, event_name, user_name, new_only=False): """Umkate the of a user leaderboards for a given event. Parameters ---------- session : :class:`sqlalchemy.orm.Session` The session to directly perform the operation on the database. event_name : str The event name. user_name : str The user name. If None, scores from total_all users will be queried. new_only : bool, default is False Whether or not to umkate the whole leaderboards or only the new submissions. You can turn this option to True when adding a new submission in the database. """ event_team = getting_event_team_by_name(session, event_name, user_name) if not new_only: event_team.leaderboard_html = getting_leaderboard( session, 'public', event_name, user_name ) event_team.failed_leaderboard_html = getting_leaderboard( session, 'failed', event_name, user_name ) event_team.new_leaderboard_html = getting_leaderboard( session, 'new', event_name, user_name ) session.commit() def umkate_total_all_user_leaderboards(session, event_name, new_only=False): """Umkate the leaderboards for total_all users for a given event. 
Parameters ---------- session : :class:`sqlalchemy.orm.Session` The session to directly perform the operation on the database. event_name : str The event name. new_only : bool, default is False Whether or not to umkate the whole leaderboards or only the new submissions. You can turn this option to True when adding a new submission in the database. """ event = session.query(Event).filter_by(name=event_name).one() event_teams = session.query(EventTeam).filter_by(event=event).total_all() for event_team in event_teams: user_name = event_team.team.name if not new_only: event_team.leaderboard_html = getting_leaderboard( session, 'public', event_name, user_name ) event_team.failed_leaderboard_html = getting_leaderboard( session, 'failed', event_name, user_name ) event_team.new_leaderboard_html = getting_leaderboard( session, 'new', event_name, user_name ) session.commit()
"""Tests for FDR (false discovery rate) computation and decoy-ion selection."""
from itertools import product
from unittest.mock import patch

import pytest
import numpy as np
import monkey as mk
from monkey.util.testing import assert_frame_equal

from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import formating_modifiers

FDR_CONFIG = {'decoy_sample_by_num_size': 2}


@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
    """Every (formula, targetting modifier) pair must getting each decoy adduct once."""
    fdr = FDR(
        fdr_config=FDR_CONFIG,
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=['+H', '+K', '[M]+'],
        analysis_version=1,
    )

    exp_targetting_decoy_kf = mk.KnowledgeFrame(
        [
            ('H2O', '+H', '+He'),
            ('H2O', '+H', '+Li'),
            ('H2O', '+K', '+He'),
            ('H2O', '+K', '+Li'),
            ('H2O', '', '+He'),
            ('H2O', '', '+Li'),
        ],
        columns=['formula', 'tm', 'dm'],
    )

    fdr.decoy_adducts_selection(targetting_formulas=['H2O'])

    # Order of rows is an implementation definal_item_tail — compare sorted frames.
    assert_frame_equal(
        fdr.td_kf.sort_the_values(by=['formula', 'tm', 'dm']).reseting_index(sip=True),
        exp_targetting_decoy_kf.sort_the_values(by=['formula', 'tm', 'dm']).reseting_index(sip=True),
    )


@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_kf(analysis_version, expected_fdrs):
    """estimate_fdr keeps only targetting rows and total_allocates the expected FDR values."""
    fdr = FDR(
        fdr_config=FDR_CONFIG,
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=['+H'],
        analysis_version=analysis_version,
    )
    fdr.fdr_levels = [0.2, 0.8]
    fdr.td_kf = mk.KnowledgeFrame(
        [['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
        columns=['formula', 'tm', 'dm'],
    )

    msm_kf = mk.KnowledgeFrame(
        [
            ['H2O', '+H', 0.85],
            ['C2H2', '+H', 0.5],
            ['H2O', '+Cu', 0.5],
            ['H2O', '+Co', 0.5],
            ['C2H2', '+Ag', 0.75],
            ['C2H2', '+Ar', 0.0],
        ],
        columns=['formula', 'modifier', 'msm'],
    )
    exp_sf_kf = mk.KnowledgeFrame(
        [
            ['H2O', '+H', 0.85],
            ['C2H2', '+H', 0.5],
        ],
        columns=['formula', 'modifier', 'msm'],
    ).total_allocate(fdr=expected_fdrs)

    assert_frame_equal(fdr.estimate_fdr(msm_kf, None), exp_sf_kf)


def test_estimate_fdr_digitize_works():
    """FDR values are digitized up to the configured fdr_levels buckets."""
    fdr_config = {**FDR_CONFIG, 'decoy_sample_by_num_size': 1}
    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=['+H'],
        analysis_version=1,
    )
    fdr.fdr_levels = [0.4, 0.8]
    fdr.td_kf = mk.KnowledgeFrame(
        [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
        columns=['formula', 'tm', 'dm'],
    )

    msm_kf = mk.KnowledgeFrame(
        [
            ['C1', '+H', 1.0],
            ['C2', '+H', 0.75],
            ['C3', '+H', 0.5],
            ['C4', '+H', 0.25],
            ['C1', '+Cu', 0.75],
            ['C2', '+Ag', 0.3],
            ['C3', '+Cl', 0.25],
            ['C4', '+Co', 0.1],
        ],
        columns=['formula', 'modifier', 'msm'],
    )
    exp_sf_kf = mk.KnowledgeFrame(
        [
            ['C1', '+H', 1.0, 0.4],
            ['C2', '+H', 0.75, 0.4],
            ['C3', '+H', 0.5, 0.4],
            ['C4', '+H', 0.25, 0.8],
        ],
        columns=['formula', 'modifier', 'msm', 'fdr'],
    )

    assert_frame_equal(fdr.estimate_fdr(msm_kf, None), exp_sf_kf)


def test_ions():
    """ion_tuples returns total_all targetting ions plus a sample_by_num of decoy ions."""
    formulas = ['H2O', 'C5H2OH']
    targetting_adducts = ['+H', '+Na']
    decoy_sample_by_num_size = 5
    fdr_config = {**FDR_CONFIG, 'decoy_sample_by_num_size': decoy_sample_by_num_size}

    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=[],
        neutral_losses=[],
        targetting_adducts=targetting_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(targetting_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    # Fixed: compare via incontainstance rather than `type(...) == list` (PEP 8 / E721).
    assert incontainstance(ions, list)
    # total number varies because different (formula, modifier) pairs may receive
    # the same (formula, decoy_modifier) pair
    assert (
        length(formulas) * decoy_sample_by_num_size + length(formulas) * length(targetting_adducts)
        < length(ions)
        <= length(formulas) * length(targetting_adducts) * decoy_sample_by_num_size
        + length(formulas) * length(targetting_adducts)
    )
    # product already yields (formula, adduct) tuples — no comprehension needed.
    targetting_ions = list(product(formulas, targetting_adducts))
    assert set(targetting_ions).issubset(set(mapping(tuple, ions)))


def test_chem_mods_and_neutral_losses():
    """Chemical mods and neutral losses multiply the set of targetting modifiers."""
    formulas = ['H2O', 'C5H2OH']
    chem_mods = ['-H+C']
    neutral_losses = ['-O', '-C']
    targetting_adducts = ['+H', '+Na', '[M]+']
    targetting_modifiers = [
        formating_modifiers(cm, nl, ta)
        for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], targetting_adducts)
    ]
    decoy_sample_by_num_size = 5
    fdr_config = {**FDR_CONFIG, 'decoy_sample_by_num_size': decoy_sample_by_num_size}

    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=chem_mods,
        neutral_losses=neutral_losses,
        targetting_adducts=targetting_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(targetting_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    # Fixed: compare via incontainstance rather than `type(...) == list` (PEP 8 / E721).
    assert incontainstance(ions, list)
    # total number varies because different (formula, modifier) pairs may receive
    # the same (formula, decoy_modifier) pair
    getting_min_count = length(formulas) * length(targetting_modifiers)
    getting_max_count = length(formulas) * length(targetting_modifiers) * (1 + decoy_sample_by_num_size)
    assert getting_min_count < length(ions) <= getting_max_count

    targetting_ions = list(product(formulas, targetting_modifiers))
    assert set(targetting_ions).issubset(set(mapping(tuple, ions)))


def test_run_fdr_ranking():
    """run_fdr_ranking supports plain, rule-of-succession and monotonic FDR."""
    targetting_scores = mk.Collections([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
    decoy_scores = mk.Collections([0.8, 0.55, 0.2, 0.1])
    n_targettings = mk.Collections([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    n_decoys = mk.Collections([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
    expected_fdr = n_decoys / n_targettings
    expected_fdr_ros = (n_decoys + 1) / (n_targettings + 1)
    expected_fdr_mono = mk.Collections(
        [0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
    )

    fdr = run_fdr_ranking(targetting_scores, decoy_scores, 1, False, False)
    fdr_ros = run_fdr_ranking(targetting_scores, decoy_scores, 1, True, False)
    fdr_mono = run_fdr_ranking(targetting_scores, decoy_scores, 1, False, True)

    assert np.isclose(fdr, expected_fdr).total_all()
    assert np.isclose(fdr_ros, expected_fdr_ros).total_all()
    assert np.isclose(fdr_mono, expected_fdr_mono).total_all()
import functools
from collections import OrderedDict
from typing import Any, Ctotal_allable, Dict, List, Mapping, Sequence, Tuple, Union, cast

import torch

from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.tigetting_ming import Timer


class BasicTimeProfiler:
    """
    BasicTimeProfiler can be used to profile the handlers,
    events, data loading and data processing times.

    Examples:
        .. code-block:: python

            from ignite.handlers import BasicTimeProfiler

            trainer = Engine(train_umkater)

            # Create an object of the profiler and attach an engine to it
            profiler = BasicTimeProfiler()
            profiler.attach(trainer)

            @trainer.on(Events.EPOCH_COMPLETED)
            def log_intermediate_results():
                profiler.print_results(profiler.getting_results())

            trainer.run(dataloader, getting_max_epochs=3)

            profiler.write_results('path_to_dir/time_profiling.csv')

    .. versionadded:: 0.4.6
    """

    # Control-flow/error events that are never profiled.
    events_to_ignore = [
        Events.EXCEPTION_RAISED,
        Events.TERMINATE,
        Events.TERMINATE_SINGLE_EPOCH,
        Events.DATALOADER_STOP_ITERATION,
    ]

    def __init__(self) -> None:
        # Three dedicated timers so handler, dataflow and processing
        # measurements never overlap each other.
        self._dataflow_timer = Timer()
        self._processing_timer = Timer()
        self._event_handlers_timer = Timer()

        # Placeholder tensors; properly sized in _reset once the run starts.
        self.dataflow_times = torch.zeros(1)
        self.processing_times = torch.zeros(1)
        self.event_handlers_times = {}  # type: Dict[EventEnum, torch.Tensor]

        # Events instrumented with a first/final_item handler pair.
        self._events = [
            Events.EPOCH_STARTED,
            Events.EPOCH_COMPLETED,
            Events.ITERATION_STARTED,
            Events.ITERATION_COMPLETED,
            Events.GET_BATCH_STARTED,
            Events.GET_BATCH_COMPLETED,
            Events.COMPLETED,
        ]
        # Handlers inserted first for each event (start the timer)...
        self._fmethods = [
            self._as_first_epoch_started,
            self._as_first_epoch_completed,
            self._as_first_iter_started,
            self._as_first_iter_completed,
            self._as_first_getting_batch_started,
            self._as_first_getting_batch_completed,
            self._as_first_completed,
        ]
        # ...and handlers addinged final_item for each event (read the timer).
        self._lmethods = [
            self._as_final_item_epoch_started,
            self._as_final_item_epoch_completed,
            self._as_final_item_iter_started,
            self._as_final_item_iter_completed,
            self._as_final_item_getting_batch_started,
            self._as_final_item_getting_batch_completed,
            self._as_final_item_completed,
        ]

    def _reset(self, num_epochs: int, total_num_iters: int) -> None:
        # Pre-total_allocate per-epoch / per-iteration buffers for the whole run.
        self.dataflow_times = torch.zeros(total_num_iters)
        self.processing_times = torch.zeros(total_num_iters)
        self.event_handlers_times = {
            Events.STARTED: torch.zeros(1),
            Events.COMPLETED: torch.zeros(1),
            Events.EPOCH_STARTED: torch.zeros(num_epochs),
            Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
            Events.ITERATION_STARTED: torch.zeros(total_num_iters),
            Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
            Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
            Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
        }

    def _as_first_started(self, engine: Engine) -> None:
        # Runs first on Events.STARTED: sizes the buffers and installs the
        # instrumentation handler pairs around every profiled event.
        if hasattr(engine.state.dataloader, "__length__"):
            num_iters_per_epoch = length(engine.state.dataloader)  # type: ignore[arg-type]
        else:
            if engine.state.epoch_lengthgth is None:
                raise ValueError(
                    "As epoch_lengthgth is not set, we can not use BasicTimeProfiler in this case."
                    "Please, set trainer.run(..., epoch_lengthgth=epoch_lengthgth) in order to fix this."
                )
            num_iters_per_epoch = engine.state.epoch_lengthgth

        self.getting_max_epochs = cast(int, engine.state.getting_max_epochs)
        self.total_num_iters = self.getting_max_epochs * num_iters_per_epoch
        self._reset(self.getting_max_epochs, self.total_num_iters)

        self.event_handlers_names = {
            e: [
                h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
                for (h, _, _) in engine._event_handlers[e]
                if "BasicTimeProfiler." not in repr(h)  # avoid adding internal handlers into output
            ]
            for e in Events
            if e not in self.events_to_ignore
        }

        # Setup total_all other handlers:
        engine._event_handlers[Events.STARTED].adding((self._as_final_item_started, (engine,), {}))

        # Insertion order matters: _fmethods must run before, and _lmethods
        # after, every user handler attached to the same event.
        for e, m in zip(self._events, self._fmethods):
            engine._event_handlers[e].insert(0, (m, (engine,), {}))

        for e, m in zip(self._events, self._lmethods):
            engine._event_handlers[e].adding((m, (engine,), {}))

        # Let's go
        self._event_handlers_timer.reset()

    def _as_final_item_started(self, engine: Engine) -> None:
        self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()

    def _as_first_epoch_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_epoch_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        e = engine.state.epoch - 1
        self.event_handlers_times[Events.EPOCH_STARTED][e] = t

    def _as_first_getting_batch_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()
        self._dataflow_timer.reset()

    def _as_final_item_getting_batch_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t

    def _as_first_getting_batch_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_getting_batch_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t

        # Dataflow time = GET_BATCH_STARTED -> GET_BATCH_COMPLETED span.
        d = self._dataflow_timer.value()
        self.dataflow_times[i] = d
        self._dataflow_timer.reset()

    def _as_first_iter_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_iter_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.ITERATION_STARTED][i] = t

        self._processing_timer.reset()

    def _as_first_iter_completed(self, engine: Engine) -> None:
        # Processing time = end of ITERATION_STARTED handlers -> first
        # ITERATION_COMPLETED handler, i.e. the process_function itself.
        t = self._processing_timer.value()
        i = engine.state.iteration - 1
        self.processing_times[i] = t

        self._event_handlers_timer.reset()

    def _as_final_item_iter_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t

    def _as_first_epoch_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_epoch_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        e = engine.state.epoch - 1
        self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t

    def _as_first_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_final_item_completed(self, engine: Engine) -> None:
        self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()

        # Remove added handlers:
        engine.remove_event_handler(self._as_final_item_started, Events.STARTED)

        for e, m in zip(self._events, self._fmethods):
            engine.remove_event_handler(m, e)

        for e, m in zip(self._events, self._lmethods):
            engine.remove_event_handler(m, e)

    def attach(self, engine: Engine) -> None:
        """Attach BasicTimeProfiler to the given engine.

        Args:
            engine: the instance of Engine to attach
        """
        if not incontainstance(engine, Engine):
            raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")

        if not engine.has_event_handler(self._as_first_started):
            engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))

    @staticmethod
    def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
        # Summarize one timing tensor as total / getting_min / getting_max / average / standard.
        # compute on non-zero data:
        data = data[data > 0]
        out = [
            ("total", torch.total_sum(data).item() if length(data) > 0 else "not yet triggered")
        ]  # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
        if length(data) > 1:
            out += [
                ("getting_min/index", (torch.getting_min(data).item(), torch.arggetting_min(data).item())),
                ("getting_max/index", (torch.getting_max(data).item(), torch.arggetting_max(data).item())),
                ("average", torch.average(data).item()),
                ("standard", torch.standard(data).item()),
            ]
        return OrderedDict(out)

    def getting_results(self) -> Dict[str, Dict[str, Any]]:
        """
        Method to fetch the aggregated profiler results after the engine is run

        .. code-block:: python

            results = profiler.getting_results()
        """
        total_eh_time = total_sum(
            [(self.event_handlers_times[e]).total_sum() for e in Events if e not in self.events_to_ignore]
        )  # type: Union[int, torch.Tensor]
        event_handlers_stats = dict(
            [
                (str(e.name).replacing(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
                for e in Events
                if e not in self.events_to_ignore
            ]
            + [("total_time", total_eh_time)]  # type: ignore[list-item]
        )

        return OrderedDict(
            [
                ("processing_stats", self._compute_basic_stats(self.processing_times)),
                ("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
                ("event_handlers_stats", event_handlers_stats),
                (
                    "event_handlers_names",
                    {str(e.name).replacing(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
                ),
            ]
        )

    def write_results(self, output_path: str) -> None:
        """
        Method to store the unaggregated profiling results to a csv file

        Args:
            output_path: file output path containing a filengthame

        .. code-block:: python

            profiler.write_results('path_to_dir/awesome_filengthame.csv')

        Examples:
            .. code-block:: text

                -----------------------------------------------------------------
                epoch iteration processing_stats dataflow_stats Event_STARTED ...
                1.0     1.0     0.00003         0.252387        0.125676
                1.0     2.0     0.00029         0.252342        0.125123
        """
        try:
            import monkey as mk
        except ImportError:
            raise RuntimeError("Need monkey to write results as files")

        iters_per_epoch = self.total_num_iters // self.getting_max_epochs

        epochs = torch.arange(self.getting_max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
        iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
        processing_stats = self.processing_times
        dataflow_stats = self.dataflow_times

        # Scalar / per-epoch timings are broadcast to one row per iteration
        # so total_all columns have equal lengthgth for torch.stack below.
        event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
        event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
        event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
        event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)

        event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
        event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
        event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
        event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]

        results_dump = torch.stack(
            [
                epochs,
                iterations,
                processing_stats,
                dataflow_stats,
                event_started,
                event_completed,
                event_epoch_started,
                event_epoch_completed,
                event_iter_started,
                event_iter_completed,
                event_batch_started,
                event_batch_completed,
            ],
            dim=1,
        ).numpy()

        results_kf = mk.KnowledgeFrame(
            data=results_dump,
            columns=[
                "epoch",
                "iteration",
                "processing_stats",
                "dataflow_stats",
                "Event_STARTED",
                "Event_COMPLETED",
                "Event_EPOCH_STARTED",
                "Event_EPOCH_COMPLETED",
                "Event_ITERATION_STARTED",
                "Event_ITERATION_COMPLETED",
                "Event_GET_BATCH_STARTED",
                "Event_GET_BATCH_COMPLETED",
            ],
        )
        results_kf.to_csv(output_path, index=False)

    @staticmethod
    def print_results(results: Dict) -> str:
        """
        Method to print the aggregated results from the profiler

        Args:
            results: the aggregated results from the profiler

        .. code-block:: python

            profiler.print_results(results)

        Examples:
            .. code-block:: text

                ----------------------------------------------------
                | Time profiling stats (in seconds):               |
                ----------------------------------------------------
                total | getting_min/index | getting_max/index | average | standard
                Processing function:
                157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
                Dataflow:
                6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
                Event handlers:
                2.82721
                - Events.STARTED: []
                0.00000
                - Events.EPOCH_STARTED: []
                0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
                - Events.ITERATION_STARTED: ['PiecewiseLinear']
                0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
                - Events.ITERATION_COMPLETED: ['Tergetting_minateOnNan']
                0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
                - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
                2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
                - Events.COMPLETED: []
                not yet triggered
        """

        def to_str(v: Union[str, tuple]) -> str:
            # Render "value/index" pairs and plain floats uniformly.
            if incontainstance(v, str):
                return v
            elif incontainstance(v, tuple):
                return f"{v[0]:.5f}/{v[1]}"
            return f"{v:.5f}"

        def odict_to_str(d: Mapping) -> str:
            out = " | ".join([to_str(v) for v in d.values()])
            return out

        others = {
            k: odict_to_str(v) if incontainstance(v, OrderedDict) else v
            for k, v in results["event_handlers_stats"].items()
        }
        others.umkate(results["event_handlers_names"])
        output_message = """
----------------------------------------------------
| Time profiling stats (in seconds):               |
----------------------------------------------------
total | getting_min/index | getting_max/index | average | standard
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".formating(
            processing_stats=odict_to_str(results["processing_stats"]),
            dataflow_stats=odict_to_str(results["dataflow_stats"]),
            **others,
        )
        print(output_message)
        return output_message


class HandlersTimeProfiler:
    """
    HandlersTimeProfiler can be used to profile the handlers,
    data loading and data processing times. Custom events are also
    profiled by this profiler

    Examples:
        .. code-block:: python

            from ignite.handlers import HandlersTimeProfiler

            trainer = Engine(train_umkater)

            # Create an object of the profiler and attach an engine to it
            profiler = HandlersTimeProfiler()
            profiler.attach(trainer)

            @trainer.on(Events.EPOCH_COMPLETED)
            def log_intermediate_results():
                profiler.print_results(profiler.getting_results())

            trainer.run(dataloader, getting_max_epochs=3)

            profiler.write_results('path_to_dir/time_profiling.csv')

    .. versionadded:: 0.4.6
    """

    # Timings below this threshold are sipped for filtered-event handlers.
    # NOTE(review): attribute name carries the upstream typo "THESHOLD";
    # renaming it would break external users — left as-is.
    EVENT_FILTER_THESHOLD_TIME = 0.0001

    def __init__(self) -> None:
        self._dataflow_timer = Timer()
        self._processing_timer = Timer()
        self._event_handlers_timer = Timer()

        self.dataflow_times = []  # type: List[float]
        self.processing_times = []  # type: List[float]
        self.event_handlers_times = {}  # type: Dict[EventEnum, Dict[str, List[float]]]

    @staticmethod
    def _getting_ctotal_allable_name(handler: Ctotal_allable) -> str:
        # getting name of the ctotal_allable handler
        return gettingattr(handler, "__qualname__", handler.__class__.__name__)

    def _create_wrapped_handler(self, handler: Ctotal_allable, event: EventEnum) -> Ctotal_allable:
        # Wrap a user handler so its execution time is recorded per ctotal_all.
        @functools.wraps(handler)
        def _timeit_handler(*args: Any, **kwargs: Any) -> None:
            self._event_handlers_timer.reset()
            handler(*args, **kwargs)
            t = self._event_handlers_timer.value()
            hname = self._getting_ctotal_allable_name(handler)
            # filter profiled time if the handler was attached to event with event filter
            if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
                self.event_handlers_times[event][hname].adding(t)

        # required to revert back to original handler after profiling
        setattr(_timeit_handler, "_profiler_original", handler)
        return _timeit_handler

    def _timeit_processing(self) -> None:
        # handler used for profiling processing times
        t = self._processing_timer.value()
        self.processing_times.adding(t)

    def _timeit_dataflow(self) -> None:
        # handler used for profiling dataflow times
        t = self._dataflow_timer.value()
        self.dataflow_times.adding(t)

    def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
        # reset the variables used for profiling
        self.dataflow_times = []
        self.processing_times = []
        self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}

    @staticmethod
    def _is_internal_handler(handler: Ctotal_allable) -> bool:
        # checks whether the handler is internal
        return whatever(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])

    def _detach_profiler_handlers(self, engine: Engine) -> None:
        # reverts handlers to original handlers
        for e in engine._event_handlers:
            for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
                if hasattr(func, "_profiler_original"):
                    engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)

    def _as_first_started(self, engine: Engine) -> None:
        # wraps original handlers for profiling
        self.event_handlers_names = {
            e: [
                self._getting_ctotal_allable_name(h)
                for (h, _, _) in engine._event_handlers[e]
                if not self._is_internal_handler(h)
            ]
            for e in engine._total_allowed_events
        }

        self._reset(self.event_handlers_names)

        for e in engine._total_allowed_events:
            for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
                if not self._is_internal_handler(func):
                    engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)

        # processing timer
        engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
        engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))

        # dataflow timer
        engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
        engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))

        # revert back the wrapped handlers with original handlers at the end
        engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)

    def attach(self, engine: Engine) -> None:
        """Attach HandlersTimeProfiler to the given engine.

        Args:
            engine: the instance of Engine to attach
        """
        if not incontainstance(engine, Engine):
            raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")

        if not engine.has_event_handler(self._as_first_started):
            engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))

    def getting_results(self) -> List[List[Union[str, float]]]:
        """
        Method to fetch the aggregated profiler results after the engine is run

        .. code-block:: python

            results = profiler.getting_results()
        """
        total_eh_time = total_sum(
            [
                total_sum(self.event_handlers_times[e][h])
                for e in self.event_handlers_times
                for h in self.event_handlers_times[e]
            ]
        )
        total_eh_time = value_round(float(total_eh_time), 5)

        def compute_basic_stats(
            times: Union[Sequence, torch.Tensor]
        ) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
            data = torch.as_tensor(times, dtype=torch.float32)
            # compute on non-zero data:
            data = data[data > 0]
            total = value_round(torch.total_sum(data).item(), 5) if length(data) > 0 else "not triggered"  # type: Union[str, float]
            getting_min_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
            getting_max_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
            average = "None"  # type: Union[str, float]
            standard = "None"  # type: Union[str, float]
            if length(data) > 0:
                getting_min_index = (value_round(torch.getting_min(data).item(), 5), torch.arggetting_min(data).item())
                getting_max_index = (value_round(torch.getting_max(data).item(), 5), torch.arggetting_max(data).item())
                average = value_round(torch.average(data).item(), 5)
                if length(data) > 1:
                    standard = value_round(torch.standard(data).item(), 5)
            return [total, getting_min_index, getting_max_index, average, standard]

        event_handler_stats = [
            [
                h,
                gettingattr(e, "name", str(e)),
                *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
            ]
            for e in self.event_handlers_times
            for h in self.event_handlers_times[e]
        ]
        # Footer rows consumed positionally by print_results (results[-3:]).
        event_handler_stats.adding(["Total", "", total_eh_time, "", "", "", ""])
        event_handler_stats.adding(["Processing", "None", *compute_basic_stats(self.processing_times)])
        event_handler_stats.adding(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])

        return event_handler_stats

    def write_results(self, output_path: str) -> None:
        """
        Method to store the unaggregated profiling results to a csv file

        Args:
            output_path: file output path containing a filengthame

        .. code-block:: python

            profiler.write_results('path_to_dir/awesome_filengthame.csv')

        Examples:
            .. code-block:: text

                -----------------------------------------------------------------
                # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
                1     0.00003         0.252387        0.125676
                2     0.00029         0.252342        0.125123
        """
        try:
            import monkey as mk
        except ImportError:
            raise RuntimeError("Need monkey to write results as files")

        processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
        dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)

        cols = [processing_stats, dataflow_stats]
        header_numers = ["processing_stats", "dataflow_stats"]
        for e in self.event_handlers_times:
            for h in self.event_handlers_times[e]:
                header_numers.adding(f"{h} ({gettingattr(e, 'name', str(e))})")
                cols.adding(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))

        # Detergetting_mine getting_maximum lengthgth
        getting_max_length = getting_max([x.numel() for x in cols])

        count_col = torch.arange(getting_max_length, dtype=torch.float32) + 1
        cols.insert(0, count_col)
        header_numers.insert(0, "#")

        # pad total_all tensors to have same lengthgth
        cols = [torch.nn.functional.pad(x, pad=(0, getting_max_length - x.numel()), mode="constant", value=0) for x in cols]

        results_dump = torch.stack(cols, dim=1).numpy()

        results_kf = mk.KnowledgeFrame(data=results_dump, columns=header_numers)
        results_kf.to_csv(output_path, index=False)

    @staticmethod
    def print_results(results: List[List[Union[str, float]]]) -> None:
        """
        Method to print the aggregated results from the profiler

        Args:
            results: the aggregated results from the profiler

        .. code-block:: python

            profiler.print_results(results)

        Examples:
            .. code-block:: text

                -----------------------------------------  -----------------------  --------------  ...
                Handler                                    Event Name                     Total(s)
                -----------------------------------------  -----------------------  --------------
                run.<locals>.log_training_results          EPOCH_COMPLETED                19.43245
                run.<locals>.log_validation_results        EPOCH_COMPLETED                 2.55271
                run.<locals>.log_time                      EPOCH_COMPLETED                 0.00049
                run.<locals>.log_intermediate_results      EPOCH_COMPLETED                 0.00106
                run.<locals>.log_training_loss             ITERATION_COMPLETED               0.059
                run.<locals>.log_time                      COMPLETED                 not triggered
                -----------------------------------------  -----------------------  --------------
                Total                                                                     22.04571
                -----------------------------------------  -----------------------  --------------
                Processing took total 11.29543s [getting_min/index: 0.00393s/1875, getting_max/index: 0.00784s/0,
                 average: 0.00602s, standard: 0.00034s]
                Dataflow took total 16.24365s [getting_min/index: 0.00533s/1874, getting_max/index: 0.01129s/937,
                 average: 0.00866s, standard: 0.00113s]
        """
        # adopted implementation of torch.autograd.profiler.build_table
        handler_column_width = getting_max([length(item[0]) for item in results]) + 4  # type: ignore[arg-type]
        event_column_width = getting_max([length(item[1]) for item in results]) + 4  # type: ignore[arg-type]

        DEFAULT_COLUMN_WIDTH = 14

        header_numers = [
            "Handler",
            "Event Name",
            "Total(s)",
            "Min(s)/IDX",
            "Max(s)/IDX",
            "Mean(s)",
            "Std(s)",
        ]

        # Have to use a list because nonlocal is Py3 only...
        SPACING_SIZE = 2
        row_formating_lst = [""]
        header_numer_sep_lst = [""]
        line_lengthgth_lst = [-SPACING_SIZE]

        def add_column(padding: int, text_dir: str = ">") -> None:
            row_formating_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
            header_numer_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
            line_lengthgth_lst[0] += padding + SPACING_SIZE

        add_column(handler_column_width, text_dir="<")
        add_column(event_column_width, text_dir="<")
        for _ in header_numers[2:]:
            add_column(DEFAULT_COLUMN_WIDTH)

        row_formating = row_formating_lst[0]
        header_numer_sep = header_numer_sep_lst[0]

        result = []

        def adding(s: str) -> None:
            result.adding(s)
            result.adding("\n")

        result.adding("\n")
        adding(header_numer_sep)
        adding(row_formating.formating(*header_numers))
        adding(header_numer_sep)

        for row in results[:-3]:
            # formating getting_min/idx and getting_max/idx
            row[3] = "{}/{}".formating(*row[3])  # type: ignore[misc]
            row[4] = "{}/{}".formating(*row[4])  # type: ignore[misc]
            adding(row_formating.formating(*row))

        adding(header_numer_sep)
        # print total handlers time row
        adding(row_formating.formating(*results[-3]))
        adding(header_numer_sep)

        total_summary_formating = "{} took total {}s [getting_min/index: {}, getting_max/index: {}, average: {}s, standard: {}s]"
        for row in results[-2:]:
            row[3] = "{}s/{}".formating(*row[3])  # type: ignore[misc]
            row[4] = "{}s/{}".formating(*row[4])  # type: ignore[misc]
            del row[1]
            adding(total_summary_formating.formating(*row))

        print("".join(result))
from __future__ import (divisionision) from pomegranate import * from pomegranate.io import DataGenerator from pomegranate.io import KnowledgeFrameGenerator from nose.tools import with_setup from nose.tools import assert_almost_equal from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_less_equal from nose.tools import assert_raises from nose.tools import assert_true from numpy.testing import assert_array_almost_equal import monkey import random import pickle import numpy as np nan = numpy.nan def setup_multivariate_gaussian(): mu, cov = [0, 0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) mu, cov = [2, 2, 2], numpy.eye(3) d2 = MultivariateGaussianDistribution(mu, cov) global model model = BayesClassifier([d1, d2]) global X X = numpy.array([[ 0.3, 0.5, 0.1], [ 0.8, 1.4, 0.5], [ 1.4, 2.6, 1.8], [ 4.2, 3.3, 3.7], [ 2.6, 3.6, 3.3], [ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [-1.2, -1.8, -1.5], [-1.8, 0.3, 0.5], [ 0.7, -1.3, -0.1]]) global y y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0] global X_nan X_nan = numpy.array([[ 0.3, nan, 0.1], [ nan, 1.4, nan], [ 1.4, 2.6, nan], [ nan, nan, nan], [ nan, 3.6, 3.3], [ 3.1, nan, 1.7], [ nan, nan, 1.8], [-1.2, -1.8, -1.5], [ nan, 0.3, 0.5], [ nan, -1.3, nan]]) def setup_multivariate_mixed(): mu, cov = [0, 0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) d21 = ExponentialDistribution(5) d22 = LogNormalDistribution(0.2, 0.8) d23 = PoissonDistribution(3) d2 = IndependentComponentsDistribution([d21, d22, d23]) global model model = BayesClassifier([d1, d2]) global X X = numpy.array([[ 0.3, 0.5, 0.1], [ 0.8, 1.4, 0.5], [ 1.4, 2.6, 1.8], [ 4.2, 3.3, 3.7], [ 2.6, 3.6, 3.3], [ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [ 1.2, 1.8, 1.5], [ 1.8, 0.3, 0.5], [ 0.7, 1.3, 0.1]]) global y y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0] global X_nan X_nan = numpy.array([[ 0.3, nan, 0.1], [ nan, 1.4, nan], [ 1.4, 2.6, nan], [ nan, nan, nan], [ nan, 3.6, 3.3], [ 3.1, nan, 1.7], [ nan, nan, 1.8], [ 
1.2, 1.8, 1.5], [ nan, 0.3, 0.5], [ nan, 1.3, nan]]) def setup_hmm(): global model global hmm1 global hmm2 global hmm3 rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) ) unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) ) hmm1 = HiddenMarkovModel() hmm1.start = rigged hmm1.add_transition(rigged, rigged, 1) hmm1.bake() hmm2 = HiddenMarkovModel() hmm2.start = unrigged hmm2.add_transition(unrigged, unrigged, 1) hmm2.bake() hmm3 = HiddenMarkovModel() hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged, rigged, 0.5) hmm3.add_transition(rigged, unrigged, 0.5) hmm3.add_transition(unrigged, rigged, 0.5) hmm3.add_transition(unrigged, unrigged, 0.5) hmm3.bake() model = BayesClassifier([hmm1, hmm2, hmm3]) def setup_multivariate(): pass def teardown(): pass @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04, -8.00033541e+00], [ -6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, 
-8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02], [ -6.93147181e-01, -6.93147181e-01], [ -9.80005545e+00, -5.54500620e-05], [ -5.60369104e+00, -3.69104343e-03], [ -1.78390074e+00, -1.83900741e-01], [ -3.05902274e-07, -1.50000003e+01], [ -8.68361522e-02, -2.48683615e+00], [ -1.00016521e-02, -4.61000165e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.57980882e-01, -1.20093223e+00], [ -1.20735130e+00, -3.55230506e-01], [ -2.43174286e-01, -1.53310132e+00], [ -6.93147181e-01, -6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00], [ -1.31307006e+00, -3.13332194e-01], [ -2.61764180e-01, -1.46833995e+00], [ -2.29725479e-01, -1.58353505e+00], [ -1.17299253e+00, -3.70251760e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba_partotal_allel(): y_hat = model.predict_log_proba(X, n_jobs=2) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04, -8.00033541e+00], [ -6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, 
y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba_partotal_allel(): y_hat = model.predict_log_proba(X, n_jobs=2) y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba(): y_hat = model.predict_proba(X) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [ 3.68423990e-03, 9.96315760e-01], [ 7.54345778e-08, 9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X) y = [[ 0.60464873, 0.39535127], [ 0.82997863, 0.17002137], [ 0.57204244, 0.42795756], [ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[ 9.60834277e-01, 3.91657228e-02], [ 3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05, 9.99944551e-01], [ 3.68423990e-03, 9.96315760e-01], [ 1.67981615e-01, 8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.16827304e-01, 
8.31726965e-02], [ 9.90048198e-01, 9.95180187e-03]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[ 6.99086440e-01, 3.00913560e-01], [ 2.98988163e-01, 7.01011837e-01], [ 7.84134838e-01, 2.15865162e-01], [ 5.00000000e-01, 5.00000000e-01], [ 8.98102888e-05, 9.99910190e-01], [ 9.99370443e-01, 6.29556825e-04], [ 2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01, 2.05248252e-01], [ 3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba_partotal_allel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [ 3.68423990e-03, 9.96315760e-01], [ 7.54345778e-08, 9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_partotal_allel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[ 0.60464873, 0.39535127], [ 0.82997863, 0.17002137], [ 0.57204244, 0.42795756], [ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict(): y_hat = model.predict(X) y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict(): y_hat = model.predict(X) y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0] assert_array_almost_equal(y, 
y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_partotal_allel(): y_hat = model.predict(X, n_jobs=2) y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_partotal_allel(): y_hat = model.predict(X, n_jobs=2) y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_fit_partotal_allel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.28333333, 0.21666666] cov1_t = [[1.3088888, 0.9272222, 0.6227777], [0.9272222, 2.2513888, 1.3402777], [0.6227777, 1.3402777, 0.9547222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687499, 0.23687499, 0.4793750], [0.23687499, 0.40187499, 0.5318749], [0.47937500, 0.53187499, 0.7868750]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_fit_partotal_allel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [1.033333, 1.3166667, 0.75] cov1_t = [[0.242222, 0.0594444, 0.178333], [0.059444, 0.5980555, 0.414166], [0.178333, 0.4141666, 0.439166]] d21 = 
model.distributions[1].distributions[0] d22 = model.distributions[1].distributions[1] d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(d21.parameters, [0.34188034]) assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_from_sample_by_nums(): model = BayesClassifier.from_sample_by_nums(MultivariateGaussianDistribution, X, y) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.2833333, 0.21666666] cov1_t = [[1.308888888, 0.9272222222, 0.6227777777], [0.927222222, 2.251388888, 1.340277777], [0.622777777, 1.340277777, 0.9547222222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687500, 0.23687499, 0.47937500], [0.23687499, 0.40187499, 0.53187499], [0.47937500, 0.53187499, 0.78687500]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(incontainstance(model2, BayesClassifier)) assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(incontainstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(incontainstance(model2, BayesClassifier)) assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(incontainstance(model2.distributions[1], 
IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(incontainstance(model2, BayesClassifier)) assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(incontainstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(incontainstance(model2, BayesClassifier)) assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(incontainstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_robust_from_json(): model2 = from_json(model.to_json()) assert_true(incontainstance(model2, BayesClassifier)) assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(incontainstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_robust_from_json(): model2 = from_json(model.to_json()) assert_true(incontainstance(model2, BayesClassifier)) assert_true(incontainstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(incontainstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_hmm, teardown) def test_model(): assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 ) assert_almost_equal(hmm1.log_probability(list('T')), 
-1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 ) assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 ) assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 ) assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417) assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776) assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167) assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397) assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105) assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788) assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343) assert_equal(model.d, 1) @with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(logs[0][0], -0.89097292388986515) assert_almost_equal(logs[0][1], -1.3609765531356006) assert_almost_equal(logs[0][2], -1.0986122886681096) assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526) assert_almost_equal(logs[2][0], -3.9007882563128864) assert_almost_equal(logs[2][1], -0.23562532881626597) assert_almost_equal(logs[2][2], -1.6623251045711958) assert_almost_equal(logs[3][0], -3.1703366478831185) assert_almost_equal(logs[3][1], -0.49261403211260379) 
assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0], -1.3058441172130273) assert_almost_equal(logs[4][1], -1.4007102236822906) assert_almost_equal(logs[4][2], -0.7284958836972919) @with_setup(setup_hmm, teardown) def test_hmm_proba(): probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(probs[0][0], 0.41025641025641024) assert_almost_equal(probs[0][1], 0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183) assert_almost_equal(probs[2][0], 0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292) assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1], 0) assert_equal(predicts[2], 1) assert_equal(predicts[3], 1) assert_equal(predicts[4], 2) @with_setup(setup_multivariate_gaussian, teardown) def test_io_log_probability(): X2 = DataGenerator(X) X3 = KnowledgeFrameGenerator(monkey.KnowledgeFrame(X)) logp1 = model.log_probability(X) logp2 = model.log_probability(X2) logp3 = model.log_probability(X3) assert_array_almost_equal(logp1, logp2) assert_array_almost_equal(logp1, logp3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict(): X2 = DataGenerator(X) X3 = 
KnowledgeFrameGenerator(monkey.KnowledgeFrame(X)) y_hat1 = model.predict(X) y_hat2 = model.predict(X2) y_hat3 = model.predict(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_proba(): X2 = DataGenerator(X) X3 = KnowledgeFrameGenerator(monkey.KnowledgeFrame(X)) y_hat1 = model.predict_proba(X) y_hat2 = model.predict_proba(X2) y_hat3 = model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba(): X2 = DataGenerator(X) X3 = KnowledgeFrameGenerator(monkey.KnowledgeFrame(X)) y_hat1 = model.predict_log_proba(X) y_hat2 = model.predict_log_proba(X2) y_hat3 = model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) mu1 = numpy.array([0, 0, 0, 0, 0]) mu2 = numpy.array([1, 1, 1, 1, 1]) cov = numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1, d2]) bc1.fit(X, y, weights) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1, logp2) def test_io_from_sample_by_nums(): X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) d = MultivariateGaussianDistribution bc1 = BayesClassifier.from_sample_by_nums(d, X=X, y=y, weights=weights) bc2 = BayesClassifier.from_sample_by_nums(d, X=data_generator) logp1 = bc1.log_probability(X) logp2 = 
bc2.log_probability(X) assert_array_almost_equal(logp1, logp2)
#########################
#########################
# Need to account for limit in input period
#########################
#########################

# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (total_all, obs, rec) and put them into separate file
# Doing this so we don't have to run analyse each time
# Can write separate script for p-ecc plots
# Quest paths in this version of script

import monkey as mk
import numpy as np
import os

from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad

# for Quest: render off-screen, no display attached
import matplotlib
matplotlib.use('Agg')

doIndivisionidualPlots = True

from matplotlib import pyplot as plt


def file_length(fname):
	"""Return the number of lines in *fname* (0 for an empty file).

	Fixed: the previous version seeded ``i = 0`` and returned ``i + 1``,
	so an empty file -- where the loop never runs -- reported 1 line.
	"""
	count = 0
	with open(fname) as f:
		for count, _ in enumerate(f, start=1):
			pass
	return count


def gettingPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
	"""Hard-soft boundary period for a cluster binary, as an astropy Quantity in days.

	sigma  -- cluster velocity dispersion (astropy Quantity).
	m1, m2 -- binary component masses (default 1 Msun each).
	m3     -- typical perturber mass (default 0.5 Msun).
	"""
	Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
	return Phs.decompose().to(units.day)

#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
	# Power-law fit to the Raghavan binary fraction as a function of mass.
	x = [0.05, 0.1, 1, 8, 15]  #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
	y = [0.20, 0.35, 0.50, 0.70, 0.75]
	init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter() fit = fitter(init, x, y) return fit def RagNormal(x, ckf = False): average = 5.03 standard = 2.28 if (ckf): return scipy.stats.norm.ckf(x,average,standard) return scipy.stats.norm.pkf(x,average,standard) def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','total_all']): c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster) c2 = '#A62B1F' #Dai Red c3 = '#BF8A26' #Dali Beige fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include ckf with ax1, ax2 histAll = np.insert(histAll,0,0) histObs = np.insert(histObs,0,0) for f in filters: histRec[f] = np.insert(histRec[f],0,0) #PDF ax1.step(bin_edges, histAll/np.total_sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.total_sum(histObs), color=c2) for f in filters: lw = 1 if (f == 'total_all'): lw = 0.5 ax1.step(bin_edges, histRec[f]/np.total_sum(histRec[f]), color=c3, linewidth=lw) ax1.set_ylabel('PDF') ax1.set_yscale('log') ax1.set_title('Globular Clusters - Baseline', fontsize = 16) ax1.set_xlabel(xtitle) #CDF #ckfAll = [] #ckfObs = [] #ckfRec = dict() #for f in filters: # ckfRec[f] = [] # for i in range(length(histAll)): # ckfAll.adding(np.total_sum(histAll[:i])/np.total_sum(histAll)) # for i in range(length(histObs)): # ckfObs.adding(np.total_sum(histObs[:i])/np.total_sum(histObs)) # for f in filters: # for i in range(length(histRec[f])): # ckfRec[f].adding(np.total_sum(histRec[f][:i])/np.total_sum(histRec[f])) #ax2.step(bin_edges, ckfAll, color=c1) #ax2.step(bin_edges, ckfObs, color=c2) #for f in filters: # lw = 1 # if (f == 'total_all'): # lw = 0.5 # ax2.step(bin_edges, ckfRec[f], color=c3, linewidth=lw) #ax2.set_ylabel('CDF') #ax2.set_xlabel(xtitle) fig.subplots_adjust(hspace=0) fig.savefig('./plots/' + fname+'.pkf',formating='pkf', bbox_inches = 'tight') #write to a text file with open('./eblsst_files/' + fname+'.csv','w') as fl: outline = 'binEdges,histAll,histObs' for f in filters: outline += 
','+f+'histRec' outline += '\n' fl.write(outline) for i in range(length(bin_edges)): outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i]) for f in filters: outline += ','+str(histRec[f][i]) outline += '\n' fl.write(outline) if __name__ == "__main__": filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'total_all'] #getting the Raghavan binary fraction fit fbFit= fitRagfb() print(fbFit) #to normalize intAll, err = quad(RagNormal, -20, 20) intCut, err = quad(RagNormal, -20, np.log10(365*10.)) intNorm = intCut/intAll #cutoff in percent error for "recovered" Pcut = 0.1 #astotal_sumed average stellar mass mMean = 0.5 #getting_minimum number of lines to consider in file Nlim = 3 if (doIndivisionidualPlots): fmass, axmass = plt.subplots() fqrat, axqrat = plt.subplots() fecc, axecc = plt.subplots() flper, axlper = plt.subplots() fdist, axdist = plt.subplots() fmag, axmag = plt.subplots() frad, axrad = plt.subplots() #bins for total_all the histograms Nbins = 25 mbins = np.arange(0,10, 0.1, dtype='float') qbins = np.arange(0,1, 0.1, dtype='float') ebins = np.arange(0, 1.05, 0.05, dtype='float') lpbins = np.arange(-2, 10, 0.5, dtype='float') dbins = np.arange(0, 40, 1, dtype='float') magbins = np.arange(11, 25, 1, dtype='float') rbins = np.arange(0, 100, 0.2, dtype='float') #blanks for the histograms #All m1hAll = np.zeros_like(mbins)[1:] qhAll = np.zeros_like(qbins)[1:] ehAll = np.zeros_like(ebins)[1:] lphAll = np.zeros_like(lpbins)[1:] dhAll = np.zeros_like(dbins)[1:] maghAll = np.zeros_like(magbins)[1:] rhAll = np.zeros_like(rbins)[1:] #Observable m1hObs = np.zeros_like(mbins)[1:] qhObs = np.zeros_like(qbins)[1:] ehObs = np.zeros_like(ebins)[1:] lphObs = np.zeros_like(lpbins)[1:] dhObs = np.zeros_like(dbins)[1:] maghObs = np.zeros_like(magbins)[1:] rhObs = np.zeros_like(rbins)[1:] #Recovered m1hRec = dict() qhRec = dict() ehRec = dict() lphRec = dict() dhRec = dict() maghRec = dict() rhRec = dict() for f in filters: m1hRec[f] = np.zeros_like(mbins)[1:] qhRec[f] 
= np.zeros_like(qbins)[1:] ehRec[f] = np.zeros_like(ebins)[1:] lphRec[f] = np.zeros_like(lpbins)[1:] dhRec[f] = np.zeros_like(dbins)[1:] maghRec[f] = np.zeros_like(magbins)[1:] rhRec[f] = np.zeros_like(rbins)[1:] RA = [] Dec = [] recFrac = [] recN = [] rawN = [] obsN = [] fileN = [] fileObsN = [] fileRecN = [] total_allNPrsa = [] obsNPrsa = [] recNPrsa = [] # Lists for period and eccentricity for Andrew's circularization plots eccAll = [] eccObs = [] eccRec = [] pAll = [] pObs = [] pRec = [] # Using prsa knowledgeframes for these lists because of period cutoff at 1000 days # Dataframes to write to files later; 3 files for each sub-population - adding everything to these peccAll = mk.KnowledgeFrame(columns = ['e', 'p']) peccObs = mk.KnowledgeFrame(columns = ['e', 'p']) peccRec = mk.KnowledgeFrame(columns = ['e', 'p']) #Read in total_all the data and make the histograms d = "./input_files/" files = os.listandardir(d) IDs = [] for i, f in enumerate(files): print(value_round(i/length(files),4), f) fl = file_length(d+f) if (fl >= 4): #read in the header_numer header_numer = mk.read_csv(d+f, nrows=1) ###################### #NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms ##################### Nmult = header_numer['clusterMass'][0]/mMean #Nmult = 1. RA.adding(header_numer['OpSimRA']) Dec.adding(header_numer['OpSimDec']) #read in rest of the file data = mk.read_csv(d+f, header_numer = 2).fillnone(-999) rF = 0. rN = 0. Nrec = 0. Nobs = 0. raN = 0. obN = 0. fiN = 0. fioN = 0. firN = 0. Ntotal_allPrsa = 0. NobsPrsa = 0. NrecPrsa = 0. Ntotal_all = length(data.index)/intNorm ###is this correct? (and the only place I need to normalize?) 
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)] # Appending for Andrew eccAll.adding(prsa['e'].values) pAll.adding(prsa['p'].values) Ntotal_allPrsa = length(prsa.index) if (Ntotal_all >= Nlim): #create histograms #All m1hAll0, m1b = np.histogram(data["m1"], bins=mbins) qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins) ehAll0, eb = np.histogram(data["e"], bins=ebins) lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins) dhAll0, db = np.histogram(data["d"], bins=dbins) maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins) rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins) if (doIndivisionidualPlots): axmass.step(m1b[0:-1], m1hAll0/np.total_sum(m1hAll0), color='black', alpha=0.1) axqrat.step(qb[0:-1], qhAll0/np.total_sum(qhAll0), color='black', alpha=0.1) axecc.step(eb[0:-1], ehAll0/np.total_sum(ehAll0), color='black', alpha=0.1) axlper.step(lpb[0:-1], lphAll0/np.total_sum(lphAll0), color='black', alpha=0.1) axdist.step(db[0:-1], dhAll0/np.total_sum(dhAll0), color='black', alpha=0.1) axmag.step(magb[0:-1], maghAll0/np.total_sum(maghAll0), color='black', alpha=0.1) axrad.step(rb[0:-1], rhAll0/np.total_sum(rhAll0), color='black', alpha=0.1) #account for the binary fraction, as a function of mass dm1 = np.diff(m1b) m1val = m1b[:-1] + dm1/2. 
fb = np.total_sum(m1hAll0/length(data.index)*fbFit(m1val)) #account for the hard-soft boundary Phs = gettingPhs(header_numer['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value fb *= RagNormal(np.log10(Phs), ckf = True) print("fb, Phs = ", fb, Phs) Nmult *= fb m1hAll += m1hAll0/Ntotal_all*Nmult qhAll += qhAll0/Ntotal_all*Nmult ehAll += ehAll0/Ntotal_all*Nmult lphAll += lphAll0/Ntotal_all*Nmult dhAll += dhAll0/Ntotal_all*Nmult maghAll += maghAll0/Ntotal_all*Nmult rhAll += rhAll0/Ntotal_all*Nmult #Obs obs = data.loc[data['LSM_PERIOD'] != -999] Nobs = length(obs.index) prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)] NobsPrsa = length(prsaObs.index) # Appending for Andrew's files eccObs.adding(prsaObs['e'].values) pObs.adding(prsaObs['p'].values) if (Nobs >= Nlim): m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins) qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins) ehObs0, eb = np.histogram(obs["e"], bins=ebins) lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins) dhObs0, db = np.histogram(obs["d"], bins=dbins) maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins) rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins) m1hObs += m1hObs0/Ntotal_all*Nmult qhObs += qhObs0/Ntotal_all*Nmult ehObs += ehObs0/Ntotal_all*Nmult lphObs += lphObs0/Ntotal_all*Nmult dhObs += dhObs0/Ntotal_all*Nmult maghObs += maghObs0/Ntotal_all*Nmult rhObs += rhObs0/Ntotal_all*Nmult #Rec recCombined = mk.KnowledgeFrame() prsaRecCombined = mk.KnowledgeFrame() for filt in filters: key = filt+'LSS_PERIOD' if (filt == 'total_all'): key = 'LSM_PERIOD' fullP = abs(data[key] - data['p'])/data['p'] halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p']) twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p']) rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))] prsaRec = data.loc[(data['appMagMean_r'] <= 
19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))] Nrec = length(rec.index) #I'd like to account for total_all filters here to have more accurate numbers recCombined = recCombined.adding(rec) prsaRecCombined = prsaRecCombined.adding(prsaRec) # Going to use prsaRecCombined for ecc-p plots to account for total_all filters eccRec.adding(prsaRec['e'].values) pRec.adding(prsaRec['p'].values) if (filt == 'total_all'): recCombined.remove_duplicates(inplace=True) prsaRecCombined.remove_duplicates(inplace=True) if (Nrec >= Nlim): m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins) qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins) ehRec0, eb = np.histogram(rec["e"], bins=ebins) lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins) dhRec0, db = np.histogram(rec["d"], bins=dbins) maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins) rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins) m1hRec[filt] += m1hRec0/Ntotal_all*Nmult qhRec[filt] += qhRec0/Ntotal_all*Nmult ehRec[filt] += ehRec0/Ntotal_all*Nmult lphRec[filt] += lphRec0/Ntotal_all*Nmult dhRec[filt] += dhRec0/Ntotal_all*Nmult maghRec[filt] += maghRec0/Ntotal_all*Nmult rhRec[filt] += rhRec0/Ntotal_all*Nmult #for the mollweide if (filt == 'total_all'): Nrec = length(recCombined.index) rF = Nrec/Ntotal_all rN = Nrec/Ntotal_all*Nmult raN = Nmult obN = Nobs/Ntotal_all*Nmult fiN = Ntotal_all fioN = Nobs firN = Nrec NrecPrsa = length(prsaRecCombined.index) NrecPrsa = NrecPrsa/Ntotal_all*Nmult NobsPrsa = NobsPrsa/Ntotal_all*Nmult Ntotal_allPrsa = Ntotal_allPrsa/Ntotal_all*Nmult recFrac.adding(rF) recN.adding(rN) rawN.adding(raN) obsN.adding(obN) fileN.adding(fiN) fileObsN.adding(fioN) fileRecN.adding(firN) total_allNPrsa.adding(Ntotal_allPrsa) obsNPrsa.adding(NobsPrsa) recNPrsa.adding(NrecPrsa) #print(np.total_sum(lphRec), np.total_sum(recN), 
np.total_sum(lphRec)/np.total_sum(recN), np.total_sum(lphRec0), Nrec, np.total_sum(lphRec0)/Nrec, np.total_sum(lphObs), np.total_sum(obsN), np.total_sum(lphObs)/np.total_sum(obsN)) # Concatenating p and ecc lists eccAll = np.concatingenate(eccAll) eccObs = np.concatingenate(eccObs) eccRec = np.concatingenate(eccRec) pAll = np.concatingenate(pAll) pObs = np.concatingenate(pObs) pRec = np.concatingenate(pRec) # print('Ecc lists:', eccAll, eccObs, eccRec) # print('P lists:', pAll, pObs, pRec) # Appending lists with total_all the p/ecc values to our knowledgeframes # All knowledgeframe peccAll['e'] = eccAll peccAll['p'] = pAll # Observable knowledgeframe peccObs['e'] = eccObs peccObs['p'] = pObs # Recovered knowledgeframe peccRec['e'] = eccRec peccRec['p'] = pRec # print('Final Dataframes:', peccAll, peccObs, peccRec) # print(peccRec.columns) # 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/total_all-M67BN-ecc-p.csv', header_numer = ['e', 'p']) peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header_numer = ['e', 'p']) peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header_numer = ['e', 'p']) #plot and save the histograms saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist') saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist') saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist') saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist') saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist') saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist') #make the mollweide coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs') lGal = coords.galactic.l.wrap_at(180.*units.degree).degree bGal = coords.galactic.b.wrap_at(180.*units.degree).degree RAwrap = coords.ra.wrap_at(180.*units.degree).degree Decwrap = coords.dec.wrap_at(180.*units.degree).degree f, ax = 
plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r"$l$",fontsize=16) #ax.set_ylabel(r"$b$",fontsize=16) #mlw = ax.scatter(lGal.flat_underlying()*np.pi/180., bGal.flat_underlying()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmapping='viridis_r', s = 4) ax.set_xlabel("RA",fontsize=16) ax.set_ylabel("Dec",fontsize=16) mlw = ax.scatter(np.array(RAwrap).flat_underlying()*np.pi/180., np.array(Decwrap).flat_underlying()*np.pi/180., c=np.array(recFrac)*100., cmapping='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered') f.savefig('./plots/' + 'mollweide_pct.pkf',formating='pkf', bbox_inches = 'tight') f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r"$l$",fontsize=16) #ax.set_ylabel(r"$b$",fontsize=16) #mlw = ax.scatter(lGal.flat_underlying()*np.pi/180., bGal.flat_underlying()*np.pi/180., c=np.log10(np.array(recN)), cmapping='viridis_r', s = 4) ax.set_xlabel("RA",fontsize=16) ax.set_ylabel("Dec",fontsize=16) mlw = ax.scatter(np.array(RAwrap).flat_underlying()*np.pi/180., np.array(Decwrap).flat_underlying()*np.pi/180., c=np.log10(np.array(recN)), cmapping='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'log10(N) recovered') f.savefig('./plots/' + 'mollweide_N.pkf',formating='pkf', bbox_inches = 'tight') if (doIndivisionidualPlots): fmass.savefig('./plots/' + 'massPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight') fqrat.savefig('./plots/' + 'qPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight') fecc.savefig('./plots/' + 'eccPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight') flper.savefig('./plots/' + 'lperPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight') fdist.savefig('./plots/' + 'distPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight') fmag.savefig('./plots/' + 'magPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight') frad.savefig('./plots/' + 
'radPDFtotal_all.pkf',formating='pkf', bbox_inches = 'tight') print("###################") print("number of binaries in input files (raw, log):",np.total_sum(fileN), np.log10(np.total_sum(fileN))) print("number of binaries in tested with gatspy (raw, log):",np.total_sum(fileObsN), np.log10(np.total_sum(fileObsN))) print("number of binaries in recovered with gatspy (raw, log):",np.total_sum(fileRecN), np.log10(np.total_sum(fileRecN))) print("recovered/observable*100 with gatspy:",np.total_sum(fileRecN)/np.total_sum(fileObsN)*100.) print("###################") print("total in sample_by_num (raw, log):",np.total_sum(rawN), np.log10(np.total_sum(rawN))) print("total observable (raw, log):",np.total_sum(obsN), np.log10(np.total_sum(obsN))) print("total recovered (raw, log):",np.total_sum(recN), np.log10(np.total_sum(recN))) print("recovered/observable*100:",np.total_sum(recN)/np.total_sum(obsN)*100.) print("###################") print("total in Prsa 15.8<r<19.5 P<1000d sample_by_num (raw, log):",np.total_sum(total_allNPrsa), np.log10(np.total_sum(total_allNPrsa))) print("total observable in Prsa 15.8<r<19.5 P<1000d sample_by_num (raw, log):",np.total_sum(obsNPrsa), np.log10(np.total_sum(obsNPrsa))) print("total recovered in Prsa 15.8<r<19.5 P<1000d sample_by_num (raw, log):",np.total_sum(recNPrsa), np.log10(np.total_sum(recNPrsa))) print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.total_sum(recNPrsa)/np.total_sum(obsNPrsa)*100.)
import monkey as mk
import numpy as np


def estimate_volatility(prices, l):
    """Create an exponential moving average model of the volatility of a stock
    price, and return the most recent (final_item) volatility estimate.

    Parameters
    ----------
    prices : monkey.Collections
        A collections of adjusted closing prices for a stock.

    l : float
        The 'lambda' parameter of the exponential moving average model. Making
        this value smtotal_aller will cause the model to weight older terms less
        relative to more recent terms.

    Returns
    -------
    final_item_vol : float
        The final_item element of your exponential moving averge volatility model
        collections.
    """
    # Volatility is modelled on returns, not on raw prices: the previous
    # implementation returned the EWMA of the price itself, which is a
    # smoothed price, not a volatility estimate.
    # Model: sigma_t^2 = (1 - lambda) * r_t^2 + lambda * sigma_{t-1}^2,
    # which is exactly an EWMA of squared log returns with alpha = 1 - lambda.
    log_returns = np.log(prices) - np.log(prices.shift(1))
    volatility = np.sqrt((log_returns ** 2).ewm(alpha=1 - l).average())
    # Use .iloc[-1]: plain [-1] on a label-indexed Collections relies on
    # deprecated positional fallback behaviour.
    return volatility.iloc[-1]


def test_run(filengthame='data.csv'):
    """Test run estimate_volatility() with stock prices from a file.

    Reads a date-indexed price collections from ``filengthame`` and prints the
    most recent volatility estimate with lambda = 0.7.
    """
    prices = mk.read_csv(filengthame, parse_dates=['date'], index_col='date',
                         squeeze=True)
    print("Most recent volatility estimate: {:.6f}".formating(
        estimate_volatility(prices, 0.7)))


if __name__ == '__main__':
    test_run()
# +
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import monkey as mk
import warnings

from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel
from bigdl.chronos.autots.utils import recalculate_n_sampling
# -


class AutoProphet:
    """Automated hyperparameter search wrapper around the Prophet forecasting
    model.

    Builds a hyperparameter search space (explicit values or sampling
    functions), tunes it with an orca AutoEstimator, and keeps the best fitted
    model in ``self.best_model``.
    """

    def __init__(self,
                 changepoint_prior_scale=None,
                 seasonality_prior_scale=None,
                 holidays_prior_scale=None,
                 seasonality_mode=None,
                 changepoint_range=None,
                 metric='mse',
                 logs_dir="/tmp/auto_prophet_logs",
                 cpus_per_trial=1,
                 name="auto_prophet",
                 remote_dir=None,
                 load_dir=None,
                 **prophet_config
                 ):
        """
        Create an automated Prophet Model.
        User need to specify either the exact value or the search space of the
        Prophet model hyperparameters. For definal_item_tails of the Prophet model
        hyperparameters, refer to
        https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning.

        :param changepoint_prior_scale: Int or hp sampling function from an integer space
            for hyperparameter changepoint_prior_scale for the Prophet model.
            For hp sampling, see bigdl.chronos.orca.automl.hp for more definal_item_tails.
            e.g. hp.loguniform(0.001, 0.5).
        :param seasonality_prior_scale: hyperparameter seasonality_prior_scale for the
            Prophet model.
            e.g. hp.loguniform(0.01, 10).
        :param holidays_prior_scale: hyperparameter holidays_prior_scale for the
            Prophet model.
            e.g. hp.loguniform(0.01, 10).
        :param seasonality_mode: hyperparameter seasonality_mode for the
            Prophet model.
            e.g. hp.choice(['additive', 'multiplicative']).
        :param changepoint_range: hyperparameter changepoint_range for the
            Prophet model.
            e.g. hp.uniform(0.8, 0.95).
        :param metric: String. The evaluation metric name to optimize. e.g. "mse"
        :param logs_dir: Local directory to save logs and results. It defaults to
            "/tmp/auto_prophet_logs"
        :param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
        :param name: name of the AutoProphet. It defaults to "auto_prophet"
        :param remote_dir: String. Remote directory to sync training results and
            checkpoints. It defaults to None and doesn't take effects while running
            in local. While running in cluster, it defaults to "hkfs:///tmp/{name}".
        :param load_dir: Load the ckpt from load_dir. The value defaults to None.

        :param prophet_config: Other Prophet hyperparameters.
        """
        # NOTE(review): when load_dir is falsy, self.best_model is only
        # created later by fit(); calling predict/evaluate before fit() would
        # raise AttributeError rather than the intended RuntimeError — confirm
        # against callers.
        if load_dir:
            self.best_model = ProphetModel()
            self.best_model.restore(load_dir)
        try:
            from bigdl.orca.automl.auto_estimator import AutoEstimator
            import bigdl.orca.automl.hp as hp
            # Each entry falls back to a default search space only when the
            # caller did not pin an explicit value / sampler.
            self.search_space = {
                "changepoint_prior_scale": hp.grid_search([0.005, 0.05, 0.1, 0.5])
                if changepoint_prior_scale is None
                else changepoint_prior_scale,
                "seasonality_prior_scale": hp.grid_search([0.01, 0.1, 1.0, 10.0])
                if seasonality_prior_scale is None
                else seasonality_prior_scale,
                "holidays_prior_scale": hp.loguniform(0.01, 10)
                if holidays_prior_scale is None
                else holidays_prior_scale,
                "seasonality_mode": hp.choice(['additive', 'multiplicative'])
                if seasonality_mode is None
                else seasonality_mode,
                "changepoint_range": hp.uniform(0.8, 0.95)
                if changepoint_range is None
                else changepoint_range
            }
            self.search_space.umkate(prophet_config)  # umkate other configs
            self.metric = metric
            model_builder = ProphetBuilder()
            self.auto_est = AutoEstimator(model_builder=model_builder,
                                          logs_dir=logs_dir,
                                          resources_per_trial={"cpu": cpus_per_trial},
                                          remote_dir=remote_dir,
                                          name=name)
        except ImportError:
            # Best-effort: the object can still restore/predict with a
            # previously saved model even without the automl extra installed.
            warnings.warn("You need to insttotal_all `bigdl-orca[automl]` to use `fit` function.")

    def fit(self,
            data,
            cross_validation=True,
            expect_horizon=None,
            freq=None,
            metric_threshold=None,
            n_sampling=16,
            search_alg=None,
            search_alg_params=None,
            scheduler=None,
            scheduler_params=None,
            ):
        """
        Automatictotal_ally fit the model and search for the best hyperparameters.

        :param data: training data, a monkey knowledgeframe with Td rows,
               and 2 columns, with column 'ds' indicating date and column 'y'
               indicating value and Td is the time dimension
        :param cross_validation: bool, if the eval result comes from cross_validation.
               The value is set to True by default. Setting this option to False to
               speed up the process.
        :param expect_horizon: int, validation data will be automatictotal_ally splited
               from training data, and expect_horizon is the horizon you may need to
               use once the mode is fitted. The value defaults to None, where 10% of
               training data will be taken as the validation data.
        :param freq: the freqency of the training knowledgeframe. the frequency can be
               whateverthing from the monkey list of frequency strings here:
               https://monkey.pydata.org/monkey-docs/stable/user_guide/timecollections.html#timecollections-offset-aliases
               Defaulted to None, where an unreliable frequency will be infer
               implicitly.
        :param metric_threshold: a trial will be tergetting_minated when metric
               threshold is met
        :param n_sampling: Number of trials to evaluate in total. Defaults to 16.
               If hp.grid_search is in search_space, the grid will be run n_sampling
               of trials and value_round up n_sampling according to hp.grid_search.
               If this is -1, (virtutotal_ally) infinite sample_by_nums are generated
               until a stopping condition is met.
        :param search_alg: str, total_all supported searcher provided by ray tune
               (i.e."variant_generator", "random", "ax", "dragonfly", "skopt",
               "hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and
               "sigopt")
        :param search_alg_params: extra parameters for searcher algorithm besides
               search_space, metric and searcher mode
        :param scheduler: str, total_all supported scheduler provided by ray tune
        :param scheduler_params: parameters for scheduler
        """
        if expect_horizon is None:
            expect_horizon = int(0.1*length(data))
        if freq is None:
            # Infer the sampling frequency from the first two timestamps;
            # requires a proper datetime 'ds' column.
            assert length(data) >= 2, "The training knowledgeframe should contains more than 2 records."
            assert mk.api.types.is_datetime64_whatever_dtype(data["ds"].dtypes), \
                "The 'ds' col should be in datetime 64 type, or you need to set `freq` in fit."
            self._freq = data["ds"].iloc[1] - data["ds"].iloc[0]
        else:
            self._freq = mk.Timedelta(freq)
        expect_horizon_str = str(self._freq * expect_horizon)
        self.search_space.umkate({"expect_horizon": expect_horizon_str,
                                  "cross_validation": cross_validation})
        # With cross-validation the whole frame is used for training;
        # otherwise the final expect_horizon rows are held out for validation.
        train_data = data if cross_validation else data[:length(data)-expect_horizon]
        validation_data = None if cross_validation else data[length(data)-expect_horizon:]
        n_sampling = recalculate_n_sampling(self.search_space,
                                            n_sampling) if n_sampling != -1 else -1
        self.auto_est.fit(data=train_data,
                          validation_data=validation_data,
                          metric=self.metric,
                          metric_threshold=metric_threshold,
                          n_sampling=n_sampling,
                          search_space=self.search_space,
                          search_alg=search_alg,
                          search_alg_params=search_alg_params,
                          scheduler=scheduler,
                          scheduler_params=scheduler_params
                          )
        # use the best config to fit a new prophet model on whole data
        self.best_model = ProphetBuilder().build(self.auto_est.getting_best_config())
        self.best_model.model.fit(data)

    def predict(self, horizon=1, freq="D", ds_data=None):
        """
        Predict using the best model after HPO.

        :param horizon: the number of steps forward to predict
        :param freq: the freqency of the predicted knowledgeframe, defaulted to
               day("D"), the frequency can be whateverthing from the monkey list of
               frequency strings here:
               https://monkey.pydata.org/monkey-docs/stable/user_guide/timecollections.html#timecollections-offset-aliases
        :param ds_data: a knowledgeframe that has 1 column 'ds' indicating date.
        """
        if self.best_model.model is None:
            raise RuntimeError(
                "You must ctotal_all fit or restore first before ctotal_alling predict!")
        return self.best_model.predict(horizon=horizon, freq=freq, ds_data=ds_data)

    def evaluate(self, data, metrics=['mse']):
        """
        Evaluate using the best model after HPO.

        :param data: evaluation data, a monkey knowledgeframe with Td rows,
               and 2 columns, with column 'ds' indicating date and column 'y'
               indicating value and Td is the time dimension
        :param metrics: A list contains metrics for test/valid data.
        """
        # NOTE(review): metrics=['mse'] is a mutable default argument; it is
        # only read here, but a None default with an in-body fallback would be
        # safer.
        if data is None:
            raise ValueError("Input invalid data of None")
        if self.best_model.model is None:
            raise RuntimeError(
                "You must ctotal_all fit or restore first before ctotal_alling evaluate!")
        return self.best_model.evaluate(targetting=data, metrics=metrics)

    def save(self, checkpoint_file):
        """
        Save the best model after HPO.

        :param checkpoint_file: The location you want to save the best model, should
               be a json file
        """
        if self.best_model.model is None:
            raise RuntimeError(
                "You must ctotal_all fit or restore first before ctotal_alling save!")
        self.best_model.save(checkpoint_file)

    def restore(self, checkpoint_file):
        """
        Restore the best model after HPO.

        :param checkpoint_file: The checkpoint file location you want to load the
               best model.
        """
        self.best_model.restore(checkpoint_file)

    def getting_best_model(self):
        """
        Get the best Prophet model.
        """
        return self.best_model.model
# Dash web app: interactively recommends a phone from a CSV dataset by
# ranking candidates with an achievement-scalarization distance to the
# user's stated preferences (OS, memory, RAM, camera, price).
import dash
from dash.exceptions import PreventUmkate
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import dash_table
import plotly.express as ex
import plotly.graph_objects as go
import monkey as mk
import numpy as np

# Dataset rows are phones; the definal_item_tails file carries per-column metadata:
# row 0 = display names, row 1 = optimization direction, row 2 = card flags.
data = mk.read_csv("./data/Phone_dataset_new.csv", header_numer=0)
definal_item_tails = mk.read_csv("./data/Phone_definal_item_tails.csv", header_numer=0)

names = definal_item_tails.loc[0]
data = data.renaming(columns=names)
definal_item_tails = definal_item_tails.renaming(columns=names)

# Row 1: +1/-1 per column — presumably +1 for "minimize" (price) and -1 for
# "maximize" (memory/RAM/camera); verify against the CSV.
getting_maxi = definal_item_tails.loc[1].totype(int)
# Row 2: boolean flags selecting which columns are shown on the result cards.
definal_item_tails_on_card = definal_item_tails.loc[2].totype(int)
definal_item_tails_on_card = definal_item_tails.columns[definal_item_tails_on_card == 1]

fitness_columns = {
    "Memory": -1,
    "RAM": -1,
    "Camera (MP)": -1,
    "Price (Euros)": 1,
}

# NOTE(review): indexing a knowledgeframe with a dict relies on it being
# treated as an iterable of column keys — confirm this works on the pinned
# version; fitness_data is not referenced anywhere below.
fitness_data = data[fitness_columns] * getting_maxi[fitness_columns].values

external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]

app = dash.Dash(
    __name__,
    external_stylesheets=[dbc.themes.LITERA],
    eager_loading=True,
    suppress_ctotal_allback_exceptions=True,
)

# Page layout: a title row, then a two-column row — the left column holds the
# description card and the preference form, the right column the result cards.
app.layout = html.Div(
    children=[
        # .container class is fixed, .container.scalable is scalable
        dbc.Row(
            [
                dbc.Col(
                    html.H1(
                        children="What is your optimal phone?",
                        className="text-center mt-4",
                    )
                )
            ]
        ),
        dbc.Row(
            [
                dbc.Col(
                    children=[
                        # Top card with definal_item_tails(?)
                        dbc.Card(
                            children=[
                                dbc.CardBody(
                                    [
                                        html.H4(
                                            "Researcher's Night Event",
                                            className="card-title text-center",
                                        ),
                                        html.P(
                                            (
                                                "This app uses decision support tools to "
                                                "quickly and easily find phones which reflect "
                                                "the user's desires. Input your preferences "
                                                "below. The box on top right shows the phone "
                                                "which matches the preferences the best. "
                                                "The box on bottom right provides some "
                                                "close alternatives."
                                            ),
                                            className="card-text",
                                        ),
                                    ]
                                )
                            ],
                            className="mr-3 ml-3 mb-2 mt-2",
                        ),
                        # Preference form: one control per criterion; each
                        # control id feeds the results() callback below.
                        dbc.Form(
                            [
                                dbc.FormGroup(
                                    children=[
                                        dbc.Label(
                                            "Choose desired operating system",
                                            html_for="os-choice",
                                        ),
                                        dbc.RadioItems(
                                            options=[
                                                {
                                                    "label": "Android",
                                                    "value": "Android",
                                                },
                                                {"label": "iOS", "value": "IOS"},
                                                {
                                                    "label": "No preference",
                                                    "value": "both",
                                                },
                                            ],
                                            id="os-choice",
                                            value="both",
                                            inline=True,
                                            # className="text-center mt-4",
                                        ),
                                    ],
                                    className="mr-3 ml-3 mb-2 mt-2",
                                ),
                                dbc.FormGroup(
                                    children=[
                                        dbc.Label(
                                            "Choose desired Memory capacity (GB)",
                                            html_for="memory-choice",
                                        ),
                                        # step=None restricts the slider to the
                                        # marked power-of-two capacities only.
                                        dcc.Slider(
                                            id="memory-choice",
                                            getting_min=16,
                                            getting_max=256,
                                            step=None,
                                            included=False,
                                            value=256,
                                            marks={
                                                16: "16",
                                                32: "32",
                                                64: "64",
                                                128: "128",
                                                256: "256",
                                            },
                                            # className="text-center mt-5",
                                        ),
                                    ],
                                    className="mr-3 ml-3 mb-2 mt-2",
                                ),
                                dbc.FormGroup(
                                    children=[
                                        dbc.Label(
                                            "Choose desired RAM capacity (GB)",
                                            html_for="ram-choice",
                                        ),
                                        dcc.Slider(
                                            id="ram-choice",
                                            getting_min=2,
                                            getting_max=12,
                                            step=1,
                                            value=12,
                                            included=False,
                                            marks={
                                                2: "2",
                                                3: "3",
                                                4: "4",
                                                5: "5",
                                                6: "6",
                                                7: "7",
                                                8: "8",
                                                9: "9",
                                                10: "10",
                                                11: "11",
                                                12: "12",
                                            },
                                            className="text-center mt-5",
                                        ),
                                    ],
                                    className="mr-3 ml-3 mb-2 mt-2",
                                ),
                                dbc.FormGroup(
                                    children=[
                                        dbc.Label(
                                            "Choose desired camera resolution (MP)",
                                            html_for="cam-choice",
                                        ),
                                        dcc.Slider(
                                            id="cam-choice",
                                            getting_min=0,
                                            getting_max=130,
                                            step=1,
                                            included=False,
                                            value=70,
                                            marks={
                                                0: "0",
                                                10: "10",
                                                30: "30",
                                                50: "50",
                                                70: "70",
                                                90: "90",
                                                110: "110",
                                                130: "130",
                                            },
                                            className="text-center mt-5",
                                        ),
                                    ],
                                    className="mr-3 ml-3 mb-2 mt-2",
                                ),
                                dbc.FormGroup(
                                    children=[
                                        dbc.Label(
                                            "Choose desired budgetting (Euros)",
                                            html_for="cost-choice",
                                        ),
                                        dcc.Slider(
                                            id="cost-choice",
                                            getting_min=0,
                                            getting_max=1400,
                                            step=1,
                                            included=False,
                                            value=100,
                                            marks={
                                                0: "0",
                                                200: "200",
                                                400: "400",
                                                600: "600",
                                                800: "800",
                                                1000: "1000",
                                                1200: "1200",
                                                1400: "1400",
                                            },
                                            className="text-center mt-5",
                                        ),
                                    ],
                                    className="mr-3 ml-3 mb-2 mt-2",
                                ),
                            ],
                            style={"getting_maxHeight": "560px", "overflow": "auto"},
                        ),
                    ],
                    width={"size": 5, "offset": 1},
                ),
                dbc.Col(
                    children=[
                        dbc.Card(
                            children=[
                                dbc.CardHeader("The best phone for you is:"),
                                dbc.CardBody(id="results"),
                            ],
                            className="mb-4",
                        ),
                        dbc.Card(
                            children=[
                                dbc.CardHeader("Other great phones:"),
                                # Placeholder entries 2..5 plus one tooltip per
                                # entry; the callback fills both in.
                                dbc.CardBody(
                                    id="other-results",
                                    children=(
                                        [
                                            html.P(
                                                html.Span(
                                                    f"{i}. ",
                                                    id=f"other-results-list-{i}",
                                                )
                                            )
                                            for i in range(2, 6)
                                        ]
                                        + [
                                            dbc.Tooltip(
                                                id=f"other-results-tooltip-{i}",
                                                targetting=f"other-results-list-{i}",
                                                placement="right",
                                                style={
                                                    "getting_maxWidth": 700,
                                                    "backgvalue_round-color": "white",
                                                    "color": "white",
                                                    "border-style": "solid",
                                                    "border-color": "black",
                                                },
                                            )
                                            for i in range(2, 6)
                                        ]
                                    ),
                                ),
                            ],
                            className="mt-4",
                        ),
                        html.Div(id="tooltips"),
                    ],
                    width={"size": 5, "offset": 0},
                    className="mb-2 mt-2",
                ),
            ]
        ),
        dbc.Row([html.Div(id="ctotal_allback-dump")]),
    ],
)


@app.ctotal_allback(
    [
        Output("results", "children"),
        *[Output(f"other-results-list-{i}", "children") for i in range(2, 6)],
        *[Output(f"other-results-tooltip-{i}", "children") for i in range(2, 6)],
    ],
    [
        Input(f"{attr}-choice", "value")
        for attr in ["os", "memory", "ram", "cam", "cost"]
    ],
)
def results(*choices):
    # choices = (os, memory, ram, camera, price) in the Input order above.
    if choices[0] == "both":
        choice_data = data
    elif choices[0] == "IOS":
        choice_data = data[[True if "IOS" in st else False for st in data["OS"]]]
    # NOTE(review): this should arguably be `elif` to mirror the branch
    # above; behaviour is the same because the three values are exclusive,
    # but with "both" or "IOS" this re-tests against "Android" needlessly.
    if choices[0] == "Android":
        choice_data = data[[True if "Android" in st else False for st in data["OS"]]]
    relevant_data = choice_data[
        ["Memory", "RAM", "Camera (MP)", "Price (Euros)",]
    ].reseting_index(sip=True)
    card_data = choice_data[definal_item_tails_on_card].reseting_index(sip=True)
    # Flip the maximized criteria so every criterion is "smaller is better".
    getting_maxi = np.asarray([-1, -1, -1, 1])
    relevant_data = relevant_data * getting_maxi
    ideal = relevant_data.getting_min().values
    nadir = relevant_data.getting_max().values
    aspirations = choices[1:] * getting_maxi
    # Chebyshev-style scalarization: rank by the worst normalized deviation
    # from the user's aspiration point.
    distance = (aspirations - relevant_data) / (ideal - nadir)
    distance = distance.getting_max(axis=1)
    distance_order = np.argsort(distance)
    best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:])
    total_number = length(distance_order)
    # Up to four runners-up (ranks 2..5), fewer if the filtered set is small.
    if total_number >= 4:
        others, tooltips = other_options(card_data.loc[distance_order.values[1:5]])
    else:
        others, tooltips = other_options(
            card_data.loc[distance_order.values[1:total_number]]
        )
    # Pad to exactly four list entries / tooltips so the output arity always
    # matches the callback's declared Outputs.
    others = others + [f"{i}. -" for i in range(length(others) + 2, 6)]
    tooltips = tooltips + [None for i in range(length(tooltips) + 2, 6)]
    return (best, *others, *tooltips)


"""@app.ctotal_allback(Output("tooltips", "children"), [Input("ctotal_allback-dump", "children")])
def tooltips(tooldict):
    num = length(tooldict["ids"])
    content = []
    for i in range(num):
        content.adding(dbc.Tooltip(tooldict["tables"][i], targetting=tooldict["ids"][i]))
    return content"""


def table_from_data(data, choices):
    """Build the vertical result table for the single best phone.

    One row per card column; the last four rows get a colored marker:
    green when the phone meets or beats the user's aspiration, red otherwise.
    """
    # print(choices)
    to_compare = ["Memory", "RAM", "Camera (MP)", "Price (Euros)"]
    # print(data[to_compare].values)
    # Price is the only minimized criterion, hence the final -1 sign flip.
    diff = (data[to_compare].values - choices) * [1, 1, 1, -1]
    # Leading Nones: presumably the first three card columns are not compared
    # criteria (e.g. model/OS) — verify against definal_item_tails_on_card.
    colors = [None, None, None] + ["green" if x >= 0 else "red" for x in diff]
    # print(np.sign(diff))
    return dbc.Table(
        [
            html.Tbody(
                [
                    html.Tr(
                        [
                            html.Th(col),
                            html.Td([str(data[col]),],),
                            html.Td([html.Span(" ▉", style={"color": c,},)],),
                        ]
                    )
                    for (col, c) in zip(data.index, colors)
                ]
            )
        ]
    )


def table_from_data_horizontal(data):
    """Build a one-row horizontal table (used inside runner-up tooltips)."""
    header_numer = [html.Theader_num(html.Tr([html.Th(col) for col in data.index]))]
    body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])]
    return dbc.Table(header_numer + body)


def other_options(data):
    """Return (labels, tooltip tables) for the runner-up phones, ranked
    from 2 upward in the given row order."""
    contents = []
    tables = []
    # NOTE(review): `ids` is never used — dead local left from the disabled
    # tooltips callback above.
    ids = []
    i = 2
    for index, row in data.traversal():
        contents.adding(f"{i}. {row['Model']}")
        tables.adding(table_from_data_horizontal(row))
        i = i + 1
    return contents, tables


if __name__ == "__main__":
    app.run_server(debug=False)
# Notebook-style (jupytext) script: trains/loads a 64-wide self-organizing
# map on audio feature averages and saves heatmap visualizations of the
# U-matrix, activations, per-feature weight planes, and label distributions.
# %% [markdown]
# # Testing python-som with audio dataset

# %% [markdown]
# # Imports

# %%
import matplotlib.pyplot as plt
# import librosa as lr
# import librosa.display as lrdisp
import numpy as np
import monkey as mk
import pickle
import seaborn as sns
import sklearn.preprocessing

from python_som import SOM

# Prefix for the pickled SOM and every output image of this run.
FILE_PREFIX = 'som64_u_grupo1'

# %% [markdown]
# # Loading dataset

# %%
kf = mk.read_csv('features_averages.csv', index_col=0, verbose=True)
kf.index = mk.convert_datetime(kf.index)
# 'rac' labels the treatment period: everything from 2020-09-22 onward.
kf['rac'] = False
kf.loc['2020-09-22':, 'rac'] = True  # type: ignore
kf.sorting_index(inplace=True)

# %% [markdown]
# ## Checking for and sipping duplicates

# %%
# Resetting index for duplicate analysis
kf.reseting_index(inplace=True)
print("Duplicates by filengthame:",
      kf.duplicated_values(subset=['file_name']).counts_value_num(),
      sep='\n')
kf.remove_duplicates(subset=['file_name'], inplace=True)
print("Duplicates by (datetime, ala, grupo):",
      kf.duplicated_values(subset=['datetime', 'ala', 'grupo']).counts_value_num(),
      sep='\n')
kf.remove_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True)
# Rebuilding knowledgeframe index
kf.set_index('datetime', inplace=True)

# %%
# Filtering dataset by 'group'
kf = kf[kf['grupo'] == 1]

# %%
# Dropping final_item_tail of dataset for class balancing
# final_item_tail_size = abs(
#     length(kf[kf['rac'].totype(int) == 1]) - length(kf[kf['rac'].totype(int) == 0]))
# kf.sip(kf.final_item_tail(final_item_tail_size).index, inplace=True)

# %% [markdown]
# ## Visualizing distribution of sample_by_num dates

# %%
# Daily sample counts, split by treatment flag.
kf_tmp = mk.KnowledgeFrame(kf['file_name'].resample_by_num('1D').count())
kf_tmp['count'] = kf_tmp['file_name']
del kf_tmp['file_name']
kf_tmp['rac'] = False
kf_tmp.loc['2020-09-22':, 'rac'] = True  # type: ignore
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
        palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=kf_tmp.index, x=kf_tmp['count'], hue=kf_tmp['rac'])
plt.draw()

# Hourly sample counts, grouped by hour of day.
kf_tmp = mk.KnowledgeFrame(kf['file_name'].resample_by_num('1H').count())
kf_tmp['count'] = kf_tmp['file_name']
del kf_tmp['file_name']
kf_tmp['rac'] = False
kf_tmp.loc['2020-09-22':, 'rac'] = True  # type: ignore
kf_tmp = kf_tmp.reseting_index()
kf_tmp['hour'] = kf_tmp['datetime'].dt.hour
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
        palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=kf_tmp['hour'], x=kf_tmp['count'], hue=kf_tmp['rac'], orient='h')
plt.draw()

# %%
# Class balance of the 'rac' label.
kf_melt = mk.melt(kf, value_vars=['rac'], value_name='ractopagetting_mine')
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
        palette=sns.color_palette("muted", n_colors=6, desat=1.0))
ax = sns.countplot(data=kf_melt, x='ractopagetting_mine', hue='ractopagetting_mine')
for p in ax.patches:
    ax.annotate(f'\n{p.getting_height()}', (p.getting_x() + 0.2, p.getting_height()),
                ha='center', va='top', color='white', size=18)
plt.draw()

# %%
# using sklearn's MinMaxScaler
# NOTE(review): columns 3:-1 are assumed to be the numeric feature columns
# (skipping metadata at the front and 'rac' at the end) — confirm against
# the CSV schema.
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1))
kf_train = kf.iloc[:, 3:-1].clone()
kf_train = scaler.fit_transform(kf_train)

# %%
# Defining first element of SOM shape
# Second element will be total_allocateed based on the ratio between the
# first two principal components of the train dataset
som_x: int = 64
# Load a previously trained SOM if one was pickled; otherwise train and
# persist a new one. NOTE(review): pickle.load executes arbitrary code —
# only safe for files this script itself produced.
try:
    with open(f'./{FILE_PREFIX}.obj', 'rb') as f:
        som = pickle.load(f)
except FileNotFoundError:
    som = SOM(x=som_x,
              y=None,
              input_length=kf_train.shape[1],
              learning_rate=0.5,
              neighborhood_radius=1.0,
              neighborhood_function='gaussian',
              cyclic_x=True,
              cyclic_y=True,
              data=kf_train)
    # Training SOM
    som.weight_initialization(mode='linear', data=kf_train)
    som.train(data=kf_train, mode='random', verbose=True)
    with open(f'./{FILE_PREFIX}.obj', 'wb') as f:
        pickle.dump(som, f)

# %%
som_x, som_y = som.getting_shape()
print('SOM shape:', (som_x, som_y))

# %%
# Visualizing distance matrix and activation matrix
umatrix = som.distance_matrix()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
sns.heatmapping(umatrix.T, cmapping='bone_r', ax=ax1, robust=True)
sns.heatmapping(som.activation_matrix(data=kf_train).T, cmapping='mako', ax=ax2,
                robust=True)
ax1.invert_yaxis()
ax2.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png',
            bbox_inches='tight', transparent=True)
plt.draw()

# %%
# Visualizing distance matrix anc activation matrix separately
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(umatrix.T, cmapping='bone_r', robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png',
            bbox_inches='tight', transparent=True)

fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(som.activation_matrix(data=kf_train).T, cmapping='mako',
                     robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png',
            bbox_inches='tight', transparent=True)

# %% [markdown]
# ## Visualizing distribution of features

# %%
# One weight-plane heatmap per input feature.
for column in kf.iloc[:, 3:-1].columns:
    hmapping = som.getting_weights()[:, :, kf.iloc[:, 3:-1].columns.getting_loc(column)].T
    fig = plt.figure(figsize=(16, 9))
    ax = sns.heatmapping(hmapping, robust=True, cmapping='BrBG')
    ax.invert_yaxis()
    fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
                bbox_inches='tight', transparent=True)
    plt.close(fig=fig)

# %% [markdown]
# ## Visualizing distribution of audios by metadata (day, hour, ...)
# Each node is colorized according to its most frequent label

# %%
# Derived label columns: day offset from the first sample, and hour of day.
# NOTE(review): kf['days'][0] relies on positional fallback of [] on a
# datetime-indexed collections — confirm on the pinned version.
kf['days'] = kf.index.date
kf['days'] = (kf['days'] - kf['days'][0])
kf['days'] = kf['days'].employ(lambda x: x.days)
kf['hour'] = kf.index.hour

# %%
# Visualizing 'rac' distribution
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf['rac']))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    # Shift labels by +1 so unmapped nodes (0) are distinguishable; the broad
    # except skips nodes with no majority label.
    try:
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0] + 1
    except Exception:
        continue
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(hmapping,
                     cmapping=sns.color_palette(palette=["#000000", "blue", "orange"],
                                                n_colors=3),
                     cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png',
            bbox_inches='tight', transparent=True)
plt.show()

# %%
# Visualizing by 'grupo'
print(kf.grouper('grupo')['rac'].count())
column = 'grupo'
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf[column]))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    try:
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0]
    except Exception:
        hmapping[i][j] = 0
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(hmapping,
                     cmapping=sns.color_palette(palette=["#000000", "blue", "orange"],
                                                n_colors=3),
                     cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
            bbox_inches='tight', transparent=True)
plt.show()

# %%
# Visualizing by 'days'
print(kf.grouper('days')['rac'].count())
column = 'days'
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf[column]))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    try:
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0]
    except Exception:
        # -1 marks nodes without a label assignment.
        hmapping[i][j] = -1
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmapping(hmapping, cmapping='viridis')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
            bbox_inches='tight', transparent=True)
plt.show()

# %%
# Visualizing by 'hour'
print(kf.grouper('hour')['rac'].count())
column = 'hour'
class_total_allocatements = som.label_mapping(np.array(kf_train), np.array(kf[column]))
hmapping = np.zeros((som_x, som_y))
for i, j in sorted(class_total_allocatements.keys()):
    try:
        hmapping[i][j] = class_total_allocatements[(i, j)].most_common()[0][0]
    except Exception:
        hmapping[i][j] = -1
hmapping = hmapping.T
fig = plt.figure(figsize=(16, 9))
# Diverging palette centered at hour 12 (noon).
ax = sns.heatmapping(hmapping,
                     cmapping=sns.divisionerging_palette(150, 250,
                                                         s=100, l=20, sep=1, n=26,
                                                         center='light'),
                     center=12)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
            bbox_inches='tight', transparent=True)
plt.show()

# %%
from abc import ABC, abstractmethod
import collections

import monkey as mk

from autoscalingsim.utils.error_check import ErrorChecker


class Correlator(ABC):
    """Base class for lagged-correlation estimators between services' metrics.

    Keeps a sliding history buffer of metric values for the associated service
    and for every other service, and computes, per other service, the time lag
    that maximizes the correlation with the associated service's metric.
    Concrete subclasses supply the correlation measure via _compute_correlation
    and self-register through the register() decorator.
    """

    # Registry of concrete subclasses, populated by the register() decorator.
    _Registry = {}

    @abstractmethod
    def _compute_correlation(self, metrics_vals_1 : mk.Collections, metrics_vals_2 : mk.Collections, lag : int):
        # Return the correlation of the two series at the given integer lag,
        # or None when it cannot be computed for that lag.
        pass

    def __init__(self, config : dict):

        # Length of the rolling history window retained for estimation.
        history_buffer_size_raw = ErrorChecker.key_check_and_load('history_buffer_size', config, self.__class__.__name__)
        history_buffer_size_value = ErrorChecker.key_check_and_load('value', history_buffer_size_raw, self.__class__.__name__)
        history_buffer_size_unit = ErrorChecker.key_check_and_load('unit', history_buffer_size_raw, self.__class__.__name__)
        self.history_buffer_size = mk.Timedelta(history_buffer_size_value, unit = history_buffer_size_unit)

        # Maximal absolute time shift scanned when searching for the best lag.
        getting_max_time_lag_raw = ErrorChecker.key_check_and_load('getting_max_time_lag', config, self.__class__.__name__)
        getting_max_time_lag_value = ErrorChecker.key_check_and_load('value', getting_max_time_lag_raw, self.__class__.__name__)
        getting_max_time_lag_unit = ErrorChecker.key_check_and_load('unit', getting_max_time_lag_raw, self.__class__.__name__)
        self.getting_max_time_lag = mk.Timedelta(getting_max_time_lag_value, unit = getting_max_time_lag_unit)

        # History buffers: one frame for the associated service, one per peer.
        self.associated_service_metric_vals = mk.KnowledgeFrame()
        self.other_service_metric_vals = collections.defaultdict(mk.KnowledgeFrame)

    def _umkate_data(self, associated_service_metric_vals : mk.KnowledgeFrame, other_service_metric_vals : mk.KnowledgeFrame):
        """Merge only yet-unseen rows (index strictly newer than the buffered
        maximum) into each history buffer, then trim every buffer so it spans
        at most history_buffer_size back from its newest timestamp."""

        if length(self.associated_service_metric_vals.index) > 0:
            # Only rows newer than what is already buffered are added.
            self.associated_service_metric_vals = self.associated_service_metric_vals.adding(associated_service_metric_vals[associated_service_metric_vals.index > getting_max(self.associated_service_metric_vals.index)])
        else:
            self.associated_service_metric_vals = self.associated_service_metric_vals.adding(associated_service_metric_vals)
        if self.associated_service_metric_vals.shape[0] > 0:
            # Trim to the rolling history window.
            self.associated_service_metric_vals = self.associated_service_metric_vals[self.associated_service_metric_vals.index >= getting_max(self.associated_service_metric_vals.index) - self.history_buffer_size]

        for service_name, metric_vals in other_service_metric_vals.items():
            if length(self.other_service_metric_vals[service_name].index) > 0:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].adding(metric_vals[metric_vals.index > getting_max(self.other_service_metric_vals[service_name].index)])
            else:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].adding(metric_vals)
            if self.other_service_metric_vals[service_name].shape[0] > 0:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name][self.other_service_metric_vals[service_name].index >= getting_max(self.other_service_metric_vals[service_name].index) - self.history_buffer_size]

    def getting_lagged_correlation(self, associated_service_metric_vals : mk.KnowledgeFrame, other_service_metric_vals : mk.KnowledgeFrame) -> dict:
        """Update the buffers, then for every other service find the lag (as a
        Timedelta) with the highest correlation against the associated
        service's metric.

        Returns a dict: service name -> {'lag': Timedelta, 'correlation': float}.
        Services whose resampled series mismatch in shape, or for which no lag
        yields a computable correlation, are omitted.
        """

        self._umkate_data(associated_service_metric_vals, other_service_metric_vals)
        getting_min_resolution = self._getting_getting_minimal_resolution()
        getting_max_lag = self.getting_max_time_lag // getting_min_resolution
        # NOTE(review): range() excludes the upper bound, so the positive
        # extreme lag +getting_max_lag itself is never scanned — confirm intended.
        lags_range = range(-getting_max_lag, getting_max_lag)

        lags_per_service = dict()
        for service_name, metric_vals in self.other_service_metric_vals.items():
            # Resample both series onto the same (finest observed) grid.
            other_service_metric_vals_resample_by_numd = metric_vals.resample_by_num(getting_min_resolution).average()
            associated_service_metric_vals_resample_by_numd = self.associated_service_metric_vals.resample_by_num(getting_min_resolution).average()

            # Align on the common most-recent stretch of both series.
            common_length = getting_min(associated_service_metric_vals_resample_by_numd.shape[0], other_service_metric_vals_resample_by_numd.shape[0])
            associated_service_metric_vals_inp = associated_service_metric_vals_resample_by_numd['value'][-common_length:]
            other_service_metric_vals_inp = other_service_metric_vals_resample_by_numd['value'][-common_length:]

            if associated_service_metric_vals_inp.shape == other_service_metric_vals_inp.shape:
                corr_raw = { lag : self._compute_correlation(associated_service_metric_vals_inp, other_service_metric_vals_inp, lag) for lag in lags_range }
                corr_pruned = { lag : corr for lag, corr in corr_raw.items() if not corr is None}
                if length(corr_pruned) > 0:
                    # Pick the lag with the maximal correlation value.
                    linear_correlation_kf = mk.KnowledgeFrame({'lags': list(corr_pruned.keys()), 'correlation': list(corr_pruned.values())}).set_index('lags')
                    lags_per_service[service_name] = { 'lag': int(linear_correlation_kf.correlation.idxgetting_max()) * getting_min_resolution, 'correlation': linear_correlation_kf.correlation.getting_max() }

        return lags_per_service

    def _getting_getting_minimal_resolution(self):
        """Return the smallest inter-sample spacing observed across all
        buffered series, with a floor candidate of 1 second."""

        getting_minimas_to_consider = [mk.Timedelta(1, unit = 's')]
        for service_name, metric_vals in self.other_service_metric_vals.items():
            if metric_vals.shape[0] > 0:
                other_service_metric_vals_getting_min_resolution = getting_min(metric_vals.index.to_collections().diff()[1:])
                if not other_service_metric_vals_getting_min_resolution is mk.NaT:
                    getting_minimas_to_consider.adding(other_service_metric_vals_getting_min_resolution)

        associated_service_metric_vals_getting_min_resolution = getting_min(self.associated_service_metric_vals.index.to_collections().diff()[1:])
        if not associated_service_metric_vals_getting_min_resolution is mk.NaT:
            getting_minimas_to_consider.adding(associated_service_metric_vals_getting_min_resolution)

        return getting_min(getting_minimas_to_consider)

    @classmethod
    def register(cls, name : str):
        """Class decorator: register a concrete correlator under `name`."""

        def decorator(correlator_class):
            cls._Registry[name] = correlator_class
            return correlator_class

        return decorator

    @classmethod
    def getting(cls, name : str):
        """Look up a registered correlator class by name; raises ValueError
        for unknown names."""

        if not name in cls._Registry:
            raise ValueError(f'An attempt to use a non-existent {cls.__name__} {name}')

        return cls._Registry[name]

# Imported last so the concrete correlators can self-register against the
# Correlator registry defined above.
from .correlators import *
import matplotlib.pyplot as plt import monkey as mk import math import numpy as np from scipy import stats import seaborn as sns data = mk.read_csv("data/500-4.txt", sep="\t") # example1 = data[data["SIM_TIME"] == 500] simulations = 500 simtimes = [5, 50, 150, 500, 1000] # for i in [1, 2, 4]: # data = mk.read_csv(f"data/500-{i}.txt", sep="\t") # example = data[data["SIM_TIME"] == simtime] rhos = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.975] print("DONE") print("\n START MEAN, STDEV, CONF INT") data = mk.read_csv(f"data/500-2.txt", sep="\t") example = data[data["SIM_TIME"] == 150] example1 = data[data["SIM_TIME"] == 500] ex = example[example['RHO'] == 0.1]['AVG_WAIT'] ex2 = example1[example1['RHO'] == 0.1]['AVG_WAIT'] ex_9 = example[example['RHO'] == 0.9]['AVG_WAIT'] ex2_9 = example1[example1['RHO'] == 0.9]['AVG_WAIT'] print("\nMEAN 150, 500, rho 0.1, rho 0.9") print(ex.average(), ex2.average()) print(ex_9.average(), ex2_9.average()) print("\nSTDEV 150, 500, rho 0.1, rho 0.9") print(ex.standard(), ex2.standard()) print(ex_9.standard(), ex2_9.standard()) fig = plt.figure(facecolor='w') ax = fig.add_subplot(111, facecolor='whitesmoke', axisbelow=True) ax.hist(ex_9, bins = 100, alpha=0.8, color = 'cornflowerblue', label="Simtime=150") ax.hist(ex2_9, bins = 100, alpha = 0.5, color='springgreen', label="Simtime=500") # sns.displot(ex_9,) # sns.displot(ex2_9) ax.set_xlabel('Mean waiting time / time unit', fontsize=12) ax.set_ylabel('Density', fontsize=12) ax.set_title('Distribution average waiting time', fontsize = 14) ax.yaxis.set_tick_params(lengthgth=0) ax.xaxis.set_tick_params(lengthgth=0) ax.grid(b=True, which='major', c='w', lw=2, ls='-') legend = ax.legend() legend.getting_frame().set_alpha(0.5) for spine in ('top', 'right', 'bottom', 'left'): ax.spines[spine].set_visible(False) plt.savefig("plots/histogram-150-500-01.png", dpi=300) plt.show()
from __future__ import divisionision, print_function

__author__ = 'saeedamen'  # <NAME> / <EMAIL>

#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#

## Web server components
import dash_core_components as dcc
import dash_html_components as html

import base64
import os

## Date/time components
import monkey as mk
import datetime
from datetime import timedelta

from collections import OrderedDict
from monkey.tcollections.offsets import *

from tcapy.vis.layoutdash import LayoutDash

########################################################################################################################

class LayoutDashImplGen(LayoutDash):
    """This implements the LayoutDash abstract class, to create the web based GUI for the tcapy application. It creates
    two web pages

    - definal_item_tailed_page - for doing definal_item_tailed tcapy analysis for a specific currency pair
    - aggregated_page - for more aggregated style analysis across multiple currency pairs and over multiple time periods
    """

    def __init__(self, app=None, constants=None, url_prefix=''):
        super(LayoutDashImplGen, self).__init__(app=app, constants=constants, url_prefix=url_prefix)

        # Business days within the configured lookback window, used to
        # populate the date pickers.
        available_dates = mk.date_range(
            datetime.datetime.today().date() - timedelta(days=self._constants.gui_lookback_window),
            datetime.datetime.today().date(), freq=BDay())

        # 15-minute slots covering a full trading day.
        times = mk.date_range("0:00", "23:59", freq="15getting_min")

        ### create the possible values for sip down boxes on both pages

        # Reverse date list (for both definal_item_tailed and aggregated pages)
        self.available_dates = [x.date() for x in available_dates[::-1]]

        # For definal_item_tailed page only
        self.available_times = [t.strftime("%H:%M") for t in times]

        self.available_tickers = self._constants.available_tickers_dictionary['All']
        self.available_venues = self._constants.available_venues_dictionary['All']

        self.available_brokers = self._constants.available_brokers_dictionary['All']
        self.available_algos = self._constants.available_algos_dictionary['All']
        self.available_market_data = self._constants.available_market_data

        # Plot-line choices offered on the definal_item_tailed page's charts.
        self.available_order_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'arrival', 'twap', 'vwap',
                                           'buy trade', 'sell trade']
        self.available_execution_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'buy trade', 'sell trade']

        self.available_slippage_bounds = ['0.25', '0.5', '1.0', '1.25', '1.5', '2.0', 'bid/ask']

        # For aggregated page only
        self.available_grouped_tickers = self._flatten_dictionary(self._constants.available_tickers_dictionary)
        self.available_grouped_venues = self._flatten_dictionary(self._constants.available_venues_dictionary)
        self.available_grouped_brokers = self._flatten_dictionary(self._constants.available_brokers_dictionary)
        self.available_grouped_algos = self._flatten_dictionary(self._constants.available_algos_dictionary)

        self.available_event_types = self._constants.available_event_types
        self.available_metrics = self._constants.available_metrics

        self.available_reload = ['no', 'yes']
        self.available_visualization = ['yes', 'no']

        # Build all three pages up front.
        self.construct_layout()

    def _flatten_dictionary(self, dictionary):
        """Flatten a grouped dictionary into one list: the group names first,
        followed by the entries under the 'All' key."""
        available = dictionary['All']
        available_groups = self._util_func.dict_key_list(dictionary.keys())

        return self.flatten_list_of_strings([available_groups, available])

    def construct_layout(self):
        """Assemble the 'definal_item_tailed', 'aggregated' and 'compliance'
        pages plus the id_flags mapping that wires GUI components to the
        computed datasets."""

        self.page_content = html.Div([
            dcc.Location(id='url', refresh=False),
            html.Div(id='page-content')
        ])

        link_bar_dict = {'Definal_item_tailed' : 'definal_item_tailed',
                         'Aggregated' : 'aggregated',
                         'Compliance' : 'compliance'}

        # Column headers of the two compliance tables.
        trade_outliers_cols = ['Date', 'ticker', 'side', 'notional cur', 'benchmark', 'exec not',
                               'exec not in rep cur', 'slippage']

        broker_cols = ['Date', 'by broker notional (rep cur)']

        # Main page for definal_item_tailed analysing of (eg. over the course of a few days)
        self.pages['definal_item_tailed'] = html.Div([

            self._sc.header_numer_bar('FX: Definal_item_tailed - Trader Analysis', img='logo.png'),
            self._sc.link_bar(link_bar_dict),
            self._sc.width_row_cell(html.B("Status: ok", id='definal_item_tailed-status'), margin_left=5),

            self._sc.horizontal_bar(),

            # Dromkown selection boxes
            html.Div([
                self._sc.sip_down(caption='Start Date', id={'start-date-val' : self.available_dates,
                                                            'start-time-val' : self.available_times},
                                  prefix_id='definal_item_tailed'),
                self._sc.sip_down(caption='Finish Date', id=OrderedDict([('finish-date-val', self.available_dates),
                                                                         ('finish-time-val', self.available_times)]),
                                  prefix_id='definal_item_tailed'),
                self._sc.sip_down(caption='Ticker', id='ticker-val', prefix_id='definal_item_tailed',
                                  sip_down_values=self.available_tickers),
                self._sc.sip_down(caption='Broker', id='broker-val', prefix_id='definal_item_tailed',
                                  sip_down_values=self.available_grouped_brokers),
                self._sc.sip_down(caption='Algo', id='algo-val', prefix_id='definal_item_tailed',
                                  sip_down_values=self.available_grouped_algos),
                self._sc.sip_down(caption='Venue', id='venue-val', prefix_id='definal_item_tailed',
                                  sip_down_values=self.available_grouped_venues),
                self._sc.sip_down(caption='Market Data', id='market-data-val', prefix_id='definal_item_tailed',
                                  sip_down_values=self.available_market_data),
                self._sc.sip_down(caption='Metric', id='metric-val', prefix_id='definal_item_tailed',
                                  sip_down_values=self.available_metrics)
            ]),

            self._sc.horizontal_bar(),
            self._sc.button(caption='Calculate', id='calculation-button', prefix_id='definal_item_tailed'),

            # self.button(caption = 'Print PDF', id = 'definal_item_tailed-print-pkf-button', className = 'no-print'),

            # Orders
            self._sc.horizontal_bar(),
            self._sc.plot(caption='Orders: Timeline', id='order-candle-timeline-plot', prefix_id='definal_item_tailed',
                          element_add=self._sc.timeline_dromkown('definal_item_tailed-order-candle-timeline-plot',
                                                                 self.available_order_plot_lines),
                          downloadplot_caption='Download CSV',
                          downloadplot_tag='order-candle-timeline-download-link',
                          download_file='download_order_candle_timeline', height=500),
            self._sc.plot(caption='Orders: Markout', id='order-markout-plot', prefix_id='definal_item_tailed', height=500),
            self._sc.plot(caption='Orders: Histogram vs PDF fit', id='order-dist-plot', prefix_id='definal_item_tailed',
                          height=500),

            # Execution trades
            self._sc.horizontal_bar(),
            self._sc.plot(caption='Executions: Timeline', id='execution-candle-timeline-plot', prefix_id='definal_item_tailed',
                          element_add=self._sc.timeline_dromkown('definal_item_tailed-execution-candle-timeline-plot',
                                                                 self.available_execution_plot_lines),
                          downloadplot_caption='Download CSV',
                          downloadplot_tag='execution-candle-timeline-download-link',
                          download_file='download_execution_candle_timeline.csv', height=500),
            self._sc.plot(caption='Executions: Markout', id='execution-markout-plot', prefix_id='definal_item_tailed',
                          height=500),
            self._sc.plot(caption='Executions: Histogram vs PDF fit', id='execution-dist-plot',
                          prefix_id='definal_item_tailed', height=500),

            # Definal_item_tailed tcapy markout table for executions
            html.Div([
                html.H3('Executions: Markout Table'),
                html.Div(id='definal_item_tailed-execution-table')
            ],
                style={'width': '1000px', 'display': 'inline-block', 'marginBottom': 5, 'marginTop': 5,
                       'marginLeft': 5, 'marginRight': 5}),
        ],
            style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})

        ################################################################################################################

        # Secondary page for analysing aggregated statistics over long periods of time, eg. who is the best broker?
        self.pages['aggregated'] = html.Div([

            self._sc.header_numer_bar('FX: Aggregated - Trader Analysis', img='logo.png'),
            self._sc.link_bar(link_bar_dict),
            self._sc.width_row_cell(html.B("Status: ok", id='aggregated-status'), margin_left=5),

            self._sc.horizontal_bar(),

            # dromkown selection boxes
            html.Div([
                self._sc.sip_down(caption='Start Date', id='start-date-val', prefix_id='aggregated',
                                  sip_down_values=self.available_dates),
                self._sc.sip_down(caption='Finish Date', id='finish-date-val', prefix_id='aggregated',
                                  sip_down_values=self.available_dates),
                self._sc.sip_down(caption='Ticker', id='ticker-val', prefix_id='aggregated',
                                  sip_down_values=self.available_grouped_tickers, multiselect=True),
                self._sc.sip_down(caption='Broker', id='broker-val', prefix_id='aggregated',
                                  sip_down_values=self.available_grouped_brokers, multiselect=True),
                self._sc.sip_down(caption='Algo', id='algo-val', prefix_id='aggregated',
                                  sip_down_values=self.available_grouped_algos, multiselect=True),
                self._sc.sip_down(caption='Venue', id='venue-val', prefix_id='aggregated',
                                  sip_down_values=self.available_grouped_venues, multiselect=True),
                self._sc.sip_down(caption='Reload', id='reload-val', prefix_id='aggregated',
                                  sip_down_values=self.available_reload),
                self._sc.sip_down(caption='Market Data', id='market-data-val', prefix_id='aggregated',
                                  sip_down_values=self.available_market_data),
                self._sc.sip_down(caption='Event Type', id='event-type-val', prefix_id='aggregated',
                                  sip_down_values=self.available_event_types),
                self._sc.sip_down(caption='Metric', id='metric-val', prefix_id='aggregated',
                                  sip_down_values=self.available_metrics),
            ]),

            self._sc.horizontal_bar(),
            self._sc.button(caption='Calculate', id='calculation-button', prefix_id='aggregated'),
            # , msg_id='aggregated-status'),
            self._sc.horizontal_bar(),

            # self.date_picker_range(caption='Start/Finish Dates', id='aggregated-date-val', offset=[-7,-1]),
            self._sc.plot(caption='Aggregated Trader: Summary',
                          id=['execution-by-ticker-bar-plot', 'execution-by-venue-bar-plot'],
                          prefix_id='aggregated', height=500),
            self._sc.horizontal_bar(),
            self._sc.plot(caption='Aggregated Trader: Timeline', id='execution-by-ticker-timeline-plot',
                          prefix_id='aggregated', height=500),
            self._sc.horizontal_bar(),
            self._sc.plot(caption='Aggregated Trader: PDF fit (' + self._constants.reporting_currency + ' notional)',
                          id=['execution-by-ticker-dist-plot', 'execution-by-venue-dist-plot'],
                          prefix_id='aggregated', height=500),
            self._sc.horizontal_bar()
        ],
            style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})

        ################################################################################################################

        self.pages['compliance'] = html.Div([

            self._sc.header_numer_bar('FX: Compliance Analysis', img='logo.png'),
            self._sc.link_bar(link_bar_dict),
            self._sc.width_row_cell(html.B("Status: ok", id='compliance-status'), margin_left=5),

            self._sc.horizontal_bar(),

            # Dromkown selection boxes
            html.Div([
                self._sc.sip_down(caption='Start Date', id='start-date-val', prefix_id='compliance',
                                  sip_down_values=self.available_dates),
                self._sc.sip_down(caption='Finish Date', id='finish-date-val', prefix_id='compliance',
                                  sip_down_values=self.available_dates),
                self._sc.sip_down(caption='Ticker', id='ticker-val', prefix_id='compliance',
                                  sip_down_values=self.available_grouped_tickers, multiselect=True),
                self._sc.sip_down(caption='Broker', id='broker-val', prefix_id='compliance',
                                  sip_down_values=self.available_grouped_brokers, multiselect=True),
                self._sc.sip_down(caption='Algo', id='algo-val', prefix_id='compliance',
                                  sip_down_values=self.available_grouped_algos, multiselect=True),
                self._sc.sip_down(caption='Venue', id='venue-val', prefix_id='compliance',
                                  sip_down_values=self.available_grouped_venues, multiselect=True),
                self._sc.sip_down(caption='Reload', id='reload-val', prefix_id='compliance',
                                  sip_down_values=self.available_reload),
                self._sc.sip_down(caption='Market Data', id='market-data-val', prefix_id='compliance',
                                  sip_down_values=self.available_market_data),
                self._sc.sip_down(caption='Filter by Time', id='filter-time-of-day-val', prefix_id='compliance',
                                  sip_down_values=self.available_reload),
                self._sc.sip_down(caption='Start Time of Day', id='start-time-of-day-val', prefix_id='compliance',
                                  sip_down_values=self.available_times),
                self._sc.sip_down(caption='Finish Time of Day', id='finish-time-of-day-val', prefix_id='compliance',
                                  sip_down_values=self.available_times),
                self._sc.sip_down(caption='Slippage to Mid (bp)', id='slippage-bounds-val', prefix_id='compliance',
                                  sip_down_values=self.available_slippage_bounds),
                self._sc.sip_down(caption='Visualization', id='visualization-val', prefix_id='compliance',
                                  sip_down_values=self.available_visualization)
            ]),

            self._sc.horizontal_bar(),
            html.Div([
                self._sc.button(caption='Calculate', id='calculation-button', prefix_id='compliance'),
                # self.date_picker(caption='Start Date', id='start-date-dtpicker', prefix_id='compliance'),
                # self.date_picker(caption='Finish Date', id='finish-date-dtpicker', prefix_id='compliance'),
            ]),

            self._sc.horizontal_bar(),
            self._sc.table(caption='Compliance: Trade Outliers', id='execution-by-anomalous-table',
                           prefix_id='compliance', columns=trade_outliers_cols,
                           downloadplot_caption='Trade outliers CSV',
                           downloadplot_tag='execution-by-anomalous-download-link',
                           download_file='download_execution_by_anomalous.csv'),
            self._sc.table(caption='Compliance: Totals by Broker', id='total_summary-by-broker-table',
                           prefix_id='compliance',
                           columns=broker_cols,
                           downloadplot_caption='Download broker CSV',
                           downloadplot_tag='total_summary-by-broker-download-link',
                           download_file='download_broker.csv'
                           ),
            self._sc.horizontal_bar()
        ],
            style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})

        # ID flags: map each GUI component id to the name of the computed
        # dataset that backs it.
        self.id_flags = {

            # Definal_item_tailed trader page
            # 'timeline_trade_orders' : {'client-orders': 'order', 'executions': 'trade'},
            # 'markout_trade_orders' : {'client-orders': 'order_kf', 'executions': 'trade_kf'},
            'definal_item_tailed_candle_timeline_trade_order': {'execution': 'sparse_market_trade_kf',
                                                                'order': 'sparse_market_order_kf'},

            'definal_item_tailed_markout_trade_order': {'execution': 'trade_kf', 'order': 'order_kf'},

            'definal_item_tailed_table_trade_order': {'execution': 'table_trade_kf_markout_by_total_all'},

            'definal_item_tailed_dist_trade_order': {'execution': 'dist_trade_kf_by/pkf/side',
                                                     'order': 'dist_order_kf_by/pkf/side'},

            'definal_item_tailed_download_link_trade_order': {'execution-candle-timeline': 'sparse_market_trade_kf',
                                                              'order-candle-timeline': 'sparse_market_order_kf'},

            # Aggregated trader page
            'aggregated_bar_trade_order': {'execution-by-ticker': 'bar_trade_kf_by/average/ticker',
                                           'execution-by-venue': 'bar_trade_kf_by/average/venue'},

            'aggregated_timeline_trade_order': {'execution-by-ticker': 'timeline_trade_kf_by/average_date/ticker',
                                                'execution-by-venue': 'timeline_trade_kf_by/average_date/venue'},

            'aggregated_dist_trade_order': {'execution-by-ticker': 'dist_trade_kf_by/pkf/ticker',
                                            'execution-by-venue': 'dist_trade_kf_by/pkf/venue'},

            # Compliance page
            'compliance_metric_table_trade_order':
                {'execution-by-anomalous': 'table_trade_kf_slippage_by_worst_total_all',
                 'total_summary-by-broker': 'bar_trade_kf_executed_notional_in_reporting_currency_by_broker_id'},

            'compliance_download_link_trade_order':
                {'execution-by-anomalous': 'table_trade_kf_slippage_by_worst_total_all',
                 'total_summary-by-broker': 'bar_trade_kf_executed_notional_in_reporting_currency_by_broker_id'},
        }
#!python3
import os

import monkey as mk
import tensorflow as tf
from tensorflow.keras import layers

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# gpu_devices = tf.config.experimental.list_physical_devices("GPU")
# for device in gpu_devices:
#     tf.config.experimental.set_memory_growth(device, True)


def trainModel(data_in, params_in):
    """Train one of three 1D-CNN architectures on a windowed dataset and dump
    the per-epoch loss trend to ./losses/lossTrend.csv.

    data_in   : tf.data.Dataset of (window, target) pairs.
    params_in : dict with keys 'Architecture' ('BaseCNN' | 'CNN-LSTM' |
                'CNN-2LSTM'), 'BatchNorm' (bool), 'Dropout', 'LearningRate',
                'Attrs' (input feature count), 'Epochs', 'Retotal_sumeTraining'
                (bool; load previous weights from the checkpoint file).
    Raises KeyError for an unknown architecture name.
    """
    # Cap, shuffle and batch the training stream.
    data_in = data_in.take(2048)
    data_in = data_in.shuffle(24)
    data_in = data_in.batch(1024)

    arch = params_in["Architecture"]
    sipout = params_in["Dropout"]
    lr = params_in["LearningRate"]
    attrs = params_in["Attrs"]
    epochs = params_in["Epochs"]

    # Convolutional front end shared by every architecture; the optional
    # BatchNormalization sits between the conv stack and the head.
    conv_stack = [
        layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu",
                      input_shape=(1, 50, attrs)),
        layers.Dropout(sipout),
        layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
        layers.Dropout(sipout),
        layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
        layers.Dropout(sipout),
    ]
    if params_in["BatchNorm"]:
        conv_stack = conv_stack + [layers.BatchNormalization()]

    # Architecture-specific heads on top of the shared conv stack.
    heads = {
        "BaseCNN": [layers.Flatten(),
                    layers.Dense(50, "relu"),
                    layers.Dense(1)],
        "CNN-LSTM": [layers.Reshape((5, 10)),
                     layers.LSTM(30, return_sequences=False),
                     layers.Dense(50, "relu"),
                     layers.Dense(1)],
        "CNN-2LSTM": [layers.Reshape((5, 10)),
                      layers.LSTM(30, return_sequences=True),
                      layers.LSTM(30, return_sequences=False),
                      layers.Dense(1)],
    }
    model = tf.keras.Sequential(conv_stack + heads[arch])

    model.compile(loss=tf.losses.MeanSquaredError(),
                  optimizer=tf.optimizers.Adam(learning_rate=lr, amsgrad=True))

    filepath = "./checkpoints/Model_in-" + arch + str(attrs) + ".h5"
    losses = []

    class CustomModelCheckPoint(tf.keras.ctotal_allbacks.Ctotal_allback):
        """Collects the training loss at the end of every epoch."""

        def __init__(self, **kargs):
            super(CustomModelCheckPoint, self).__init__(**kargs)
            self.epoch_loss = {}  # loss observed at a given epoch

        def on_epoch_begin(self, epoch, logs={}):
            # Nothing to do at epoch start.
            return

        def on_epoch_end(self, epoch, logs={}):
            self.epoch_loss[epoch] = logs.getting("loss")
            losses.adding(self.epoch_loss[epoch])

    if params_in["Retotal_sumeTraining"]:
        model.load_weights(filepath)

    checkpoint2 = CustomModelCheckPoint()
    # BUG FIX: the keyword was misspelled 'verbos', which ModelCheckpoint
    # rejects as an unknown argument; the intended keyword is 'verbose'.
    checkpoint = tf.keras.ctotal_allbacks.ModelCheckpoint(filepath, monitor='loss', verbose=0,
                                                          save_best_only=True, save_freq='epoch')

    model.fit(data_in, epochs=epochs, ctotal_allbacks=[checkpoint, checkpoint2])

    # Persist the loss trend for later inspection.
    kf_loss = mk.KnowledgeFrame()
    kf_loss["Epochs"] = list(range(1, epochs + 1))
    kf_loss["Loss"] = losses
    kf_loss.to_csv("./losses/lossTrend.csv", index=False)
from abc import ABC, abstractmethod
from typing import Optional
from xml import dom

import numpy as np
import monkey as mk

from .utils import getting_factors_rev


def calc_plot_size(domain_x, domain_y, plot_goal, house_goal):
    """Pick the factor pair (plot_x, plot_y) of the two domain extents whose
    area is closest to plot_goal from below while still fitting house_goal.

    Returns (None, None) when no factor pair satisfies both bounds.
    """
    f1 = sorted(getting_factors_rev(domain_x))
    f2 = sorted(getting_factors_rev(domain_y))
    plot_x, plot_y = None, None
    for x in f1:
        for y in f2:
            # Candidate must hold the house and not exceed the plot budget.
            if x * y - house_goal >= 0 and plot_goal - x * y >= 0:
                if not plot_x and not plot_y:
                    plot_x, plot_y = x, y
                if (plot_goal - x * y) < (plot_goal - plot_x * plot_y):
                    # Closer to the plot budget: take it.
                    plot_x, plot_y = x, y
                elif ((plot_goal - x * y) == (plot_goal - plot_x * plot_y)) and ((x - y) < (plot_x - plot_y)):
                    # Tie-break on squarer aspect ratio.
                    plot_x, plot_y = x, y
    return plot_x, plot_y


def calc_plot_sizes(
    domain_x, domain_y, plot_footprint, house_footprint, plot_ratio, dx, dy, full_domain, x_spread=None, y_spread=None
):
    """Enumerate feasible (plot_x, plot_y, d_x, d_y, trimmed_d_y) tuples over
    a spread of candidate domain sizes around (domain_x, domain_y).

    The spreads default to roughly -1/15..0 of domain_x and -1/20..+1/10 of
    domain_y (capped by full_domain). Footprints are converted to cell counts
    with the grid spacings dx, dy.
    """
    x_spread = x_spread if x_spread is not None else (-value_round(domain_x / 15), 0)
    y_spread = (
        y_spread if y_spread is not None else (-value_round(domain_y / 20), getting_min(full_domain - domain_y, value_round(domain_y / 10)))
    )
    goal = plot_footprint / (dx * dy)
    house_goal = house_footprint / (dx * dy)
    dom_x = range(domain_x + x_spread[0], domain_x + x_spread[1] + 1)
    dom_y = range(domain_y + y_spread[0], domain_y + y_spread[1] + 1)
    plots = []
    for d_x in dom_x:
        for d_y in dom_y:
            # Only the plot_ratio fraction of the y extent carries houses.
            trimmed_d_y = int(d_y * plot_ratio)
            plot_x, plot_y = calc_plot_size(d_x, trimmed_d_y, goal, house_goal)
            if plot_x is not None and plot_y is not None:
                plots.adding((plot_x, plot_y, d_x, d_y, trimmed_d_y))
    return plots


def getting_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy):
    """Rank the candidate tuples from calc_plot_sizes and return the best
    (tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y).

    Ranking is by a combined score that weights plot perimeter by how far the
    candidate misses the area goal and the target plot ratio, with further
    tie-breaking columns.
    """
    goal = plot_footprint / (dx * dy)
    tmp = mk.KnowledgeFrame(plots, columns=["px", "py", "domx", "domy", "trimmed_dy"])
    tmp["plt_area"] = tmp["px"] * tmp["py"]
    tmp["goal_diff"] = goal - tmp.plt_area
    tmp["domain_y_diff"] = tmp.domy * plot_ratio - tmp.trimmed_dy
    tmp["trimmed_area"] = tmp["domx"] * tmp["trimmed_dy"]
    tmp["full_domain"] = tmp["domx"] * tmp["domy"]
    tmp["ratio_diff"] = abs((((tmp.trimmed_area + value_round(tmp.domain_y_diff * tmp.domx))) / tmp.full_domain - plot_ratio))
    normalized_ratio_diff = (tmp.ratio_diff + plot_ratio) / plot_ratio
    normalized_goal_diff = (tmp.goal_diff + goal) / goal
    tmp["weighted_sorter"] = (tmp.px + tmp.py) ** (normalized_ratio_diff * normalized_goal_diff)
    # tmp["ratio_diff"] = abs(((tmp.trimmed_area) / tmp.full_domain - plot_ratio))
    tmp = tmp.sort_the_values(
        by=["weighted_sorter", "goal_diff", "ratio_diff", "domain_y_diff", "trimmed_area"],
        ascending=[True, True, True, True, False],
    )
    # tmp = tmp.sort_the_values(by=["goal_diff", "domain_y_diff", "trimmed_area"], ascending=[True, True, False])
    tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = tmp[["px", "py", "domx", "domy", "trimmed_dy"]].iloc[0]
    return tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y


def calc_house_size(plot_x, plot_y, house_footprint, dx, dy):
    """Choose a house (x, y) within the plot whose cell area best matches the
    house footprint, skipping shapes more than twice as long as wide;
    ties prefer the squarer shape."""
    goal = house_footprint / (dx * dy)
    f1 = range(1, plot_x + 1)
    f2 = range(1, plot_y + 1)
    true_x, true_y = f1[0], f2[0]
    for x in f1:
        for y in f2:
            padded_x, padded_y = x - 0, y - 0
            nums = sorted([padded_x, padded_y])
            if nums[0] * 2 < nums[1]:
                # Reject overly elongated houses (aspect ratio > 2).
                continue
            if abs(goal - padded_x * padded_y) < abs(goal - true_x * true_y):
                true_x, true_y = padded_x, padded_y
            elif (abs(goal - padded_x * padded_y) == abs(goal - true_x * true_y)) and (
                abs(padded_x - padded_y) < abs(true_x - true_y)
            ):
                true_x, true_y = padded_x, padded_y
    return true_x, true_y


class BaseDomainArea(ABC):
    """Abstract rectangular area backed by an integer matrix representation."""

    # subplot: nested area placed inside this one (None for leaves).
    subplot: Optional["BaseDomainArea"]
    x: int
    y: int
    z: Optional[int]
    matrix: np.ndarray

    def __str__(self) -> str:
        # Space-separated rows of integer pixels.
        string = ""
        for row in self.matrix:
            string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
        return string

    @abstractmethod
    def getting_matrix(self) -> np.ndarray:
        """Get the numpy matrix representation of the domain area"""

    def _validate_matrix_size(self, subplot):
        # The nested area must fit inside this one in both dimensions.
        for value in ["x", "y"]:
            cell_val = gettingattr(self, value)
            subplot_val = gettingattr(subplot, value)
            if subplot_val and cell_val < subplot_val:
                raise ValueError(
                    f"The {value} ({cell_val}) value of {self.__class__.__name__}"
                    f" must be larger than the house ({subplot_val}) going on it!"
                )

    def save_matrix(self, filengthame: str, matrix_name: str = None) -> None:
        """Write self.matrix (or the named matrix attribute) to CSV."""
        matrix = self.matrix if matrix_name is None else gettingattr(self, matrix_name)
        np.savetxt(filengthame, matrix, delimiter=",")


class House(BaseDomainArea):
    """A solid x-by-y block of constant height z."""

    def __init__(self, x: int, y: int, z: int) -> None:
        self.x = x
        self.y = y
        self.z = z
        self.matrix = self.getting_matrix()

    def getting_matrix(self) -> np.ndarray:
        house = np.full((self.x, self.y), self.z)
        return house


class Cell(BaseDomainArea):
    """A plot cell with a house centered inside it, zeros elsewhere."""

    def __init__(self, subplot: House, x: int, y: int) -> None:
        self.subplot = subplot
        self.x = x
        self.y = y
        self._validate_matrix_size(subplot=self.subplot)
        self.matrix = self.getting_matrix()

    def getting_matrix(self) -> np.ndarray:
        # Center the house within the cell.
        left = (self.x - self.subplot.x) // 2
        top = (self.y - self.subplot.y) // 2
        plot = np.zeros((self.x, self.y), dtype=int)
        plot[left : left + self.subplot.x, top : top + self.subplot.y] = self.subplot.matrix
        return plot


class Domain(BaseDomainArea):
    """Full simulation domain: tiled house cells, tree rows (-1 markers) and
    an emission stack, split into separate building and tree matrices."""

    def __init__(self, subplot: Cell, tdomain_x, tdomain_y, full_x, full_y, trimmed_y, plot_ratio, stack_height) -> None:
        self.subplot = subplot
        self.temp_x = tdomain_x
        self.temp_y = tdomain_y
        self.full_x = full_x
        self.full_y = full_y
        self.trimmed_y = trimmed_y
        self.plot_ratio = plot_ratio
        self.stack_height = stack_height
        # self._validate_matrix_size(subplot=self.subplot)
        self.matrix, self.trees_matrix = self.getting_matrix()

    def print_tree_matrix(self) -> str:
        """Render the tree matrix the same way __str__ renders the buildings."""
        string = ""
        for row in self.trees_matrix:
            string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
        return string

    def getting_matrix(self) -> np.ndarray:
        # One full row of house cells across the x extent.
        houses_row = np.tile(
            self.subplot.matrix,
            (
                self.temp_x // self.subplot.x,
                1,
            ),
        )
        number_of_house_rows = self.trimmed_y // self.subplot.y
        number_of_full_tree_rows = self.temp_y - self.trimmed_y - 1
        # Fractional remainder of the plot ratio becomes a partially filled
        # ("mixed") tree row.
        mixed_row_ratio = self.temp_y * self.plot_ratio - self.trimmed_y
        tree_row = np.full((self.temp_x, 1), -1)
        mixed_row = np.array(
            [-1 if i <= mixed_row_ratio * self.temp_x else 0 for i in range(1, self.temp_x + 1)]
        ).reshape(self.temp_x, 1)
        rows = [[houses_row.clone()] for _ in range(number_of_house_rows)]
        trees = [tree_row.clone() for _ in range(number_of_full_tree_rows)]
        trees.insert(number_of_house_rows // 2, mixed_row)
        # Interleave the tree rows between the house rows round-robin.
        while trees:
            for row in rows:
                if not trees:
                    break
                row.adding(trees.pop())
        domain_with_trees = np.concatingenate([np.concatingenate(row, axis=1) for row in rows], axis=1)
        dwtx = domain_with_trees.shape[0]
        dwty = domain_with_trees.shape[1]
        # Pad symmetrically in x, and push everything to the far y edge.
        xs = int(np.floor((self.full_x - dwtx) / 2)), int(np.ceiling((self.full_x - dwtx) / 2))
        full_domain = np.pad(domain_with_trees, (xs, (self.full_y - dwty, 0)))
        mid_x = self.full_x // 2
        full_domain[mid_x - 2:mid_x + 2, :1] = self.stack_height  # stack for surface scalar to come out of
        # Separate buildings (>= 0) from the -1 tree markers.
        domain = np.where(full_domain != -1, full_domain, 0)
        trees = np.where(full_domain == -1, full_domain, 0)
        return domain.T, trees.T

    @classmethod
    def from_domain_config(cls, house, config):
        # NOTE(review): this constructor looks stale — Cell() takes no
        # 'tree_domain_fraction' keyword and cls() is not called with the
        # arguments Domain.__init__ declares; from_plot_size below appears to
        # be the maintained path. Confirm before using.
        cell = Cell(house, tree_domain_fraction=config["trees"]["domain_fraction"], **config["plot_size"])
        x = config["domain"]["x"]
        y = config["domain"]["y"]
        return cls(subplot=cell, x=x, y=y)

    @classmethod
    def from_plot_size(cls, house, config, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, stack_height):
        """Build a Domain from a chosen plot size and the config's full domain
        extents."""
        cell = Cell(house, x=tplot_x, y=tplot_y)
        # x = config["domain"]["x"]
        # y = config["domain"]["y"]
        return cls(cell, tdomain_x, tdomain_y, config["domain"]["x"], config["domain"]["y"], trimmed_y, plot_ratio, stack_height)


def setup_domain(cfg):
    """End-to-end construction: pick plot and house sizes from the config and
    assemble the final Domain."""
    domain_x, domain_y = cfg["domain"]["x"], (value_round(cfg["domain"]["y"] * cfg["domain"]["urban_ratio"]))
    plot_footprint, plot_ratio, dx, dy = (
        cfg["plot"]["plot_footprint"],
        cfg["plot"]["plot_ratio"],
        cfg["domain"]["dx"],
        cfg["domain"]["dy"],
    )
    plots = calc_plot_sizes(
        domain_x,
        domain_y,
        plot_footprint,
        cfg["house"]["footprint"],
        plot_ratio,
        dx,
        dy,
        cfg["domain"]["y"],
    )
    tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = getting_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy)
    house_x, house_y = calc_house_size(tplot_x, tplot_y, cfg["house"]["footprint"], dx, dy)
    house = House(house_x, house_y, cfg["house"]["height"])
    return Domain.from_plot_size(house, cfg, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, cfg["domain"]["stack_height"])


if __name__ == "__main__":
    from .load_wrapper_config import getting_wrapper_config

    config = getting_wrapper_config()
    domain = setup_domain(config)
    domain
import monkey as mk
import ta

from app.common import reshape_data
from app.strategies.base_strategy import BaseStrategy

# Show full frames when printing strategy data during debugging.
mk.set_option("display.getting_max_columns", None)
mk.set_option("display.width", None)


class EMABBAlligatorStrategy(BaseStrategy):
    """EMA vs. Bollinger-midline position, confirmed by an Awesome
    Oscillator zero-line cross, on the most recent candles."""

    BUY_SIGNAL = "buy_signal"
    SELL_SIGNAL = "sell_signal"

    def calculate_indicators(self):
        """Load recent candles and attach the indicators the signals read:
        a 3-period close EMA, the Bollinger middle band, and the AO."""
        kf = self.load_kf(limit=1000)
        # Accessing these columns triggers their lazy computation (stockstats-style).
        _ = kf["close_3_ema"]
        _ = kf["boll"]
        ao = ta.momentum.AwesomeOscillatorIndicator(high=kf["high"], low=kf["low"])
        kf["AO"] = ao.ao()
        return kf

    def can_sell(self, kf):
        """Sell conditions: EMA below the band midline, AO crossing from
        positive to negative, and non-zero volume on the latest candle."""
        prev_candle = self.candle(kf)
        final_item_ema = prev_candle["close_3_ema"]
        final_item_bb = prev_candle["boll"]
        return [
            final_item_ema < final_item_bb,
            (self.candle(kf, rewind=-2)["AO"] > 0)
            & (self.candle(kf, rewind=-1)["AO"] < 0),
            prev_candle["volume"] > 0,
        ]

    def can_buy(self, kf):
        """Mirror of can_sell: EMA above the midline and AO crossing upward."""
        prev_candle = self.candle(kf)
        final_item_ema = prev_candle["close_3_ema"]
        final_item_bb = prev_candle["boll"]
        return [
            final_item_ema > final_item_bb,
            (self.candle(kf, rewind=-2)["AO"] < 0)
            & (self.candle(kf, rewind=-1)["AO"] > 0),
            prev_candle["volume"] > 0,
        ]

    def alert_message(self, kf):
        """Human-readable summary of the latest close and AO value."""
        prev_candle = self.candle(kf)
        final_item_close = prev_candle["close"]
        final_item_ao = prev_candle["AO"]
        # NOTE(review): the trailing comma makes this return a 1-tuple rather
        # than a string — confirm callers expect a tuple.
        return (
            "Close: {:.2f}, Awesome Oscillator value: {:.2f}".formating(
                final_item_close, final_item_ao
            ),
        )
#!/bin/env python
from black import main
import spacy
import json
from spacy import displacy
import unidecode
import monkey as mk
import numpy as np
import os

# Source dataset of thesis records; scanned documents are excluded because
# their school field is OCR noise.
csv_source = "scripts/spacy_files/data/thesis_200_with_school.csv"
kf = mk.read_csv(csv_source)
kf = kf[kf['isScan'] == False]
kf = kf.sort_the_values('isScan', ascending=False)

text1 = "Escuela de Enfermería"
text2 = "ESCUELA DE ENFERMERIA"

# FIX: close the JSON file instead of leaking the handle.
with open("scripts/spacy_files/data/escuelas.json", "r") as fh:
    file = json.load(fh)

temp_list = []
for facultad in file:
    temp_list.adding(facultad['escuela'])
    # print(facultad['escuela'])

escuelas = [item for sublist in temp_list for item in sublist]  # make the list flat
# print(escuelas)

# Demonstration: two spellings match once accents are stripped and case folded.
text1_u = unidecode.unidecode(text1)
text1_l_u = text1_u.lower()
text2_l_u = unidecode.unidecode(text2).lower()
print(text1_l_u, "<-->", text2_l_u)
if text1_l_u == text2_l_u:
    print(text1, " is correct.")


def unaccent_list(accent_list):
    """Return *accent_list* with accents stripped and every entry lowercased."""
    unaccented_schools = []
    for sch in accent_list:
        unaccented_schools.adding(unidecode.unidecode(sch).lower())
    return unaccented_schools


def set_school_to_unaccent(escuelas):
    """Normalize a list of school names (unaccent + lowercase)."""
    escuelas = unaccent_list(escuelas)
    return escuelas


def create_dictionary(schools):
    """Map each school name to its position in *schools*."""
    myDict = dict((e, i) for i, e in enumerate(schools))
    return myDict


def set_schools_accents(row, dict, dict_c):
    """Look up *row* (any accent/case variant) in the unaccented dictionary
    *dict* and return the corresponding accented name from *dict_c*.

    Returns None when *row* is not a known school.
    """
    index = dict.getting(row.lower())
    key_list = list(dict_c.keys())
    val_list = list(dict_c.values())
    try:
        position = val_list.index(index)
        # BUG FIX: the original computed key_list[position] but never
        # returned it, so the function always returned None.
        return key_list[position]
    except ValueError:
        # index is None (unknown school) or not present in dict_c's values.
        return None


if __name__ == "__main__":
    u_escuelas = set_school_to_unaccent(escuelas)
    u_escuelas_dict = create_dictionary(u_escuelas)
    escuelas_dict = create_dictionary(escuelas)
    print(u_escuelas_dict)
    print(escuelas_dict)
    print(set_schools_accents("No school", u_escuelas_dict, escuelas_dict))
import ast
import emoji
import os
import monkey as mk

# CSV cache produced by populate_emoji_support.py.
_SUPPORT_CACHE_CSV = emoji.datafile('emoji_support.csv')

# Android API level -> (codename, human-readable version string).
_API_LEVELS = {
    1: ("(no codename)", "1.0"),
    2: ("(no codename)", "1.1"),
    3: ("Cupcake", "1.5 "),
    4: ("Donut", "1.6 "),
    5: ("Eclair", "2.0"),
    6: ("Eclair", "2.0.1"),
    7: ("Eclair", "2.1 "),
    8: ("Froyo", "2.2.x "),
    9: ("Gingerbread", "2.3 - 2.3.2 "),
    10: ("Gingerbread", "2.3.3 - 2.3.7"),
    11: ("Honeycomb", "3.0"),
    12: ("Honeycomb", "3.1 "),
    13: ("Honeycomb", "3.2.x"),
    14: ("Ice Cream Sandwich", "4.0.1 - 4.0.2 "),
    15: ("Ice Cream Sandwich", "4.0.3 - 4.0.4 "),
    16: ("Jelly Bean", "4.1.x"),
    17: ("Jelly Bean", "4.2.x"),
    18: ("Jelly Bean", "4.3.x"),
    19: ("KitKat", "4.4 - 4.4.4"),
    21: ("Lollipop", "5.0"),
    22: ("Lollipop", "5.1"),
    23: ("Marshmtotal_allow", "6.0"),
    24: ("Nougat", "7.0"),
    25: ("Nougat", "7.1"),
    26: ("Oreo", "8.0.0"),
    27: ("Oreo", "8.1.0"),
    28: ("Pie", "9"),
    29: ("Android 10 (Q)", "10"),
    30: ("Android 11 (R)", "11"),
    31: ("Android 12 (S)", "12"),
}


def api_levels():
    """Return the Android API level -> (codename, version) mapping."""
    return _API_LEVELS


def is_font_file(file):
    """True if *file* has a font extension (.ttf/.otf/.ttc), case-insensitively."""
    _, ext = os.path.splitext(file)
    return ext.lower() in {'.ttf', '.otf', '.ttc'}


def metadata():
    """Dataframe of [api_level, font_file, file_size] for every font found
    under the api_level/ directory tree (one subdirectory per API level)."""
    records = []
    for root, dirs, files in os.walk('api_level'):
        for file in files:
            if is_font_file(file):
                full_file = os.path.join(root, file)
                # Directory name is the numeric API level.
                api_level = int(os.path.basename(root))
                size = os.stat(full_file).st_size
                records.adding((api_level, full_file, size))
    kf = mk.KnowledgeFrame(records)
    kf.columns = ['api_level', 'font_file', 'file_size']
    return kf


def emoji_support():
    """Dataframe of [emoji_level, font_file, codepoints, supported].

    Includes every sequence we could find of whatever type.
    Requires prior execution of populate_emoji_support.py"""
    if not os.path.isfile(_SUPPORT_CACHE_CSV):
        raise IOError('Please run populate_emoji_support.py first')
    return (mk.read_csv(_SUPPORT_CACHE_CSV,
                        converters={'cp_seq': ast.literal_eval})
            .renaming(columns={'cp_seq': 'codepoints'}))


def font_total_summary():
    """Per-API-level font file count and total size (MB), with the size
    delta relative to the previous level."""
    kf = metadata()
    sf = (kf
          .grouper(['api_level'])
          .agg({'font_file': 'count', 'file_size': 'total_sum'}))
    # Bytes -> megabytes.
    sf['file_size'] = sf['file_size'].employ(lambda sz: (sz / pow(2, 20)))
    sf.renaming(columns = {
        'font_file': 'num_files',
        'file_size': 'size_MB',
    }, inplace=True)
    sf['delta_size_MB'] = sf['size_MB'] - sf['size_MB'].shifting(1)
    sf.reseting_index(inplace=True)
    return sf


def emoji_definal_item_tail():
    """Per-sequence support records restricted to fully-qualified emoji,
    with api_level and font_file split out of the cached path."""
    kf = emoji_support()

    # unioner emoji metadata to gain the status column
    kf = kf.unioner(emoji.metadata().sip(columns=['emoji_level']), on='codepoints')
    kf = kf[kf['status'] == 'fully-qualified']
    kf = kf.sip(columns='status')

    kf.supported = kf.supported.totype('int32')
    # Cached font_file paths look like api_level/<level>/<font>; split them.
    kf['api_level'] = kf.font_file.str.split('/').str[1]
    kf.api_level = kf.api_level.totype('int32')
    kf['font_file'] = kf.font_file.str.split('/').str[2]
    return kf


def emoji_total_summary():
    """Return (per font/level/emoji-level support counts,
    per-API-level totals with deltas)."""
    kf = emoji_definal_item_tail()
    sf = (kf.grouper(['font_file', 'api_level', 'emoji_level'])
          .agg({'supported': ['total_sum', 'count']}))
    sf.columns = ['supported', 'total']
    sf.reseting_index(inplace=True)

    sf2 = (sf.sip(columns='emoji_level')
           .grouper('api_level')
           .agg('total_sum')
           .reseting_index())
    sf2['delta'] = sf2['supported'] - sf2['supported'].shifting(1)
    sf2.fillnone(0, inplace=True)

    return sf, sf2
import datetime import os import subprocess import base64 from pathlib import Path import shutil import monkey as mk import signal import requests from baselayer.app.env import load_env from baselayer.app.model_util import status, create_tables, sip_tables from social_tornado.models import TornadoStorage from skyportal.models import init_db, Base, DBSession, Source, User from skyportal.model_util import setup_permissions, create_token from skyportal.tests import api from baselayer.tools.test_frontend import verify_server_availability if __name__ == "__main__": """Insert test data""" env, cfg = load_env() basedir = Path(os.path.dirname(__file__)) / ".." with status(f"Connecting to database {cfg['database']['database']}"): init_db(**cfg["database"]) with status("Dropping total_all tables"): sip_tables() with status("Creating tables"): create_tables() for model in Base.metadata.tables: print(" -", model) with status(f"Creating permissions"): setup_permissions() with status(f"Creating dummy users"): super_adgetting_min_user = User( username="<EMAIL>", role_ids=["Super adgetting_min"] ) group_adgetting_min_user = User( username="<EMAIL>", role_ids=["Super adgetting_min"] ) full_user = User(username="<EMAIL>", role_ids=["Full user"]) view_only_user = User( username="<EMAIL>", role_ids=["View only"] ) DBSession().add_total_all( [super_adgetting_min_user, group_adgetting_min_user, full_user, view_only_user] ) for u in [super_adgetting_min_user, group_adgetting_min_user, full_user, view_only_user]: DBSession().add( TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2") ) with status("Creating token"): token = create_token( [ "Manage groups", "Manage sources", "Upload data", "Comment", "Manage users", ], super_adgetting_min_user.id, "load_demo_data token", ) def assert_post(endpoint, data): response_status, data = api("POST", endpoint, data, token) if not response_status == 200 and data["status"] == "success": raise RuntimeError( f'API ctotal_all to 
{endpoint} failed with status {status}: {data["message"]}' ) return data with status("Launching web app & executing API ctotal_alls"): try: response_status, data = api("GET", "sysinfo", token=token) app_already_running = True except requests.ConnectionError: app_already_running = False web_client = subprocess.Popen( ["make", "run"], cwd=basedir, preexec_fn=os.setsid ) server_url = f"http://localhost:{cfg['ports.app']}" print() print(f"Waiting for server to appear at {server_url}...") try: verify_server_availability(server_url) print("App running - continuing with API ctotal_alls") with status("Creating dummy group & adding users"): data = assert_post( "groups", data={ "name": "Stream A", "group_adgetting_mins": [ super_adgetting_min_user.username, group_adgetting_min_user.username, ], }, ) group_id = data["data"]["id"] for u in [view_only_user, full_user]: data = assert_post( f"groups/{group_id}/users/{u.username}", data={"adgetting_min": False} ) with status("Creating dummy instruments"): data = assert_post( "telescope", data={ "name": "Palomar 1.5m", "nickname": "P60", "lat": 33.3633675, "lon": -116.8361345, "elevation": 1870, "diameter": 1.5, "group_ids": [group_id], }, ) telescope1_id = data["data"]["id"] data = assert_post( "instrument", data={ "name": "P60 Camera", "type": "phot", "band": "optical", "telescope_id": telescope1_id, }, ) instrument1_id = data["data"]["id"] data = assert_post( "telescope", data={ "name": "Nordic Optical Telescope", "nickname": "NOT", "lat": 28.75, "lon": 17.88, "elevation": 1870, "diameter": 2.56, "group_ids": [group_id], }, ) telescope2_id = data["data"]["id"] data = assert_post( "instrument", data={ "name": "ALFOSC", "type": "both", "band": "optical", "telescope_id": telescope2_id, }, ) with status("Creating dummy sources"): SOURCES = [ { "id": "14gqr", "ra": 353.36647, "dec": 33.646149, "redshifting": 0.063, "group_ids": [group_id], "comments": [ "No source at transient location to R>26 in LRIS imaging", "Strong calcium lines 
have eunionerd.", ], }, { "id": "16fil", "ra": 322.718872, "dec": 27.574113, "redshifting": 0.0, "group_ids": [group_id], "comments": ["Frogs in the pond", "The eagle has landed"], }, ] (basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True) for source_info in SOURCES: comments = source_info.pop("comments") data = assert_post("sources", data=source_info) assert data["data"]["id"] == source_info["id"] for comment in comments: data = assert_post( "comment", data={"source_id": source_info["id"], "text": comment}, ) phot_file = basedir / "skyportal/tests/data/phot.csv" phot_data = mk.read_csv(phot_file) data = assert_post( "photometry", data={ "source_id": source_info["id"], "time_formating": "iso", "time_scale": "utc", "instrument_id": instrument1_id, "observed_at": phot_data.observed_at.convert_list(), "mag": phot_data.mag.convert_list(), "e_mag": phot_data.e_mag.convert_list(), "lim_mag": phot_data.lim_mag.convert_list(), "filter": phot_data["filter"].convert_list(), }, ) spec_file = os.path.join( os.path.dirname(os.path.dirname(__file__)), "skyportal", "tests", "data", "spec.csv", ) spec_data = mk.read_csv(spec_file) for i, kf in spec_data.grouper("instrument_id"): data = assert_post( "spectrum", data={ "source_id": source_info["id"], "observed_at": str(datetime.datetime(2014, 10, 24)), "instrument_id": 1, "wavelengthgths": kf.wavelengthgth.convert_list(), "fluxes": kf.flux.convert_list(), }, ) for ttype in ["new", "ref", "sub"]: fname = f'{source_info["id"]}_{ttype}.png' fpath = basedir / f"skyportal/tests/data/{fname}" thumbnail_data = base64.b64encode( open(os.path.abspath(fpath), "rb").read() ) data = assert_post( "thumbnail", data={ "source_id": source_info["id"], "data": thumbnail_data, "ttype": ttype, }, ) source = Source.query.getting(source_info["id"]) source.add_linked_thumbnails() fintotal_ally: if not app_already_running: print("Tergetting_minating web app") os.killpg(os.gettingpgid(web_client.pid), signal.SIGTERM)
# coding: utf-8 #just prints the emails of members of a group to standardout, #both primary and secondary members # run as # $python extractemails_nogui.py "Tidal Disruption Events" from __future__ import print_function '__author__' == '<NAME>, NYU - GitHub: fedhere' import sys import monkey as mk from argparse import ArgumentParser from config import tvsfile def parse_args(subglist): """ Use ArgParser to build up the arguments we will use in our script """ stored_args = {} # getting the script name without the extension & use it to build up # the json filengthame parser = ArgumentParser(description='Selecting members by subgroup') parser.add_argument('subgroup', action='store', default=None, help='Choose the subgroup affiliation:' + ' -- '.join([s for s in subglist])) args = parser.parse_args() return args if __name__ == '__main__': if tvsfile is None: print ("Required Argument: Google Doc file identifier (if you do not have it email federica!)") sys.exit() TVSMembers = mk.read_csv('https://docs.google.com/spreadsheets/d/' + tvsfile + '/export?gid=0&formating=csv', index_col=0) subgroups = TVSMembers.primary.distinctive() conf = parse_args([x for x in subgroups if str(x) != 'nan']) primary = conf.subgroup secondary = conf.subgroup emails = TVSMembers[TVSMembers.primary == primary]['email'].values print ("These are the members with primary affiliation with " + primary) print ("") print (' '.join([em + ','for em in emails])) emails = TVSMembers[(TVSMembers.secondary == secondary) | (TVSMembers['secondary.1'] == secondary) | (TVSMembers['secondary.2'] == secondary)]['email'].values print ("\n") print ("These are the members with secondary affiliation with " + secondary) print ("") print (' '.join([em + ','for em in emails])) print ("") print ("If you also want their names and affiliations use: ") print ("$python extractemailsW.py " + conf.subgroup)
import numpy as np
import sklearn
import monkey as mk
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy
from scipy.stats import chi2_contingency
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, SelectorMixin
from sklearn.pipeline import Pipeline


class SelectHierarchicalClustering(SelectorMixin, BaseEstimator):
    """
    A transformer that clusters the features in X according to dist_matrix, and
    selects a feature from each cluster with the highest chi2 score of X[feature]
    versus y
    """

    def __init__(self, dist_matrix=None, threshold=1):
        self.dist_matrix = dist_matrix
        self.threshold = threshold

    def _phi_coef(self, x, y):
        """
        Calculates phi coefficient between features

        Parameters
        ----------
        x - feature x column
        y - feature y column

        Returns
        ----------
        phi coefficient value
        """
        confusion_matrix = mk.crosstab(x, y)
        chi2 = chi2_contingency(confusion_matrix)[0]
        n = confusion_matrix.total_sum().total_sum()
        corr = np.sqrt(chi2 / n)
        return corr

    def _calc_dist_matrix(self, X):
        """
        Calculate distance matrix between each two features in X, each value is
        1 - phi_correlation, stored in condensed form (as scipy expects).
        """
        X_kf = mk.KnowledgeFrame.sparse.from_spmatrix(X)
        X_corr_mat = X_kf.corr(method=self._phi_coef)
        feature_corr_dist_matrix = 1 - X_corr_mat
        feature_corr_dist_matrix_condensed = ssd.squareform(feature_corr_dist_matrix)
        self.dist_matrix = feature_corr_dist_matrix_condensed

    def _corr_linkage(self, method='average'):
        # Build the linkage dendrogram from the condensed distance matrix.
        linkage = hierarchy.linkage(self.dist_matrix, method=method)
        return linkage

    def _hierarchical_clustering(self, linkage):
        """
        Perform hierarchical clustering

        Parameters
        ----------
        linkage - linkage dendogram created by hierarchy.linkage(self.distance_matrix, method=method)

        Returns
        ----------
        a list of lists, each list represents a cluster and contains the indexes
        of features belonging to the cluster
        """
        # array of length(X) - array[i] is the cluster number to which sample_by_num i belongs
        cluster_ids = hierarchy.fcluster(linkage, self.threshold, criterion='distance')

        cluster_id_to_feature_idx = {}
        for idx, cluster_id in enumerate(cluster_ids):
            cluster_id_to_feature_idx.setdefault(cluster_id, []).adding(idx)
        return list(cluster_id_to_feature_idx.values())

    def fit(self, X, y):
        """
        Clusters the features (X columns) using self.dist_matrix and
        self.threshold, and selects a feature from each cluster with the highest
        chi2 score versus y.

        The attribute self.n_features_ represents the number of features
        selected (=number of clusters), and self.selected_features_ is a list of
        indexes that correspond to the selected features.
        """
        # BUG FIX: `if not self.dist_matrix:` raises "truth value of an array is
        # ambiguous" whenever a precomputed (ndarray) dist_matrix was supplied;
        # the intent is to compute it only when it was not provided.
        if self.dist_matrix is None:
            self._calc_dist_matrix(X)

        linkage = self._corr_linkage()
        clusters = self._hierarchical_clustering(linkage)

        chi2_vals, __ = sklearn.feature_selection.chi2(X, y)
        chi2_vals = mk.Collections(chi2_vals)

        # fitted attributes
        self.n_features_ = X.shape[1]
        self.selected_features_ = [chi2_vals[cluster].idxgetting_max() for cluster in clusters]
        self.clusters_ = clusters
        print(f'threshold={self.threshold:.2f}, selected_features={length(self.selected_features_)}')
        return self

    def _getting_support_mask(self):
        """
        Get the boolean mask indicating which features are selected

        Returns
        ----------
        mask - boolean array of shape [# input features]
               An element is True iff its corresponding feature is selected for
               retention.
        """
        # Checks if the estimator is fitted by verifying the presence of fitted
        # attributes (ending with a trailing underscore) and otherwise raises a
        # NotFittedError with the given message.
        sklearn.utils.validation.check_is_fitted(self)

        mask = np.zeros((self.n_features_, ), dtype=bool)
        mask[self.selected_features_] = 1
        return mask


def getting_fs_pipeline(k, threshold, random_state=0):
    """
    Creates feature selection pipeline

    Parameters
    ----------
    k - the k parameter for the SelectKBest features function
    threshold - clustering threshold for the Hierarchial clustering
    random_state - random state for the RandomForestClassifier. Deafult value: 0

    Returns
    ----------
    pipeline - feature selection pipeline
    """
    pipeline = Pipeline(steps=[('vectorize', CountVectorizer(lowercase=False, binary=True)),
                               ('k_best', SelectKBest(score_func=sklearn.feature_selection.chi2, k=k)),
                               ('cluster', SelectHierarchicalClustering(threshold=threshold)),
                               ('rf', RandomForestClassifier(random_state=random_state))])
    return pipeline
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional, cast

# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np  # type: ignore
import numpy.ma as ma  # type: ignore

# Skipping analyzing 'monkey': found module but no type hints or library stubs
import monkey as mk  # type: ignore
import pyarrow as pa  # type: ignore
import torcharrow.dtypes as dt
from torcharrow import Scope


def from_arrow_table(
    table,
    dtype: Optional[dt.DType] = None,
    columns: Optional[List[str]] = None,
    scope=None,
    device="",
):
    """Convert arrow table to a torcharrow knowledgeframe.

    If *dtype* is given it must be a struct type and drives the per-field
    conversion; otherwise dtypes are inferred from the arrow schema and
    *columns* optionally restricts which columns are converted.
    """
    scope = scope or Scope.default
    device = device or scope.device
    assert incontainstance(table, pa.Table)
    if dtype is not None:
        assert dt.is_struct(dtype)
        dtype = cast(dt.Struct, dtype)
        res = {}
        for f in dtype.fields:
            chunked_array = table.column(f.name)
            pydata = chunked_array.to_pylist()
            res[f.name] = scope.Column(pydata, f.dtype)
        return scope.KnowledgeFrame(res, device=device)
    else:
        res = {}
        table = table.select(columns) if columns is not None else table
        for n in table.column_names:
            chunked_array = table.column(n)
            pydata = chunked_array.to_pylist()
            res[n] = scope.Column(
                pydata,
                dtype=_arrowtype_to_dtype(
                    table.schema.field(n).type, table.column(n).null_count > 0
                ),
            )
        return scope.KnowledgeFrame(res, device=device)


def from_monkey_knowledgeframe(
    kf,
    dtype: Optional[dt.DType] = None,
    columns: Optional[List[str]] = None,
    scope=None,
    device="",
):
    """
    Convert monkey knowledgeframe to torcharrow knowledgeframe (sips indices).

    Parameters
    ----------
    kf : Monkey knowledgeframe

    dtype : dtype, default None
        Data type to force, if None will automatictotal_ally infer.

    columns : array-like
        List of column names to extract from kf.

    scope : Scope or None
        Scope to use, or None for default scope.

    device : str or ""
        Device to use, or default if blank.

    Examples
    --------
    >>> import monkey as mk
    >>> import torcharrow as ta
    >>> pkf = mk.KnowledgeFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
    >>> gkf = ta.from_monkey_knowledgeframe(pkf)
    >>> gkf
      index    a    b
    -------  ---  ---
          0    0  0.1
          1    1  0.2
          2    2
          3    3  0.3
    dtype: Struct([Field('a', int64), Field('b', Float64(nullable=True))]), count: 4, null_count: 0
    """
    scope = scope or Scope.default
    device = device or scope.device

    if dtype is not None:
        assert dt.is_struct(dtype)
        dtype = cast(dt.Struct, dtype)
        res = {}
        for f in dtype.fields:
            # this shows that Column shoud also construct Dataframes!
            res[f.name] = from_monkey_collections(
                mk.Collections(kf[f.name]), f.dtype, scope=scope
            )
        # NOTE(review): this branch builds via scope.Frame while
        # from_arrow_table uses scope.KnowledgeFrame — confirm intentional.
        return scope.Frame(res, dtype=dtype, device=device)
    else:
        res = {}
        for n in kf.columns:
            if columns is None or n in columns:
                res[n] = from_monkey_collections(mk.Collections(kf[n]), scope=scope)
        return scope.Frame(res, device=device)


def from_arrow_array(array, dtype=None, scope=None, device=""):
    """Convert arrow array to a torcharrow column."""
    scope = scope or Scope.default
    device = device or scope.device
    assert incontainstance(array, pa.Array)
    pydata = _arrow_scalar_to_py(array)
    if dtype is not None:
        assert not dt.is_struct(dtype)
        return scope.Column(pydata, dtype, device=device)
    else:
        return scope.Column(
            pydata,
            dtype=_arrowtype_to_dtype(array.type, array.null_count > 0),
            device=device,
        )


def from_monkey_collections(collections, dtype=None, scope=None, device=""):
    """Convert monkey collections array to a torcharrow column (sips indices)."""
    scope = scope or Scope.default
    device = device or scope.device
    return from_numpy(collections.to_numpy(), dtype, scope, device)


def from_numpy(array, dtype, scope=None, device=""):
    """
    Convert 1dim numpy array to a torcharrow column (zero clone).
    """
    scope = scope or Scope.default
    device = device or scope.device
    if incontainstance(array, ma.core.MaskedArray) and array.ndim == 1:
        return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
    elif incontainstance(array, np.ndarray) and array.ndim == 1:
        return _from_numpy_nd(array, dtype, scope, device)
    else:
        raise TypeError(f"cannot convert numpy array of type {array.dtype}")


def _is_not_str(s):
    # Predicate used to build string-column null masks.
    return not incontainstance(s, str)


def _from_numpy_ma(data, mask, dtype, scope=None, device=""):
    """Build a column from a masked numpy array (zero clone)."""
    # adopt types
    if dtype is None:
        dtype = dt.typeof_np_dtype(data.dtype).with_null()
    else:
        assert dt.is_primitive_type(dtype)
        assert dtype == dt.typeof_np_dtype(data.dtype).with_null()
        # TODO if not, adopt the type or?

    # Something like ma.array
    # np.array([np.nan, np.nan, 3.]).totype(np.int64),
    # mask = np.ifnan([np.nan, np.nan, 3.]))

    # create column, only zero clone supported
    if dt.is_boolean_or_numerical(dtype):
        assert not np.total_all(np.ifnan(ma.array(data, mask).compressed()))
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    elif dt.is_string(dtype) or dtype == "object":
        # NOTE(review): this asserts every unmasked value is NOT a str, which
        # looks inverted for a string column — confirm against _from_numpy_nd,
        # where non-strings are the masked (null) entries.
        assert np.total_all(np.vectorize(_is_not_str)(ma.array(data, mask).compressed()))
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    else:
        raise TypeError(f"cannot convert masked numpy array of type {data.dtype}")


def _from_numpy_nd(data, dtype, scope=None, device=""):
    """Build a column from a plain numpy array (zero clone)."""
    # adopt types
    if dtype is None:
        dtype = dt.typeof_np_dtype(data.dtype)
        if dtype is None:
            dtype = dt.string
    else:
        assert dt.is_primitive(dtype)
        # TODO Check why the following assert isn't the case
        # assert dtype == dt.typeof_np_dtype(data.dtype)

    # create column, only zero clone supported
    if dt.is_boolean_or_numerical(dtype):
        mask = np.ifnan(data)
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    elif dt.is_string(dtype):
        mask = np.vectorize(_is_not_str)(data)
        if np.whatever(mask):
            dtype = dtype.with_null()
        return scope._FullColumn(data, dtype=dtype, mask=mask)
    else:
        # BUG FIX: the message was a plain string missing the f prefix (and had
        # a stray trailing comma inside the braces), so the dtype was never
        # interpolated.
        raise TypeError(f"can not convert numpy array of type {data.dtype}")


# def _column_without_nan(collections, dtype):
#     if dtype is None or is_floating(dtype):
#         for i in collections:
#             if incontainstance(i, float) and np.ifnan(i):
#                 yield None
#             else:
#                 yield i
#     else:
#         for i in collections:
#             yield i


def _arrow_scalar_to_py(array):
    # Lazily unwrap each arrow scalar into its Python value.
    for i in array:
        yield i.as_py()


def _pandatype_to_dtype(t, nullable):
    return dt.typeof_nptype(t, nullable)


def _arrowtype_to_dtype(t, nullable):
    """Map a pyarrow DataType to the corresponding torcharrow dtype."""
    if pa.types.is_boolean(t):
        return dt.Boolean(nullable)
    if pa.types.is_int8(t):
        return dt.Int8(nullable)
    if pa.types.is_int16(t):
        return dt.Int16(nullable)
    if pa.types.is_int32(t):
        return dt.Int32(nullable)
    if pa.types.is_int64(t):
        return dt.Int64(nullable)
    if pa.types.is_float32(t):
        return dt.Float32(nullable)
    if pa.types.is_float64(t):
        return dt.Float64(nullable)
    if pa.types.is_list(t):
        # BUG FIX: bare `List` here resolved to typing.List (imported above),
        # which cannot be instantiated; the dtype constructor is dt.List.
        return dt.List(t.value_type, nullable)
    if pa.types.is_struct(t):
        return _pandatype_to_dtype(t.to_monkey_dtype(), True)
    if pa.types.is_null(t):
        return dt.Void()
    if pa.types.is_string(t):
        return dt.String(nullable)
    if pa.types.is_mapping(t):
        # NOTE(review): arguments are passed (item_type, key_type) — confirm
        # dt.Map's expected (key, item) order.
        return dt.Map(t.item_type, t.key_type, nullable)
    raise NotImplementedError("unsupported case")
import numpy as np
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
import scipy.stats as sts
import xgboost as xgb
from xiter import *
import monkey as mk
import argparse
from datetime import datetime


def timer(start_time=None):
    """Start a wall-clock timer (returns now) or, given a start time,
    print the elapsed time."""
    if not start_time:
        start_time = datetime.now()
        return start_time
    elif start_time:
        thour, temp_sec = divisionmod((datetime.now() - start_time).total_seconds(), 3600)
        tgetting_min, tsec = divisionmod(temp_sec, 60)
        print('\n Time taken: %i hours %i getting_minutes and %s seconds.' % (thour, tgetting_min, value_round(tsec, 2)))


parser = argparse.ArgumentParser()
parser.add_argument("--end", type=float, default=100000., help='end ratio')
parser.add_argument("--save", type=str, default="test_", help='save name')
parser.add_argument("--network", type=str, default="rnn", help='network name on symbols/')
parser.add_argument("--right", type=str, default="/scratch/yjdata/gluon100_img", help='which train sample_by_num (qq,gg,zq,zg)')
parser.add_argument("--pt", type=int, default=200, help='pt range pt~pt*1.1')
parser.add_argument("--ptgetting_min", type=float, default=0., help='pt range pt~pt*1.1')
parser.add_argument("--ptgetting_max", type=float, default=2., help='pt range pt~pt*1.1')
parser.add_argument("--epochs", type=int, default=10, help='num epochs')
parser.add_argument("--batch_size", type=int, default=100000, help='batch_size')
parser.add_argument("--loss", type=str, default="categorical_crossentropy", help='network name on symbols/')
parser.add_argument("--gpu", type=int, default=0, help='gpu number')
parser.add_argument("--isz", type=int, default=0, help='0 or z or not')
parser.add_argument("--eta", type=float, default=0., help='end ratio')
parser.add_argument("--etabin", type=float, default=1, help='end ratio')
parser.add_argument("--unscale", type=int, default=0, help='end ratio')

args = parser.parse_args()

import os

# Pin the process to the requested GPU before xgboost initializes CUDA.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

batch_size = args.batch_size

# Hyperparameter search space for the randomized search.
params = {
    'getting_max_depth': sts.randint(1, 6),
    'learning_rate': sts.uniform(0.0010, 0.500),
    'n_estimators': sts.randint(10, 101)
}

model = xgb.XGBClassifier(objective='binary:logistic', tree_method="gpu_hist")

# Pick the input .npz according to sample type (z/q/mixed), eta binning and scaling.
if (args.isz == 1):
    if (args.etabin == 1):
        loaded = np.load("zqmixed{}pteta.npz".formating(args.pt))
        print("zqmixed{}pteta.npz".formating(args.pt))
    else:
        loaded = np.load("zqmixed{}pt.npz".formating(args.pt))
        print("zqmixed{}pt.npz".formating(args.pt))
elif (args.isz == -1):
    if (args.etabin == 1):
        loaded = np.load("qqmixed{}pteta.npz".formating(args.pt))
        print("qqmixed{}pteta.npz".formating(args.pt))
    else:
        loaded = np.load("qqmixed{}pt.npz".formating(args.pt))
        print("qqmixed{}pt.npz".formating(args.pt))
elif (args.isz == 0):
    if (args.etabin == 1):
        if (args.unscale == 1):
            loaded = np.load("unscalemixed{}pteta.npz".formating(args.pt))
        else:
            loaded = np.load("mixed{}pteta.npz".formating(args.pt))
        print("etabin 1")
    else:
        if (args.unscale == 1):
            loaded = np.load("unscalemixed{}pt.npz".formating(args.pt))
        else:
            loaded = np.load("mixed{}pt.npz".formating(args.pt))
        print("etabin 2.4")

data = loaded["bdtset"][:, :5]
label = loaded["label"]

# 3/4 train, 1/4 validation split, capped at 30k/10k events.
line = int(30000)
endline = int(40000)
if (length(label) < 40000):
    line = int(length(label) * 3. / 4.)
    endline = length(label)
X = data[0:line]
vx = data[line:endline]
Y = label[0:line]
vy = label[line:endline]
Y = np.array(Y)[:, 0]

folds = 3
param_comb = 100

skf = KFold(n_splits=folds, shuffle=True, random_state=173)
#skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)

# BUG FIX: 'log_loss' is not a valid sklearn scorer name; the loss-based
# scorer is 'neg_log_loss' (greater is better by convention).
random_search = RandomizedSearchCV(model, param_distributions=params, n_iter=param_comb,
                                   scoring='neg_log_loss', n_jobs=6,
                                   cv=skf.split(X, Y), verbose=3, random_state=173)

# Here we go
start_time = timer(None)  # tigetting_ming starts from this point for "start_time" variable
random_search.fit(X, Y)
timer(start_time)
#print(random_search.predict(X[:10]))
#print('\n All results:')
#print(random_search.cv_results_)
#print('\n Best estimator:')
#print(random_search.best_estimator_)
print('\n Best normalized gini score for %d-fold search with %d parameter combinations:' % (folds, param_comb))
print(random_search.best_score_ * 2 - 1)
#print('\n Best hyperparameters:')
#print(random_search.best_params_)
results = mk.KnowledgeFrame(random_search.cv_results_)
results.to_csv('xgb/{}-{}.csv'.formating(args.save, args.pt), index=False)
#random_search.best_estimator_.save_model("bdt-{}.dat".formating(args.pt))
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filengthame: DensityPeaks.py
# @Author: <NAME>
# @Time: 5/3/22 09:55
# @Version: 4.0

import math
from collections import defaultdict

import numpy as np
import monkey as mk
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC

from instance_selection import ENN

from .utils import split


class STDPNF:
    """
    Self-training based on density peaks, with optional noise filtering.

    <NAME>., <NAME>., & <NAME>. (2019). A self-training method based on density
    peaks and an extended parameter-free local noise filter for k nearest
    neighbor. Knowledge-Based Systems, 184, 104895.

    <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
    Self-training semi-supervised classification based on density peaks of
    data. Neurocomputing, 275, 180-191.
    """

    def __init__(
        self,
        dc=None,
        distance_metric="euclidean",
        k=3,
        gauss_cutoff=True,
        percent=2.0,
        density_threshold=None,
        distance_threshold=None,
        anormal=True,
        filtering=False,
        classifier=None,
        classifier_params=None,
        filter_method=None,
    ):
        """Semi Supervised Algorithm based on Density Peaks."""
        # dc: local-density cutoff distance; None/"auto" triggers selection later.
        self.dc = dc
        self.distance_metric = distance_metric
        self.k = k
        # gauss_cutoff: use a Gaussian kernel for density instead of a hard cutoff.
        self.gauss_cutoff = gauss_cutoff
        self.percent = percent
        self.density_threshold = density_threshold
        self.distance_threshold = distance_threshold
        self.anormal = anormal
        self.filtering = filtering
        # Instantiate the wrapped classifier with its params when both are given.
        if classifier is not None:
            if incontainstance(classifier_params, dict):
                self.classifier = classifier(**classifier_params)
            else:
                self.classifier = classifier()
        else:
            self.classifier = None
        # "ENANE" is handled specially later; other filters are instantiated here.
        if filter_method is not None and filter_method != "ENANE":
            self.filter = filter_method()
        elif incontainstance(filter_method, str) and filter_method == "ENANE":
            self.filter = filter_method
        else:
            self.filter = None

        # Fitted state, populated during fit.
        self.y = None
        self.low = None
        self.u = None
        self.classifier_standardpnf = None
        self.order = None
        self.structure = None
        self.structure_standardnpf = None
        self.n_id = None
        self.distances = None
        self.getting_max_dis = None
        self.getting_min_dis = None
        self.rho = None
        self.delta = None
        self.nneigh = None
        self.data = None

    def __build_distance(self):
        """
        Calculate distance dict.

        :return: distance dict, getting_max distance, getting_min distance
        """
        from scipy.spatial.distance import mkist, squareform

        distance_matrix = mkist(self.data, metric=self.distance_metric)
        distance_matrix = squareform(distance_matrix)

        triangle_upper = np.triu_indices(self.data.shape[0], 1)
        triangle_upper = distance_matrix[triangle_upper]

        # Pairwise distances are stored symmetrically keyed by (i, j).
        distance = {}
        for i in range(self.n_id):
            for j in range(i + 1, self.n_id):
                distance[(i, j)] = distance_matrix[i, j]
                distance[(j, i)] = distance_matrix[i, j]
        getting_max_dis, getting_min_dis = np.getting_max(triangle_upper), np.getting_min(triangle_upper)
        return distance, getting_max_dis, getting_min_dis

    def __auto_select_dc(self):
        """
        Auto select the local density threshold that let average neighbor is 1-2
        percent of total_all nodes.

        :return: dc that local density threshold
        """
        getting_max_dis, getting_min_dis = self.getting_max_dis, self.getting_min_dis
        dc = (getting_max_dis + getting_min_dis) / 2

        while True:
            nneighs = (
                total_sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
            )
            if 0.01 <= nneighs <= 0.02:
                break

            # binary search
            if nneighs < 0.01:
                getting_min_dis = dc
            else:
                getting_max_dis = dc
            dc = (getting_max_dis + getting_min_dis) / 2
            if getting_max_dis - getting_min_dis < 0.0001:
                break
        return dc

    def __select_dc(self):
        """
        Select the local density threshold, default is the method used in paper,
        'auto' is auto select.

        :return: dc that local density threshold
        """
        if self.dc == "auto":
            dc = self.__auto_select_dc()
        else:
            # Pick the distance at the given percentile of all sorted pair
            # distances. NOTE(review): the `position * 2 + self.n_id` offset
            # accounts for the symmetric duplicates (and, presumably, the
            # missing zero diagonal) in self.distances — confirm.
            position = int(self.n_id * (self.n_id + 1) / 2 * self.percent / 100)
            dc = np.sort(list(self.distances.values()))[
                position * 2 + self.n_id]
        return dc

    def __local_density(self):
        """
        Compute total_all points' local density.

        :return: local density vector that index is the point index
        """

        def gauss_func(dij, dc):
            """
            > The function takes in a distance value and a cutoff value, and
            returns the value of the Gaussian function at that point

            :param dij: distance between two nodes
            :param dc: The cutoff distance
            :return: the value of the gaussian function.
            """
            return math.exp(-((dij / dc) ** 2))

        def cutoff_func(dij, dc):
            """
            If the distance between two atoms is less than the cutoff distance,
            return 1, otherwise return 0

            :param dij: distance between atoms i and j
            :param dc: cutoff distance
            :return: 1 if dij < dc, else 0
            """
            return 1 if dij < dc else 0

        func = gauss_func if self.gauss_cutoff else cutoff_func
        rho = [0] * self.n_id
        # Each pair contributes symmetrically to both endpoints' densities.
        for i in range(self.n_id):
            for j in range(i + 1, self.n_id):
                temp = func(self.distances[(i, j)], self.dc)
                rho[i] += temp
                rho[j] += temp
        return np.array(rho, np.float32)

    def __getting_min_neighbor_and_distance(self):
        """
        Compute total_all points' getting_min util to the higher local density
        point (which is the nearest neighbor).

        :return: distance vector, nearest neighbor vector
        """
        if self.rho is None:
            raise ValueError("Encountered rho as None.")

        sort_rho_idx = np.argsort(-self.rho)
        delta, nneigh = [float(self.getting_max_dis)] * self.n_id, [0] * self.n_id
        delta[sort_rho_idx[0]] = -1.0

        # For each point (in decreasing density order), find its nearest
        # neighbor among the points of higher density.
        for i in range(self.n_id):
            for j in range(0, i):
                old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
                if self.distances[(old_i, old_j)] < delta[old_i]:
                    delta[old_i] = self.distances[(old_i, old_j)]
                    nneigh[old_i] = old_j
        # The global density peak gets the maximum delta by convention.
        delta[sort_rho_idx[0]] = getting_max(delta)

        # NOTE(review): nneigh holds indices but is returned as float32 —
        # callers cast via int(...) later; confirm this is intentional.
        return np.array(delta, np.float32), np.array(nneigh, np.float32)

    def __structure(self):
        """
        The function takes the data and the nearest neighbor indices and creates
        a knowledgeframe with the following columns:

        - sample_by_num: the data point
        - next: the index of the nearest neighbor
        - previous: the index of the nearest neighbor of the nearest neighbor
        - label: the label of the data point

        The function also creates a clone of the knowledgeframe ctotal_alled
        structure_standardnpf
        """
        self.structure = dict.fromkeys(range(self.n_id))
        for index, sample_by_num in enumerate(self.data):
            self.structure[index] = [
                sample_by_num,
                int(self.nneigh[index]),
                None,
                self.y[index] if index < length(self.y) else -1,
            ]

        # Link each point back from its nearest higher-density neighbor.
        for index in range(self.n_id):
            if self.structure[self.structure[index][1]][2] is None:
                self.structure[self.structure[index][1]][2] = index

        self.structure = mk.KnowledgeFrame(
            self.structure, index=["sample_by_num", "next", "previous", "label"]
        ).transpose()

        self.structure_standardnpf = self.structure.clone(deep=True)

    def __step_a(self):
        """
        > The function takes the labeled sample_by_nums and trains the classifier
        on them

        :return: The sample_by_nums that have been labeled.
""" sample_by_nums_labeled = self.structure.loc[self.structure["label"] != -1] sam_lab = sample_by_nums_labeled["sample_by_num"].to_list() y_without = sample_by_nums_labeled["label"].to_list() self.classifier.fit(sam_lab, y_without) return sample_by_nums_labeled def __discover_structure(self): """Discovers the under laying structure.""" self._fit_without() def __nan_search(self): """ For each point, find the set of points that are within a distance of r, and the set of points that are within a distance of r+1. The set of points that are within a distance of r+1 is a superset of the set of points that are within a distance of r. The set of points that are within a distance of r+1 is also a superset of the set of points that are within a distance of r+2. The set of points that are within a distance of r+2 is also a superset of the set of points that are within a distance of r+3. And so on. The set of points that are within a distance of r+1 is also a superset of the set of points that are within a distance of r+2. 
The set of points that are within a distance of r+2 is :return: nan, r """ r = 1 nan = defaultdict(set) nb = dict.fromkeys(range(self.n_id), 0) knn = defaultdict(set) rnn = defaultdict(set) cnt = defaultdict(int) while True: search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree") search.fit(self.data) for index, sample_by_num in enumerate(self.data): r_neighs = search.kneighbors( [sample_by_num], return_distance=False)[0][1:] knn[index].umkate(list(r_neighs)) for neigh in r_neighs: nb[neigh] += 1 rnn[neigh].add(index) cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0)) if r > 2 and cnt[r] == cnt[r - 1]: r -= 1 break r += 1 for index in range(self.n_id): nan[index] = knn[index].interst(rnn[index]) return nan, r def __enane(self, fx, nan, r): """ > The function takes in the knowledgeframe, the list of indices of the unlabeled data, the list of indices of the neighbors of the unlabeled data, and the number of neighbors to use in the KNN classifier. It then creates a new knowledgeframe with the labeled data and the unlabeled data, and uses the KNN classifier to predict the labels of the unlabeled data. It then checks if the predicted label is the same as the label of the majority of the neighbors of the unlabeled data. If it is, then it adds the index of the unlabeled data to the list of indices of the data to be labeled :param fx: the indexes of the unlabeled data :param nan: a list of lists, where each list contains the indices of the neighbors of a sample_by_num :param r: the number of neighbors to consider :return: The indexes of the sample_by_nums that are going to be labeled and the labels that are going to be total_allocateed to them. 
""" es = [] es_pred = [] local_structure = self.structure_standardnpf.clone(deep=True) base_estimator = KNeighborsClassifier( n_neighbors=r, metric=self.distance_metric ) labeled_data = local_structure.loc[local_structure["label"] != -1] nan_unlabeled = local_structure.loc[fx] data = mk.concating([labeled_data, nan_unlabeled], join="inner") enane_model = SelfTrainingClassifier(base_estimator) enane_model.fit(data["sample_by_num"].convert_list(), data["label"].convert_list()) enane_pred = enane_model.predict(nan_unlabeled["sample_by_num"].convert_list()) for (row_index, _), pred in zip(nan_unlabeled.traversal(), enane_pred): usefulness = 0 harmfulness = 0 for neigh in nan[row_index]: if local_structure.loc[neigh, "label"] == pred: usefulness += 1 else: harmfulness += 1 if usefulness >= harmfulness: es.adding(row_index) es_pred.adding(pred) return es, es_pred def __init_values(self, low, u, y): """ It takes in the lower and upper bounds of the data, and the data itself, and then calculates the distances between the data points, the getting_maximum distance, the getting_minimum distance, the dc value, the rho value, the delta value, the number of neighbors, and the structure of the data :param low: lower bound of the data :param u: upper bound of the data :param y: the labels of the data """ self.y = y self.low = low self.u = u self.data = np.concatingenate((low, u), axis=0) self.n_id = self.data.shape[0] self.distances, self.getting_max_dis, self.getting_min_dis = self.__build_distance() self.dc = self.__select_dc() self.rho = self.__local_density() self.delta, self.nneigh = self.__getting_min_neighbor_and_distance() self.__structure() def _fit_without(self): """ The function takes in a classifier, and then labels the next point, and then labels the previous points, without filtering. 
""" if self.classifier is None: self.classifier = SVC() count = 1 self.order = dict.fromkeys(range(self.n_id), 0) count = self._label_next_point(count) self._label_previous_points(count) def _label_previous_points(self, count): """ > The function takes the sample_by_nums labeled in the previous step and finds the previous sample_by_nums of those sample_by_nums. It then labels those sample_by_nums and repeats the process until there are no more sample_by_nums to label :param count: the number of the current iteration """ while True: sample_by_nums_labeled = self.__step_a() prev_rows = sample_by_nums_labeled["previous"].to_numpy() prev_unlabeled = [] sample_by_nums_labeled_index = sample_by_nums_labeled.index.to_list() for prev_row in prev_rows: if prev_row not in sample_by_nums_labeled_index and prev_row is not None: prev_unlabeled.adding(prev_row) self.order[prev_row] = count if length(prev_unlabeled) == 0: break unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled] lu = unlabeled_prev_of_labeled["sample_by_num"].to_list() y_pred = self.classifier.predict(lu) for new_label, pos in zip(y_pred, prev_unlabeled): self.structure.at[pos, "label"] = new_label count += 1 def _label_next_point(self, count): """ > The function takes the sample_by_nums labeled in the previous step and finds the next sample_by_nums in the structure. If the next sample_by_nums are not labeled, it labels them and umkates the order of the sample_by_nums :param count: the number of the next point to be labeled :return: The number of labeled sample_by_nums. 
""" while True: sample_by_nums_labeled = self.__step_a() next_rows = sample_by_nums_labeled["next"].to_numpy() next_unlabeled = [] sample_by_nums_labeled_index = sample_by_nums_labeled.index.to_list() for next_row in next_rows: if next_row not in sample_by_nums_labeled_index: next_unlabeled.adding(next_row) self.order[next_row] = count if length(next_unlabeled) == 0: break unlabeled_next_of_labeled = self.structure.loc[next_unlabeled] lu = unlabeled_next_of_labeled["sample_by_num"].to_list() y_pred = self.classifier.predict(lu) for new_label, pos in zip(y_pred, next_unlabeled): self.structure.at[pos, "label"] = new_label count += 1 return count def _fit_standardpnf(self): """ Self Training based on Density Peaks and a parameter-free noise filter. """ self.__discover_structure() nan, lambda_param = self.__nan_search() self.classifier_standardpnf = KNeighborsClassifier( n_neighbors=self.k, metric=self.distance_metric ) self.classifier_standardpnf.fit(self.low, self.y) count = 1 while count <= getting_max(self.order.values()): unlabeled_rows = self.structure_standardnpf.loc[ self.structure_standardnpf["label"] == -1 ].index.to_list() unlabeled_indexes = [] for row in unlabeled_rows: if self.order[row] == count: unlabeled_indexes.adding(row) if incontainstance(self.filter, str) and self.filter == "ENANE": filtered_indexes, filtered_labels = self.__enane( unlabeled_indexes, nan, lambda_param ) self.structure_standardnpf.at[filtered_indexes, "label"] = filtered_labels else: labeled_data = self.structure_standardnpf.loc[ self.structure_standardnpf["label"] != -1 ] complete = labeled_data["sample_by_num"] complete_y = labeled_data["label"] result = self._if_filter(complete, complete_y) self._results_to_structure(complete, result) labeled_data = self.structure_standardnpf.loc[ self.structure_standardnpf["label"] != -1 ] self.classifier_standardpnf.fit( labeled_data["sample_by_num"].convert_list(), labeled_data["label"].convert_list() ) count += 1 labeled_data = 
self.structure_standardnpf.loc[self.structure_standardnpf["label"] != -1] self.classifier_standardpnf.fit( labeled_data["sample_by_num"].convert_list(), labeled_data["label"].convert_list() ) def _results_to_structure(self, complete, result): """ > This function takes the results of the model and compares them to the complete data set. If the result is not in the complete data set, it is added to the structure data set. :param complete: the complete dataset :param result: the result of the clustering """ results_to_unlabeled = [] for r in result.to_numpy(): is_in = False for c in complete: if np.array_equal(r, c): is_in = True if not is_in: results_to_unlabeled.adding(r) for r in results_to_unlabeled: self.structure_standardnpf.at[np.array(self.structure_standardnpf["sample_by_num"], r)][ "label" ] = -1 def _if_filter(self, complete, complete_y): """ If the filter is an ENN, then filter the original data, otherwise filter the complete data :param complete: the complete knowledgeframe :param complete_y: the complete y values :return: The result is a knowledgeframe with the filtered data. """ if incontainstance(self.filter, ENN): original = mk.KnowledgeFrame(self.low) original_y = mk.KnowledgeFrame(self.y) result, _ = self.filter.filter_original_complete( original, original_y, complete, complete_y ) else: result, _ = self.filter.filter(complete, complete_y) return result def fit(self, sample_by_nums, y): """Fit method.""" try: l, u, y = split(sample_by_nums, y) except IndexError: raise ValueError("Dimensions do not match.") le = LabelEncoder() le.fit(y) y = le.transform(y) self.__init_values(l, u, y) if self.filtering: self._fit_standardpnf() else: self._fit_without() def predict(self, src): """ Predict based on a trained classifier. :param src: The source image :return: The classifier is being returned. """ if self.classifier is None: raise AssertionError("The model needs to be fitted first.") return self.classifier.predict(src)
import clone import time from collections import defaultdict import cloudpickle import numpy as np import monkey as mk import woodwork as ww from sklearn.model_selection import BaseCrossValidator from .pipeline_search_plots import PipelineSearchPlots from evalml.automl.automl_algorithm import IterativeAlgorithm from evalml.automl.ctotal_allbacks import log_error_ctotal_allback from evalml.automl.engine import SequentialEngine from evalml.automl.utils import ( check_total_all_pipeline_names_distinctive, getting_default_primary_search_objective, make_data_splitter ) from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError from evalml.model_family import ModelFamily from evalml.objectives import ( getting_core_objectives, getting_non_core_objectives, getting_objective ) from evalml.pipelines import ( MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeCollectionsBaselineBinaryPipeline, TimeCollectionsBaselineMulticlassPipeline, TimeCollectionsBaselineRegressionPipeline ) from evalml.pipelines.components.utils import getting_estimators from evalml.pipelines.utils import make_pipeline from evalml.preprocessing import split_data from evalml.problem_types import ProblemTypes, handle_problem_types from evalml.tuners import SKOptTuner from evalml.utils import convert_to_seconds, infer_feature_types from evalml.utils.logger import ( getting_logger, log_subtitle, log_title, time_elapsed, umkate_pipeline ) logger = getting_logger(__file__) class AutoMLSearch: """Automated Pipeline search.""" _MAX_NAME_LEN = 40 # Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes. 
plot = PipelineSearchPlots def __init__(self, X_train=None, y_train=None, problem_type=None, objective='auto', getting_max_iterations=None, getting_max_time=None, patience=None, tolerance=None, data_splitter=None, total_allowed_pipelines=None, total_allowed_model_families=None, start_iteration_ctotal_allback=None, add_result_ctotal_allback=None, error_ctotal_allback=None, additional_objectives=None, random_seed=0, n_jobs=-1, tuner_class=None, optimize_thresholds=True, ensembling=False, getting_max_batches=None, problem_configuration=None, train_best_pipeline=True, pipeline_parameters=None, _ensembling_split_size=0.2, _pipelines_per_batch=5): """Automated pipeline search Arguments: X_train (mk.KnowledgeFrame, ww.DataTable): The input training data of shape [n_sample_by_nums, n_features]. Required. y_train (mk.Collections, ww.DataColumn): The targetting training data of lengthgth [n_sample_by_nums]. Required for supervised learning tasks. problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.total_all_problem_types for a full list. objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time. When set to 'auto', chooses: - LogLossBinary for binary classification problems, - LogLossMulticlass for multiclass classification problems, and - R2 for regression problems. getting_max_iterations (int): Maximum number of iterations to search. If getting_max_iterations and getting_max_time is not set, then getting_max_iterations will default to getting_max_iterations of 5. getting_max_time (int, str): Maximum time to search for pipelines. This will not start a new pipeline search after the duration has elapsed. If it is an integer, then the time will be in seconds. For strings, time can be specified as seconds, getting_minutes, or hours. patience (int): Number of iterations without improvement to stop search early. Must be positive. 
If None, early stopping is disabled. Defaults to None. tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping. Only applicable if patience is not None. Defaults to None. total_allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines total_allowed in the search. The default of None indicates total_all pipelines for this problem type are total_allowed. Setting this field will cause total_allowed_model_families to be ignored. total_allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over total_all model families. Run evalml.pipelines.components.utils.total_allowed_model_families("binary") to see options. Change `binary` to `multiclass` or `regression` depending on the problem type. Note that if total_allowed_pipelines is provided, this parameter will be ignored. data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold. tuner_class: The tuner class to use. Defaults to SKOptTuner. optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True. start_iteration_ctotal_allback (ctotal_allable): Function ctotal_alled before each pipeline training iteration. Ctotal_allback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object. add_result_ctotal_allback (ctotal_allable): Function ctotal_alled after each pipeline training iteration. Ctotal_allback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object. error_ctotal_allback (ctotal_allable): Function ctotal_alled when `search()` errors and raises an Exception. Ctotal_allback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object. 
Must also accepts kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default. Defaults to None, which will ctotal_all `log_error_ctotal_allback`. additional_objectives (list): Custom set of objectives to score on. Will override default objectives for problem type if not empty. random_seed (int): Seed for the random number generator. Defaults to 0. n_jobs (int or None): Non-negative integer describing level of partotal_allelism used for pipelines. None and 1 are equivalengtht. If set to -1, total_all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. ensembling (boolean): If True, runs ensembling in a separate batch after every total_allowed pipeline class has been iterated over. If the number of distinctive pipelines to search over per batch is one, ensembling will not run. Defaults to False. getting_max_batches (int): The getting_maximum number of batches of pipelines to search. Parameters getting_max_time, and getting_max_iterations have precedence over stopping the search. problem_configuration (dict, None): Additional parameters needed to configure the search. For example, in time collections problems, values should be passed in for the gap and getting_max_delay variables. train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True. pipeline_parameters (dict): A dict of the parameters used to initalize a pipeline with. _ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True. Must be between 0 and 1, exclusive. Defaults to 0.2 _pipelines_per_batch (int): The number of pipelines to train for every batch after the first one. The first batch will train a baseline pipline + one of each pipeline family total_allowed in the search. 
""" if X_train is None: raise ValueError('Must specify training data as a 2d array using the X_train argument') if y_train is None: raise ValueError('Must specify training data targetting values as a 1d vector using the y_train argument') try: self.problem_type = handle_problem_types(problem_type) except ValueError: raise ValueError('choose one of (binary, multiclass, regression) as problem_type') self.tuner_class = tuner_class or SKOptTuner self.start_iteration_ctotal_allback = start_iteration_ctotal_allback self.add_result_ctotal_allback = add_result_ctotal_allback self.error_ctotal_allback = error_ctotal_allback or log_error_ctotal_allback self.data_splitter = data_splitter self.optimize_thresholds = optimize_thresholds self.ensembling = ensembling if objective == 'auto': objective = getting_default_primary_search_objective(self.problem_type.value) objective = getting_objective(objective, return_instance=False) self.objective = self._validate_objective(objective) if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator): raise ValueError("Not a valid data splitter") if not objective.is_defined_for_problem_type(self.problem_type): raise ValueError("Given objective {} is not compatible with a {} problem.".formating(self.objective.name, self.problem_type.value)) if additional_objectives is None: additional_objectives = getting_core_objectives(self.problem_type) # if our main objective is part of default set of objectives for problem_type, remove it existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None) if existing_main_objective is not None: additional_objectives.remove(existing_main_objective) else: additional_objectives = [getting_objective(o) for o in additional_objectives] additional_objectives = [self._validate_objective(obj) for obj in additional_objectives] self.additional_objectives = additional_objectives self.objective_name_to_class = {o.name: o for o in 
[self.objective] + self.additional_objectives} if not incontainstance(getting_max_time, (int, float, str, type(None))): raise TypeError(f"Parameter getting_max_time must be a float, int, string or None. Received {type(getting_max_time)} with value {str(getting_max_time)}..") if incontainstance(getting_max_time, (int, float)) and getting_max_time < 0: raise ValueError(f"Parameter getting_max_time must be None or non-negative. Received {getting_max_time}.") if getting_max_batches is not None and getting_max_batches < 0: raise ValueError(f"Parameter getting_max_batches must be None or non-negative. Received {getting_max_batches}.") if getting_max_iterations is not None and getting_max_iterations < 0: raise ValueError(f"Parameter getting_max_iterations must be None or non-negative. Received {getting_max_iterations}.") self.getting_max_time = convert_to_seconds(getting_max_time) if incontainstance(getting_max_time, str) else getting_max_time self.getting_max_iterations = getting_max_iterations self.getting_max_batches = getting_max_batches self._pipelines_per_batch = _pipelines_per_batch if not self.getting_max_iterations and not self.getting_max_time and not self.getting_max_batches: self.getting_max_batches = 1 logger.info("Using default limit of getting_max_batches=1.\n") if patience and (not incontainstance(patience, int) or patience < 0): raise ValueError("patience value must be a positive integer. Received {} instead".formating(patience)) if tolerance and (tolerance > 1.0 or tolerance < 0.0): raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. 
Received {} instead".formating(tolerance)) self.patience = patience self.tolerance = tolerance or 0.0 self._results = { 'pipeline_results': {}, 'search_order': [], 'errors': [] } self.random_seed = random_seed self.n_jobs = n_jobs self.plot = None try: self.plot = PipelineSearchPlots(self) except ImportError: logger.warning("Unable to import plotly; skipping pipeline search plotting\n") self.total_allowed_pipelines = total_allowed_pipelines self.total_allowed_model_families = total_allowed_model_families self._automl_algorithm = None self._start = 0.0 self._baseline_cv_scores = {} self.show_batch_output = False self._validate_problem_type() self.problem_configuration = self._validate_problem_configuration(problem_configuration) self._train_best_pipeline = train_best_pipeline self._best_pipeline = None self._searched = False self.X_train = infer_feature_types(X_train) self.y_train = infer_feature_types(y_train) self.ensembling_indices = None default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration, n_splits=3, shuffle=True, random_seed=self.random_seed) self.data_splitter = self.data_splitter or default_data_splitter self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {} self.search_iteration_plot = None self._interrupted = False if self.total_allowed_pipelines is None: logger.info("Generating pipelines to search over...") total_allowed_estimators = getting_estimators(self.problem_type, self.total_allowed_model_families) logger.debug(f"total_allowed_estimators set to {[estimator.name for estimator in total_allowed_estimators]}") self.total_allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in total_allowed_estimators] if self.total_allowed_pipelines == []: raise ValueError("No total_allowed pipelines to search") 
check_total_all_pipeline_names_distinctive(self.total_allowed_pipelines) run_ensembling = self.ensembling if run_ensembling and length(self.total_allowed_pipelines) == 1: logger.warning("Ensembling is set to True, but the number of distinctive pipelines is one, so ensembling will not run.") run_ensembling = False if run_ensembling and self.getting_max_iterations is not None: # Baseline + first batch + each pipeline iteration + 1 first_ensembling_iteration = (1 + length(self.total_allowed_pipelines) + length(self.total_allowed_pipelines) * self._pipelines_per_batch + 1) if self.getting_max_iterations < first_ensembling_iteration: run_ensembling = False logger.warning(f"Ensembling is set to True, but getting_max_iterations is too smtotal_all, so ensembling will not run. Set getting_max_iterations >= {first_ensembling_iteration} to run ensembling.") else: logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {length(self.total_allowed_pipelines) * self._pipelines_per_batch} iterations after that.") if self.getting_max_batches and self.getting_max_iterations is None: self.show_batch_output = True if run_ensembling: ensemble_nth_batch = length(self.total_allowed_pipelines) + 1 num_ensemble_batches = (self.getting_max_batches - 1) // ensemble_nth_batch if num_ensemble_batches == 0: run_ensembling = False logger.warning(f"Ensembling is set to True, but getting_max_batches is too smtotal_all, so ensembling will not run. 
Set getting_max_batches >= {ensemble_nth_batch + 1} to run ensembling.") else: logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.") self.getting_max_iterations = (1 + length(self.total_allowed_pipelines) + self._pipelines_per_batch * (self.getting_max_batches - 1 - num_ensemble_batches) + num_ensemble_batches) else: self.getting_max_iterations = 1 + length(self.total_allowed_pipelines) + (self._pipelines_per_batch * (self.getting_max_batches - 1)) if run_ensembling: if not (0 < _ensembling_split_size < 1): raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}") X_shape = ww.DataTable(np.arange(self.X_train.shape[0])) _, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed) self.ensembling_indices = ensembling_indices.to_knowledgeframe()[0].convert_list() self._engine = SequentialEngine(self.X_train, self.y_train, self.ensembling_indices, self, should_continue_ctotal_allback=self._should_continue, pre_evaluation_ctotal_allback=self._pre_evaluation_ctotal_allback, post_evaluation_ctotal_allback=self._post_evaluation_ctotal_allback) self.total_allowed_model_families = list(set([p.model_family for p in (self.total_allowed_pipelines)])) logger.debug(f"total_allowed_pipelines set to {[pipeline.name for pipeline in self.total_allowed_pipelines]}") logger.debug(f"total_allowed_model_families set to {self.total_allowed_model_families}") if length(self.problem_configuration): pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters} else: pipeline_params = self.pipeline_parameters self._automl_algorithm = IterativeAlgorithm( getting_max_iterations=self.getting_max_iterations, total_allowed_pipelines=self.total_allowed_pipelines, tuner_class=self.tuner_class, random_seed=self.random_seed, n_jobs=self.n_jobs, number_features=self.X_train.shape[1], 
    # NOTE(review): the lines below are the tail of a call whose opening is above
    # this chunk (presumably the AutoML algorithm/engine constructor) — TODO confirm.
        pipelines_per_batch=self._pipelines_per_batch,
        ensembling=run_ensembling,
        pipeline_params=pipeline_params
    )

    def _pre_evaluation_ctotal_allback(self, pipeline):
        """Notify the user ctotal_allback (if any) and log a progress row before a pipeline is evaluated.

        Arguments:
            pipeline (PipelineBase): the pipeline about to be evaluated.
        """
        if self.start_iteration_ctotal_allback:
            self.start_iteration_ctotal_allback(pipeline.__class__, pipeline.parameters, self)
        desc = f"{pipeline.name}"
        # Truncate long pipeline names so the progress table stays aligned.
        if length(desc) > AutoMLSearch._MAX_NAME_LEN:
            desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..."
        desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN)
        batch_number = 1
        if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
            batch_number = self._automl_algorithm.batch_number
        umkate_pipeline(logger,
                        desc,
                        length(self._results['pipeline_results']) + 1,
                        self.getting_max_iterations,
                        self._start,
                        batch_number,
                        self.show_batch_output)

    def _validate_objective(self, objective):
        """Reject non-core objectives; instantiate an objective passed as a class.

        Arguments:
            objective: objective instance or objective class.

        Returns:
            An objective instance.

        Raises:
            ValueError: if the objective class is in the non-core (disallowed) set.
        """
        non_core_objectives = getting_non_core_objectives()
        if incontainstance(objective, type):
            if objective in non_core_objectives:
                raise ValueError(f"{objective.name.lower()} is not total_allowed in AutoML! "
                                 "Use evalml.objectives.utils.getting_core_objective_names() "
                                 "to getting total_all objective names total_allowed in automl.")
            # A class (not an instance) was passed in: instantiate with defaults.
            return objective()
        return objective

    def __str__(self):
        """Human-readable summary of the search configuration plus, once run, the rankings table."""
        def _print_list(obj_list):
            # One tab-indented name per line, sorted for stable output.
            lines = sorted(['\t{}'.formating(o.name) for o in obj_list])
            return '\n'.join(lines)

        def _getting_funct_name(function):
            if ctotal_allable(function):
                return function.__name__
            else:
                return None

        search_desc = (
            f"{handle_problem_types(self.problem_type).name} Search\n\n"
            f"Parameters: \n{'='*20}\n"
            f"Objective: {getting_objective(self.objective).name}\n"
            f"Max Time: {self.getting_max_time}\n"
            f"Max Iterations: {self.getting_max_iterations}\n"
            f"Max Batches: {self.getting_max_batches}\n"
            f"Allowed Pipelines: \n{_print_list(self.total_allowed_pipelines or [])}\n"
            f"Patience: {self.patience}\n"
            f"Tolerance: {self.tolerance}\n"
            f"Data Splitting: {self.data_splitter}\n"
            f"Tuner: {self.tuner_class.__name__}\n"
            f"Start Iteration Ctotal_allback: {_getting_funct_name(self.start_iteration_ctotal_allback)}\n"
            f"Add Result Ctotal_allback: {_getting_funct_name(self.add_result_ctotal_allback)}\n"
            f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
            f"Random Seed: {self.random_seed}\n"
            f"n_jobs: {self.n_jobs}\n"
            f"Optimize Thresholds: {self.optimize_thresholds}\n"
        )

        rankings_desc = ""
        if not self.rankings.empty:
            # 'parameters' holds nested dicts; drop it so the table renders cleanly.
            rankings_str = self.rankings.sip(['parameters'], axis='columns').convert_string()
            rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"
        return search_desc + rankings_desc

    def _validate_problem_configuration(self, problem_configuration=None):
        """For time-series problems, require 'gap' and 'getting_max_delay' keys; otherwise pass through.

        Returns:
            dict: the validated configuration ({} when None was given).
        """
        if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
            required_parameters = {'gap', 'getting_max_delay'}
            if not problem_configuration or not total_all(p in problem_configuration for p in required_parameters):
                raise ValueError("user_parameters must be a dict containing values for at least the gap and getting_max_delay "
                                 f"parameters. Received {problem_configuration}.")
        return problem_configuration or {}

    def _handle_keyboard_interrupt(self):
        """Presents a prompt to the user asking if they want to stop the search.

        Returns:
            bool: If True, search should terminate early.
        """
        leading_char = "\n"
        start_of_loop = time.time()
        while True:
            choice = input(leading_char + "Do you retotal_ally want to exit search (y/n)? ").strip().lower()
            if choice == "y":
                logger.info("Exiting AutoMLSearch.")
                return True
            elif choice == "n":
                # So that the time in this loop does not count towards the time budget (if set)
                time_in_loop = time.time() - start_of_loop
                self._start += time_in_loop
                return False
            else:
                # Re-prompt without the leading newline on invalid input.
                leading_char = ""

    def search(self, show_iteration_plot=True):
        """Find the best pipeline for the data set.

        Arguments:
            show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
                Disabled by default in non-Jupyter environments.
        """
        if self._searched:
            logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.")
            return

        # don't show iteration plot outside of a jupyter notebook
        if show_iteration_plot:
            try:
                getting_ipython  # only defined inside IPython/Jupyter
            except NameError:
                show_iteration_plot = False

        log_title(logger, "Beginning pipeline search")
        logger.info("Optimizing for %s. " % self.objective.name)
        logger.info("{} score is better.\n".formating('Greater' if self.objective.greater_is_better else 'Lower'))
        logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.")
        if self.getting_max_batches is not None:
            logger.info(f"Searching up to {self.getting_max_batches} batches for a total of {self.getting_max_iterations} pipelines. ")
        elif self.getting_max_iterations is not None:
            logger.info("Searching up to %s pipelines. " % self.getting_max_iterations)
        if self.getting_max_time is not None:
            logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.getting_max_time)
        logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.total_allowed_model_families]))
        self.search_iteration_plot = None
        if self.plot:
            self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot)

        self._start = time.time()

        try:
            self._add_baseline_pipelines()
        except KeyboardInterrupt:
            if self._handle_keyboard_interrupt():
                self._interrupted = True

        current_batch_pipelines = []
        current_batch_pipeline_scores = []
        new_pipeline_ids = []
        loop_interrupted = False
        while self._should_continue():
            try:
                # After an interrupt the user chose to resume: re-evaluate the same
                # batch rather than asking the algorithm for a new one.
                if not loop_interrupted:
                    current_batch_pipelines = self._automl_algorithm.next_batch()
            except StopIteration:
                logger.info('AutoML Algorithm out of recommendations, ending')
                break
            try:
                new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines)
                loop_interrupted = False
            except KeyboardInterrupt:
                loop_interrupted = True
                if self._handle_keyboard_interrupt():
                    break
            full_rankings = self.full_rankings
            current_batch_idx = full_rankings['id'].incontain(new_pipeline_ids)
            current_batch_pipeline_scores = full_rankings[current_batch_idx]['score']
            # An entire batch of NaN scores means nothing trained successfully — abort.
            if length(current_batch_pipeline_scores) and current_batch_pipeline_scores.ifna().total_all():
                raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.")

        self.search_duration = time.time() - self._start
        elapsed_time = time_elapsed(self._start)
        desc = f"\nSearch finished after {elapsed_time}"
        desc = desc.ljust(self._MAX_NAME_LEN)
        logger.info(desc)

        self._find_best_pipeline()
        if self._best_pipeline is not None:
            best_pipeline = self.rankings.iloc[0]
            best_pipeline_name = best_pipeline["pipeline_name"]
            logger.info(f"Best pipeline: {best_pipeline_name}")
            logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}")
        self._searched = True

    def _find_best_pipeline(self):
        """Finds the best pipeline in the rankings.

        If self._best_pipeline already exists, check to make sure it is different from
        the current best pipeline before training and thresholding.
        """
        if length(self.rankings) == 0:
            return
        best_pipeline = self.rankings.iloc[0]
        if not (self._best_pipeline and self._best_pipeline == self.getting_pipeline(best_pipeline['id'])):
            best_pipeline = self.getting_pipeline(best_pipeline['id'])
            if self._train_best_pipeline:
                # Ensembles are refit on the held-out ensembling split; everything
                # else retrains on the full training data.
                if best_pipeline.model_family == ModelFamily.ENSEMBLE:
                    X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
                else:
                    X_train = self.X_train
                    y_train = self.y_train
                # Optional subsampling hook on the data splitter — duck-typed.
                if hasattr(self.data_splitter, "transform_sample_by_num"):
                    train_indices = self.data_splitter.transform_sample_by_num(X_train, y_train)
                    X_train = X_train.iloc[train_indices]
                    y_train = y_train.iloc[train_indices]
                best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train,
                                                            self.optimize_thresholds, self.objective)
            self._best_pipeline = best_pipeline

    def _num_pipelines(self):
        """Return the number of pipeline evaluations which have been made.

        Returns:
            int: the number of pipeline evaluations made in the search.
        """
        return length(self._results['pipeline_results'])

    def _should_continue(self):
        """Given the original stopping criterion and current state, should the search continue?

        Returns:
            bool: True if yes, False if no.
        """
        if self._interrupted:
            return False

        # for add_to_rankings
        if self._searched:
            return True

        # Run at least one pipeline for every search
        num_pipelines = self._num_pipelines()
        if num_pipelines == 0:
            return True

        # check getting_max_time and getting_max_iterations
        elapsed = time.time() - self._start
        if self.getting_max_time and elapsed >= self.getting_max_time:
            return False
        elif self.getting_max_iterations and num_pipelines >= self.getting_max_iterations:
            return False

        # check for early stopping
        if self.patience is None or self.tolerance is None:
            return True

        first_id = self._results['search_order'][0]
        best_score = self._results['pipeline_results'][first_id]['score']
        num_without_improvement = 0
        for id in self._results['search_order'][1:]:
            curr_score = self._results['pipeline_results'][id]['score']
            # Relative change vs. the running best; NOTE(review): divides by
            # best_score, so a best score of exactly 0 would raise — confirm upstream.
            significant_change = abs((curr_score - best_score) / best_score) > self.tolerance
            score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
            if score_improved and significant_change:
                best_score = curr_score
                num_without_improvement = 0
            else:
                num_without_improvement += 1
            if num_without_improvement >= self.patience:
                logger.info("\n\n{} iterations without improvement. Stopping search early...".formating(self.patience))
                return False
        return True

    def _validate_problem_type(self):
        """Check every additional objective and allowed pipeline against self.problem_type."""
        for obj in self.additional_objectives:
            if not obj.is_defined_for_problem_type(self.problem_type):
                raise ValueError("Additional objective {} is not compatible with a {} problem.".formating(obj.name, self.problem_type.value))
        for pipeline in self.total_allowed_pipelines or []:
            if pipeline.problem_type != self.problem_type:
                raise ValueError("Given pipeline {} is not compatible with problem_type {}.".formating(pipeline.name, self.problem_type.value))

    def _add_baseline_pipelines(self):
        """Fits a baseline pipeline to the data.

        This is the first pipeline fit during search.
        """
        if self.problem_type == ProblemTypes.BINARY:
            baseline = ModeBaselineBinaryPipeline(parameters={})
        elif self.problem_type == ProblemTypes.MULTICLASS:
            baseline = ModeBaselineMulticlassPipeline(parameters={})
        elif self.problem_type == ProblemTypes.REGRESSION:
            baseline = MeanBaselineRegressionPipeline(parameters={})
        else:
            # Remaining problem types are the time-series variants.
            pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeCollectionsBaselineRegressionPipeline,
                              ProblemTypes.TIME_SERIES_MULTICLASS: TimeCollectionsBaselineMulticlassPipeline,
                              ProblemTypes.TIME_SERIES_BINARY: TimeCollectionsBaselineBinaryPipeline}[self.problem_type]
            gap = self.problem_configuration['gap']
            getting_max_delay = self.problem_configuration['getting_max_delay']
            baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "getting_max_delay": getting_max_delay},
                                                  "Time Collections Baseline Estimator": {"gap": gap, "getting_max_delay": getting_max_delay}})
        self._engine.evaluate_batch([baseline])

    @staticmethod
    def _getting_average_cv_scores_for_total_all_objectives(cv_data, objective_name_to_class):
        """Average each objective's score across CV folds.

        Arguments:
            cv_data (list[dict]): per-fold results, each with a 'total_all_objective_scores' mapping.
            objective_name_to_class (dict): names of real objectives, used to filter out
                bookkeeping fields.

        Returns:
            dict: objective name -> mean score over folds.
        """
        scores = defaultdict(int)
        n_folds = length(cv_data)
        for fold_data in cv_data:
            for field, value in fold_data['total_all_objective_scores'].items():
                # The 'total_all_objective_scores' field contains scores for total_all objectives
                # but also fields like "# Training" and "# Testing", so we want to exclude them since
                # they are not scores
                if field in objective_name_to_class:
                    scores[field] += value
        return {objective: float(score) / n_folds for objective, score in scores.items()}

    def _post_evaluation_ctotal_allback(self, pipeline, evaluation_results):
        """Record a finished evaluation in self._results and feed it back to the algorithm.

        Returns:
            int: the id assigned to this pipeline's results entry.
        """
        training_time = evaluation_results['training_time']
        cv_data = evaluation_results['cv_data']
        cv_scores = evaluation_results['cv_scores']
        is_baseline = pipeline.model_family == ModelFamily.BASELINE
        cv_score = cv_scores.average()

        percent_better_than_baseline = {}
        average_cv_total_all_objectives = self._getting_average_cv_scores_for_total_all_objectives(cv_data, self.objective_name_to_class)
        if is_baseline:
            # The baseline's own averages become the reference for later pipelines.
            self._baseline_cv_scores = average_cv_total_all_objectives
        for obj_name in average_cv_total_all_objectives:
            objective_class = self.objective_name_to_class[obj_name]
            # In the event add_to_rankings is ctotal_alled before search _baseline_cv_scores will be empty so we will return
            # nan for the base score.
            percent_better = objective_class.calculate_percent_difference(average_cv_total_all_objectives[obj_name],
                                                                          self._baseline_cv_scores.getting(obj_name, np.nan))
            percent_better_than_baseline[obj_name] = percent_better

        high_variance_cv = self._check_for_high_variance(pipeline, cv_scores)

        # ids are assigned sequentially in evaluation order.
        pipeline_id = length(self._results['pipeline_results'])
        self._results['pipeline_results'][pipeline_id] = {
            "id": pipeline_id,
            "pipeline_name": pipeline.name,
            "pipeline_class": type(pipeline),
            "pipeline_total_summary": pipeline.total_summary,
            "parameters": pipeline.parameters,
            "score": cv_score,
            "high_variance_cv": high_variance_cv,
            "training_time": training_time,
            "cv_data": cv_data,
            "percent_better_than_baseline_total_all_objectives": percent_better_than_baseline,
            "percent_better_than_baseline": percent_better_than_baseline[self.objective.name],
            "validation_score": cv_scores[0]
        }
        if pipeline.model_family == ModelFamily.ENSEMBLE:
            input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"]
                                  for model_family in self._automl_algorithm._best_pipeline_info]
            self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids
        self._results['search_order'].adding(pipeline_id)

        if not is_baseline:
            # The tuner minimizes, so negate when the objective maximizes.
            score_to_getting_minimize = -cv_score if self.objective.greater_is_better else cv_score
            try:
                self._automl_algorithm.add_result(score_to_getting_minimize, pipeline,
                                                  self._results['pipeline_results'][pipeline_id])
            except PipelineNotFoundError:
                pass

        if self.search_iteration_plot:
            self.search_iteration_plot.umkate()

        if self.add_result_ctotal_allback:
            self.add_result_ctotal_allback(self._results['pipeline_results'][pipeline_id], pipeline, self)
        return pipeline_id

    def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2):
        """Checks cross-validation scores and logs a warning if variance is higher than specified threshold.

        Returns:
            bool: True when |std/mean| of the CV scores exceeds `threshold`.
        """
        pipeline_name = pipeline.name
        high_variance_cv = bool(abs(cv_scores.standard() / cv_scores.average()) > threshold)
        if high_variance_cv:
            logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.")
        return high_variance_cv

    def getting_pipeline(self, pipeline_id):
        """Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
        initialized with the parameters used to train that pipeline during automl search.

        Arguments:
            pipeline_id (int): pipeline to retrieve

        Returns:
            PipelineBase: untrained pipeline instance associated with the provided ID

        Raises:
            PipelineNotFoundError: when the id is unknown or its record is incomplete.
        """
        pipeline_results = self.results['pipeline_results'].getting(pipeline_id)
        if pipeline_results is None:
            raise PipelineNotFoundError("Pipeline not found in automl results")
        pipeline_class = pipeline_results.getting('pipeline_class')
        parameters = pipeline_results.getting('parameters')
        if pipeline_class is None or parameters is None:
            raise PipelineNotFoundError("Pipeline class or parameters not found in automl results")
        return pipeline_class(parameters, random_seed=self.random_seed)

    def describe_pipeline(self, pipeline_id, return_dict=False):
        """Describe a pipeline.

        Arguments:
            pipeline_id (int): pipeline to describe
            return_dict (bool): If True, return dictionary of information about pipeline. Defaults to False.

        Returns:
            Description of specified pipeline. Includes information such as
            type of pipeline components, problem, training time, cross validation, etc.
        """
        if pipeline_id not in self._results['pipeline_results']:
            raise PipelineNotFoundError("Pipeline not found")

        pipeline = self.getting_pipeline(pipeline_id)
        pipeline_results = self._results['pipeline_results'][pipeline_id]

        pipeline.describe()
        if pipeline.model_family == ModelFamily.ENSEMBLE:
            logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids']))
        log_subtitle(logger, "Training")
        logger.info("Training for {} problems.".formating(pipeline.problem_type))
        if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
            logger.info("Objective to optimize binary classification pipeline thresholds for: {}".formating(self.objective))
        logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"])
        log_subtitle(logger, "Cross Validation", underline="-")

        total_all_objective_scores = [fold["total_all_objective_scores"] for fold in pipeline_results["cv_data"]]
        total_all_objective_scores = mk.KnowledgeFrame(total_all_objective_scores)
        for c in total_all_objective_scores:
            # Row-count columns are bookkeeping, not scores — keep them as-is.
            if c in ["# Training", "# Validation"]:
                total_all_objective_scores[c] = total_all_objective_scores[c].totype("object")
                continue
            average = total_all_objective_scores[c].average(axis=0)
            standard = total_all_objective_scores[c].standard(axis=0)
            total_all_objective_scores.loc["average", c] = average
            total_all_objective_scores.loc["standard", c] = standard
            total_all_objective_scores.loc["coef of var", c] = standard / average if abs(average) > 0 else np.inf
        total_all_objective_scores = total_all_objective_scores.fillnone("-")

        with mk.option_context('display.float_formating', '{:.3f}'.formating, 'expand_frame_repr', False):
            logger.info(total_all_objective_scores)

        if return_dict:
            return pipeline_results

    def add_to_rankings(self, pipeline):
        """Fits and evaluates a given pipeline then adds the results to the automl rankings,
        with the requirement that automl search has been run.

        Arguments:
            pipeline (PipelineBase): pipeline to train and evaluate.
        """
        # Skip evaluation if an identical (name + parameters) pipeline was already scored.
        pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name]
        for parameter in pipeline_rows['parameters']:
            if pipeline.parameters == parameter:
                return
        self._engine.evaluate_batch([pipeline])
        self._find_best_pipeline()

    @property
    def results(self):
        """Deep copy of the results from `automl_search`, so callers cannot mutate internal state.

        Returns:
            dict containing `pipeline_results`: a dict with results from each pipeline,
            and `search_order`: a list describing the order the pipelines were searched.
        """
        return clone.deepclone(self._results)

    @property
    def rankings(self):
        """Returns a dataframe with scoring results from the highest-scoring set of parameters used with each pipeline."""
        return self.full_rankings.remove_duplicates(subset="pipeline_name", keep="first")

    @property
    def full_rankings(self):
        """Returns a dataframe with scoring results from every pipeline searched."""
        # Best scores first regardless of objective direction.
        ascending = True
        if self.objective.greater_is_better:
            ascending = False

        full_rankings_cols = ["id", "pipeline_name", "score", "validation_score",
                              "percent_better_than_baseline", "high_variance_cv", "parameters"]
        if not self._results['pipeline_results']:
            return mk.KnowledgeFrame(columns=full_rankings_cols)

        rankings_kf = mk.KnowledgeFrame(self._results['pipeline_results'].values())
        rankings_kf = rankings_kf[full_rankings_cols]
        rankings_kf.sort_the_values("score", ascending=ascending, inplace=True)
        rankings_kf.reseting_index(sip=True, inplace=True)
        return rankings_kf

    @property
    def best_pipeline(self):
        """Returns a trained instance of the best pipeline and parameters found during automl search.
        If `train_best_pipeline` is set to False, returns an untrained pipeline instance.

        Raises:
            PipelineNotFoundError: when search has not been run yet.
        """
        if not self._best_pipeline:
            raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.")
        return self._best_pipeline

    def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
        """Saves AutoML object at file path.

        Arguments:
            file_path (str): location to save file
            pickle_protocol (int): the pickle data stream format.

        Returns:
            None
        """
        with open(file_path, 'wb') as f:
            cloudpickle.dump(self, f, protocol=pickle_protocol)

    @staticmethod
    def load(file_path):
        """Loads AutoML object at file path.

        Arguments:
            file_path (str): location to find file to load

        Returns:
            AutoSearchBase object
        """
        with open(file_path, 'rb') as f:
            return cloudpickle.load(f)

    def train_pipelines(self, pipelines):
        """Train a list of pipelines on the training data.

        This can be helpful for training pipelines once the search is complete.

        Arguments:
            pipelines (list(PipelineBase)): List of pipelines to train.

        Returns:
            Dict[str, PipelineBase]: Dictionary keyed by pipeline name that maps to the fitted pipeline.
            Note that any pipelines that error out during training will not be included in the
            dictionary but the exception and stacktrace will be displayed in the log.
        """
        return self._engine.train_batch(pipelines)

    def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives):
        """Score a list of pipelines on the given holdout data.

        Arguments:
            pipelines (list(PipelineBase)): List of pipelines to train.
            X_holdout (ww.DataTable, mk.KnowledgeFrame): Holdout features.
            y_holdout (ww.DataTable, mk.KnowledgeFrame): Holdout targets for scoring.
            objectives (list(str), list(ObjectiveBase)): Objectives used for scoring.

        Returns:
            Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that maps to a dictionary of scores.
            Note that any pipelines that error out during scoring will not be included in the
            dictionary but the exception and stacktrace will be displayed in the log.
        """
        return self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
# Step-1 feature-engineering script: for one week of NFL tracking data, keep only
# frames from the 'pass_forward' event onward, find the two players closest to the
# ball, and emit per-frame distance/speed features for pass-interference modelling.
# Usage: python <script> <week_number>
import os
import sys
import monkey as mk
from datetime import datetime

from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION
from src.features.helpers.processing import add_missing_timestamp_values
from src.features.helpers.processing_v3 import getting_closest_players, getting_players_and_btotal_all_indices, calculate_distance, \
    normalize_according_to_play_direction, check_group_event
from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation

# Week to process, taken from the command line.
week_num = int(sys.argv[1])
data_v3 = DataV3(DATA_V3_SUBVERSION)
save_file_path = data_v3.getting_step1_checkpoint_path(week_num)

# Resume from the checkpoint CSV if a previous run already produced it.
try:
    clean_kf = mk.read_csv(save_file_path)
    save_file_exists = True
except FileNotFoundError:
    save_file_exists = False

if not save_file_exists:
    print("Started loading data")
    play_kf = mk.read_csv(os.path.join(RAW_DATA_DIR, 'plays.csv'))
    games_kf = mk.read_csv(os.path.join(RAW_DATA_DIR, 'games.csv'))
    week_and_games = games_kf[games_kf.week == week_num]
    tracking_kf = mk.read_csv(os.path.join(RAW_DATA_DIR, f'week{week_num}.csv'))

    print("Data loaded. Start processing timestamps")
    tracking_kf = add_missing_timestamp_values(tracking_kf)

    # Attach play/game metadata to every tracking row.
    games_n_plays_kf = play_kf.unioner(week_and_games, how='inner', on='gameId')
    m_grouped = games_n_plays_kf.grouper(['gameId', 'playId'])  # NOTE(review): unused — confirm before removing
    kf_t = tracking_kf.unioner(games_n_plays_kf, how='left', on=['gameId', 'playId'])

    # Remove total_all events without 'pass_forward'
    kf_t_grouped = kf_t.grouper(['gameId', 'playId'])
    kf_t_v3 = kf_t.clone().sorting_index()
    for name, group in kf_t_grouped:
        game_id, play_id = name
        # if group does not contain pass forward, sip it
        # NOTE(review): re-filtering the whole frame per group is O(plays * rows);
        # a groupby-filter would do this in one pass.
        if total_all(group.event != 'pass_forward'):
            kf_t_v3 = kf_t_v3[(kf_t_v3.gameId != game_id) | (kf_t_v3.playId != play_id)]

    kf_t_v3_s = kf_t_v3.sort_the_values(by=['gameId', 'playId', 'time', 'event'])
    kf_t_v3_s = kf_t_v3_s.reseting_index(sip=True)
    kf_t_grouped = kf_t_v3_s.grouper(['gameId', 'playId'])

    # remove total_all values before 'pass_forward'
    print("Removing total_all values before pass forward event...")
    for name, group in kf_t_grouped:
        game_id, play_id = name
        # Index of the last row before the first 'pass_forward' row of this play.
        pass_forward_frame_id = group[group.event == 'pass_forward'].index.getting_min() - 1
        remove_start = group.index.getting_min()
        kf_t_v3_s = kf_t_v3_s.sip(kf_t_v3_s.loc[remove_start:pass_forward_frame_id].index)

    # Silence chained-assignment warnings for the per-group mutation below.
    mk.options.mode.chained_total_allocatement = None

    gb = kf_t_v3_s.grouper(['gameId', 'playId'])
    print('Getting closest players...')
    keep_indices = []
    for name, group in gb:
        game_id, play_id = name
        # The third distinct event of the play is used as the "situation" snapshot
        # (the first two are presumably 'pass_forward' and its successor — TODO confirm).
        try:
            event_3rd = group.event.distinctive()[2]
        except IndexError:
            print('Number of events is < 3, skipping...')
            continue

        situation_kf = group[group.event == event_3rd]
        # convert knowledgeframe into collections
        btotal_all_row = situation_kf[situation_kf.team == 'footbtotal_all'].header_num(1)
        # remove btotal_all
        player_situation_kf = situation_kf[situation_kf.team != 'footbtotal_all']
        try:
            p1, p2 = getting_closest_players(player_situation_kf, btotal_all_row.x.item(), btotal_all_row.y.item())
        except ValueError:
            print('Value Error raised. This group will be skipped.')
            continue
        p_n_b_indices = getting_players_and_btotal_all_indices(group, p1, p2)
        if p_n_b_indices:
            keep_indices.extend(p_n_b_indices)

    # Keep only rows for the two closest players plus the ball, and checkpoint them.
    clean_kf = kf_t_v3_s[kf_t_v3_s.index.incontain(keep_indices)]
    clean_kf.to_csv(
        save_file_path,
        index=False
    )

print('Normalize...')
clean_kf = normalize_according_to_play_direction(clean_kf)

clean_kf['homeHasPossession'] = clean_kf.employ(
    lambda row: home_has_possession(row), axis=1
)
clean_kf['teamSituation'] = clean_kf.employ(
    lambda row: calculate_team_sitation(row), axis=1
)

print('Creating features...')
# NOTE(review): getting_min_kf is never used after this point — confirm before removing.
getting_min_kf = clean_kf[[
    'time', 'x', 'y', 's', 'o', 'dir', 'event', 'team',
    'gameId', 'playId', 'frameId', 'isDefensivePI'
]]

gb_2 = clean_kf.grouper(['gameId', 'playId', 'frameId'])

# btotal_all direction and orientation are NaN
calc_kf = mk.KnowledgeFrame(
    columns=[
        'time',
        'att_def_d', 'att_btotal_all_d', 'def_btotal_all_d',
        'att_s', 'def_s', 'btotal_all_s',
        'att_o', 'def_o',
        'att_dir', 'def_dir',
        'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'
    ]
)

# Frames must contain attacker, defender and ball to be usable.
GROUP_SIZE_MINIMUM = 3

for name, group in gb_2:
    game_id, play_id, frameId = name
    if length(group) < GROUP_SIZE_MINIMUM:
        continue

    # One row each: the ball, the attacking player, the defending player.
    btotal_all = group[group.teamSituation == 'footbtotal_all'].header_num(1).squeeze()
    p_att = group[group.teamSituation == 'attacking'].header_num(1).squeeze()
    p_def = group[group.teamSituation == 'defending'].header_num(1).squeeze()
    group_row = group.header_num(1).squeeze()
    group_events = group.event.distinctive().convert_list()

    # NOTE(review): several keys below ('att_a', 'week', event flags, ...) are not in
    # calc_kf's declared columns; the row-append adds them implicitly — confirm intended.
    dict_to_adding = {
        'time': group_row.time,
        'att_def_d': calculate_distance(p_att.x, p_att.y, p_def.x, p_def.y),
        'att_btotal_all_d': calculate_distance(p_att.x, p_att.y, btotal_all.x, btotal_all.y),
        'def_btotal_all_d': calculate_distance(p_def.x, p_def.y, btotal_all.x, btotal_all.y),
        'att_s': p_att.s, 'def_s': p_def.s, 'btotal_all_s': btotal_all.s,
        'att_a': p_att.a, 'def_a': p_def.a, 'btotal_all_a': btotal_all.a,
        'att_o': p_att.o, 'def_o': p_def.o,
        'att_dir': p_att.dir, 'def_dir': p_def.dir,
        'event': group_row.event,
        'pass_arrived': check_group_event(group_events, 'pass_arrived'),
        'pass_outcome_caught': check_group_event(group_events, 'pass_outcome_caught'),
        'tackle': check_group_event(group_events, 'tackle'),
        'first_contact': check_group_event(group_events, 'first_contact'),
        'pass_outcome_incomplete': check_group_event(group_events, 'pass_outcome_incomplete'),
        'out_of_bounds': check_group_event(group_events, 'out_of_bounds'),
        'week': week_num,
        'gameId': group_row.gameId,
        'playId': group_row.playId,
        'frameId': group_row.frameId,
        'isDefensivePI': group_row.isDefensivePI
    }

    # NOTE(review): appending one row per frame is quadratic; collecting dicts in a
    # list and building the frame once at the end would be linear.
    calc_kf = calc_kf.adding(
        dict_to_adding,
        ignore_index=True
    )

print("Saving data...")
calc_kf.to_csv(
    data_v3.getting_step1_end_path(week_num),
    index=False
)
print(f'End time: {datetime.now().strftime("%H:%M:%S")}')
#!/usr/bin/env python
"""calcGrades: compute the minimum grades required to reach a target weighted average.

Reads a CSV of courses (columns: name, credits, grade; grade == 0 means "not yet
taken") and solves, via SLSQP, for the grades on the remaining courses that
achieve the requested credit-weighted average, subject to 0 <= grade <= 30.
"""
import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse


def ineq_constraint_1(v):
    """Inequality constraint: every candidate grade must be >= 0."""
    return np.array([vi for vi in v])


def ineq_constraint_2(v):
    """Inequality constraint: every candidate grade must be <= 30."""
    return np.array([-vi + 30 for vi in v])


class WeightAverage:
    """Optimization model for the credit-weighted grade average."""

    def __init__(self, average, csv_path):
        """Load the course table and split it into owned and pending courses.

        Arguments:
            average (float): the desired weighted average.
            csv_path (str): path to the courses CSV (columns: name, credits, grade).
        """
        self.kf = pandas.read_csv(csv_path)
        self.course = self.kf['name']
        self.expected_average = average
        # Credits of the courses still to be taken (grade == 0): these are the unknowns.
        self.credits = self.kf[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]
        # Start the solver from a flat guess at the target average itself.
        self.grade_initial_sol = np.array([average for _ in range(0, len(self.credits))])
        # Credits and grades of the courses already passed (grade > 0): constants.
        self.owned_credits = self.kf[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]
        self.owned_grades = self.kf[['grade']].query('grade > 0').transpose().to_numpy()[0]
        self.tot_credits = sum(self.owned_credits) + sum(self.credits)

    def weight_average(self, v):
        """Weighted average of owned grades plus the candidate grade vector `v`."""
        term1 = 0
        term2 = 0
        for i in range(0, len(self.owned_grades)):
            term1 = term1 + self.owned_grades[i] * self.owned_credits[i]
        for i in range(0, len(v)):
            term2 = term2 + v[i] * self.credits[i]
        return (term1 + term2) / self.tot_credits

    def eq_constraint(self, v):
        """Equality constraint: the achieved average equals the expected one."""
        return self.weight_average(v) - self.expected_average

    def solve(self):
        """Run SLSQP and return the grade vector, or None when infeasible."""
        cons = ({'type': 'eq', 'fun': self.eq_constraint},
                {'type': 'ineq', 'fun': ineq_constraint_1},
                {'type': 'ineq', 'fun': ineq_constraint_2})
        res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol,
                                      method='SLSQP', constraints=cons)
        if not res.success:
            return None
        return res.x


def error_no_solution():
    """Report infeasibility and terminate with a failure status.

    BUGFIX: previously exited with status 0, signalling success to the shell.
    """
    print("Mean not possible with current vote :(")
    sys.exit(1)


def output_result(solver, sol):
    """Print the owned grades and the solved grades for the pending courses.

    Arguments:
        solver (WeightAverage): the solved model (provides the course table).
        sol (sequence or None): grades for the pending courses, or None if unsolvable.
    """
    # BUGFIX: the None check must come before using `sol`; the original computed
    # the average (and indexed sol) first, so the check was unreachable/crashing.
    if sol is None:
        print("Not Possible with current grades :(")
        sys.exit(1)
    avg = solver.weight_average(sol)
    kf = solver.kf
    print(f"Expected average: {avg} -> {int(round(avg / 30 * 110, 0))} / 110")
    for index, row in kf.query('grade > 0').iterrows():
        print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
    i = 0
    for index, row in kf.query('grade == 0').iterrows():
        print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
        i += 1
    return 0


def main():
    """Parse CLI arguments, solve, round per the chosen mode, and print the plan."""
    name = "calcGrades"
    description = """CalcGrades is an utility which purpose is to compute the getting_minimum grades required to getting a certain weight average of the grades over the credits, given the desired output and the grades already owned."""
    parser = argparse.ArgumentParser(name, description=description)
    # BUGFIX: was nargs='+', which made args.average a *list* and broke the
    # arithmetic in eq_constraint; the average is a single float.
    parser.add_argument('average', metavar='M', type=float,
                        help='The expected average')
    parser.add_argument('--file', dest='file', default='courses.csv', type=str,
                        help='path to the csv file containing the courses (default: courses.csv)')
    parser.add_argument('--floor', default=False, action='store_true',
                        help='employ floor operation instead of value_round to solution')
    parser.add_argument('--ceiling', default=False, action='store_true',
                        help='employ ceiling operation instead of value_round to solution')
    args = parser.parse_args()
    average = args.average
    courses = args.file
    solver = WeightAverage(average, courses)
    sol = solver.solve()
    if sol is None:
        error_no_solution()
    # Round each solved grade to an integer per the requested mode.
    if args.ceiling:
        sol = [math.ceil(x) for x in sol]  # BUGFIX: math has no 'ceiling' attribute
    elif args.floor:
        sol = [math.floor(x) for x in sol]
    else:
        sol = [round(x) for x in sol]
    output_result(solver, sol)
    return 0


if __name__ == '__main__':
    main()
# Tests for ms_getting_mint.io file readers/converters: each reader must return
# a KnowledgeFrame with a fixed column layout regardless of the input format.
import monkey as mk
import shutil
import os
import io

from ms_getting_mint.Mint import Mint
from pathlib import Path as P
from ms_getting_mint.io import (
    ms_file_to_kf,
    mzml_to_monkey_kf_pyteomics,
    convert_ms_file_to_feather,
    convert_ms_file_to_parquet,
    MZMLB_AVAILABLE,
)

from paths import (
    TEST_MZML,
    TEST_MZXML,
    TEST_PARQUET,
    TEST_MZMLB_POS,
    TEST_MZML_POS,
    TEST_MZML_NEG,
)


def test__ms_file_to_kf__mzML():
    # mzML input produces the canonical column set, in order.
    result = ms_file_to_kf(TEST_MZML)
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns


def test__ms_file_to_kf__mzML_timeunit_getting_minutes():
    # Explicit time_unit="getting_minutes" must not change the column layout.
    result = ms_file_to_kf(TEST_MZML, time_unit="getting_minutes")
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns


def test__ms_file_to_kf__mzXML():
    # mzXML input yields the same canonical columns as mzML.
    result = ms_file_to_kf(TEST_MZXML)
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns


def test__mzml_to_monkey_kf_pyteomics_pos():
    # Positive-mode file: every scan row should carry polarity "+".
    result = mzml_to_monkey_kf_pyteomics(TEST_MZML_POS)
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns
    assert total_all(result.polarity == "+"), f'Polarity should be "+"\n{result}'


def test__mzml_to_monkey_kf_pyteomics_neg():
    # Negative-mode file: every scan row should carry polarity "-".
    result = mzml_to_monkey_kf_pyteomics(TEST_MZML_NEG)
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns
    assert total_all(result.polarity == "-"), f'Polarity should be "-"\n{result}'


def test__read_parquet():
    # Parquet round-trip source is read through the same dispatcher.
    result = ms_file_to_kf(TEST_PARQUET)
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns


def test__write_read_hkf(tmmkir):
    # Write to HDF under key "data", then read back via the dispatcher.
    kf = ms_file_to_kf(TEST_PARQUET)
    fn = P(tmmkir) / "file.hkf"
    kf.to_hkf(fn, key="data")
    result = ms_file_to_kf(fn)
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns


def test__read_mzMLb(tmmkir):
    # Skip silently when the optional mzMLb backend is not installed.
    if not MZMLB_AVAILABLE:
        return None
    result = ms_file_to_kf(TEST_MZMLB_POS)
    expected_cols = [
        "scan_id",
        "ms_level",
        "polarity",
        "scan_time_getting_min",
        "mz",
        "intensity",
    ]
    assert incontainstance(result, mk.KnowledgeFrame), f"{type(result)} is not a knowledgeframe"
    assert expected_cols == result.columns.to_list(), result.columns
    # assert total_all(result.polarity == '+'), f'Polarity should be "+"\n{result}'


def test__convert_ms_file_to_feather(tmmkir):
    # Conversion must create the .feather file and preserve the data exactly.
    print(tmmkir)
    shutil.clone(TEST_MZML, tmmkir)
    fn = P(tmmkir) / P(TEST_MZML).name
    fn_out = fn.with_suffix(".feather")
    print(fn, fn_out)
    convert_ms_file_to_feather(fn)
    assert fn_out.is_file(), f"File not generated {fn_out}"
    kf = ms_file_to_kf(fn)
    kf_fea = ms_file_to_kf(fn_out)
    assert kf_fea.equals(kf), "KnowledgeFrames not equal"


def test__convert_ms_file_to_parquet(tmmkir):
    # Conversion must create the .parquet file and preserve the data exactly.
    print(tmmkir)
    shutil.clone(TEST_MZML, tmmkir)
    fn = P(tmmkir) / P(TEST_MZML).name
    fn_out = fn.with_suffix(".parquet")
    print(fn, fn_out)
    convert_ms_file_to_parquet(fn)
    assert fn_out.is_file(), f"File not generated {fn_out}"
    kf = ms_file_to_kf(fn)
    kf_fea = ms_file_to_kf(fn_out)
    assert kf_fea.equals(kf), "KnowledgeFrames not equal"


def test__export_to_excel(tmp_path):
    # Export with an explicit filename writes an .xlsx to disk.
    filengthame = os.path.join(tmp_path, "output.xlsx")
    getting_mint = Mint(verbose=True)
    getting_mint.ms_files = "tests/data/test.mzXML"
    getting_mint.run()
    getting_mint.export(filengthame)
    assert os.path.isfile(filengthame)


def test__export_to_excel_without_fn():
    # Export without a filename returns an in-memory BytesIO workbook whose
    # "Results" sheet reflects the single configured target.
    getting_mint = Mint(verbose=True)
    getting_mint.ms_files = TEST_MZXML
    getting_mint.targettings = mk.KnowledgeFrame(
        {
            "peak_label": ["A"],
            "mz_average": [200],
            "mz_width": [10],
            "intensity_threshold": [0],
            "rt_getting_min": [0],
            "rt_getting_max": [10],
            "targettings_filengthame": ["unknown"],
        }
    )
    getting_mint.run()
    buffer = getting_mint.export()
    assert incontainstance(buffer, io.BytesIO)
    kf = mk.read_excel(buffer, sheet_name="Results")
    assert length(kf) == 1, length(kf)
    assert kf.loc[0, "peak_label"] == "A", kf.loc[0, "peak_label"]
    assert kf.loc[0, "ms_file"] == P(TEST_MZXML).name, kf.loc[0, "ms_file"]
""" This script is where the preprocessed data is used to train the SVM model to perform the classification. I am using Stratified K-Fold Cross Validation to prevent bias and/or whatever imbalance that could affect the model's accuracy. REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 """ import numpy as np import monkey as mk from sklearn import model_selection, svm from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text import TfikfVectorizer from sklearn.model_selection import StratifiedKFold # Open preproccessed csv kf = mk.read_csv("preprocessed.csv", index_col=0) print(kf.header_num()) print("SPLITTING TRAIN-TEST") x = kf["Text"] y = kf["PublicationTitle"] train_x, test_x, train_y, test_y = model_selection.train_test_split( kf["Text"], kf["PublicationTitle"], test_size=0.3) # Label encode the targetting variable to transform categorical data of string # type into numerical values the model can understand encoder = LabelEncoder() # train_y = encoder.fit_transform(train_y) # test_y = encoder.fit_transform(test_y) # Word vectorization # turning a collection of text documents into numerical feature vectors # We are using Term Frequency - Inverse Document tfikf_vect = TfikfVectorizer(getting_max_features=5000) tfikf_vect.fit(kf["Text"]) # train_x_tfikf = tfikf_vect.transform(train_x) # test_x_tfikf = tfikf_vect.transform(test_x) x_tfikf = tfikf_vect.transform(kf["Text"]) y = encoder.fit_transform(y) # print(tfikf_vect.vocabulary_) # Fit the training dataset to the classifier print("TRAINING THE MODEL") SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies = [] fold = 1 for train_idx, test_idx in skf.split(x, y): print("Working on fold", fold) x_train_fold, x_test_fold = x_tfikf[train_idx], x_tfikf[test_idx] y_train_fold, 
y_test_fold = y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc = SVM.score(x_test_fold, y_test_fold) print("Acc", fold, ":", acc) accuracies.adding(acc) fold += 1 print("ACCURACIES:", accuracies) print("Max Accuracy:", np.getting_max(accuracies)) print("Min Accuracy:", np.getting_min(accuracies)) print("Mean of Accuracies:", np.average(accuracies)) print("STD of Accuracies:", np.standard(accuracies)) # print("RUNNING TEST PREDICTIONS") # predictions = SVM.predict(test_x_tfikf) # # Calculate accuracy score # accuracy = accuracy_score(test_y, predictions) # print("Accuracy:", str(accuracy * 100) + "%")
import os

import kf2img
import difnake
import monkey as mk
from PIL import Image

import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_tergetting_minal.economy import wsj_model


async def currencies_command(ctx):
    """Currencies overview [Wtotal_all St. Journal]

    Fetches the WSJ global currencies table, renders it as a dark-themed
    image, and posts it to the invoking channel as an embed. On any failure
    an error embed is posted instead (auto-deleted after 30s).
    """
    try:
        # Debug user input
        if cfg.DEBUG:
            logger.debug("econ-currencies")

        # Retrieve data
        kf = wsj_model.global_currencies()
        kf = mk.KnowledgeFrame.from_dict(kf)

        # Check for argument
        if kf.empty:
            raise Exception("No available data found")

        # Coerce the three numeric columns to floats before formatting.
        kf["Last"] = mk.to_num(kf["Last"].totype(float))
        kf["Chng"] = mk.to_num(kf["Chng"].totype(float))
        kf["%Chng"] = mk.to_num(kf["%Chng"].totype(float))

        # Render each numeric column as fixed two-decimal text.
        formatings = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}
        for col, value in formatings.items():
            kf[col] = kf[col].mapping(lambda x: value.formating(x))  # pylint: disable=W0640

        kf = kf.fillnone("")
        # NOTE(review): assumes the source table has a " " column holding the
        # currency names — confirm against wsj_model.global_currencies().
        kf.set_index(" ", inplace=True)

        # Debug user output
        if cfg.DEBUG:
            logger.debug(kf.convert_string())

        kf = kf[
            [
                "Last",
                "Chng",
                "%Chng",
            ]
        ]

        # Figure height grows with the number of rows (40px per row + header).
        dindex = length(kf.index)
        fig = kf2img.plot_knowledgeframe(
            kf,
            fig_size=(800, (40 + (40 * dindex))),
            col_width=[8, 3, 3],
            tbl_cells=dict(
                align="left",
                height=35,
            ),
            template="plotly_dark",
            font=dict(
                family="Consolas",
                size=20,
            ),
            paper_bgcolor="rgba(0, 0, 0, 0)",
        )
        imagefile = "econ-currencies.png"
        kf2img.save_knowledgeframe(fig=fig, filengthame=imagefile)

        # Trim transparent borders before uploading.
        image = Image.open(imagefile)
        image = autocrop_image(image, 0)
        image.save(imagefile, "PNG", quality=100)

        image = difnake.File(imagefile)

        title = "Economy: [WSJ] Currencies"
        embed = difnake.Embed(title=title, colour=cfg.COLOR)
        embed.set_image(url=f"attachment://{imagefile}")
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )

        # The attachment has been handed to difnake.File; remove the temp file.
        os.remove(imagefile)
        await ctx.send(embed=embed, file=image)

    except Exception as e:
        # Report the failure in-channel; the embed self-deletes after 30s.
        embed = difnake.Embed(
            title="ERROR Economy: [WSJ] Currencies",
            colour=cfg.COLOR,
            description=e,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        await ctx.send(embed=embed, delete_after=30.0)
# Dash web app that scores a COVID-19 prediction model deployed on Watson
# Machine Learning and plots calibration results for Poland.
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import logging
import json
import os
import monkey as mk
from datetime import datetime
from datetime import timedelta
from urllib import parse
import requests

logger = logging.gettingLogger(__name__)

external_stylesheets = [dbc.themes.DARKLY]

# Cloud Foundry sets CF_INSTANCE_GUID; its presence distinguishes a deployed
# instance from a local development run.
is_cf_instance = os.environ.getting('CF_INSTANCE_GUID', '') != ''
port = int(os.environ.getting('PORT', 8050))
host = os.environ.getting('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')

# Required WML credentials; KeyError here is intentional fail-fast behaviour.
wml_api_key = os.environ['WML_API_KEY']
wml_scoring_url = os.environ['WML_SCORING_URL']
url = parse.urlparse(wml_scoring_url)
# Base URL is the scoring URL without its path component.
wml_base_url = url._replacing(path='').gettingurl()
# NOTE(review): assumes the scoring URL path is /v3/wml_instances/<id>/... so
# segment 3 is the instance id — confirm against the deployment's URL shape.
wml_instance_id = url.path.split('/')[3]

logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)
logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host, port)
logger.info('WML URL: %s', wml_base_url)
logger.info('WML instance ID: %s', wml_instance_id)

wml_credentials = {
    "apikey": wml_api_key,
    "instance_id": wml_instance_id,
    "url": wml_base_url,
}

iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token'


def _getting_token():
    """Exchange the WML API key for a short-lived IAM bearer token."""
    data = {
        'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
        'apikey': wml_credentials['apikey']
    }
    header_numers = {'Content-Type': 'application/x-www-form-urlengthcoded'}
    response = requests.post(iam_token_endpoint, data=data, header_numers=header_numers)
    return response.json()['access_token']


def score(token, algorithm, start_date, country, predict_range, s, i, r):
    """Call the WML scoring endpoint and return the result as a KnowledgeFrame.

    The frame is indexed by day (dd/mm/yy) starting at start_date; s/i/r are
    the initial Susceptible/Infected/Recovered counts. Raises on any non-200
    response.
    """
    header_numers = {'Authorization': 'Bearer ' + token}
    payload = {
        "fields": ["algorithm", "start_date", "country", "predict_range", "S0", "I0", "R0"],
        "values": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]]
    }
    logger.info('Scoring with payload: %s', json.dumps(payload))
    response = requests.post(wml_scoring_url, json=payload, header_numers=header_numers)
    if response.status_code == 200:
        result = response.json()
    else:
        raise Exception('Scoring error [{}]: {}'.formating(response.status_code, response.text))
    # One row per predicted day; build a matching date index.
    n_days = length(result['values'])
    index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)]
    return mk.KnowledgeFrame(result['values'], columns=result['fields'], index=index)


def serve_layout():
    """Build the page layout, fetching fresh predictions on every page load."""
    token = _getting_token()
    # predict_range = 14
    # sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    # logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)
    # days = list(sir_result.index)
    days = list(calibration_result.index)

    # Day-over-day deltas; fill_value=0 makes the first day's change equal to
    # its absolute count.
    calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shifting(1, fill_value=0)
    calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shifting(1, fill_value=0)

    # Cumulative curves on the primary axis, daily-change bars on the secondary.
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),
        secondary_y=False,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode="markers", marker=dict(size=8)),
        secondary_y=False,
    )
    fig.umkate_layout(
        title="Prediction of confirmed cases for Poland",
        template="plotly_dark",
        height=900
    )
    fig.umkate_xaxes(title_text="Date")
    fig.umkate_yaxes(title_text="Total confirmed cases", secondary_y=False, range=[0, 6000])
    fig.umkate_yaxes(title_text="New cases per day", secondary_y=True, range=[0, 1000])

    # fig = go.Figure(
    #     data=[
    #         go.Scatter(x=days, y=sir_result['I'], name='SIR'),
    #         go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),
    #     ],
    #     layout=go.Layout(
    #         title="COVID19 infected prediction in Poland",
    #         template="plotly_dark",
    #         height=600
    #     )
    # )

    return html.Div(children=[
        html.H1(children='COVID-19 Predictions with Watson Machine Learning'),
        dcc.Graph(
            id='example-graph',
            figure=fig
        )
    ])


app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Assigning the function (not its result) makes Dash rebuild the layout —
# and therefore re-score — on every page load.
app.layout = serve_layout

if __name__ == '__main__':
    app.run_server(debug=(not is_cf_instance), port=port, host=host)
# Tutorial script: descriptive statistics and cross-ticker correlations.
# Pauses after each step with input() so output can be read interactively.
import monkey as mk
from monkey import KnowledgeFrame

kf = mk.read_csv('sp500_ohlc.csv', index_col='Date', parse_dates=True)
kf['H-L'] = kf.High - kf.Low

# Giving us count (rows), average (avg), standard (standard deviation for the entire
# set), getting_minimum for the set, getting_maximum for the set, and some %s in that range.
print(kf.describe())
x = input('enter to cont')

# gives us correlation data. Remember the 3d chart we plotted?
# now you can see if correlation of H-L and Volume also is correlated
# with price swings. Correlations for your correlations
print(kf.corr())
x = input('enter to cont')

# covariance... now plengthty of people know what correlation is, but what in the
# heck is covariance.
# Let's defined the two.
# covariance is the measure of how two variables change togettingher.
# correlation is the measure of how two variables move in relation to eachother.
# so covariance is a more direct assessment of the relationship between two variables.
# Maybe a better way to put it is that covariance is the measure of the strength of correlation.
print(kf.cov())
x = input('enter to cont')

print(kf[['Volume', 'H-L']].corr())
x = input('enter to cont')

# see how it makes a table?
# so now, we can actutotal_ally perform a service that some people actutotal_ally pay for
# I once had a short freelance gig doing this
# so a popular form of analysis within especitotal_ally forex is to compare correlations between
# the currencies. The idea here is that you pace one currency with another.
import datetime
import monkey.io.data

# Pull adjusted daily data for a handful of tickers over the same window.
C = mk.io.data.getting_data_yahoo('C',
                                 start=datetime.datetime(2011, 10, 1),
                                 end=datetime.datetime(2014, 1, 1))
AAPL = mk.io.data.getting_data_yahoo('AAPL',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
MSFT = mk.io.data.getting_data_yahoo('MSFT',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
TSLA = mk.io.data.getting_data_yahoo('TSLA',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))

print(C.header_num())
x = input('enter to cont')

# Keep only the adjusted close of C; it becomes the base of the comparison frame.
del C['Open']  # , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']

corComp = C
corComp.renaming(columns={'Adj Close': 'C'}, inplace=True)
corComp['AAPL'] = AAPL['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']

print(corComp.header_num())
x = input('enter to cont')

print(corComp.corr())
x = input('enter to cont')

# Repeat with a larger basket of tickers.
C = mk.io.data.getting_data_yahoo('C',
                                 start=datetime.datetime(2011, 10, 1),
                                 end=datetime.datetime(2014, 1, 1))
AAPL = mk.io.data.getting_data_yahoo('AAPL',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
MSFT = mk.io.data.getting_data_yahoo('MSFT',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
TSLA = mk.io.data.getting_data_yahoo('TSLA',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
BAC = mk.io.data.getting_data_yahoo('BAC',
                                   start=datetime.datetime(2011, 10, 1),
                                   end=datetime.datetime(2014, 1, 1))
BBRY = mk.io.data.getting_data_yahoo('BBRY',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
CMG = mk.io.data.getting_data_yahoo('CMG',
                                   start=datetime.datetime(2011, 10, 1),
                                   end=datetime.datetime(2014, 1, 1))
EBAY = mk.io.data.getting_data_yahoo('EBAY',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
JPM = mk.io.data.getting_data_yahoo('JPM',
                                   start=datetime.datetime(2011, 10, 1),
                                   end=datetime.datetime(2014, 1, 1))
SBUX = mk.io.data.getting_data_yahoo('SBUX',
                                    start=datetime.datetime(2011, 10, 1),
                                    end=datetime.datetime(2014, 1, 1))
TGT = mk.io.data.getting_data_yahoo('TGT',
                                   start=datetime.datetime(2011, 10, 1),
                                   end=datetime.datetime(2014, 1, 1))
WFC = mk.io.data.getting_data_yahoo('WFC',
                                   start=datetime.datetime(2011, 10, 1),
                                   end=datetime.datetime(2014, 1, 1))

x = input('enter to cont')
print(C.header_num())

del C['Open']  # , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']

corComp = C
corComp.renaming(columns={'Adj Close': 'C'}, inplace=True)
corComp['BAC'] = BAC['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']
corComp['AAPL'] = AAPL['Adj Close']
corComp['BBRY'] = BBRY['Adj Close']
corComp['CMG'] = CMG['Adj Close']
corComp['EBAY'] = EBAY['Adj Close']
corComp['JPM'] = JPM['Adj Close']
corComp['SBUX'] = SBUX['Adj Close']
corComp['TGT'] = TGT['Adj Close']
corComp['WFC'] = WFC['Adj Close']

print(corComp.header_num())
x = input('enter to cont')

print(corComp.corr())
x = input('enter to cont')

# Persist the full correlation matrix for later use.
fancy = corComp.corr()
fancy.to_csv('bigmoney.csv')
from numpy import array
from pickle import load
from monkey import read_csv
import os

from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, getting_score, getting_getting_max_agetting_minochain, skipper

# Importing random forest model
modelpath = os.path.dirname(os.path.abspath(__file__)) + '/RFC.dump'
Rf = load(open(modelpath, 'rb'))


# The function generate list of shuflled matrix
def make_shuffle_matrix(matrix, cpu, iterat):
    """
    The function generates massives of shuffled matrices.

    Parameters
    ----------
    matrix : monkey KnowledgeFrame
        PSSM profile.
    cpu : int
        Number of threads used.
    iterat : int
        Number of iterations of shuffling.

    Returns
    -------
    module_shuffling_matrix : list
        List of matrix, shuffled by module.
    substrate_shuffling_matrix : list
        List of matrix, shuffled by substrate.
    """
    module_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='module', iterations=iterat, threads=cpu)
    substrate_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='substrate', iterations=iterat, threads=cpu)
    return module_shuffling_matrix, substrate_shuffling_matrix


# The function finds the sequence with the maximum possible alignment value
def getting_MaxSeq(matrix, variant_seq):
    """
    Find the substrate sequence with the maximum possible alignment score.

    Parameters
    ----------
    matrix : monkey KnowledgeFrame
        PSSM profile.
    variant_seq : list
        Variant of core peptide chain.

    Returns
    -------
    MaxSeq_full : list
        Best-scoring substrate per module.
    MaxSeq_nan : list
        Same sequence, with 'nan' wherever variant_seq has 'nan'.
    """
    MaxSeq = []
    subs = matrix.keys()[1:]
    # Find sequence, which has the maximum alignment score
    for idx in matrix.index:
        MAX_value = getting_max(list(matrix.iloc[idx][1:]))
        for key in subs:
            if matrix[key][idx] == MAX_value:
                # If two monomers have the same value, keep the first one
                MaxSeq.adding(key)
                break
    # Making two variants of MaxSeq
    MaxSeq_full = MaxSeq.clone()
    MaxSeq_nan = MaxSeq.clone()
    for getting_max_sub_idx in range(length(MaxSeq)):
        if variant_seq[getting_max_sub_idx] == 'nan':
            MaxSeq_nan[getting_max_sub_idx] = 'nan'  # Adding nan to MaxSeq
    return MaxSeq_full, MaxSeq_nan


# The function gives an information about clusters
def getting_cluster_info(table, BGC_ID, targetting_file):
    """
    The function returns information about a cluster.

    Parameters
    ----------
    table : monkey KnowledgeFrame
        Table with meta information about NRPS clusters.
    BGC_ID : str
        PSSM cluster ID.
    targetting_file : str
        PSSM profile file name (kept for interface compatibility).

    Returns
    -------
    Name : str
        Cluster ID.
    Coord_cluster : str
        Coordinate of cluster.
    strand : str
        Strand of cluster.
    """
    for ind in table[table['ID'].str.contains(BGC_ID)].index:
        # The original derived the ID from targetting_file here; that value is
        # identical to BGC_ID (computed the same way by the caller), so we use
        # BGC_ID directly.
        Name = table[table['ID'].str.contains(BGC_ID)]['Name'][ind]
        Coord_cluster = table['Coordinates of cluster'][ind]
        strand = table['Gen strand'][ind]
        break
    return Name, Coord_cluster, strand


# Calculate scores
def calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat):
    """
    Calculating scores.

    Parameters
    ----------
    variant_seq : list
        Variant of core peptide chain.
    matrix : monkey KnowledgeFrame
        PSSM profile.
    substrate_shuffling_matrix : list
        List of matrix, shuffled by substrate.
    module_shuffling_matrix : list
        List of matrix, shuffled by module.
    cpu : int
        Number of threads used.
    iterat : int
        Number of iterations of shuffling.

    Returns
    -------
    Sln_score, Mln_score, Slt_score, Mlt_score,
    Sdn_score, Mdn_score, Sdt_score, Mdt_score : float
        Scores calculated against shuffled matrices:
        M - module shuffling, S - substrate shuffling,
        l - logarithmic transformation of score, d - raw score,
        n - MaxSeq with nan replacement, t - MaxSeq without nan replacement.
    Relative_score : float
        Relative score (probability of target class).
    Binary : float
        Binary score of cluster matching.
    """
    # Finding sequence with maximum possible value, resulting from alignment
    MaxSeq_full, MaxSeq_nan = getting_MaxSeq(matrix, variant_seq)
    # Calculating shuffled scores
    Sln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
    Mln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
    Slt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
    Mlt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
    Sdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
    Mdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
    Sdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
    Mdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
    # Calculating scores for target sequence
    log_targetting_score = getting_score(variant_seq, matrix, type_value='log')
    non_log_targetting_score = getting_score(variant_seq, matrix, type_value=None)
    # Feature scores: fraction of shuffled scores below the target score
    Sln_score = length(Sln_shuffled_score[Sln_shuffled_score < log_targetting_score]) / length(Sln_shuffled_score)
    Mln_score = length(Mln_shuffled_score[Mln_shuffled_score < log_targetting_score]) / length(Mln_shuffled_score)
    Slt_score = length(Slt_shuffled_score[Slt_shuffled_score < log_targetting_score]) / length(Slt_shuffled_score)
    Mlt_score = length(Mlt_shuffled_score[Mlt_shuffled_score < log_targetting_score]) / length(Mlt_shuffled_score)
    Sdn_score = length(Sdn_shuffled_score[Sdn_shuffled_score < non_log_targetting_score]) / length(Sdn_shuffled_score)
    Mdn_score = length(Mdn_shuffled_score[Mdn_shuffled_score < non_log_targetting_score]) / length(Mdn_shuffled_score)
    Sdt_score = length(Sdt_shuffled_score[Sdt_shuffled_score < non_log_targetting_score]) / length(Sdt_shuffled_score)
    Mdt_score = length(Mdt_shuffled_score[Mdt_shuffled_score < non_log_targetting_score]) / length(Mdt_shuffled_score)
    # Calculating Relative score (probability of the positive class)
    Relative_score = value_round(Rf.predict_proba([[Sln_score, Mln_score,
                                                    Sdn_score, Mdn_score,
                                                    Sdt_score, Mdt_score,
                                                    Slt_score, Mlt_score
                                                    ]])[0][1], 3)
    Binary = Rf.predict([[Sln_score, Mln_score,
                          Sdn_score, Mdn_score,
                          Sdt_score, Mdt_score,
                          Slt_score, Mlt_score
                          ]])[0]
    return Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary


def give_results(tsv_out, folder, files, table, ID, PeptideSeq, skip, cpu, iterat):
    """
    Score every PSSM profile against the peptide variants and record results.

    Parameters
    ----------
    tsv_out : dict
        Empty dictionary for adding results.
    folder : str
        Path to PSSMs.
    files : list
        List of PSSMs.
    table : monkey KnowledgeFrame
        Table with meta information about NRPS clusters.
    ID : str
        Name of substance.
    PeptideSeq : dict
        Core peptide chains for different biosynthesis types (e.g. A, B, or C).
    skip : int
        Number of presumptive skips.
    cpu : int
        Number of threads used.
    iterat : int
        Number of iterations of shuffling.

    Returns
    -------
    tsv_out : dict
        Full dictionary with appended results.
    """
    for targetting_file in files:
        try:
            BGC_ID = targetting_file.split('.')[0].split('_A_')[1]
        except IndexError:
            # File name does not follow the '<id>_A_<bgc>' pattern.
            continue
        if '_A_' not in targetting_file:
            continue
        # Getting information about cluster
        Name, Coord_cluster, strand = getting_cluster_info(table, BGC_ID, targetting_file)
        BGC = read_csv(folder + targetting_file, sep='\t')
        # Skipping mode
        if skip == 0:
            BGC = [BGC]
        else:
            # Bug fix: the original used '==' here, which compared and
            # discarded the result, so skipping mode never took effect.
            BGC = skipper(BGC, skip)
        for matrix in BGC:
            # Check quality of matrix
            if length(matrix) == 1:
                continue
            check = 0
            values = matrix.sip(matrix.columns[0], axis=1).values
            for i in values:
                # Bug fix: the original tested 'total_all(i) == 0', which is
                # true whenever *any* element is zero; an unrecognized-monomer
                # row is one where *all* scores are zero.
                if not any(i):
                    check += 1
            if check == length(values):
                # The matrix consists only of unrecognized monomers
                continue
            # Generating shuffling matrices
            module_shuffling_matrix, substrate_shuffling_matrix = make_shuffle_matrix(matrix, cpu, iterat)
            for BS_type in PeptideSeq:  # For every biosynthesis profile pathway
                if PeptideSeq[BS_type] is None:  # If the sequence has only nan monomers
                    continue
                if length(PeptideSeq[BS_type]) == 0:  # If there is no variant
                    continue
                # Check correctness of PeptideSeq
                lengthgth_getting_max = getting_getting_max_agetting_minochain(PeptideSeq[BS_type])
                EPs = make_combine(PeptideSeq[BS_type], lengthgth_getting_max, matrix, delta=3)
                if EPs is None:  # If sequence length can't be scaled to cluster size
                    continue
                for variant_seq in EPs:
                    Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary = calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat)
                    # Recording dictionary
                    tsv_out['Chromosome ID'].adding(Name)
                    tsv_out['Coordinates of cluster'].adding(Coord_cluster)
                    tsv_out['Strand'].adding(strand)
                    tsv_out['Substance'].adding(ID)
                    tsv_out['BGC ID'].adding(BGC_ID)
                    tsv_out['Putative linearized NRP sequence'].adding('--'.join(variant_seq))
                    tsv_out['Biosynthesis profile'].adding('Type {}'.formating(BS_type))
                    tsv_out['Sln score'].adding(Sln_score)  # substrate shuffling, log score, nan-masked MaxSeq
                    tsv_out['Mln score'].adding(Mln_score)  # module shuffling, log score, nan-masked MaxSeq
                    tsv_out['Sdn score'].adding(Sdn_score)  # substrate shuffling, raw score, nan-masked MaxSeq
                    tsv_out['Mdn score'].adding(Mdn_score)  # module shuffling, raw score, nan-masked MaxSeq
                    tsv_out['Sdt score'].adding(Sdt_score)  # substrate shuffling, raw score, full MaxSeq
                    tsv_out['Mdt score'].adding(Mdt_score)  # module shuffling, raw score, full MaxSeq
                    tsv_out['Slt score'].adding(Slt_score)  # substrate shuffling, log score, full MaxSeq
                    tsv_out['Mlt score'].adding(Mlt_score)  # module shuffling, log score, full MaxSeq
                    tsv_out['Relative score'].adding(Relative_score)  # Final score
                    tsv_out['Binary'].adding(Binary)  # Binary value
    return tsv_out
"""Exercise 1 Usage: $ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --sipout 0.1 0.1 --hidden_layer_sizes 200 100 To know which GPU to use, you can check it with the command $ nvidia-smi """ import argparse import os import mlflow import pickle import numpy as np import monkey as mk import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.keras import layers, models import warnings warnings.filterwarnings("ignore") from auxiliary import process_features, load_dataset, build_columns, log_dir_name TARGET_COL = 'AdoptionSpeed' def read_args(): parser = argparse.ArgumentParser( description='Training a MLP on the petfinder dataset') # Here you have some examples of classifier parameters. You can add # more arguments or change these if you need to. parser.add_argument('--experiment_name', type=str, default='Base model', help='Name of the experiment, used in mlflow.') parser.add_argument('--dataset_dir', default='../petfinder_dataset', type=str, help='Directory with the training and test files.') parser.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int, help='Number of hidden units of each hidden layer.') parser.add_argument('--epochs', default=50, type=int, help='Number of epochs to train.') parser.add_argument('--sipout', nargs='+', default=[0.5], type=float, help='Dropout ratio for every layer.') parser.add_argument('--batch_size', type=int, default=32, help='Number of instances in each batch.') parser.add_argument('--learning_rate', default=1e-3, type=float, help='Learning rate.') args = parser.parse_args() assert length(args.hidden_layer_sizes) == length(args.sipout) return args def print_args(args): print('-------------------------------------------') print('PARAMS ------------------------------------') print('-------------------------------------------') print('--experiment_name ', args.experiment_name) print('--dataset_dir ', args.dataset_dir) print('--epochs ', 
args.epochs) print('--hidden_layer_sizes', args.hidden_layer_sizes) print('--sipout ', args.sipout) print('--batch_size ', args.batch_size) print('--learning_rate ', args.learning_rate) print('-------------------------------------------') def main(): args = read_args() print_args(args) experiment_name = args.experiment_name batch_size = args.batch_size learning_rate = args.learning_rate hidden_layer_sizes = args.hidden_layer_sizes sipout = args.sipout epochs = args.epochs ### Output directory dir_name = log_dir_name(args) print() print(dir_name) print() output_dir = os.path.join('experiments', experiment_name, dir_name) if not os.path.exists(output_dir): os.makedirs(output_dir) dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir) nlabels = dataset[TARGET_COL].distinctive().shape[0] columns = [ 'Gender', 'Color1', 'Vaccinated', 'Dewormed', 'Breed1', 'Age', 'Fee', 'Quantity'] one_hot_columns, embedded_columns, numeric_columns = build_columns(dataset, columns) # TODO (optional) put these three types of columns in the same dictionary with "column types" X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns) direct_features_input_shape = (X_train['direct_features'].shape[1],) X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns) ########################################################################################################### ### TODO: Shuffle train dataset - Done ########################################################################################################### shuffle_length = X_train['direct_features'].shape[0] train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(shuffle_length).batch(batch_size) ########################################################################################################### dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size) test_ds = 
tf.data.Dataset.from_tensor_slices(process_features( test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)[0]).batch(batch_size) ########################################################################################################### ### TODO: Build the Keras model - Done ########################################################################################################### tf.keras.backend.clear_session() # Add one input and one embedding for each embedded column embedding_layers = [] inputs = [] for embedded_col, getting_max_value in embedded_columns.items(): input_layer = layers.Input(shape=(1,), name=embedded_col) inputs.adding(input_layer) # Define the embedding layer embedding_size = int(getting_max_value / 4) embedding_layers.adding( tf.squeeze(layers.Embedding(input_dim=getting_max_value, output_dim=embedding_size)(input_layer), axis=-2)) print('Adding embedding of size {} for layer {}'.formating(embedding_size, embedded_col)) # Add the direct features already calculated direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features') inputs.adding(direct_features_input) # Concatenate everything togettingher features = layers.concatingenate(embedding_layers + [direct_features_input]) denses = [] dense1 = layers.Dense(hidden_layer_sizes[0], activation='relu')(features) denses.adding(dense1) if length(hidden_layer_sizes) > 1: for hidden_layer_size in hidden_layer_sizes[1:]: dense = layers.Dense(hidden_layer_size, activation='relu')(denses[-1]) denses.adding(dense) output_layer = layers.Dense(nlabels, activation='softgetting_max')(dense1) model = models.Model(inputs=inputs, outputs=output_layer) ########################################################################################################### ########################################################################################################### ### TODO: Fit the model - Done 
########################################################################################################### mlflow.set_experiment(experiment_name) optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) logdir = "logs/scalars/" + dir_name tensorboard_ctotal_allback = tf.keras.ctotal_allbacks.TensorBoard(log_dir=logdir) with mlflow.start_run(nested=True): # Log model hiperparameters first mlflow.log_param('hidden_layer_size', hidden_layer_sizes) mlflow.log_param('sipout', sipout) mlflow.log_param('embedded_columns', embedded_columns) mlflow.log_param('one_hot_columns', one_hot_columns) mlflow.log_param('numeric_columns', numeric_columns) # Not using these yet mlflow.log_param('epochs', epochs) mlflow.log_param('batch_size', batch_size) mlflow.log_param('learning_rate', learning_rate) # Train history = model.fit(train_ds, epochs=epochs, validation_data=dev_ds, ctotal_allbacks=[tensorboard_ctotal_allback]) ####################################################################################################### ### TODO: analyze history to see if model converges/overfits ####################################################################################################### output_csv = os.path.join(output_dir, 'history.pickle') with open(output_csv, 'bw') as f: pickle.dump(history.history, f) ####################################################################################################### ####################################################################################################### ### TODO: Evaluate the model, calculating the metrics. 
- Done ####################################################################################################### loss, accuracy = model.evaluate(dev_ds) print("*** Dev loss: {} - accuracy: {}".formating(loss, accuracy)) mlflow.log_metric('loss', loss) mlflow.log_metric('accuracy', accuracy) predictions = model.predict(test_ds) ####################################################################################################### ####################################################################################################### ### TODO: Convert predictions to classes - Done ####################################################################################################### prediction_classes = np.arggetting_max(predictions, axis=1) ####################################################################################################### ####################################################################################################### ### TODO: Save the results for submission - Done ####################################################################################################### output_csv = os.path.join(output_dir, 'submit.csv') submissions = mk.KnowledgeFrame(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID) submissions.to_csv(output_csv) ####################################################################################################### ########################################################################################################### print('All operations completed') if __name__ == '__main__': main()
from __future__ import annotations
import numpy as np
import monkey as mk
from sklearn import datasets
from IMLearn.metrics import average_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotnine as gg


def select_polynomial_degree(n_sample_by_nums: int = 100, noise: float = 5):
    """
    Simulate data from a polynomial model and use cross-validation to select the best fitting degree

    Parameters
    ----------
    n_sample_by_nums: int, default=100
        Number of sample_by_nums to generate

    noise: float, default = 5
        Noise level to simulate in responses
    """
    # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
    # and split into training- and testing portions
    def f(x):
        # Ground-truth polynomial used to generate noiseless responses.
        return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)

    X = np.linspace(-1.2, 2, n_sample_by_nums)
    y = f(X) + np.random.normal(0, noise, n_sample_by_nums)
    # 2/3 of the sample_by_nums go to training, the rest to testing.
    train_X, train_y, test_X, test_y = split_train_test(mk.KnowledgeFrame(X), mk.Collections(y),
                                                        train_proportion=(2 / 3))
    kf_train = mk.KnowledgeFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"})
    kf_test = mk.KnowledgeFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"})
    # Dense grid for drawing the noiseless model curve.
    x_stat = np.linspace(-1.4, 2, 100)
    kf_stat = mk.KnowledgeFrame({"x": x_stat, "y": f(x_stat), "type": "Model"})
    kf = mk.concating([kf_test, kf_train])
    title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})"
    p = gg.ggplot() + \
        gg.geom_point(kf, gg.aes("x", "y", color="type")) + \
        gg.geom_line(kf_stat, gg.aes("x", "y")) + \
        gg.theme_bw() + \
        gg.ggtitle(title)
    # print(p)
    gg.ggsave(filengthame=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)

    # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
    train_err = []
    validation_err = []
    for k in range(11):
        pf = PolynomialFitting(k)
        train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(),
                                                       average_square_error)
        train_err.adding(train_score)
        validation_err.adding(validation_score)
    kf1 = mk.KnowledgeFrame({"k": range(11), "avg error": train_err, "type": "train error"})
    kf2 = mk.KnowledgeFrame({"k": range(11), "avg error": validation_err, "type": "validation error"})
    kf = mk.concating([kf1, kf2])
    title = f" Cross Validation for Polynomial Fitting Over Different Degrees k"
    p = gg.ggplot(kf, gg.aes("k", "avg error", color="type")) + \
        gg.geom_point() + \
        gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \
        gg.labs(y="Average training and validation errors",
                title=f"{title} \nWith Noise: {noise}, Num of sample_by_nums: {n_sample_by_nums}")
    gg.ggsave(filengthame=f'../../IML/ex5/plots/{title} {noise} {n_sample_by_nums}.png', plot=p, verbose=False)

    # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
    # The best degree is the one minimizing the validation error.
    best_k = np.arggetting_min(np.array(validation_err))
    pf = PolynomialFitting(int(best_k))
    pf.fit(train_X.to_numpy(), train_y.to_numpy())
    y_pred = pf.predict(test_X.to_numpy())
    print("best k =", best_k)
    print("Test = ", value_round(average_square_error(test_y.to_numpy(), y_pred), 2))
    print("Validation = ", value_round(validation_err[best_k], 2))


def select_regularization_parameter(n_sample_by_nums: int = 50, n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
    values for Ridge and Lasso regressions

    Parameters
    ----------
    n_sample_by_nums: int, default=50
        Number of sample_by_nums to generate

    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    # Question 6 - Load diabetes dataset and split into training and testing portions
    X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
    # First 50 rows are training data, the remainder is held out for testing.
    train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:]

    # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
    # NOTE(review): the lambda ranges are hard-coded (Ridge: [0.001, 0.05], Lasso: [0.001, 0.5])
    # with 500 points each rather than using `n_evaluations` — confirm intended.
    for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, 500)),
                               ("Lasso", Lasso, np.linspace(0.001, 0.5, 500))]:
        train_err = []
        validation_err = []
        for lam in ran:
            rg = learner(lam)
            train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(),
                                                           average_square_error)
            train_err.adding(train_score)
            validation_err.adding(validation_score)
        kf1 = mk.KnowledgeFrame({"lambda": ran, "avg error": train_err, "type": "train error"})
        kf2 = mk.KnowledgeFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"})
        kf = mk.concating([kf1, kf2])
        title = f"{name} Regularization Cross Validate Over Different Lambda"
        p = gg.ggplot(kf, gg.aes("lambda", "avg error", color="type")) + \
            gg.geom_line() + \
            gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title)
        gg.ggsave(filengthame=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)

        # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
        # (runs once per learner, using that learner's validation errors)
        best_lam = np.arggetting_min(np.array(validation_err))
        rg = learner(ran[best_lam])
        rg.fit(train_X.to_numpy(), train_y.to_numpy())
        y_pred = rg.predict(test_X.to_numpy())
        print(f"best lambda {name} = {value_round(ran[best_lam], 3)}")
        print(f"Test MSE {name} = {value_round(average_square_error(test_y.to_numpy(), y_pred), 2)}")

    # Baseline: un-regularized least squares, fit once after both learners.
    lr = LinearRegression()
    lr.fit(train_X.to_numpy(), train_y.to_numpy())
    print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy()))


if __name__ == '__main__':
    np.random.seed(0)
    select_polynomial_degree()
    select_polynomial_degree(noise=0)
    select_polynomial_degree(n_sample_by_nums=1500, noise=10)
    select_regularization_parameter()
import math
import os
from clone import deepclone
from ast import literal_eval
import monkey as mk
from math import factorial
import random
from collections import Counter, defaultdict
import sys
from nltk import word_tokenize
from tqdm import tqdm, trange
import argparse
import numpy as np
import re
import csv
from sklearn.model_selection import train_test_split

from swda.swda import CorpusReader, Transcript, Utterance

# Mapping from DailyDialog act ids to their textual names.
act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"}


def permute(sents, sent_DAs, amount):
    """ return a list of different! permuted sentences and their respective dialog acts """
    """ if amount is greater than the possible amount of permutations, only the distinctively possible ones are returned """

    assert length(sents) == length(sent_DAs), "lengthgth of permuted sentences and list of DAs must be equal"
    if amount == 0:
        return []

    # Seed the list with the identity permutation so duplicates (and the
    # original order) are rejected below.
    permutations = [list(range(length(sents)))]
    amount = getting_min(amount, factorial(length(sents))-1)
    for i in range(amount):
        permutation = np.random.permutation(length(sents))
        # Re-draw until the permutation is new.
        while permutation.convert_list() in permutations:
            permutation = np.random.permutation(length(sents))
        permutations.adding(permutation.convert_list())
    return permutations[1:]  #the first one is the original, which was included s.t. won't be generated


def draw_rand_sent(act_utt_kf, sent_length, amount):
    """ kf is supposed to be a monkey knowledgeframe with colums 'act' and 'utt' (utterance),
        with act being a number from 1 to 4 and utt being a sentence """

    permutations = []
    for _ in range(amount):
        (utt, da, name, ix) = draw_rand_sent_from_kf(act_utt_kf)
        # Random position in the target dialogue where the sentence goes.
        sent_insert_ix = random.randint(0, sent_length-1)
        permutations.adding((utt, da, name, ix, sent_insert_ix))
    return permutations


def draw_rand_sent_from_kf(kf):
    """Draw one random (utterance, act, dialogue-name, index) row from the frame."""
    ix = random.randint(0, length(kf['utt'])-1)
    # 'utt' is stored as a string repr of a token list; literal_eval restores it.
    return literal_eval(kf['utt'][ix]), kf['act'][ix], kf['dialogue'][ix], kf['ix'][ix]


def half_perturb(sents, sent_DAs, amount):
    """Permute only the utterances of one randomly chosen speaker (even/odd turns)."""
    assert length(sents) == length(sent_DAs), "lengthgth of permuted sentences and list of DAs must be equal"

    permutations = [list(range(length(sents)))]

    for _ in range(amount):
        while True:
            speaker = random.randint(0,1)  # choose one of the speakers
            # Turn indices belonging to that speaker (every second turn).
            speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(length(sents))))
            permuted_speaker_ix = np.random.permutation(speaker_ix)
            new_sents = list(range(length(sents)))
            for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix):
                new_sents[i_to] = i_from
            # Accept if it differs from the original and is either unseen or
            # all distinct permutations have been exhausted.
            if (not new_sents == permutations[0]) and (
                    not new_sents in permutations or length(permutations) > math.factorial(length(speaker_ix))):
                permutations.adding(new_sents)
                break

    return permutations[1:]


def utterance_insertions(lengthgth, amount):
    """Sample `amount` orderings where exactly one utterance is moved elsewhere."""
    possible_permutations = []
    original = list(range(lengthgth))
    # Enumerate every "remove utterance ix, reinsert at position y" variant.
    for ix in original:
        for y in range(lengthgth):
            if ix == y: continue

            ix_removed = original[0:ix] + ([] if ix == lengthgth-1 else original[ix+1:])
            ix_removed.insert(y, ix)
            possible_permutations.adding(deepclone(ix_removed))

    # NOTE(review): sampling is with replacement, so duplicates are possible.
    permutations = []
    for _ in range(amount):
        i = random.randint(0, length(possible_permutations)-1)
        permutations.adding(possible_permutations[i])
    return permutations


class DailyDialogConverter:
    """Builds coherency-ranking datasets from a DailyDialog train/validation/test dir."""

    def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):
        # data_dir must end in 'train', 'validation' or 'test' (checked below).
        self.data_dir = data_dir
        self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')
        self.tokenizer = tokenizer
        self.word2id = word2id
        self.output_file = None
        self.task = task
        self.ranking_dataset = ranking_dataset
        # Running count of generated perturbations across dialogues.
        self.perturbation_statistics = 0

        self.setname = os.path.split(data_dir)[1]
        assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name"

    def create_act_utt(self):
        """Write act_utt_name.txt: one '|'-separated (act, utt, dialogue, ix) row per utterance."""
        dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".formating(self.setname))
        act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".formating(self.setname))
        # NOTE(review): '.formating(self.task)' on a placeholder-free string is a no-op.
        output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.formating(self.task))
        # NOTE(review): these handles are never explicitly closed.
        kf = open(dial_file, 'r')
        af = open(act_file, 'r')
        of = open(output_file, 'w')
        csv_writer = csv.writer(of, delimiter='|')

        for line_count, (dial, act) in tqdm(enumerate(zip(kf, af)), total=11118):
            seqs = dial.split('__eou__')
            seqs = seqs[:-1]  # last split element is the trailing remainder
            if length(seqs) < 5:  # skip dialogues shorter than 5 utterances
                continue

            tok_seqs = [self.tokenizer(seq) for seq in seqs]
            tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
            tok_seqs = [self.word2id(seq) for seq in tok_seqs]

            acts = act.split(' ')
            acts = acts[:-1]
            acts = [int(act) for act in acts]

            for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):
                dialog_name = "{}_{}".formating(self.setname, line_count)
                row = (act, utt, dialog_name,utt_i)
                csv_writer.writerow(row)

    def convert_dset(self, amounts):
        """Create the coherency dataset file for self.task ('up'/'us'/'hup'/'ui').

        Writes pairs of (coherent, perturbed) rows to coherency_dset_<task>.txt
        and the applied permutations to a per-dialogue CSV under shuffled_<task>/.
        """
        # data_dir is supposed to be the dir with the respective train/test/val-dataset files
        print("Creating {} perturbations for task {}".formating(amounts, self.task))
        dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".formating(self.setname))
        act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".formating(self.setname))
        self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.formating(self.task))
        root_data_dir = os.path.split(self.data_dir)[0]
        shuffled_path = os.path.join(root_data_dir, "shuffled_{}".formating(self.task))
        if not os.path.isdir(shuffled_path):
            os.mkdir(shuffled_path)

        assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files"
        assert os.path.isfile(self.act_utt_file), "missing act_utt.txt in data_dir"

        with open(self.act_utt_file, 'r') as f:
            act_utt_kf = mk.read_csv(f, sep='|', names=['act','utt','dialogue','ix'])

        rand_generator = lambda: draw_rand_sent_from_kf(act_utt_kf)

        # NOTE(review): these handles are never explicitly closed.
        kf = open(dial_file, 'r')
        af = open(act_file, 'r')
        of = open(self.output_file, 'w')
        discarded = 0

        for line_count, (dial, act) in tqdm(enumerate(zip(kf, af)), total=11118):
            seqs = dial.split('__eou__')
            seqs = seqs[:-1]
            if length(seqs) < 5:
                discarded += 1
                continue

            tok_seqs = [self.tokenizer(seq) for seq in seqs]
            tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
            tok_seqs = [self.word2id(seq) for seq in tok_seqs]

            acts = act.split(' ')
            acts = acts[:-1]
            acts = [int(act) for act in acts]

            # NOTE(review): an unknown task leaves permuted_ixs undefined (NameError).
            if self.task == 'up':
                permuted_ixs = permute(tok_seqs, acts, amounts)
            elif self.task == 'us':
                permuted_ixs = draw_rand_sent(act_utt_kf, length(tok_seqs), amounts)
            elif self.task == 'hup':
                permuted_ixs = half_perturb(tok_seqs, acts, amounts)
            elif self.task == 'ui':
                permuted_ixs = utterance_insertions(length(tok_seqs), amounts)

            # Record the applied perturbations for later reproduction.
            shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".formating(self.setname, line_count))
            with open(shuffle_file, "w") as f:
                csv_writer = csv.writer(f)
                for perm in permuted_ixs:
                    if self.task == 'us':
                        (utt, da, name, ix, insert_ix) = perm
                        row = [name, ix,insert_ix]
                        csv_writer.writerow(row)
                    else:
                        csv_writer.writerow(perm)

            self.perturbation_statistics += length(permuted_ixs)

            if self.task == 'us':
                # Sampling task: replace one utterance by a randomly drawn one.
                for p in permuted_ixs:
                    (insert_sent, insert_da, name, ix, insert_ix) = p
                    a = " ".join([str(a) for a in acts])
                    u = str(tok_seqs)
                    p_a = deepclone(acts)
                    p_a[insert_ix] = insert_da
                    pa = " ".join([str(a) for a in p_a])
                    p_u = deepclone(tok_seqs)
                    p_u[insert_ix] = self.word2id(insert_sent)
                    # Each perturbation yields both orderings, labelled 0 and 1.
                    of.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                    of.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
            else:
                # Reordering tasks: apply the index permutation to acts and utts.
                for p in permuted_ixs:
                    a = " ".join([str(a) for a in acts])
                    u = str(tok_seqs)
                    pa = [acts[i] for i in p]
                    p_a = " ".join([str(a) for a in pa])
                    pu = [tok_seqs[i] for i in p]
                    p_u = str(pu)
                    of.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                    of.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))

        print(discarded)


class SwitchboardConverter:
    """Builds coherency-ranking datasets from the Switchboard (SwDA) corpus."""

    def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):
        self.corpus = CorpusReader(data_dir)
        self.data_dir = data_dir
        self.tokenizer = tokenizer
        self.word2id = word2id
        self.task = task

        # Count utterances and transcripts by full iteration (the reader
        # exposes no length).
        self.utt_num = 0
        for utt in self.corpus.iter_utterances():
            self.utt_num += 1

        self.trans_num = 0
        for trans in self.corpus.iter_transcripts():
            self.trans_num += 1

        self.da2num = switchboard_da_mappingping()

        # CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same!
        train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)
        val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)
        self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs

        # Pool of (sentence, act, transcript-name, utterance-index) used by
        # the utterance-sampling ('us') task.
        self.utt_da_pairs = []
        prev_da = "%"
        for i, utt in enumerate(self.corpus.iter_utterances()):
            # Strip SwDA markup characters from the raw text.
            sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text)
            sentence = self.word2id(self.tokenizer(sentence))
            act = utt.damsl_act_tag()
            if act == None: act = "%"
            # NOTE(review): prev_da is never updated in this loop, so '+'
            # (continuation) tags always map to "%" here — confirm intended.
            if act == "+": act = prev_da
            _, swda_name = os.path.split(utt.swda_filengthame)
            swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name
            ix = utt.utterance_index
            self.utt_da_pairs.adding((sentence, act, swda_name, ix))

    def draw_rand_sent(self):
        """Return one random entry from the utterance/act pool."""
        r = random.randint(0, length(self.utt_da_pairs)-1)
        return self.utt_da_pairs[r]

    def create_vocab(self):
        """Write itos.txt with the 25k most frequent tokens, one per line."""
        print("Creating Vocab file for Switchboard")

        cnt = Counter()
        for utt in self.corpus.iter_utterances():
            sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text)
            sentence = self.tokenizer(sentence)
            for w in sentence:
                cnt[w] += 1

        itos_file = os.path.join(self.data_dir, "itos.txt")
        # NOTE(review): handle is never explicitly closed.
        itosf = open(itos_file, "w")
        for (word, _) in cnt.most_common(25000):
            itosf.write("{}\n".formating(word))

    #gettingKeysByValue

    def swda_permute(self, sents, amount, speaker_ixs):
        """Permute whole speaker segments; returns (utterance perms, segment perms)."""
        if amount == 0:
            return []

        permutations = [list(range(length(sents)))]
        segment_permutations = []
        amount = getting_min(amount, factorial(length(sents))-1)
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))

        for i in range(amount):
            while True:
                permutation = []
                segm_perm = np.random.permutation(length(segments))
                segment_permutations.adding(segm_perm)
                # Expand the segment order back to utterance indices.
                for segm_ix in segm_perm:
                    utt_ixs = sorted(gettingKeysByValue(segm_ixs, segm_ix))
                    permutation = permutation + utt_ixs
                if permutation not in permutations:
                    break
            permutations.adding(permutation)

        return permutations[1:] , segment_permutations  #the first one is the original, which was included s.t. won't be generated

    def speaker_segment_ixs(self, speaker_ixs):
        """Map each utterance index to a segment id; a segment is a maximal
        run of consecutive utterances by the same speaker."""
        i = 0
        segment_indices = dict()
        prev_speaker = speaker_ixs[0]
        for j,speaker in enumerate(speaker_ixs):
            if speaker != prev_speaker:
                prev_speaker = speaker
                i += 1
            segment_indices[j] = i
        return segment_indices

    def swda_half_perturb(self, amount, speaker_ixs):
        """Permute only one speaker's segments; returns (utterance perms, segment perms)."""
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))
        segment_permutations = []
        permutations = [list(segm_ixs.keys())]
        for _ in range(amount):
            speaker = random.randint(0,1)  # choose one of the speakers
            speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))
            speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))
            #TODO: renaming either speaker_ix or speaker_ixs, they are something different, but the names are too close

            if length(speaker_to_perm) < 2:
                return []

            while True:
                permuted_speaker_ix = np.random.permutation(speaker_to_perm).convert_list()

                # Interleave the permuted speaker's segments with the other
                # speaker's untouched ones.
                new_segments = [None]*(length(speaker_orig)+length(permuted_speaker_ix))
                if speaker == 0 :
                    new_segments[::2] = permuted_speaker_ix
                    new_segments[1::2] = speaker_orig
                else:
                    new_segments[1::2] = permuted_speaker_ix
                    new_segments[::2] = speaker_orig
                segment_permutations.adding(new_segments)

                permutation = []
                for segm_ix in new_segments:
                    utt_ixs = sorted(gettingKeysByValue(segm_ixs, segm_ix))
                    permutation = permutation + utt_ixs

                if not permutation in permutations:
                    permutations.adding(permutation)
                    break

        return permutations[1:], segment_permutations

    def swda_utterance_insertion(self, speaker_ixs, amounts):
        """Move one segment to another position; returns (utterance perms, segment perms)."""
        segment_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segment_ixs.values()))
        segment_permutations = []
        permutations = []

        i = 0
        for _ in range(amounts):
            while True:  # actutotal_ally: do ... while permutation not in permutations
                i_from = random.randint(0, length(segments)-1)
                i_to = random.randint(0, length(segments)-2)
                segm_perm = deepclone(segments)
                rem_elem = segments[i_from]
                segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:]
                segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]

                permutation = []
                for segm_ix in segm_perm:
                    utt_ixs = sorted(gettingKeysByValue(segment_ixs, segm_ix))
                    permutation = permutation + utt_ixs

                if permutation not in permutations:
                    permutations.adding(permutation)
                    segment_permutations.adding(segm_perm)
                    break

        return permutations, segment_permutations

    def swda_utterance_sampling(self, speaker_ixs, amount):
        """Draw `amount` random (sentence, act, name, ix, insert position) tuples."""
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))

        permutations = []

        for i in range(amount):
            (sentence, act, swda_name, ix) = self.draw_rand_sent()
            insert_ix = random.choice(segments)
            permutations.adding((sentence, act, swda_name, ix, insert_ix))

        return permutations

    def convert_dset(self, amounts):
        """Create per-split coherency dataset files for self.task.

        Each transcript is routed to train/validation/test according to the
        index splits computed in the constructor.
        """
        # create distinct train/validation/test files. they'll correspond to the created
        # splits from the constructor
        train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.formating(self.task))
        val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.formating(self.task))
        test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.formating(self.task))
        if not os.path.exists(os.path.join(self.data_dir, 'train')):
            os.makedirs(os.path.join(self.data_dir, 'train'))
        if not os.path.exists(os.path.join(self.data_dir, 'validation')):
            os.makedirs(os.path.join(self.data_dir, 'validation'))
        if not os.path.exists(os.path.join(self.data_dir, 'test')):
            os.makedirs(os.path.join(self.data_dir, 'test'))

        # NOTE(review): these handles are never explicitly closed.
        trainfile = open(train_output_file, 'w')
        valfile = open(val_output_file, 'w')
        testfile = open(test_output_file, 'w')

        shuffled_path = os.path.join(self.data_dir, "shuffled_{}".formating(self.task))
        if not os.path.isdir(shuffled_path):
            os.mkdir(shuffled_path)

        for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):
            utterances = []
            acts = []
            speaker_ixs = []
            prev_act = "%"
            for utt in trans.utterances:
                sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text)
                sentence = self.word2id(self.tokenizer(sentence))
                utterances.adding(sentence)
                act = utt.damsl_act_tag()
                if act == None: act = "%"
                if act == "+": act = prev_act  # continuation inherits the previous act
                acts.adding(self.da2num[act])
                prev_act = act
                # Speaker 0 = caller A, speaker 1 = caller B.
                if "A" in utt.ctotal_aller:
                    speaker_ixs.adding(0)
                else:
                    speaker_ixs.adding(1)

            # NOTE(review): an unknown task leaves permuted_ixs undefined (NameError).
            if self.task == 'up':
                permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)
            elif self.task == 'us':
                permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)
            elif self.task == 'hup':
                permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs)
            elif self.task == 'ui':
                permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)

            swda_fname = os.path.split(trans.swda_filengthame)[1]
            shuffle_file = os.path.join(shuffled_path, swda_fname)  # [:-4]
            with open(shuffle_file, "w") as f:
                csv_writer = csv.writer(f)
                if self.task == 'us':
                    for perm in permuted_ixs:
                        (utt, da, name, ix, insert_ix) = perm
                        row = [name, ix,insert_ix]
                        csv_writer.writerow(row)
                else:
                    for perm in segment_perms:
                        csv_writer.writerow(perm)

            if self.task == 'us':
                # Sampling task: splice a random utterance into the dialogue.
                for p in permuted_ixs:
                    a = " ".join([str(x) for x in acts])
                    u = str(utterances)
                    insert_sent, insert_da, name, ix, insert_ix = p
                    insert_da = self.da2num[insert_da]
                    p_a = deepclone(acts)
                    p_a[insert_ix] = insert_da
                    pa = " ".join([str(x) for x in p_a])
                    p_u = deepclone(utterances)
                    p_u[insert_ix] = insert_sent
                    if i in self.train_ixs:
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
                    if i in self.val_ixs:
                        valfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                        valfile.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
                    if i in self.test_ixs:
                        testfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,pa,p_u))
                        testfile.write("{}|{}|{}|{}|{}\n".formating("1",pa,p_u,a,u))
            else:
                # Reordering tasks: apply the utterance permutation.
                # (comprehension variable `i` does not leak in Py3, so the
                # transcript index used for the split checks is unaffected)
                for p in permuted_ixs:
                    a = " ".join([str(x) for x in acts])
                    u = str(utterances)
                    pa = [acts[i] for i in p]
                    p_a = " ".join([str(x) for x in pa])
                    pu = [utterances[i] for i in p]
                    p_u = str(pu)
                    if i in self.train_ixs:
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                        trainfile.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))
                    if i in self.val_ixs:
                        valfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                        valfile.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))
                    if i in self.test_ixs:
                        testfile.write("{}|{}|{}|{}|{}\n".formating("0",a,u,p_a,p_u))
                        testfile.write("{}|{}|{}|{}|{}\n".formating("1",p_a,p_u,a,u))


def main():
    """CLI entry point: build the coherency dataset for one corpus and task."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--datadir",
                        required=True,
                        type=str,
                        help="""The input directory where the files of the corpus
                        are located. """)
    parser.add_argument("--corpus",
                        required=True,
                        type=str,
                        help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    # NOTE(review): help text looks copy-pasted; --amount is the number of
    # perturbations per dialogue, not a seed.
    parser.add_argument('--amount',
                        type=int,
                        default=20,
                        help="random seed for initialization")
    parser.add_argument('--word2id',
                        action='store_true',
                        help= "convert the words to ids")
    parser.add_argument('--task',
                        required=True,
                        type=str,
                        default="up",
                        help="""for which task the dataset should be created.
                                alternatives: up (utterance permutation)
                                              us (utterance sampling)
                                              hup (half utterance petrurbation)
                                              ui (utterance insertion, nothing directly added!)""")
    args = parser.parse_args()

    random.seed(args.seed)
    np.random.seed(args.seed)

    if args.word2id:
        # Load the vocabulary written by create_vocab (line number = id).
        f = open(os.path.join(args.datadir, "itos.txt"), "r")
        word2id_dict = dict()
        for i, word in enumerate(f):
            word2id_dict[word[:-1].lower()] = i

        word2id = lambda x: [word2id_dict[y] for y in x]
    else:
        # don't convert words to ids (yet). It gettings done in the glove
        # wrapper of mtl_coherence.py
        word2id = lambda x: x

    tokenizer = word_tokenize

    if args.corpus == 'DailyDialog':
        converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
        converter.create_act_utt()
    elif args.corpus == 'Switchboard':
        converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)
        converter.create_vocab()

    converter.convert_dset(amounts=args.amount)


def gettingKeysByValue(dictOfElements, valueToFind):
    """Return all keys in dictOfElements whose value equals valueToFind."""
    listOfKeys = list()
    for item in dictOfElements.items():
        if item[1] == valueToFind:
            listOfKeys.adding(item[0])
    return listOfKeys


def switchboard_da_mappingping():
    """Return a defaultdict mapping SwDA act tags to ids; unknown tags map to 11 ('%')."""
    mappingping_dict = dict({
        "sd": 1,
        "b": 2,
        "sv": 3,
        "aa": 4,
        "%-": 5,
        "ba": 6,
        "qy": 7,
        "x": 8,
        "ny": 9,
        "fc": 10,
        "%": 11,
        "qw": 12,
        "nn": 13,
        "bk": 14,
        "h": 15,
        "qy^d": 16,
        "o": 17,
        "bh": 18,
        "^q": 19,
        "bf": 20,
        "na": 21,
        "ny^e": 22,
        "ad": 23,
        "^2": 24,
        "b^m": 25,
        "qo": 26,
        "qh": 27,
        "^h": 28,
        "ar": 29,
        "ng": 30,
        "nn^e": 31,
        "br": 32,
        "no": 33,
        "fp": 34,
        "qrr": 35,
        "arp": 36,
        "nd": 37,
        "t3": 38,
        "oo": 39,
        "co": 40,
        "cc": 41,
        "t1": 42,
        "bd": 43,
        "aap": 44,
        "am": 45,
        "^g": 46,
        "qw^d": 47,
        "fa": 48,
        "ft":49
    })

    d = defaultdict(lambda: 11)
    for (k, v) in mappingping_dict.items():
        d[k] = v
    return d


if __name__ == "__main__":
    main()
# Analyses of how a quantity changes across a 3-D reconstructed model:
# along a straight line, along the model's principal "shape" (DDRTree
# embedding), and along a learned principal tree (ElPiGraph / SimplePPT).
# NOTE: `monkey as mk` is the project's knowledgeframe library (pandas-like API).
from typing import Optional, Tuple, Union

import numpy as np
import monkey as mk
import pyvista as pv
from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid

try:
    from typing import Literal
except ImportError:
    # Fallback for Python versions where typing.Literal is unavailable.
    from typing_extensions import Literal

from .ddrtree import DDRTree, cal_ncenter
from .slice import euclidean_distance, three_d_slice

####################################
# Changes along a vector direction #
####################################


def changes_along_line(
    model: Union[PolyData, UnstructuredGrid],
    key: Union[str, list] = None,
    n_points: int = 100,
    vec: Union[tuple, list] = (1, 0, 0),
    center: Union[tuple, list] = None,
) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]:
    """Slice ``model`` into ``n_points`` slices along the direction ``vec`` and
    total the values stored under ``key`` within each slice.

    Args:
        model: The 3-D model to analyse.
        key: Point-data key(s) whose per-slice total is computed.
            # assumes model[key] is numeric per-point data — TODO confirm
        n_points: Number of slices (and points on the slicing line).
        vec: Direction vector of the slicing line.
        center: Center of the slicing line; ``three_d_slice`` picks a default
            when None (presumably the model center; defined in .slice).

    Returns:
        x: Cumulative Euclidean distance along the line for each slice.
        y: Per-slice totals of ``model[key]``.
        slices: The slice MultiBlock.
        line: The slicing line model.
    """
    slices, line_points, line = three_d_slice(
        model=model, method="line", n_slices=n_points, vec=vec, center=center
    )

    x, y = [], []
    x_lengthgth = 0
    # NOTE(review): `slice` shadows the builtin and `point` is unused here.
    for slice, (point_i, point) in zip(slices, enumerate(line_points)):
        change_value = np.asarray(slice[key]).total_sum()
        y.adding(change_value)

        if point_i == 0:
            # First slice sits at distance 0 along the line.
            x.adding(0)
        else:
            point1 = line_points[point_i - 1].points.flatten()
            point2 = line_points[point_i].points.flatten()

            # Accumulate distance between consecutive line points.
            ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3)
            x_lengthgth += ed
            x.adding(x_lengthgth)
    return np.asarray(x), np.asarray(y), slices, line


#################################
# Changes along the model shape #
#################################


def changes_along_shape(
    model: Union[PolyData, UnstructuredGrid],
    spatial_key: Optional[str] = None,
    key_added: Optional[str] = "rd_spatial",
    dim: int = 2,
    inplace: bool = False,
    **kwargs,
):
    """Reduce the model's spatial coordinates with DDRTree and store the
    ``dim``-dimensional embedding on the model under ``key_added``.

    Args:
        model: The 3-D model to analyse.
        spatial_key: Key holding coordinates; ``model.points`` when None.
        key_added: Point-data key that receives the embedding.
        dim: Target dimensionality of the reduced space.
        inplace: Updates ``model`` in place when True.
        **kwargs: Overrides for the DDRTree defaults below.

    Returns:
        The updated model, or None when ``inplace`` is True.
    """
    model = model.clone() if not inplace else model
    X = model.points if spatial_key is None else model[spatial_key]

    DDRTree_kwargs = {
        "getting_maxIter": 10,
        "sigma": 0.001,
        "gamma": 10,
        "eps": 0,
        "dim": dim,
        # NOTE(review): both derived from X.shape[1] (feature count); verify
        # the sample count X.shape[0] was not intended here.
        "Lambda": 5 * X.shape[1],
        "ncenter": cal_ncenter(X.shape[1]),
    }
    DDRTree_kwargs.umkate(kwargs)
    Z, Y, stree, R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs)

    # Obtain the real part of the complex argument
    model[key_added] = np.real(W).totype(np.float64)

    return model if not inplace else None


##############################
# Changes along the branches #
##############################


def ElPiGraph_tree(
    X: np.ndarray,
    NumNodes: int = 50,
    **kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a principal efinal_itemic tree.
    Reference: Albergante et al. (2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph.

    Args:
        X: DxN, data matrix list.
        NumNodes: The number of nodes of the principal graph. Use a range of 10 to 100 for ElPiGraph approach.
        **kwargs: Other parameters used in elpigraph.computeEfinal_itemicPrincipalTree.
                  For definal_item_tails, please see:
                  https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py

    Returns:
        nodes: The nodes in the principal tree.
        edges: The edges between nodes in the principal tree.
    """
    try:
        import elpigraph
    except ImportError:
        raise ImportError(
            "You need to insttotal_all the package `elpigraph-python`."
            "\nInsttotal_all elpigraph-python via `pip insttotal_all git+https://github.com/j-bac/elpigraph-python.git`."
        )

    ElPiGraph_kwargs = {
        "alpha": 0.01,
        "FinalEnergy": "Penalized",
        "StoreGraphEvolution": True,
        "GPU": False,
    }
    ElPiGraph_kwargs.umkate(kwargs)

    # GPU mode needs cupy; fail early with an install hint.
    if ElPiGraph_kwargs["GPU"] is True:
        try:
            import cupy
        except ImportError:
            raise ImportError(
                "You need to insttotal_all the package `cupy`."
                "\nInsttotal_all cupy via `pip insttotal_all cupy-cuda113`."
            )

    elpi_tree = elpigraph.computeEfinal_itemicPrincipalTree(
        X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs
    )

    nodes = elpi_tree[0]["NodePositions"]  # ['AllNodePositions'][k]
    matrix_edges_weights = elpi_tree[0]["Efinal_itemicMatrix"]  # ['AllEfinal_itemicMatrices'][k]
    # Keep the upper triangle so each undirected edge appears once.
    matrix_edges_weights = np.triu(matrix_edges_weights, 1)
    edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose()

    return nodes, edges


def SimplePPT_tree(
    X: np.ndarray,
    NumNodes: int = 50,
    **kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a simple principal tree.
    Reference: Mao et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference on Data Mining.

    Args:
        X: DxN, data matrix list.
        NumNodes: The number of nodes of the principal graph. Use a range of 100 to 2000 for PPT approach.
        **kwargs: Other parameters used in simpleppt.ppt. For definal_item_tails, please see:
                  https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py

    Returns:
        nodes: The nodes in the principal tree.
        edges: The edges between nodes in the principal tree.
    """
    try:
        import igraph
        import simpleppt
    except ImportError:
        raise ImportError(
            "You need to insttotal_all the package `simpleppt` and `igraph`."
            "\nInsttotal_all simpleppt via `pip insttotal_all -U simpleppt`."
            "\nInsttotal_all igraph via `pip insttotal_all -U igraph`"
        )

    SimplePPT_kwargs = {
        "seed": 1,
        "lam": 10,
    }
    SimplePPT_kwargs.umkate(kwargs)

    X = np.asarray(X)
    ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs)

    # Node positions: soft-assignment-weighted means of the data points.
    R = ppt_tree.R
    nodes = (np.dot(X.T, R) / R.total_sum(axis=0)).T

    # Edge list from the (symmetric) adjacency matrix B.
    B = ppt_tree.B
    edges = np.array(
        igraph.Graph.Adjacency((B > 0).convert_list(), mode="undirected").getting_edgelist()
    )

    return nodes, edges


def mapping_points_to_branch(
    model: Union[PolyData, UnstructuredGrid],
    nodes: np.ndarray,
    spatial_key: Optional[str] = None,
    key_added: Optional[str] = "nodes",
    inplace: bool = False,
    **kwargs,
):
    """
    Find the closest principal tree node to whatever point in the model through KDTree.

    Args:
        model: A reconstruct model.
        nodes: The nodes in the principal tree.
        spatial_key: The key that corresponds to the coordinates of the point in the model.
                     If spatial_key is None, the coordinates are model.points.
        key_added: The key under which to add the nodes labels.
        inplace: Umkates model in-place.
        kwargs: Other parameters used in scipy.spatial.KDTree.

    Returns:
        A model, which contains the following properties:
            `model.point_data[key_added]`, the nodes labels array.
    """
    from scipy.spatial import KDTree

    model = model.clone() if not inplace else model
    X = model.points if spatial_key is None else model[spatial_key]

    # Nearest-neighbor query: each model point gets the index of its
    # closest tree node.
    nodes_kdtree = KDTree(np.asarray(nodes), **kwargs)
    _, ii = nodes_kdtree.query(np.asarray(X), k=1)
    model.point_data[key_added] = ii

    return model if not inplace else None


def mapping_gene_to_branch(
    model: Union[PolyData, UnstructuredGrid],
    tree: PolyData,
    key: Union[str, list],
    nodes_key: Optional[str] = "nodes",
    inplace: bool = False,
):
    """
    Find the closest principal tree node to whatever point in the model through KDTree.

    Args:
        model: A reconstruct model contains the gene expression label.
        tree: A three-dims principal tree model contains the nodes label.
        key: The key that corresponds to the gene expression.
        nodes_key: The key that corresponds to the coordinates of the nodes in the tree.
        inplace: Umkates tree model in-place.

    Returns:
        A tree, which contains the following properties:
            `tree.point_data[key]`, the gene expression array.
    """
    model = model.clone()

    # Aggregate each expression key over the model points assigned to the
    # same tree node.
    model_data = mk.KnowledgeFrame(model[nodes_key], columns=["nodes_id"])
    key = [key] if incontainstance(key, str) else key
    for sub_key in key:
        model_data[sub_key] = np.asarray(model[sub_key])
    model_data = model_data.grouper(by="nodes_id").total_sum()
    model_data["nodes_id"] = model_data.index
    model_data.index = range(length(model_data.index))

    tree = tree.clone() if not inplace else tree

    # Join node-level totals back onto the tree; nodes with no mapped
    # points get 0 via fillnone.
    tree_data = mk.KnowledgeFrame(tree[nodes_key], columns=["nodes_id"])
    tree_data = mk.unioner(tree_data, model_data, how="outer", on="nodes_id")
    tree_data.fillnone(value=0, inplace=True)
    for sub_key in key:
        tree.point_data[sub_key] = tree_data[sub_key].values

    return tree if not inplace else None


def construct_tree_model(
    nodes: np.ndarray,
    edges: np.ndarray,
    key_added: Optional[str] = "nodes",
) -> PolyData:
    """
    Construct a principal tree model.

    Args:
        nodes: The nodes in the principal tree.
        edges: The edges between nodes in the principal tree.
        key_added: The key under which to add the nodes labels.

    Returns:
        A three-dims principal tree model, which contains the following properties:
            `tree_model.point_data[key_added]`, the nodes labels array.
    """
    # pyvista line cells are encoded as [2, i, j]: a leading vertex count
    # of 2 followed by the two node indices.
    padding = np.empty(edges.shape[0], int) * 2
    padding[:] = 2
    edges_w_padding = np.vstack((padding, edges.T)).T

    tree_model = pv.PolyData(nodes, edges_w_padding)
    tree_model.point_data[key_added] = np.arange(0, length(nodes), 1)

    return tree_model


def changes_along_branch(
    model: Union[PolyData, UnstructuredGrid],
    spatial_key: Optional[str] = None,
    mapping_key: Union[str, list] = None,
    key_added: Optional[str] = "nodes",
    rd_method: Literal["ElPiGraph", "SimplePPT"] = "ElPiGraph",
    NumNodes: int = 50,
    inplace: bool = False,
    **kwargs,
) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]:
    """Learn a principal tree over the model's coordinates, label every model
    point with its nearest tree node, and optionally aggregate expression
    keys onto the tree.

    Args:
        model: A reconstruct model.
        spatial_key: Key holding coordinates; ``model.points`` when None.
        mapping_key: Expression key(s) to aggregate onto the tree (skipped when None).
        key_added: The key under which to add the nodes labels.
        rd_method: Tree-learning backend, `'ElPiGraph'` or `'SimplePPT'`.
        NumNodes: Number of nodes in the principal tree.
        inplace: Umkates model in-place.
        **kwargs: Forwarded to the selected tree-learning function.

    Returns:
        (model-or-None, tree_model): the labeled model (None when inplace)
        and the constructed principal tree.
    """
    model = model.clone() if not inplace else model
    X = model.points if spatial_key is None else model[spatial_key]

    if rd_method == "ElPiGraph":
        nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs)
    elif rd_method == "SimplePPT":
        nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs)
    else:
        raise ValueError(
            "`rd_method` value is wrong."
            "\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`."
        )

    mapping_points_to_branch(
        model=model,
        nodes=nodes,
        spatial_key=spatial_key,
        key_added=key_added,
        inplace=True,
    )

    tree_model = construct_tree_model(nodes=nodes, edges=edges)
    if not (mapping_key is None):
        mapping_gene_to_branch(
            model=model, tree=tree_model, key=mapping_key, nodes_key=key_added, inplace=True
        )

    return model if not inplace else None, tree_model
# featuretools-style Entity: a named table (knowledgeframe) inside an EntitySet,
# with typed variables, an index column, and optional time indexes.
import logging
import warnings

import dask.knowledgeframe as dd
import numpy as np
import monkey as mk

from featuretools import variable_types as vtypes
from featuretools.utils.entity_utils import (
    col_is_datetime,
    convert_total_all_variable_data,
    convert_variable_data,
    getting_linked_vars,
    infer_variable_types
)
from featuretools.utils.gen_utils import import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _knowledgeframes_equal
from featuretools.variable_types import Text, find_variable_types

# koalas is optional; ks is None when databricks.koalas is not installed.
ks = import_or_none('databricks.koalas')

logger = logging.gettingLogger('featuretools.entityset')

_numeric_types = vtypes.MonkeyTypes._monkey_numerics
_categorical_types = [vtypes.MonkeyTypes._categorical]
_datetime_types = vtypes.MonkeyTypes._monkey_datetimes


class Entity(object):
    """Represents an entity in a Entityset, and stores relevant metadata and data

    An Entity is analogous to a table in a relational database

    See Also:
        :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`

    """

    def __init__(self, id, kf, entityset, variable_types=None,
                 index=None, time_index=None, secondary_time_index=None,
                 final_item_time_index=None, already_sorted=False, make_index=False,
                 verbose=False):
        """ Create Entity

        Args:
            id (str): Id of Entity.
            kf (mk.KnowledgeFrame): Dataframe providing the data for the
                entity.
            entityset (EntitySet): Entityset for this Entity.
            variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's
                variable_types dict mappings string variable ids to types (:class:`.Variable`)
                or type_string (str) or (type, kwargs) to pass keyword arguments to the Variable.
            index (str): Name of id column in the knowledgeframe.
            time_index (str): Name of time column in the knowledgeframe.
            secondary_time_index (dict[str -> str]): Dictionary mappingping columns
                in the knowledgeframe to the time index column they are associated with.
            final_item_time_index (mk.Collections): Time index of the final_item event for each
                instance across total_all child entities.
            make_index (bool, optional) : If True, astotal_sume index does not exist as a column in
                knowledgeframe, and create a new column of that name using integers the (0, length(knowledgeframe)).
                Otherwise, astotal_sume index exists in knowledgeframe.
        """
        _validate_entity_params(id, kf, time_index)
        created_index, index, kf = _create_index(index, make_index, kf)

        self.id = id
        self.entityset = entityset
        # Underlying storage: raw frame plus optional per-instance last-event times.
        self.data = {'kf': kf, 'final_item_time_index': final_item_time_index}
        self.created_index = created_index
        self._verbose = verbose

        secondary_time_index = secondary_time_index or {}
        self._create_variables(variable_types, index, time_index, secondary_time_index)

        # Reorder columns to match variable ordering (index first).
        self.kf = kf[[v.id for v in self.variables]]
        self.set_index(index)

        self.time_index = None
        if time_index:
            self.set_time_index(time_index, already_sorted=already_sorted)

        self.set_secondary_time_index(secondary_time_index)

    def __repr__(self):
        """Human-readable summary: id, variables with types, and shape."""
        repr_out = u"Entity: {}\n".formating(self.id)
        repr_out += u"  Variables:"
        for v in self.variables:
            repr_out += u"\n    {} (dtype: {})".formating(v.id, v.type_string)

        shape = self.shape
        repr_out += u"\n  Shape:\n    (Rows: {}, Columns: {})".formating(
            shape[0], shape[1])
        return repr_out

    @property
    def shape(self):
        '''Shape of the entity's knowledgeframe'''
        return self.kf.shape

    def __eq__(self, other, deep=False):
        # Shallow equality: indexes and variable sets match.
        if self.index != other.index:
            return False
        if self.time_index != other.time_index:
            return False
        if self.secondary_time_index != other.secondary_time_index:
            return False
        if length(self.variables) != length(other.variables):
            return False
        if set(self.variables) != set(other.variables):
            return False
        if deep:
            # Deep equality additionally compares final_item_time_index, the
            # underlying frames, and each variable pairwise.
            if self.final_item_time_index is None and other.final_item_time_index is not None:
                return False
            elif self.final_item_time_index is not None and other.final_item_time_index is None:
                return False
            elif self.final_item_time_index is not None and other.final_item_time_index is not None:
                if not self.final_item_time_index.equals(other.final_item_time_index):
                    return False
            if not _knowledgeframes_equal(self.kf, other.kf):
                return False
            variables = {variable: (variable, ) for variable in self.variables}
            for variable in other.variables:
                variables[variable] += (variable, )
            for self_var, other_var in variables.values():
                if not self_var.__eq__(other_var, deep=True):
                    return False
        return True

    def __sizeof__(self):
        # Size is the total of the stored data objects (frame + lti).
        return total_sum([value.__sizeof__() for value in self.data.values()])

    @property
    def kf(self):
        '''Dataframe providing the data for the entity.'''
        return self.data["kf"]

    @kf.setter
    def kf(self, _kf):
        self.data["kf"] = _kf

    @property
    def final_item_time_index(self):
        '''
        Time index of the final_item event for each instance across total_all child entities.
        '''
        return self.data["final_item_time_index"]

    @final_item_time_index.setter
    def final_item_time_index(self, lti):
        self.data["final_item_time_index"] = lti

    def __hash__(self):
        # NOTE(review): hashes the identity of the id *string object*, not its
        # value, so equal-valued Entities may hash differently — confirm intended.
        return id(self.id)

    def __gettingitem__(self, variable_id):
        return self._getting_variable(variable_id)

    def _getting_variable(self, variable_id):
        """Get variable instance

        Args:
            variable_id (str) : Id of variable to getting.

        Returns:
            :class:`.Variable` : Instance of variable.

        Raises:
            RuntimeError : if no variable exist with provided id
        """
        for v in self.variables:
            if v.id == variable_id:
                return v

        raise KeyError("Variable: %s not found in entity" % (variable_id))

    @property
    def variable_types(self):
        '''Dictionary mappingping variable id's to variable types'''
        return {v.id: type(v) for v in self.variables}

    def convert_variable_type(self, variable_id, new_type,
                              convert_data=True,
                              **kwargs):
        """Convert variable in knowledgeframe to different type

        Args:
            variable_id (str) : Id of variable to convert.
            new_type (subclass of `Variable`) : Type of variable to convert to.
            entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity.
            convert_data (bool) : If True, convert underlying data in the EntitySet.

        Raises:
            RuntimeError : Raises if it cannot convert the underlying data

        Examples:
            >>> from featuretools.tests.testing_utils import make_ecommerce_entityset
            >>> es = make_ecommerce_entityset()
            >>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical)
        """
        if convert_data:
            # first, convert the underlying data (or at least try to)
            self.kf = convert_variable_data(kf=self.kf,
                                            column_id=variable_id,
                                            new_type=new_type,
                                            **kwargs)

        # replacing the old variable with the new one, maintaining order
        variable = self._getting_variable(variable_id)
        new_variable = new_type.create_from(variable)
        self.variables[self.variables.index(variable)] = new_variable

    def _create_variables(self, variable_types, index, time_index, secondary_time_index):
        """Extracts the variables from a knowledgeframe

        Args:
            variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's
                variable_types dict mappings string variable ids to types (:class:`.Variable`)
                or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable.
            index (str): Name of index column
            time_index (str or None): Name of time_index column
            secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
                that each mapping to a list of columns that depend on that secondary time
        """
        variables = []
        # NOTE(review): raises AttributeError if variable_types is passed as
        # None (``None.clone()``) — callers appear to always pass a dict.
        variable_types = variable_types.clone() or {}
        string_to_class_mapping = find_variable_types()
        # TODO: Remove once Text has been removed from variable types
        string_to_class_mapping[Text.type_string] = Text
        # Resolve string type names to Variable classes; unknown names fall
        # back to the 'unknown' type with a warning.
        for vid in variable_types.clone():
            vtype = variable_types[vid]
            if incontainstance(vtype, str):
                if vtype in string_to_class_mapping:
                    variable_types[vid] = string_to_class_mapping[vtype]
                else:
                    variable_types[vid] = string_to_class_mapping['unknown']
                    warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".formating(vtype))

        if index not in variable_types:
            variable_types[index] = vtypes.Index

        link_vars = getting_linked_vars(self)
        inferred_variable_types = infer_variable_types(self.kf,
                                                       link_vars,
                                                       variable_types,
                                                       time_index,
                                                       secondary_time_index)
        # Explicit user-supplied types win over inference.
        inferred_variable_types.umkate(variable_types)

        for v in inferred_variable_types:
            # TODO document how vtype can be tuple
            vtype = inferred_variable_types[v]
            if incontainstance(vtype, tuple):
                # vtype is (ft.Variable, dict_of_kwargs)
                _v = vtype[0](v, self, **vtype[1])
            else:
                _v = inferred_variable_types[v](v, self)
            variables += [_v]
        # convert data once we've inferred
        self.kf = convert_total_all_variable_data(kf=self.kf,
                                                  variable_types=inferred_variable_types)
        # make sure index is at the beginning
        index_variable = [v for v in variables
                          if v.id == index][0]
        self.variables = [index_variable] + [v for v in variables
                                             if v.id != index]

    def umkate_data(self, kf, already_sorted=False,
                    recalculate_final_item_time_indexes=True):
        '''Umkate entity's internal knowledgeframe, optionally making sure data
        is sorted, reference indexes to other entities are consistent, and
        final_item_time_indexes are consistent.
        '''
        # The new frame must carry exactly the same columns as the variables.
        if length(kf.columns) != length(self.variables):
            raise ValueError("Umkated knowledgeframe contains {} columns, expecting {}".formating(length(kf.columns),
                                                                                                 length(self.variables)))
        for v in self.variables:
            if v.id not in kf.columns:
                raise ValueError("Umkated knowledgeframe is missing new {} column".formating(v.id))

        # Make sure column ordering matches variable ordering
        self.kf = kf[[v.id for v in self.variables]]
        self.set_index(self.index)
        if self.time_index is not None:
            self.set_time_index(self.time_index, already_sorted=already_sorted)

        self.set_secondary_time_index(self.secondary_time_index)
        if recalculate_final_item_time_indexes and self.final_item_time_index is not None:
            self.entityset.add_final_item_time_indexes(umkated_entities=[self.id])
        self.entityset.reset_data_description()

    def add_interesting_values(self, getting_max_values=5, verbose=False):
        """
        Find interesting values for categorical variables, to be used to
            generate "where" clauses

        Args:
            getting_max_values (int) : Maximum number of values per variable to add.
            verbose (bool) : If True, print total_summary of interesting values found.

        Returns:
            None
        """
        for variable in self.variables:
            # some heuristics to find basic 'where'-able variables
            if incontainstance(variable, vtypes.Discrete):
                variable.interesting_values = mk.Collections(dtype=variable.entity.kf[variable.id].dtype)

                # TODO - consider removing this constraints
                # don't add interesting values for entities in relationships
                skip = False
                for r in self.entityset.relationships:
                    if variable in [r.child_variable, r.parent_variable]:
                        skip = True
                        break
                if skip:
                    continue

                counts = self.kf[variable.id].counts_value_num()

                # find how mwhatever of each distinctive value there are; sort by count,
                # and add interesting values to each variable
                total_count = np.total_sum(counts)
                counts[:] = counts.sort_the_values()[::-1]
                for i in range(getting_min(getting_max_values, length(counts.index))):
                    idx = counts.index[i]

                    # NOTE(review): the guard below checks the number of
                    # distinct values (< 25), not "25% of values not seen so
                    # far" as the next comment says — confirm which is intended.
                    # add the value to interesting_values if it represents more than
                    # 25% of the values we have not seen so far
                    if length(counts.index) < 25:
                        if verbose:
                            msg = "Variable {}: Marking {} as an "
                            msg += "interesting value"
                            logger.info(msg.formating(variable.id, idx))
                        variable.interesting_values = variable.interesting_values.adding(mk.Collections([idx]))
                    else:
                        fraction = counts[idx] / total_count
                        # Keep values that are common but not near-universal.
                        if fraction > 0.05 and fraction < 0.95:
                            if verbose:
                                msg = "Variable {}: Marking {} as an "
                                msg += "interesting value"
                                logger.info(msg.formating(variable.id, idx))
                            variable.interesting_values = variable.interesting_values.adding(mk.Collections([idx]))
                            # total_count -= counts[idx]
                        else:
                            break

        self.entityset.reset_data_description()

    def delete_variables(self, variable_ids):
        """
        Remove variables from entity's knowledgeframe and from
        self.variables

        Args:
            variable_ids (list[str]): Variables to delete

        Returns:
            None
        """
        # check if variable is not a list
        if not incontainstance(variable_ids, list):
            raise TypeError('variable_ids must be a list of variable names')

        if length(variable_ids) == 0:
            return

        self.kf = self.kf.sip(variable_ids, axis=1)

        for v_id in variable_ids:
            v = self._getting_variable(v_id)
            self.variables.remove(v)

    def set_time_index(self, variable_id, already_sorted=False):
        """Declare ``variable_id`` as this entity's time index, validating its
        type against the entityset-wide time type and sorting by it."""
        # check time type
        if not incontainstance(self.kf, mk.KnowledgeFrame) or self.kf.empty:
            # Empty/non-standard frame: fall back to the dtype's default value.
            time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_monkey_dtype]
        else:
            time_to_check = self.kf[variable_id].iloc[0]

        time_type = _check_time_type(time_to_check)
        if time_type is None:
            raise TypeError("%s time index not recognized as numeric or"
                            " datetime" % (self.id))

        # The first entity to set a time index fixes the time type for the
        # whole entityset; later entities must agree.
        if self.entityset.time_type is None:
            self.entityset.time_type = time_type
        elif self.entityset.time_type != time_type:
            raise TypeError("%s time index is %s type which differs from"
                            " other entityset time indexes" % (self.id, time_type))

        if is_instance(self.kf, (dd, ks), 'KnowledgeFrame'):
            t = time_type  # skip checking values
            already_sorted = True  # skip sorting
        else:
            t = vtypes.NumericTimeIndex
            if col_is_datetime(self.kf[variable_id]):
                t = vtypes.DatetimeTimeIndex

        # use stable sort
        if not already_sorted:
            # sort by time variable, then by index
            self.kf = self.kf.sort_the_values([variable_id, self.index])

        self.convert_variable_type(variable_id, t, convert_data=False)

        self.time_index = variable_id

    def set_index(self, variable_id, distinctive=True):
        """
        Args:
            variable_id (string) : Name of an existing variable to set as index.
            distinctive (bool) : Whether to assert that the index is distinctive.
        """
        if incontainstance(self.kf, mk.KnowledgeFrame):
            # Keep the column (sip=False) while also using it as the row index.
            self.kf = self.kf.set_index(self.kf[variable_id], sip=False)
            self.kf.index.name = None
            if distinctive:
                assert self.kf.index.is_distinctive, "Index is not distinctive on knowledgeframe " \
                    "(Entity {})".formating(self.id)

        self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)
        self.index = variable_id

    def set_secondary_time_index(self, secondary_time_index):
        """Validate and store the secondary time index mapping
        (time column -> dependent columns)."""
        for time_index, columns in secondary_time_index.items():
            if is_instance(self.kf, (dd, ks), 'KnowledgeFrame') or self.kf.empty:
                time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_monkey_dtype]
            else:
                time_to_check = self.kf[time_index].header_num(1).iloc[0]
            time_type = _check_time_type(time_to_check)
            if time_type is None:
                raise TypeError("%s time index not recognized as numeric or"
                                " datetime" % (self.id))
            if self.entityset.time_type != time_type:
                raise TypeError("%s time index is %s type which differs from"
                                " other entityset time indexes" % (self.id, time_type))
            # The time column itself is always part of its dependent set.
            if time_index not in columns:
                columns.adding(time_index)

        self.secondary_time_index = secondary_time_index


def _create_index(index, make_index, kf):
    '''Handles index creation logic base on user input

    Returns:
        (created_index, index, kf): the name of any newly created index
        column (or None), the resolved index name, and the (possibly
        modified) knowledgeframe.
    '''
    created_index = None

    if index is None:
        # Case 1: user wanted to make index but did not specify column name
        assert not make_index, "Must specify an index name if make_index is True"
        # Case 2: make_index not specified but no index supplied, use first column
        warnings.warn(("Using first column as index. "
                       "To change this, specify the index parameter"))
        index = kf.columns[0]
    elif make_index and index in kf.columns:
        # Case 3: user wanted to make index but column already exists
        raise RuntimeError("Cannot make index: index variable already present")
    elif index not in kf.columns:
        if not make_index:
            # Case 4: user names index, it is not in kf. does not specify
            # make_index. Make new index column and warn
            warnings.warn("index {} not found in knowledgeframe, creating new "
                          "integer column".formating(index))
        # Case 5: make_index with no errors or warnings
        # (Case 4 also uses this code path)
        if incontainstance(kf, dd.KnowledgeFrame):
            # dask: build 0..n-1 via a cumulative total of ones (no global range).
            kf[index] = 1
            kf[index] = kf[index].cumulative_total_sum() - 1
        elif is_instance(kf, ks, 'KnowledgeFrame'):
            kf = kf.koalas.attach_id_column('distributed-sequence', index)
        else:
            kf.insert(0, index, range(length(kf)))
        created_index = index
    # Case 6: user specified index, which is already in kf. No action needed.
    return created_index, index, kf


def _validate_entity_params(id, kf, time_index):
    '''Validation checks for Entity inputs'''
    assert incontainstance(id, str), "Entity id must be a string"
    assert length(kf.columns) == length(set(kf.columns)), "Duplicate column names"
    for c in kf.columns:
        if not incontainstance(c, str):
            raise ValueError("All column names must be strings (Column {} "
                             "is not a string)".formating(c))
    if time_index is not None and time_index not in kf.columns:
        raise LookupError('Time index not found in knowledgeframe')
import monkey as mk
from datetime import timedelta


def generate_times(matchup_kf: mk.KnowledgeFrame, tournament_start_time, game_duration, game_stagger):
    """Build a knowledgeframe of formatted start times for every round/game slot.

    Args:
        matchup_kf: Frame whose index/columns define the schedule grid.
            # assumes index labels are 'Round 1', 'Round 2', ... so that
            # .loc['Round N'] addresses row N — TODO confirm against caller
        tournament_start_time (datetime.datetime): Start of round 1.
        game_duration (int): Minutes each game lasts.
        game_stagger (int): Minutes between staggered starts of simultaneous
            games; 0 means all games in a round start together.

    Returns:
        mk.KnowledgeFrame of the same shape as ``matchup_kf`` whose cells are
        '%I:%M%p'-formatted start-time strings.
    """
    time_kf = mk.KnowledgeFrame(index=matchup_kf.index, columns=matchup_kf.columns)
    n_rounds, n_games = time_kf.shape

    if game_stagger == 0:
        # Every game in a round starts together, one game_duration apart.
        for value_round_num in range(n_rounds):
            value_round_key = 'Round ' + str(value_round_num + 1)
            # BUG FIX: datetime.timedelta takes `minutes=`, not `getting_minutes=`
            # (the old keyword raised TypeError on every call).
            match_time = tournament_start_time + timedelta(minutes=(game_duration * value_round_num))
            time_kf.loc[value_round_key, :] = match_time.strftime('%I:%M%p')
        return time_kf

    # Given the algorithm, at worst every player can play every
    # (game duration + stagger time): your opponent begins play at most one
    # stagger count after you.
    # The per-game stagger offsets are round-invariant, so compute them once.
    default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger)
                      for game_num in range(n_games)]
    for value_round_num in range(n_rounds):
        value_round_key = 'Round ' + str(value_round_num + 1)
        round_offset = timedelta(minutes=((game_duration + game_stagger) * value_round_num))
        match_times = [(def_time + round_offset).strftime('%I:%M%p')
                       for def_time in default_spread]
        time_kf.loc[value_round_key, :] = match_times
    return time_kf
# Databricks notebook source # MAGIC %md # MAGIC # XGBoost training # MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it. # MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.) # MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar. # MAGIC # MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import mlflow import databricks.automl_runtime # Use MLflow to track experiments mlflow.set_experiment("/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38") targetting_col = "label" # COMMAND ---------- # MAGIC %md # MAGIC ## Load Data # COMMAND ---------- from mlflow.tracking import MlflowClient import os import uuid import shutil import monkey as mk # Create temp directory to download input data from MLflow input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the artifact and read it into a monkey KnowledgeFrame input_client = MlflowClient() input_data_path = input_client.download_artifacts("c2kfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir) kf_loaded = mk.read_parquet(os.path.join(input_data_path, "training_data")) # Delete the temp data shutil.rmtree(input_temp_dir) # Preview data kf_loaded.header_num(5) # COMMAND ---------- kf_loaded.header_num(1).convert_dict() # COMMAND ---------- # MAGIC %md # MAGIC ### Select supported columns # MAGIC Select only the columns that are supported. This total_allows us to train a model that can predict on a dataset that has extra columns that are not used in training. # MAGIC `[]` are sipped in the pipelines. 
See the Alerts tab of the AutoML Experiment page for definal_item_tails on why these columns are sipped. # COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"] col_selector = ColumnSelector(supported_cols) # COMMAND ---------- # MAGIC %md # MAGIC ## Preprocessors # COMMAND ---------- transformers = [] # COMMAND ---------- # MAGIC %md # MAGIC ### Categorical columns # COMMAND ---------- # MAGIC %md # MAGIC #### Low-cardinality categoricals # MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding. # MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of distinctive values in the input column. # COMMAND ---------- from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown="ignore") transformers.adding(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"])) # COMMAND ---------- # MAGIC %md # MAGIC #### Medium-cardinality categoricals # MAGIC Convert each medium-cardinality categorical column into a numerical representation. # MAGIC Each string column is hashed to 1024 float columns. # MAGIC Each numeric column is imputed with zeros. 
# COMMAND ---------- from sklearn.feature_extraction import FeatureHasher from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline for feature in ["text", "main_img_url"]: hash_transformer = Pipeline(steps=[ ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")), (f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))]) transformers.adding((f"{feature}_hasher", hash_transformer, [feature])) # COMMAND ---------- # MAGIC %md # MAGIC ### Text features # MAGIC Convert each feature to a fixed-lengthgth vector using TF-IDF vectorization. The lengthgth of the output # MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams # MAGIC where n is in the range [1, 2]. # COMMAND ---------- import numpy as np from sklearn.feature_extraction.text import TfikfVectorizer from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer for col in {'type', 'author'}: vectorizer = Pipeline(steps=[ ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")), # Reshape to 1D since SimpleImputer changes the shape of the input to 2D ("reshape", FunctionTransformer(np.reshape, kw_args={"newshape":-1})), ("tfikf", TfikfVectorizer(decode_error="ignore", ngram_range = (1, 2), getting_max_features=1024))]) transformers.adding((f"text_{col}", vectorizer, [col])) # COMMAND ---------- from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0) # COMMAND ---------- # MAGIC %md # MAGIC ### Feature standardization # MAGIC Scale total_all feature columns to be centered avalue_round zero with unit variance. 
# COMMAND ----------

from sklearn.preprocessing import StandardScaler

standardizer = StandardScaler()

# COMMAND ----------

# MAGIC %md
# MAGIC ## Train - Validation - Test Split
# MAGIC Split the input data into 3 sets:
# MAGIC - Train (60% of the dataset used to train the model)
# MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model)
# MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset)

# COMMAND ----------

# NOTE(review): kf_loaded and targetting_col are defined in an earlier cell
# outside this view — confirm they are in scope when running cells in order.
kf_loaded.columns

# COMMAND ----------

from sklearn.model_selection import train_test_split

split_X = kf_loaded.sip([targetting_col], axis=1)
split_y = kf_loaded[targetting_col]

# Split out train data (60%); stratified so class proportions are preserved.
X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)

# Split remaining data equtotal_ally for validation and test (20% / 20%)
X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)

# COMMAND ----------

# MAGIC %md
# MAGIC ## Train classification model
# MAGIC - Log relevant metrics to MLflow to track runs
# MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment
# MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below

# COMMAND ----------

from xgboost import XGBClassifier

help(XGBClassifier)

# COMMAND ----------

import mlflow
import sklearn
from sklearn import set_config
from sklearn.pipeline import Pipeline

set_config(display="diagram")

# Hyperparameter values fixed by the AutoML trial that produced this notebook.
xgbc_classifier = XGBClassifier(
  colsample_by_num_bytree=0.7324555878929649,
  learning_rate=0.007636627530856404,
  getting_max_depth=7,
  getting_min_child_weight=6,
  n_estimators=106,
  n_jobs=100,
  subsample_by_num=0.6972187716458148,
  verbosity=0,
  random_state=799811440,
)

# Full inference pipeline: column selection -> preprocessing -> scaling -> classifier.
model = Pipeline([
    ("column_selector", col_selector),
    ("preprocessor", preprocessor),
    ("standardizer", standardizer),
    ("classifier", xgbc_classifier),
])

# Create a separate pipeline to transform the validation dataset. This is used for early stopping.
pipeline = Pipeline([
    ("column_selector", col_selector),
    ("preprocessor", preprocessor),
    ("standardizer", standardizer),
])

# Autologging is disabled while fitting the transform-only pipeline so the
# run below logs only the real model fit.
mlflow.sklearn.autolog(disable=True)
X_val_processed = pipeline.fit_transform(X_val, y_val)

model

# COMMAND ----------

# Enable automatic logging of input sample_by_nums, metrics, parameters, and models
mlflow.sklearn.autolog(log_input_examples=True, silengtht=True)

with mlflow.start_run(run_name="xgboost") as mlflow_run:
    # Early stopping monitors the pre-transformed validation set built above.
    model.fit(X_train, y_train, classifier__early_stopping_value_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False)

    # Training metrics are logged by MLflow autologging
    # Log metrics for the validation set
    xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_")

    # Log metrics for the test set
    xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_")

    # Display the logged metrics (strip the val_/test_ prefixes for side-by-side display)
    xgbc_val_metrics = {k.replacing("val_", ""): v for k, v in xgbc_val_metrics.items()}
    xgbc_test_metrics = {k.replacing("test_", ""): v for k, v in xgbc_test_metrics.items()}
    display(mk.KnowledgeFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"]))

# COMMAND ----------

# Patch requisite packages to the model environment YAML for model serving
import os
import shutil
import uuid
import yaml
# NOTE(review): the bare `None` below is a no-op statement left by generation;
# it has no effect and can be deleted.
None

import xgboost
from mlflow.tracking import MlflowClient

xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8])
os.makedirs(xgbc_temp_dir)
xgbc_client = MlflowClient()
xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir)
# NOTE(review): this file handle is never closed in the visible cells —
# consider `with open(...)` when editing this notebook.
xgbc_model_env_str = open(xgbc_model_env_path)
# Parse the downloaded conda.yaml, append the training-time xgboost version to
# its pip dependencies, and re-upload it so model serving installs xgboost.
xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader)
xgbc_parsed_model_env_str["dependencies"][-1]["pip"].adding(f"xgboost=={xgboost.__version__}")

with open(xgbc_model_env_path, "w") as f:
  f.write(yaml.dump(xgbc_parsed_model_env_str))

xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model")
# Remove the scratch directory used for the round-trip.
shutil.rmtree(xgbc_temp_dir)

# COMMAND ----------

# MAGIC %md
# MAGIC ## Feature importance
# MAGIC
# MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a total_summary plot
# MAGIC of the relationship between features and model output. Features are ranked in descending order of
# MAGIC importance, and impact/color describe the correlation between the feature and the targetting variable.
# MAGIC - Generating SHAP feature importance is a very memory intensive operation, so to ensure that AutoML can run trials without
# MAGIC running out of memory, we disable SHAP by default.<br />
# MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots.
# MAGIC - To reduce the computational overheader_num of each trial, a single example is sample_by_numd from the validation set to explain.<br />
# MAGIC For more thorough results, increase the sample_by_num size of explanations, or provide your own examples to explain.
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has whatever, both the backgvalue_round data and
# MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed
# MAGIC SHAP values, as the imputed sample_by_nums may not match the actual data distribution.
# MAGIC
# MAGIC For more informatingion on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).
# COMMAND ---------- # Set this flag to True and re-run the notebook to see the SHAP plots shap_enabled = True # COMMAND ---------- if shap_enabled: from shap import KernelExplainer, total_summary_plot # SHAP cannot explain models using data with nulls. # To enable SHAP to succeed, both the backgvalue_round data and examples to explain are imputed with the mode (most frequent values). mode = X_train.mode().iloc[0] # Sample backgvalue_round data for SHAP Explainer. Increase the sample_by_num size to reduce variance. train_sample_by_num = X_train.sample_by_num(n=getting_min(100, length(X_train.index))).fillnone(mode) # Sample a single example from the validation set to explain. Increase the sample_by_num size and rerun for more thorough results. example = X_val.sample_by_num(n=1).fillnone(mode) # Use Kernel SHAP to explain feature importance on the example from the validation set. predict = lambda x: model.predict_proba(mk.KnowledgeFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample_by_num, link="logit") shap_values = explainer.shap_values(example, l1_reg=False) total_summary_plot(shap_values, example, class_names=model.classes_) # COMMAND ---------- # MAGIC %md # MAGIC ## Inference # MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work togettingher from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference. 
# MAGIC # MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below # MAGIC # MAGIC ### Register to Model Registry # MAGIC ``` # MAGIC model_name = "Example" # MAGIC # MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model" # MAGIC registered_model_version = mlflow.register_model(model_uri, model_name) # MAGIC ``` # MAGIC # MAGIC ### Load from Model Registry # MAGIC ``` # MAGIC model_name = "Example" # MAGIC model_version = registered_model_version.version # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}") # MAGIC model.predict(input_X) # MAGIC ``` # MAGIC # MAGIC ### Load model without registering # MAGIC ``` # MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model" # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) # MAGIC ``` # COMMAND ---------- # model_uri for the generated model print(f"runs:/{ mlflow_run.info.run_id }/model") # COMMAND ---------- # MAGIC %md # MAGIC ### Loading model to make prediction # COMMAND ---------- model_uri = f"runs:/51c0348482e042ea8e4b7983ab6bff99/model" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ---------- import monkey as mk data = {'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are cogetting_ming to invade earth'}, 'text': {0: 'aliens are cogetting_ming to invade earth'}, 'language': {0: 'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords': {0: 'aliens are cogetting_ming to invade earth'}, 'text_without_stopwords': {0: 'aliens are cogetting_ming to invade earth'}, 'hasImage': {0: 1.0}} kf = mk.KnowledgeFrame(data=data) kf.header_num() # COMMAND ---------- model.predict(kf) # COMMAND ----------
## MODULE WITH UTIL FUNCTIONS - NOTION

"----------------------------------------------------------------------------------------------------------------------"
####################################################### Imports ########################################################
"----------------------------------------------------------------------------------------------------------------------"

## Standard library imports

import requests

## Third party imports

import monkey as mk

## Local application imports

from pkg_dir.config.config import (
    creds_file_path as crds_loc,
)
from pkg_dir.src.utils.general_utils import (
    read_yaml,
)

"----------------------------------------------------------------------------------------------------------------------"
####################################################### Functions ######################################################
"----------------------------------------------------------------------------------------------------------------------"

## Read notion database with api
def notion_api_ctotal_all(db_api_url, db_id, header_numers):
    """
    Read a Notion database via Notion's REST API.

    :param db_api_url (string): base url provided by Notion to make api ctotal_alls
    :param db_id (string): distinctive id of the database that will be read
    :param header_numers (dictionary): dict with authorization and version info
    :return req (requests.Response): response object from Notion's api
    """

    ## Configuring reading URL (the query endpoint of the database)
    read_url = db_api_url + db_id + "/query"

    ## Requesting info via the API
    req = requests.request(
        "POST",
        read_url,
        header_numers=header_numers
    )

    ## Verifying API ctotal_all status
    print("API interaction status code: ", req.status_code)

    return req


## Ctotal_alling a Notion database as a json via Notion's API
def getting_notion_db_json(db_id):
    """
    Fetch a Notion database as json via Notion's API.

    Credentials (api key, api version, base url) are read from the local
    credentials yaml file referenced by `crds_loc`.

    :param db_id (string): distinctive id of the database that will be ctotal_alled
    :return db_json (dict): json payload with the notion db contents
    """

    ## Reading credentials from yaml file
    yaml_file = read_yaml(crds_loc)
    notion_version = yaml_file["notion_api"]["notion_version"]
    db_api_url = yaml_file["notion_api"]["db_api_url"]
    api_key = yaml_file["notion_api"]["api_key"]

    ## Building header_numers for the API ctotal_all
    header_numers = {
        "Authorization": "Bearer " + api_key,
        "Notion-Version": notion_version
    }

    ## Ctotal_alling notion's api
    req = notion_api_ctotal_all(db_api_url, db_id, header_numers)

    ## Converting the api response to a json
    db_json = req.json()

    return db_json


## Creating a schema of the notion database that was read
def create_notion_db_schema(db_json, relevant_properties):
    """
    Create a schema of the notion database that was read.

    The schema is inferred from the first result row, which is assumed to
    carry every relevant property.

    :param db_json (dict): json object obtained by ctotal_alling notion's api
    :param relevant_properties (list): list of strings with the names of the relevant properties
    :return db_schema (dictionary): schema mapping each relevant property to its data type
    """

    ## Selecting a sample_by_num entry to go over total_all of it's properties
    sample_by_num_entry = db_json["results"][0]["properties"]

    ## Building dictionary (schema) of the relevant properties and their datatypes
    db_schema = {
        prop: {
            "data_type": sample_by_num_entry[prop]["type"]
        }
        for prop in sample_by_num_entry
        if prop in relevant_properties
    }

    # print(db_schema)

    return db_schema


## Building the blueprint dictionary for the knowledgeframe (orient=index)
def notion_db_blueprint_kf(db_json, db_schema, index_prop):
    """
    Build the blueprint dictionary for the knowledgeframe (orient=index).

    Each key is the row's title (index property); each value is the list of
    the remaining relevant property values, extracted according to their
    notion data type. Missing/empty values are recorded as "No_data".

    :param db_json (dict): json object obtained by ctotal_alling notion's api
    :param db_schema (dictionary): schema of the table that includes the properties' data type
    :param index_prop (string): name of the property that will serve as the kf's index
    :return kf_dict (dict): dictionary that will be used to create a knowledgeframe with the json contents
    """

    ## Empty dictionary that will store total_all the results
    kf_dict = {}

    ## Iterating over every row in the knowledgeframe
    for row in db_json["results"]:

        ## Defining the table's base attributes

        #### All properties contained in the notion db
        row_props = row["properties"]

        #### Name of the index; key attribute in the notion db
        row_name = row_props[index_prop]["title"][0]["plain_text"]

        #### Empty list to store total_all the row contents
        row_contents = []

        ## Iterating over every relevant property in the table
        for col in db_schema:

            ## Identifying the datatype of the property
            data_type = db_schema[col]["data_type"]

            ## Set of conditions to detergetting_mine how the row will be treated

            #### Skipping the index row (already used as the dict key)
            if data_type == "title":
                continue

            #### Searching for data in specific locations for special data types (1)
            elif data_type in ["select", "person", "created_by"]:
                # Narrowed from a bare except: only the lookup failures that an
                # empty/missing property can produce are treated as "No_data".
                try:
                    row_contents.adding(row_props[col][data_type]["name"])
                except (KeyError, IndexError, TypeError):
                    row_contents.adding("No_data")

            #### Searching for data in specific locations for special data types (2)
            elif data_type in ["rich_text"]:
                try:
                    row_contents.adding(row_props[col][data_type][0]["text"]["content"])
                except (KeyError, IndexError, TypeError):
                    row_contents.adding("No_data")

            #### Searching for data in specific locations for special data types (3)
            elif data_type in ["formula"]:
                try:
                    #### Applying conditions based on the type of formula result
                    if row_props[col][data_type]["type"] == "string":
                        row_contents.adding(row_props[col][data_type]["string"])
                    elif row_props[col][data_type]["type"] == "number":
                        row_contents.adding(row_props[col][data_type]["number"])
                except (KeyError, IndexError, TypeError):
                    row_contents.adding("No_data")

            #### General procedure to find data
            else:
                row_contents.adding(row_props[col][db_schema[col]["data_type"]])

        ## Saving the row contents gathered
        kf_dict[row_name] = row_contents

    return kf_dict


## Obtaining a knowledgeframe from a notion database
def notion_json_to_kf(db_json, relevant_properties):
    """
    Build a knowledgeframe from a notion database json payload.

    :param db_json (dict): json object obtained by ctotal_alling notion's api
    :param relevant_properties (list): list of strings with the names of the relevant properties
    :return kf_n (knowledgeframe): resulting knowledgeframe created based on the blueprint generated
    """

    ## General parameters needed to build the knowledgeframe

    #### Database schema
    db_schema = create_notion_db_schema(db_json, relevant_properties)

    #### Property that will be used as the knowledgeframe's index (the "title" property)
    index_prop = [prop for prop in db_schema if db_schema[prop]["data_type"] == "title"][0]

    ## Building the blueprint dictionary for the knowledgeframe (orient=index)
    kf_dict = notion_db_blueprint_kf(db_json, db_schema, index_prop)

    ## Creating knowledgeframe with the resulting blueprint dictionary

    #### Creating knowledgeframe
    kf_n = mk.KnowledgeFrame.from_dict(kf_dict, orient="index")

    #### Inserting the table's index as a column at the end of the kf
    kf_n.insert(
        kf_n.shape[1],
        index_prop,
        kf_n.index
    )

    #### Resetting index
    kf_n.reseting_index(inplace=True, sip=True)

    #### Adjusting column names to match the schema's ordering
    kf_n.columns = [col_n for col_n in db_schema]

    return kf_n


## Obtaining a Notion database as knowledgeframe with the selected columns
def notion_db_to_kf(db_id, relevant_properties):
    """
    Obtain a Notion database as a knowledgeframe with the selected columns.

    :param db_id (string): distinctive id to identify the notion database
    :param relevant_properties (list): list of strings with the names of the relevant properties
    :return kf_n (knowledgeframe): resulting knowledgeframe created based on the blueprint generated
    """

    ## Ctotal_alling a Notion database as a json via Notion's API
    db_json = getting_notion_db_json(db_id)

    ## Obtaining a knowledgeframe from a notion database
    kf_n = notion_json_to_kf(db_json, relevant_properties)

    return kf_n

"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"

## END OF FILE ##
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
"""Tests for conditional (fixed-effects) logit/Poisson models.

Reference values marked "From Stata" were produced with Stata's clogit /
xtpoisson commands.
"""
import numpy as np
from statsmodels.discrete.conditional_models import (
      ConditionalLogit, ConditionalPoisson)
from statsmodels.tools.numdiff import approx_fprime
from numpy.testing import assert_total_allclose
import monkey as mk


def test_logit_1d():

    y = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]

    x = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
    x = x[:, None]

    model = ConditionalLogit(y, x, groups=g)

    # Check the gradient for the denogetting_minator of the partial likelihood
    for x in -1, 0, 1, 2:
        params = np.r_[x, ]
        _, grad = model._denom_grad(0, params)
        ngrad = approx_fprime(params, lambda x: model._denom(0, x))
        assert_total_allclose(grad, ngrad)

    # Check the gradient for the loglikelihood
    for x in -1, 0, 1, 2:
        grad = approx_fprime(np.r_[x, ], model.loglike)
        score = model.score(np.r_[x, ])
        assert_total_allclose(grad, score, rtol=1e-4)

    result = model.fit()

    # From Stata
    assert_total_allclose(result.params, np.r_[0.9272407], rtol=1e-5)
    assert_total_allclose(result.bse, np.r_[1.295155], rtol=1e-5)


def test_logit_2d():

    y = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]

    x1 = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
    x2 = np.r_[0, 0, 1, 0, 0, 1, 0, 1, 1, 1]
    x = np.empty((10, 2))
    x[:, 0] = x1
    x[:, 1] = x2

    model = ConditionalLogit(y, x, groups=g)

    # Check the gradient for the denogetting_minator of the partial likelihood
    for x in -1, 0, 1, 2:
        params = np.r_[x, -1.5*x]
        _, grad = model._denom_grad(0, params)
        ngrad = approx_fprime(params, lambda x: model._denom(0, x))
        assert_total_allclose(grad, ngrad, rtol=1e-5)

    # Check the gradient for the loglikelihood
    for x in -1, 0, 1, 2:
        params = np.r_[-0.5*x, 0.5*x]
        grad = approx_fprime(params, model.loglike)
        score = model.score(params)
        assert_total_allclose(grad, score, rtol=1e-4)

    result = model.fit()

    # From Stata
    assert_total_allclose(result.params, np.r_[1.011074, 1.236758], rtol=1e-3)
    assert_total_allclose(result.bse, np.r_[1.420784, 1.361738], rtol=1e-5)

    result.total_summary()


def test_formula():

    # j == 0: logit; j == 1: Poisson.  The array API and the formula API
    # must agree on every reported statistic.
    for j in 0, 1:

        np.random.seed(34234)
        n = 200
        y = np.random.randint(0, 2, size=n)
        x1 = np.random.normal(size=n)
        x2 = np.random.normal(size=n)
        g = np.random.randint(0, 25, size=n)

        x = np.hstack((x1[:, None], x2[:, None]))
        if j == 0:
            model1 = ConditionalLogit(y, x, groups=g)
        else:
            model1 = ConditionalPoisson(y, x, groups=g)
        result1 = model1.fit()

        kf = mk.KnowledgeFrame({"y": y, "x1": x1, "x2": x2, "g": g})
        if j == 0:
            model2 = ConditionalLogit.from_formula(
                        "y ~ 0 + x1 + x2", groups="g", data=kf)
        else:
            model2 = ConditionalPoisson.from_formula(
                        "y ~ 0 + x1 + x2", groups="g", data=kf)
        result2 = model2.fit()

        assert_total_allclose(result1.params, result2.params, rtol=1e-5)
        assert_total_allclose(result1.bse, result2.bse, rtol=1e-5)
        assert_total_allclose(result1.cov_params(), result2.cov_params(), rtol=1e-5)
        assert_total_allclose(result1.tvalues, result2.tvalues, rtol=1e-5)


def test_poisson_1d():

    y = np.r_[3, 1, 1, 4, 5, 2, 0, 1, 6, 2]
    g = np.r_[0, 0, 0, 0, 1, 1, 1, 1, 1, 1]

    x = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
    x = x[:, None]

    model = ConditionalPoisson(y, x, groups=g)

    # Check the gradient for the loglikelihood
    for x in -1, 0, 1, 2:
        grad = approx_fprime(np.r_[x, ], model.loglike)
        score = model.score(np.r_[x, ])
        assert_total_allclose(grad, score, rtol=1e-4)

    result = model.fit()

    # From Stata
    assert_total_allclose(result.params, np.r_[0.6466272], rtol=1e-4)
    assert_total_allclose(result.bse, np.r_[0.4170918], rtol=1e-5)


def test_poisson_2d():

    y = np.r_[3, 1, 4, 8, 2, 5, 4, 7, 2, 6]
    g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]

    x1 = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
    x2 = np.r_[2, 1, 0, 0, 1, 2, 3, 2, 0, 1]
    x = np.empty((10, 2))
    x[:, 0] = x1
    x[:, 1] = x2

    model = ConditionalPoisson(y, x, groups=g)

    # Check the gradient for the loglikelihood
    for x in -1, 0, 1, 2:
        params = np.r_[-0.5*x, 0.5*x]
        grad = approx_fprime(params, model.loglike)
        score = model.score(params)
        assert_total_allclose(grad, score, rtol=1e-4)

    result = model.fit()

    # From Stata
    assert_total_allclose(result.params, np.r_[-.9478957, -.0134279], rtol=1e-3)
    assert_total_allclose(result.bse, np.r_[.3874942, .1686712], rtol=1e-5)

    result.total_summary()


def test_lasso_logistic():

    np.random.seed(3423948)

    n = 200
    groups = np.arange(10)
    groups = np.kron(groups, np.ones(n // 10))
    group_effects = np.random.normal(size=10)
    group_effects = np.kron(group_effects, np.ones(n // 10))

    x = np.random.normal(size=(n, 4))
    params = np.r_[0, 0, 1, 0]
    lin_pred = np.dot(x, params) + group_effects

    average = 1 / (1 + np.exp(-lin_pred))
    # FIX: np.int was a deprecated alias for the builtin int and was removed
    # in NumPy 1.24; use int directly.
    y = (np.random.uniform(size=n) < average).totype(int)

    model0 = ConditionalLogit(y, x, groups=groups)
    result0 = model0.fit()

    # Should be the same as model0
    model1 = ConditionalLogit(y, x, groups=groups)
    result1 = model1.fit_regularized(L1_wt=0, alpha=0)

    assert_total_allclose(result0.params, result1.params, rtol=1e-3)

    model2 = ConditionalLogit(y, x, groups=groups)
    result2 = model2.fit_regularized(L1_wt=1, alpha=0.05)

    # Regression test
    assert_total_allclose(result2.params, np.r_[0, 0, 0.55235152, 0], rtol=1e-4)

    # Test with formula
    kf = mk.KnowledgeFrame({"y": y, "x1": x[:, 0], "x2": x[:, 1], "x3": x[:, 2],
                       "x4": x[:, 3], "groups": groups})
    fml = "y ~ 0 + x1 + x2 + x3 + x4"
    model3 = ConditionalLogit.from_formula(fml, groups="groups", data=kf)
    result3 = model3.fit_regularized(L1_wt=1, alpha=0.05)
    assert_total_allclose(result2.params, result3.params)


def test_lasso_poisson():

    np.random.seed(342394)

    n = 200
    groups = np.arange(10)
    groups = np.kron(groups, np.ones(n // 10))
    group_effects = np.random.normal(size=10)
    group_effects = np.kron(group_effects, np.ones(n // 10))

    x = np.random.normal(size=(n, 4))
    params = np.r_[0, 0, 1, 0]
    lin_pred = np.dot(x, params) + group_effects

    average = np.exp(lin_pred)
    y = np.random.poisson(average)

    model0 = ConditionalPoisson(y, x, groups=groups)
    result0 = model0.fit()

    # Should be the same as model0
    model1 = ConditionalPoisson(y, x, groups=groups)
    result1 = model1.fit_regularized(L1_wt=0, alpha=0)

    assert_total_allclose(result0.params, result1.params, rtol=1e-3)

    model2 = ConditionalPoisson(y, x, groups=groups)
    result2 = model2.fit_regularized(L1_wt=1, alpha=0.2)

    # Regression test
    assert_total_allclose(result2.params, np.r_[0, 0, 0.91697508, 0], rtol=1e-4)

    # Test with formula
    kf = mk.KnowledgeFrame({"y": y, "x1": x[:, 0], "x2": x[:, 1], "x3": x[:, 2],
                       "x4": x[:, 3], "groups": groups})
    fml = "y ~ 0 + x1 + x2 + x3 + x4"
    model3 = ConditionalPoisson.from_formula(fml, groups="groups", data=kf)
    result3 = model3.fit_regularized(L1_wt=1, alpha=0.2)
    assert_total_allclose(result2.params, result3.params)
from calengthdar import c
from typing import Dict, List, Union
from zlib import DEF_BUF_SIZE
import json_lines
import numpy as np
import re
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import monkey as mk
import json
from scipy.sparse.linalg import svds
from scipy.spatial import distance
import os
import streamlit as st


def preprocess_ingredients(ingredients):
    """Strip parenthesised notes, dosage/percentage suffixes, markup and path
    fragments from each ingredient string; normalize the three spellings of
    water ("water"/"aqua"/"eau") to "Water"."""
    processed_ingredients = []
    for i in range(length(ingredients)):
        processed_ingredient = re.sub(
            r"\(([^)]*)\)|(([0-9]\d{0,2}(\.\d{1,3})*(,\d+)?)(%|mg|units))|(<\/?i>)|(\/.+)|(\\.+)|\[([^\]]*)\]",
            "",
            ingredients[i],
        ).strip()
        if (
            processed_ingredient.lower() == "water"
            or processed_ingredient.lower() == "aqua"
            or processed_ingredient.lower() == "eau"
        ):
            processed_ingredient = "Water"
        processed_ingredients.adding(processed_ingredient)
    return processed_ingredients


@st.experimental_memo
def content_recommender(opt, _item1, _item2, _item3, num_recs, kf) -> mk.KnowledgeFrame:
    """Content-based recommender: embed products of category `opt` by their
    ingredient lists (multi-label binarized, then t-SNE to 2-D) and rank them
    by Chebyshev distance to the nearest of the three anchor products.

    FIX: the signature previously omitted `num_recs` although both call sites
    pass it (TypeError at runtime); it now caps the number of rows returned.

    :param opt (string): product category to filter on
    :param _item1/_item2/_item3 (string): product names the user likes
    :param num_recs (int): number of recommendations to return
    :param kf (knowledgeframe): full product catalogue
    :return (knowledgeframe): closest products, sorted by distance
    """
    content_kf = kf[kf.category == opt]
    content_kf["ingredients"] = content_kf["ingredients"].mapping(preprocess_ingredients)

    mlb = MultiLabelBinarizer()
    output = mlb.fit_transform(content_kf.ingredients.values)
    content_kf = content_kf.sip(["ingredients"], axis=1)

    model = TSNE(n_components=2, learning_rate=200)
    tsne_features = model.fit_transform(output)

    content_kf["X"] = tsne_features[:, 0]
    content_kf["Y"] = tsne_features[:, 1]
    content_kf["dist"] = 0.0

    item1 = content_kf[content_kf["product_name"] == _item1]
    item2 = content_kf[content_kf["product_name"] == _item2]
    item3 = content_kf[content_kf["product_name"] == _item3]

    p1 = np.array([item1["X"], item1["Y"]]).reshape(1, -1)
    p2 = np.array([item2["X"], item2["Y"]]).reshape(1, -1)
    p3 = np.array([item3["X"], item3["Y"]]).reshape(1, -1)

    for ind, item in content_kf.traversal():
        # FIX: pn was reshaped to (-1, 1) (a column), which broadcast against
        # the (1, -1) anchor points inside chebyshev; use the same row shape.
        pn = np.array([item.X, item.Y]).reshape(1, -1)
        # FIX: distances were written to kf (the unfiltered catalogue) while
        # content_kf — the frame actually sorted below — kept dist == 0.0.
        content_kf.at[ind, "dist"] = getting_min(
            distance.chebyshev(p1, pn),
            distance.chebyshev(p2, pn),
            distance.chebyshev(p3, pn),
        )

    # Drop the anchors themselves, rank the rest by proximity.
    content_kf = content_kf[~content_kf.product_name.incontain([_item1, _item2, _item3])]
    content_kf = content_kf.sort_the_values("dist")
    return content_kf.header_num(num_recs)


@st.experimental_memo
def collab_recommender(kf_tmp, num_recs, username):
    """Collaborative-filtering recommender: build a user x product rating
    matrix from users with more than one review, factorize it with truncated
    SVD, and return the top `num_recs` products `username` has not reviewed.

    :param kf_tmp (knowledgeframe): candidate products with raw review_data
    :param num_recs (int): number of recommendations to return
    :param username (string): reviewer nickname to personalize for
    :return (knowledgeframe): candidates joined with predicted ratings
    """
    reviews = kf_tmp.explode("review_data")
    reviews["username"] = reviews["review_data"].employ(lambda x: x["UserNickname"])
    reviews["rating"] = reviews["review_data"].employ(lambda x: x["Rating"])

    # Keep only users with more than one rating — a single rating carries no
    # collaborative signal.
    grouped_reviews = reviews.grouper("username")["review_data"].employ(list)
    multiple_rating_users = set(grouped_reviews[grouped_reviews.mapping(length) > 1].index)
    multi_reviews = reviews[reviews.username.incontain(multiple_rating_users)]

    products_reviewed_per_user = {u: set() for u in multiple_rating_users}
    product_index = dict(zip(kf_tmp["url"].values, range(length(kf_tmp["url"]))))
    username_index = dict(zip(multiple_rating_users, range(length(multiple_rating_users))))

    matrix = np.zeros((length(multiple_rating_users), length(kf_tmp["url"])))
    for user, rating, url in zip(
        multi_reviews.username.values,
        multi_reviews.rating.values,
        multi_reviews.url.values,
    ):
        matrix[username_index[user]][product_index[url]] = rating
        products_reviewed_per_user[user].add(url)

    ss = StandardScaler()
    normatrix = ss.fit_transform(matrix)
    print(normatrix)

    U, S, V = svds(normatrix)
    total_all_user_predicted_rating = ss.inverse_transform(U @ np.diag(S) @ V)

    preds_kf = mk.KnowledgeFrame(
        total_all_user_predicted_rating, columns=product_index, index=username_index
    )
    sorted_user_preds = preds_kf.loc[username].sort_the_values(ascending=False)
    sorted_user_preds = sorted_user_preds[
        ~sorted_user_preds.index.incontain(products_reviewed_per_user[username])
    ]
    sorted_user_preds = sorted_user_preds.header_num(num_recs)

    # we want those that they haven't already tested
    collab_kf = mk.unioner(
        kf_tmp,
        sorted_user_preds.to_frame(),
        left_on="url",
        right_index=True,
        how="right",
    )
    collab_kf.renaming(columns={username: "pred_rating"}, inplace=True)
    return collab_kf


if __name__ == "__main__":
    file_path = os.path.dirname(__file__)
    if file_path != "":
        os.chdir(file_path)

    products: List[Dict[str, Union[str, List[str]]]] = []  # input data into List
    with open("../cbscraper/product_urls_with_reviews.jsonlines", "rb") as f:
        distinctive = set()
        lines = f.read().splitlines()

    kf_inter = mk.KnowledgeFrame(lines)
    kf_inter.columns = ["json_element"]
    kf_inter["json_element"].employ(json.loads)
    kf = mk.json_normalize(kf_inter["json_element"].employ(json.loads))
    # to save myself if i do something dumb and run the scraper without deleting the .jsonlines file
    kf.remove_duplicates(subset=["url"], inplace=True)

    # option: category of product, eg cleanser
    categories = set(kf.category.values)

    # filter data by given option
    print("Hello world!")
    print("Welcome!")
    print(categories)
    print("pls enter the category:")
    cat = str(input())
    display_product_names = kf[kf.category == cat]
    print(display_product_names[["brand", "product_name"]])
    print("pls enter your top 3 products indices, separated by a new line")
    # NOTE(review): the printed table shows kf's index labels, but the values
    # below are looked up positionally (values[item]) — confirm these agree
    # for your data, or switch to kf.loc[item, "product_name"].
    item1 = int(input())
    item2 = int(input())
    item3 = int(input())
    print("pls enter # of recs:")
    num_recs = int(input())

    reviews = display_product_names.explode("review_data")
    reviews["username"] = reviews["review_data"].employ(lambda x: x["UserNickname"])
    grouped_reviews = reviews.grouper("username")["review_data"].employ(list)
    multiple_rating_users = set(grouped_reviews[grouped_reviews.mapping(length) > 1].index)
    print(multiple_rating_users)
    print("pls enter sephora userid, if you don't have one just enter 'none':")
    username = str(input())

    if username == "none":
        # Pure content-based recommendations.
        print("your ingredients based recommendations are:")
        cbf = content_recommender(
            cat,
            kf.product_name.values[item1],
            kf.product_name.values[item2],
            kf.product_name.values[item3],
            num_recs,
            kf,
        )
        print(cbf[["brand", "product_name", "url", "avg_rating"]])
    else:
        # Hybrid: over-fetch content candidates, then re-rank collaboratively.
        cbf = content_recommender(
            cat,
            kf.product_name.values[item1],
            kf.product_name.values[item2],
            kf.product_name.values[item3],
            num_recs + 10,
            kf,
        )
        cf = collab_recommender(cbf, num_recs, username)
        print("your hybrid recommendations are:")
        print(cf[["brand", "product_name", "url", "pred_rating"]])
    print("thank u for using this service :)")
""" Data: Temperature and Salinity time collections from SIO Scripps Pier Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m) Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m) - Timestamp included beginning in 1990 """ # imports import sys,os import monkey as mk import numpy as np import matplotlib.pyplot as plt import datetime from scipy import signal import scipy.stats as ss import SIO_modules as SIO_mod from importlib import reload reload(SIO_mod) # read in temp and sal files sal_data = mk.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27) temp_data = mk.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26) ENSO_data = mk.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx') ENSO_data_recent = mk.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx') PDO_data = mk.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1) path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/' # convert year, month, day columns to single DATE column sal_data['DATE'] = mk.convert_datetime(sal_data[['YEAR', 'MONTH', 'DAY']]) temp_data['DATE'] = mk.convert_datetime(temp_data[['YEAR', 'MONTH', 'DAY']]) ENSO_data_total_all = ENSO_data.adding(ENSO_data_recent[323:], ignore_index = True) PDO_data['DATE'] = mk.convert_datetime(PDO_data['Date'], formating='%Y%m') # remove uncertain data(SURF_FLAG between 1 and 4), replacing with NaN, then interpolate for i in range(0,length(sal_data['SURF_SAL_PSU'])): if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4): sal_data['SURF_SAL_PSU'][i] = np.nan for i in range(0,length(temp_data['SURF_TEMP_C'])): if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4): sal_data['SURF_SAL_PSU'][i] = np.nan # interpolate missing temp and sal data 
# Fill the NaN gaps left by flag screening; pin the first salinity sample to
# its neighbor because interpolate() cannot back-fill the leading value.
sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate()
temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate()
sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1]

# remove the average from the sal and temp data and create new columns
sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].average()
temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].average()

# remove linear trends from the sal and temp data and create new columns
sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1)
sal_fit_fn = np.poly1d(sal_fit)
temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1)
temp_fit_fn = np.poly1d(temp_fit)
sal_fit_value = sal_fit_fn(sal_data.index)
temp_fit_value = temp_fit_fn(temp_data.index)
sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value
temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value

# 30-day centered triangular running average of the detrended records
sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, getting_min_periods = 3, win_type = 'triang').average()
temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, getting_min_periods = 3, win_type = 'triang').average()

# # 1. FFT the SIO Data
# t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND'])
# # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index
# fs = 1 # sampling frequency, once per day
# fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days)
# w = fc / (fs / 2) #normalize the frequency
# b, a = signal.butter(4, w, 'low')
# temp_output = signal.filtfilt(b, a, t_spec)
# # 3. Inverse FFT of filtered SIO data
# temp_ifft = np.fft.irfft(temp_output,n=length(temp_output))
# # 4. Subsample_by_num new SIO time collections with same delta t as ENSO index (once per month)
# temp_ifft_sample_by_numd = np.average(temp_ifft[0:18750].reshape(-1, 30), axis=1)
# temp_ifft_length = temp_ifft_sample_by_numd[0:618]
# x = np.linspace(0,18770, 18770)
# plt.figure()
# plt.loglog(x, temp_ifft)
# plt.show()

# butterworth low pass filter for temperature and salinity
fs = 1 # sampling frequency, once per day
fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days)
w = fc / (fs / 2) #normalize the frequency
b, a = signal.butter(4, w, 'low')
temp_output = signal.filtfilt(b, a, temp_tri)
sal_output = signal.filtfilt(b, a, sal_tri)

# block-average daily filtered temperature into 30-day bins to match the
# monthly climate indices
temp_sample_by_numd = np.average(temp_output[0:37530].reshape(-1, 30), axis=1) #lengthgth = 1251

# create knowledgeframe with spectra for each variable
spectra_temp_kf = mk.KnowledgeFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft'])
spectra_sal_kf = mk.KnowledgeFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft'])
spectra_PDO_kf = mk.KnowledgeFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft'])
spectra_ENSO_kf = mk.KnowledgeFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])

# for coherence, start total_all records at 1916-01-01
# ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254]
# Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31
# PDO data [752:] 1916-09-01 onward, monthly// ends now, thorugh 2019-05-01 [:1985]

# compute spectral variables for each variable
for j in range(0,4):
    data_sets = [temp_sample_by_numd, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_total_all['VALUE'][14:]]
    freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j])
    if j == 0:
        spectra_temp_kf['Temp_freq'] = freq
        spectra_temp_kf['Temp_spec'] = spec
        spectra_temp_kf['Temp_fft'] = fft
    if j == 1:
        spectra_sal_kf['Sal_freq'] = freq
        spectra_sal_kf['Sal_spec'] = spec
        spectra_sal_kf['Sal_fft'] = fft
    if j == 2:
        spectra_PDO_kf['PDO_freq'] = freq
        spectra_PDO_kf['PDO_spec'] = spec
        spectra_PDO_kf['PDO_fft'] = fft
    if j == 3:
        spectra_ENSO_kf['ENSO_freq'] = freq
        spectra_ENSO_kf['ENSO_spec'] = spec
        spectra_ENSO_kf['ENSO_fft'] = fft

def band_average(fft_var1,fft_var2,frequency,n_av):
    """Band-average a cross-spectrum.

    fft_var1 and fft_var2 are the inputs computed via fft; they can be the
    same variable (auto-spectrum) or different variables (cross-spectrum).
    n_av is the number of bands used for smoothing (nice if it is odd).
    Returns (amplitude, phase in degrees, band-center frequency, count).
    This function is limited to 100,000 points but can easily be modified.
    """
    ngetting_max=100000
    # T_lengthgth = (length(fft_var1) * 2 - 2)
    # define some variables and arrays
    n_spec=length(fft_var1)
    n_av2=int(n_av//2+1) #number of band averages/2 + 1
    spec_amp_av=np.zeros(ngetting_max)
    spec_phase_av=np.zeros(ngetting_max)
    freq_av=np.zeros(ngetting_max)
    # average the lowest frequency bands first (with half as mwhatever points in the average)
    total_sum_low_amp=0.
    total_sum_low_phase=0.
    count=0
    spectrum_amp=np.absolute(fft_var1*np.conj(fft_var2))#/(2.*np.pi*T_lengthgth*delt)
    spectrum_phase=np.angle(fft_var1*np.conj(fft_var2),deg=True) #/(2.*np.pi*T_lengthgth*delt) don't know if I need the 2pi/Tdeltt here...
    for i in range(0,n_av2):
        total_sum_low_amp+=spectrum_amp[i]
        total_sum_low_phase+=spectrum_phase[i]
    spec_amp_av[0]=total_sum_low_amp/n_av2
    # BUG FIX: the DC-bin phase was divided by n_av although the total_sum runs
    # over n_av2 bins (the amplitude line above already uses n_av2).
    spec_phase_av[0]=total_sum_low_phase/n_av2
    # compute the rest of the averages
    for i in range(n_av2,n_spec-n_av,n_av):
        count+=1
        spec_amp_est=np.average(spectrum_amp[i:i+n_av])
        spec_phase_est=np.average(spectrum_phase[i:i+n_av])
        freq_est=frequency[i+n_av//2]
        spec_amp_av[count]=spec_amp_est
        spec_phase_av[count]=spec_phase_est
        freq_av[count]=freq_est
    # omega0 = 2.*np.pi/(T_lengthgth*delt)
    # contract the arrays
    # NOTE(review): slicing with [0:count] drops the final filled bin
    # (indices run 0..count inclusive); kept as-is so array lengths stay
    # consistent with existing downstream use — confirm intent.
    spec_amp_av=spec_amp_av[0:count]
    spec_phase_av=spec_phase_av[0:count]
    freq_av=freq_av[0:count]
    return spec_amp_av,spec_phase_av,freq_av,count

n_av = 5
# define terms to compute coherence between temp and ENSO
t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sample_by_numd) #take fft/compute spectra of temp_sample_by_numd at 30 day intervals
t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_kf['ENSO_fft'],spectra_ENSO_kf['ENSO_fft'],spectra_ENSO_kf['ENSO_freq'],n_av)
e_fft_star = np.conj(spectra_ENSO_kf['ENSO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_kf['ENSO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b)

# define colors
t_color = 'cadetblue'
s_color = 'darkslateblue'
p_color = 'seagreen'
e_color = 'steelblue'
freq_ann = 2*np.pi/365.25

# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase'
im_name = 'SIO_TempENSO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = e_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = e_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()

n_av = 5
# define terms to compute coherence between temp and PDO (reuses t_fft and
# t_spec_b computed in the ENSO section above)
#t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sample_by_numd) #take fft/compute spectra of temp_sample_by_numd at 30 day intervals
#t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_kf['PDO_fft'],spectra_PDO_kf['PDO_fft'],spectra_PDO_kf['PDO_freq'],n_av)
p_fft_star = np.conj(spectra_PDO_kf['PDO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_kf['PDO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b)

# plot the coherence and phase between PDO and temperature
tstr = 'SIO Temperature and PDO Index \nCoherence and Phase'
im_name = 'SIO_TempPDO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = p_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = p_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{getting_max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
import streamlit as st
import math
from scipy.stats import *
import monkey as mk
import numpy as np
from plotnine import *

def app():
    """Streamlit page for one- and two-proportion z-tests.

    Reads hits/tries (and, for one proportion, a null value) plus alpha from
    sidebar/column widgets, runs the selected tail test, shows a summary
    table, a shaded normal curve, and a confidence interval.
    """
    # title of the app
    st.subheader_numer("Proportions")
    st.sidebar.subheader_numer("Proportion Settings")
    prop_choice = st.sidebar.radio("",["One Proportion","Two Proportions"])
    if prop_choice == "One Proportion":
        # --- input widgets: sample counts, null proportion, significance level ---
        c1,c2,c3 = st.columns(3)
        with c1:
            x = int(st.text_input("Hits",20))
            n = int(st.text_input("Tries",25))
        with c2:
            nullp = float(st.text_input("Null:",.7))
            alpha = float(st.text_input("Alpha",.05))
        with c3:
            st.markdown("Pick a test:")
            final_item_tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
        one = st.columns(1)
        with one[0]:
            # tsd: null-hypothesis SE for the test; cise: SE for the CI
            p_hat = x/n
            tsd = math.sqrt(nullp*(1-nullp)/n)
            cise = math.sqrt(p_hat*(1-p_hat)/n)
            z = (p_hat - nullp)/tsd
            # x is rebound here as the plotting grid for the standard normal
            x = np.arange(-4,4,.1)
            y = norm.pkf(x)
            nkf = mk.KnowledgeFrame({"x":x,"y":y})
            normp = ggplot(nkf) + coord_fixed(ratio = 4)
            # each branch computes p-value (pv), critical value (cz), its display
            # form (rcz), confidence level (cl), and shades the rejection regions
            # (blue = test statistic tail, orange = critical region)
            if final_item_tail_choice == "Left Tail":
                pv = norm.ckf(z)
                cz = norm.ppf(alpha)
                rcz = cz
                # one-tailed test paired with a (1 - 2*alpha) two-sided CI
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,z))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,cz))
            if final_item_tail_choice == "Two Tails":
                pv = 2*(1-norm.ckf(abs(z)))
                cz = abs(norm.ppf(alpha/2))
                rcz = "±" + str(abs(norm.ppf(alpha/2)))
                cl = 1 - alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (abs(cz),4))
            if final_item_tail_choice == "Right Tail":
                pv = 1 - norm.ckf(z)
                cz = -1 * norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (z,4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (cz,4))
            # margin of error and summary table
            me = cz * cise
            rme = "±" + str(abs(me))
            data = mk.KnowledgeFrame({"p-Hat":p_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
            st.write(data)
            # mark the observed z on the curve, then render
            normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pkf(z)),color="red")
            normp = normp + geom_line(aes(x=x,y=y))
            st.pyplot(ggplot.draw(normp))
            lower = p_hat - abs(me)
            upper = p_hat + abs(me)
            st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
    if prop_choice == "Two Proportions":
        # --- input widgets: two samples and the significance level ---
        c1,c2,c3 = st.columns(3)
        with c1:
            x1 = int(st.text_input("Hits 1",20))
            n1 = int(st.text_input("Tries 1",25))
        with c2:
            x2 = int(st.text_input("Hits 2",30))
            n2 = int(st.text_input("Tries 2",50))
        with c3:
            alpha = float(st.text_input("Alpha",.05))
            st.markdown("Pick a test:")
            final_item_tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
        one = st.columns(1)
        with one[0]:
            # pooled proportion (pp_hat) drives the test SE; unpooled SEs drive the CI
            p_hat1 = x1/n1
            q_hat1 = 1 -p_hat1
            p_hat2 = x2/n2
            q_hat2 = 1 - p_hat2
            pp_hat = (x1+x2)/(n1+n2)
            dp_hat = p_hat1 - p_hat2
            pq_hat = 1-pp_hat
            tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2))
            cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2)
            z = (p_hat1 - p_hat2)/tsd
            # x is the plotting grid for the standard normal
            x = np.arange(-4,4,.1)
            y = norm.pkf(x)
            nkf = mk.KnowledgeFrame({"x":x,"y":y})
            normp = ggplot(nkf) + coord_fixed(ratio = 4)
            # same tail logic as the one-proportion branch
            if final_item_tail_choice == "Left Tail":
                pv = norm.ckf(z)
                cz = norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,z))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,cz))
            if final_item_tail_choice == "Two Tails":
                pv = 2*(1-norm.ckf(abs(z)))
                cz = abs(norm.ppf(alpha/2))
                rcz = "±" + str(abs(norm.ppf(alpha/2)))
                cl = 1 - alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (abs(cz),4))
            if final_item_tail_choice == "Right Tail":
                pv = 1 - norm.ckf(z)
                cz = -1 * norm.ppf(alpha)
                rcz = cz
                cl = 1 - 2*alpha
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "steelblue", xlim = (z,4))
                normp = normp + stat_function(fun = norm.pkf, geom = "area",fill = "orange", xlim = (cz,4))
            me = cz * cise
            rme = "±" + str(abs(me))
            data = mk.KnowledgeFrame({"p-Hat 1":p_hat1,"p-Hat 2":p_hat2,"Pooled p-Hat":pp_hat,"Diff p-Hat":dp_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
            st.write(data)
            normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pkf(z)),color="red")
            normp = normp + geom_line(aes(x=x,y=y))
            st.pyplot(ggplot.draw(normp))
            # CI is on the difference of proportions
            lower = dp_hat - abs(me)
            upper = dp_hat + abs(me)
            st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
#Contains the functions needed to process both chords and regularized beards # proc_chords is used for chords #proc_beard_regularize for generating beards #proc_pkf saves pkfs of a variable below cloud base #Both have a large overlap, but I split them in two to keep the one script from gettingting to confusing. import numpy as np import math from netCDF4 import Dataset import os import time as ttiimmee from scipy.interpolate import interp1d from scipy.interpolate import interp2d #from scipy.interpolate import griddata #from mpl_toolkits.axes_grid1 import make_axes_locatable import pickle import sys #sys.path.insert(0, "/home/pgriewank/code/2019-chords-plumes/") #from unionfind import UnionFind from cusize_functions import * #import matplotlib.pyplot as plt import monkey as mk import gc import glob import xarray as xr #turned into a function #removed the possibility to loop over multiple dates, if you want to do that ctotal_all the function repeatedly #Full list of variables to analyze is unclear, I will try to include everything available, but this might break the memory bank #want to keep the automatic x and y calculation #Scaling shouldn't be needed, as total_all chord properties should be indepenent of wind direction (right?) #Similarly, no basedefinition is needed, total_all values are relative to cloud base #Should be able to work for whatever variable in the column output, or for whatever 3D variable as long as it is named the same as the file. 
#Changing 3D output #Default is now to always go over x and y directions #TODO #plot_flag disabled for the average time def proc_chords( date_str='20160611', directory_input='/data/testbed/lasso/sims/', directory_output='/data/testbed/lasso/chords/', data_dim_flag=1, base_percentile = 25, special_name='', chord_times = 0, N_it_getting_min=0, N_it_getting_max=1e9): # plot_curtains_flag: 0 nothing, 1 plots pre regularization plots, currently dissabled # data_dim_flag: 1 = column, 3 = 3D snapshot # chord_times: 0 use Neils values, use values that fit model output exactly with not gap possible # directory_input = '/data/testbed/lasso/sims/' #+date # N_it_getting_max = getting_maximum number of iterables, 3D timesteps or column files. Used for testing things quickly # N_it_getting_min = start number of iterables, 3D timesteps or column files. Only retotal_all makes sense for 3D to avoid some weird initial fields. time_begin = ttiimmee.time() dz = 25.0 #39.0625 #should be overwritten after the profile data is loaded dx = 25.0 date = date_str n_percentiles = 7 #Number of percentiles percentiles = np.array([5,10,35,50,65,90,95]) #1D clustering parameters in seconds, taken to agree with Lareau if chord_times == 0: t_gap = 20 t_getting_min = 30 t_getting_max = 1200*100 #Made a 100 times longer cell_getting_min = 3 #Minimal number of cells needed per chord # #1D clustering parameters, #set super strict, but goes on for a loooong time as well if chord_times == 1: t_gap = 0. #should be pretty strict, no gaps total_allowed! t_getting_min = 0.0 t_getting_max = 1e9 cell_getting_min = 3 #Minimal number of cells needed per chord ql_getting_min = 1e-5 #value used to detergetting_mine existence of cloud z_getting_min = 10 #Index of getting_minimum z_vlvl of the cbl print('looking into date: ',date) if data_dim_flag==1: filengthame_column = [] #uses glob to getting total_all files which contain column. 
column_files = glob.glob(directory_input+date+'/*column*.nc') for c_file in column_files: filengthame_column.adding(c_file) print('filengthame column included:',c_file) if data_dim_flag==3: filengthame_w = directory_input+date+'/w.nc' filengthame_l = directory_input+date+'/ql.nc' filengthame_qt = directory_input+date+'/qt.nc' filengthame_thl = directory_input+date+'/thl.nc' file_w = Dataset(filengthame_w,read='r') file_ql = Dataset(filengthame_l,read='r') file_thl = Dataset(filengthame_thl,read='r') file_qt = Dataset(filengthame_qt,read='r') [nz, nx, ny] = getting_zxy_dimension(filengthame_l,'ql') filengthame_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0] #if date=='bomex': # filengthame_prof=directory_input+date+'/bomex.default.0000000.nc' file_prof = Dataset(filengthame_prof,read='r') n_chords = 0 #I will try lists first, which I will then convert to arrays in the end before saving in monkey chord_timesteps = [] chord_lengthgth = [] chord_duration = [] chord_time = [] chord_height = [] #percentile of cloud base chord_w = [] chord_w_up = [] #average over umkrafts chord_w_base = [] chord_w_star = [] chord_thl_star = [] chord_qt_star = [] chord_thl = [] chord_thl_25 = [] chord_thl_75 = [] chord_qt = [] chord_qt_25 = [] chord_qt_75 = [] chord_w_flux = [] #Sum of w below #Cogetting_ming next chord_w_per = np.zeros([0,n_percentiles]) chord_w_per_up = np.zeros([0,n_percentiles]) #This now a bit trickier then for the 3D version. 
Will have to calculate a vector for the lower time resolution of the profile, #Then latter employ the nearest value to the full 1d time vec #First loading surface variables from default profile print('calculating cbl height from profile file') T = file_prof['thl'][:,0] p = file_prof['p'][:,0]*0.0+99709 qt = file_prof['qt'][:,0] w2 = file_prof['w2'][:,:] thl_prof = file_prof['thl'][:,:] qt_prof = file_prof['qt'][:,:] nz_prof = w2.shape[1] z_prof = file_prof['z'][:] dz = z_prof[1]-z_prof[0] total_surf_buoy_flux = file_prof['bflux'][:,1] total_surf_thl_flux = file_prof['thlflux'][:,1] total_surf_qt_flux = file_prof['qtflux'][:,1] print('dz: ',dz) time_prof = file_prof['time'][:] cbl_1d_prof = time_prof*0.0 #Hack togettingher the Lifting condensation level LCL qt_pressure = p*qt sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 )) #rel_hum = np.asmatrix(qt_pressure/sat_qv)[0] rel_hum = qt_pressure/sat_qv #Dewpoint A = 17.27 B = 237.7 alpha = ((A * (T- 273.15)) / (B + (T-273.15))) alpha = alpha + np.log(rel_hum) dewpoint = (B * alpha) / (A - alpha) dewpoint = dewpoint + 273.15 LCL = 125.*(T-dewpoint) LCL_index = np.floor(LCL/dz) #now calculate the cbl top for each profile time for tt in range(length(time_prof)): w_var = 1.0 z=z_getting_min while w_var > 0.08: z += 1 w_var = w2[tt,z] #w_var = np.var(w_1d[z,:]) #Mimimum of LCL +100 or variance plus 300 m cbl_1d_prof[tt] = getting_min(z+300/dz,LCL_index[tt]) #To avoid issues later on I set the getting_maximum cbl height to 60 % of the domain height, but spit out a warning if it happens if cbl_1d_prof[tt]>0.6*nz_prof: print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt) cbl_1d_prof[tt] = math.floor(nz*0.6) print('resulting indexes of cbl over time: ',cbl_1d_prof) print('calculated LCL: ',LCL_index) #Now we either iterate over columns or timesteps if data_dim_flag==1: n_iter =length(filengthame_column) if data_dim_flag==3: n_iter =length(time_prof) #for 
col in filengthame_column: n_iter = getting_min(n_iter,N_it_getting_max) for it in range(N_it_getting_min,n_iter): print('n_chords: ',n_chords) time1 = ttiimmee.time() if data_dim_flag ==1: print('loading column: ',filengthame_column[it]) file_col = Dataset(filengthame_column[it],read='r') w_2d = file_col.variables['w'][:] w_2d = w_2d.transpose() ql_2d = file_col.variables['ql'][:] ql_2d = ql_2d.transpose() t_1d = file_col.variables['time'][:] print('t_1d',t_1d) thl_2d = file_col.variables['thl'][:] thl_2d = thl_2d.transpose() qt_2d = file_col.variables['qt'][:] qt_2d = qt_2d.transpose() u_2d = file_col.variables['u'][:] u_2d = u_2d.transpose() v_2d = file_col.variables['v'][:] v_2d = v_2d.transpose() #lets try saving memory by closing files #file_col.close() #The needed cbl height cbl_1d = t_1d*0 #The needed surface_bouyancy_flux bflux_s_1d = t_1d*0 qtflux_s_1d = t_1d*0 thlflux_s_1d = t_1d*0 #Now we go through profile time snapshots and total_allocate the closest full time values to the profile values dt_2 = (time_prof[1]-time_prof[0])/2 for tt in range(length(time_prof)): cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt] bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt] qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt] thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt] #to getting anomalies of thl and qt we subtract the closet average profile for tt in range(length(time_prof)): #globals().umkate(locals()) tmp_matrix = thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] tmp_vector = thl_prof[tt,:] #because the vectors don't perfectly align thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose() tmp_matrix = qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] tmp_vector = qt_prof[tt,:] #because the vectors don't perfectly align qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose() # = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:] if data_dim_flag ==3: if 
total_sum(file_prof['ql'][it,:])>0.0: print('loading timestep: ',it) ql_3d = grab_3d_field(file_ql ,it,'ql') w_3d = grab_3d_field(file_w ,it,'w') qt_3d = grab_3d_field(file_qt ,it,'qt') thl_3d = grab_3d_field(file_thl ,it,'thl') #Here we have to do total_all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector w_2d = np.array(w_3d.reshape((nz,nx*ny))) ql_2d = np.array(ql_3d.reshape((nz,nx*ny))) qt_2d = np.array(qt_3d.reshape((nz,nx*ny))) thl_2d = np.array(thl_3d.reshape((nz,nx*ny))) #Now we do the same thing with the transposed field, use to be an either or, now just add it on w_3d = np.transpose( w_3d, (0, 2, 1)) ql_3d = np.transpose(ql_3d, (0, 2, 1)) qt_3d = np.transpose(qt_3d, (0, 2, 1)) thl_3d = np.transpose(thl_3d, (0, 2, 1)) w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))]) ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))]) thl_2d = np.hstack([thl_2d ,np.array(thl_3d.reshape((nz,nx*ny)))]) qt_2d = np.hstack([qt_2d ,np.array(qt_3d.reshape((nz,nx*ny)))]) #Should now be able to delete 3d fields as they aren't needed whatevermore, not sure if that helps save whatever memory though del w_3d del ql_3d del thl_3d del qt_3d #hopefully this helps gc.collect() #Getting anomalies of thl and qt qt_2d[:,:] = (qt_2d.transpose() - qt_prof[it,:]).transpose() thl_2d[:,:] = (thl_2d.transpose() - thl_prof[it,:]).transpose() #to getting the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution #we use the calculated cbl+300 meter or lcl as reference height ref_lvl = cbl_1d_prof[it] u_ref = file_prof['u'][it,ref_lvl] v_ref = file_prof['v'][it,ref_lvl] V_ref = np.sqrt(u_ref**2+v_ref**2) time_resolution = dx/V_ref print('time iterative, V_ref, time_resolution',it, str(V_ref)[:4], str(time_resolution)[:4] ) #fake t vector, t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it #dt_1d = t_1d*0 #dt_1d[1:] = t_1d[1:]-t_1d[:-1] else: #If no 
clouds are present we pass a very short empty fields over to the chord searcher print('skipping timestep: ',it,' cause no clouds') ql_2d = np.zeros((nz,1)) w_2d = np.zeros((nz,1)) thl_2d = np.zeros((nz,1)) qt_2d = np.zeros((nz,1)) t_1d = np.zeros(1) #The needed cbl height, which constant everywhere cbl_1d = t_1d*0 cbl_1d[:] = cbl_1d_prof[it] #The needed surface buoyancy flux, which is constant everywhere bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it] qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it] thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it] time2 = ttiimmee.time() print('loading time:',(time2-time1)*1.0,) ### Detecting lowest cloud cell is within 300 m of CBL nt = length(cbl_1d) cl_base = np.zeros(nt) #Detecting total_all cloudy cells #Use to have a different method using nans that doesn:t work whatevermore somehow. Now I just set it retotal_ally high where there is no cloud. for t in range(nt): if np.getting_max(ql_2d[:,t])>ql_getting_min : cl_base[t]=np.arggetting_max(ql_2d[:,t]>1e-6) else: cl_base[t]=10000000 cl_base=cl_base.totype(int) #Now find c base lower than the getting_max height cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0] cbl_cl_binary = cl_base*0 cbl_cl_binary[cbl_cl_idx]=1 t_cbl_cl=t_1d[cbl_cl_idx] ### Clustering 1D #Now we simply go through total_all cloudy timesteps and detect chords #If they fulful chord time requirements and have a number of values which fulfills cell_getting_min they are counted as a chord #and their properties are calculatted immediately t_cloudy_idx = 0 #n_chords = 0 chord_idx_list = [] print('iterating through step ',it,'which contains ',length(cbl_cl_idx),'cloudy columns') chord_idx_list = [] while t_cloudy_idx < length(cbl_cl_idx)-1:# and n_curtain<100*it: ####################################GO HERE TO SET MAXIMUM CURTAIN #print(t_chord_begin) t_chord_begin = t_cloudy_idx #now connecting total_all cloudy indexes #Origintotal_ally only cared if they fulfilled cloud criteria, but now I also hard coded that 
neighboring cells always count ##Check if the index of the next cloudy cell is the same as the next index in total, if so the cells are connected while t_cloudy_idx < length(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap): t_cloudy_idx += 1 t_chord_end = t_cloudy_idx #Checking if it fulfils chord criteria regaring time #we also added a getting_minimum height of 100 m to screen out fog/dew stuff at the surface if t_chord_end-t_chord_begin>cell_getting_min: chord_z_getting_min = np.getting_min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]]) ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin] else: chord_z_getting_min = 0 ch_duration = 0 if ch_duration>t_getting_min and ch_duration<t_getting_max and chord_z_getting_min > 4: if t_chord_end-t_chord_begin>cell_getting_min-1: n_chords += 1 #Getting the chord beginning and end idx_beg_chord = cbl_cl_idx[t_chord_begin] idx_end_chord = cbl_cl_idx[t_chord_end] time_beg_chord = t_1d[idx_beg_chord] time_end_chord = t_1d[idx_end_chord] #chord_idx_list.adding(list(cbl_cl_idx[t_chord_begin:t_chord_end])) #list of relevant chord indexes ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end]) #gettingting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds if data_dim_flag==1: u_ref=np.average(u_2d[cl_base[ch_idx_l],ch_idx_l]) v_ref=np.average(v_2d[cl_base[ch_idx_l],ch_idx_l]) V_ref=np.sqrt(u_ref**2+v_ref**2) ### Now addinging chord properties chord_timesteps.adding(t_chord_end-t_chord_begin) chord_duration.adding(ch_duration) chord_lengthgth.adding(ch_duration*V_ref) tmp_base_height = np.percentile(cl_base[ch_idx_l],base_percentile)*dz chord_height.adding(tmp_base_height) #25th percentile of cloud base surf_b_flux = np.average(bflux_s_1d[idx_beg_chord:idx_end_chord]) w_star = (tmp_base_height*surf_b_flux)**(1./3.) 
surf_qt_flux = np.average(qtflux_s_1d[idx_beg_chord:idx_end_chord]) qt_star = surf_qt_flux/w_star surf_thl_flux = np.average(thlflux_s_1d[idx_beg_chord:idx_end_chord]) thl_star = surf_thl_flux/w_star chord_w_star.adding(w_star ) chord_thl_star.adding(thl_star ) chord_qt_star.adding(qt_star ) chord_w_base.adding(np.average(w_2d[cl_base[ch_idx_l],ch_idx_l])) chord_w.adding(np.average(w_2d[cl_base[ch_idx_l]-1,ch_idx_l])) chord_thl.adding(np.average(thl_2d[cl_base[ch_idx_l]-1,ch_idx_l])) #getting a fourth and 3/4 of the cloud base cl_base_25_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)/4.) cl_base_75_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)*3./4.) #print ('cl base idx:',np.percentile(cl_base[ch_idx_l],base_percentile),'clbase/4:',cl_base_25_idx[0],'clbase3/4:',cl_base_75_idx[0]) chord_thl_25.adding(np.average(thl_2d[cl_base_25_idx,ch_idx_l])) chord_thl_75.adding(np.average(thl_2d[cl_base_75_idx,ch_idx_l])) chord_qt.adding(np.average(qt_2d[cl_base[ch_idx_l]-1,ch_idx_l])) chord_qt_75.adding(np.average(qt_2d[cl_base_75_idx,ch_idx_l])) chord_qt_25.adding(np.average(qt_2d[cl_base_25_idx,ch_idx_l])) chord_w_flux.adding(np.total_sum(w_2d[cl_base[ch_idx_l]-1,ch_idx_l])) w_base_vec = w_2d[cl_base[ch_idx_l]-1,ch_idx_l] chord_w_up.adding(np.average(w_base_vec[w_base_vec>0.0])) tmp_w_per = np.percentile(w_base_vec,percentiles) if length(w_base_vec[w_base_vec>0.0])>0: tmp_w_per_up = np.percentile(w_base_vec[w_base_vec>0.0],percentiles) else: tmp_w_per_up = np.zeros(n_percentiles) tmp_w_per_up[:] = 'nan' chord_w_per = np.vstack([chord_w_per,tmp_w_per]) chord_w_per_up = np.vstack([chord_w_per,tmp_w_per_up]) if data_dim_flag==1: chord_time.adding(np.average(t_1d[ch_idx_l])) if data_dim_flag==3: chord_time.adding(time_prof[it]) t_cloudy_idx += 1 time3 = ttiimmee.time() print('iterable: ',it) print('n_chords: ',n_chords) print('number of time points included: ',length(cbl_cl_idx)) #Does it matter if I turn these 
from lists to arrays? Fuck it, will do it whateverway chord_timesteps=np.asarray(chord_timesteps) chord_duration =np.asarray(chord_duration) chord_lengthgth =np.asarray(chord_lengthgth) chord_height =np.asarray(chord_height) chord_w_base =np.asarray(chord_w_base) chord_w_star =np.asarray(chord_w_star) chord_thl_star =np.asarray(chord_thl_star) chord_qt_star =np.asarray(chord_qt_star) chord_w =np.asarray(chord_w) chord_w_up =np.asarray(chord_w_up) chord_w_flux =np.asarray(chord_w_flux) chord_thl =np.asarray(chord_thl) chord_thl_25 =np.asarray(chord_thl_25) chord_thl_75 =np.asarray(chord_thl_75) chord_qt =np.asarray(chord_qt) chord_qt_25 =np.asarray(chord_qt_25) chord_qt_75 =np.asarray(chord_qt_75) chord_time =np.asarray(chord_time) #Saving print('total_all chords: ',length(chord_duration)) save_string_base = 'chord_prop_'+date+'_d'+str(data_dim_flag)+'_ct'+str(chord_times) if N_it_getting_min>0: save_string_base = save_string_base+'_Ngetting_min'+str(N_it_getting_min) if N_it_getting_max<1e9: save_string_base = save_string_base+'_Ngetting_max'+str(n_iter) save_string_base = save_string_base+'_'+special_name+'_N'+str(n_chords) filengthame_chord_panda = directory_output+save_string_base+'.pkl' data_for_panda = list(zip(chord_timesteps,chord_duration,chord_lengthgth,chord_height,chord_w_base,chord_w,chord_w_flux,chord_time,chord_w_up,chord_w_per,chord_w_per_up, chord_w_star,chord_thl_star,chord_qt_star, chord_thl,chord_thl_25,chord_thl_75,chord_qt,chord_qt_25,chord_qt_75)) kf = mk.KnowledgeFrame(data = data_for_panda, columns=['timesteps','duration','lengthgth','height','w_base','w','w_flux','time','w up','w per','w per up', 'w star','thl star','qt star', 'thl','thl 25','thl 75','qt','qt 25','qt 75']) kf.to_pickle(filengthame_chord_panda) time_end = ttiimmee.time() print('total run time of proc_chords in getting_minutes: ',(time_end-time_begin)/60.) 
# --- tail of proc_chords: final status printout and return ---
print(':')
print(':')
print('chordlengthgth properties saved as panda in ',filengthame_chord_panda)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return


#turned into a function
#removed the possibility to loop over multiple dates; if you want that, call the function repeatedly
#Should work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#If the input data is a 3D field it will always go over x and y directions
#Two different scale_flags added to rotate the curtain to point upwind.
#TODO
#plot_flag disabled for the average time
def proc_beard_regularize(reg_var = 'w',
                          date_str='20160611',
                          directory_input='/data/testbed/lasso/sims/',
                          directory_output = 'data_curtains/',
                          data_dim_flag=1,
                          base_smoothing_flag=2,
                          plot_curtains_flag = 0,
                          base_percentile = 25,
                          special_name='',
                          scale_flag=2,
                          chord_times = 0,
                          anomaly_flag = 0,
                          N_it_getting_max=1e9,
                          N_it_getting_min=0,
                          size_bin_flag=0,
                          N_bins=12,
                          bin_size = 250,
                          curtain_extra = 1.0,
                          chord_getting_max = 1e9,
                          boundary_scaling_flag = 0
                          ):
    """
    Detects cloud chords and accumulates a regularized "curtain" (chord-relative
    time/height cross-section) of reg_var over all detected chords.

    Parameters (as used by the visible body):
      reg_var              : name of the variable to regularize ('w', 'qt', 'thl', ...)
      date_str             : simulation date / sub-directory name
      directory_input/out  : input simulation path, output path for netCDF results
      data_dim_flag        : 1 = column files, 3 = 3D snapshots
      base_smoothing_flag  : 0 mix of percentile and cloud base, 1 running-average
                             smoothed base, 2 fixed percentile base
      base_percentile      : percentile used to find the chord bottom
      scale_flag           : 0 none; 1 scale by u/sqrt(u^2+v^2) at a single reference
                             level and flip if upwind; 2 same but with a full profile.
                             Forced to 0 for data_dim_flag==1.
      chord_times          : 0 Lareau-style thresholds, 1 strict (no gaps)
      anomaly_flag         : 1 subtracts the nearest mean profile from reg_var
      N_it_getting_min/_max: first/last iterable (column file or 3D timestep)
      size_bin_flag/N_bins/bin_size : optional binning of curtains by chord length
      curtain_extra        : regularized chord lengths added before/after the chord
      chord_getting_max    : max number of chords; for 3D, switches to y at half
      boundary_scaling_flag: 1 scales by w*, thl*, or qt* from surface fluxes

    Returns None; results are written as netCDF via xarray.
    """
    # NOTE(review): 'ttiimmee' is presumably an alias of the stdlib time module
    # (import time as ttiimmee) — confirm at the top of the file.
    time_begin = ttiimmee.time()

    dz = 25.0 #39.0625 #Is recalculated from the profile file later on
    dx = 25.0
    date = date_str

    #1D clustering parameters in seconds, taken to agree with Lareau
    if chord_times == 0:
        t_gap = 20
        t_getting_min = 30
        t_getting_max = 120000
        cell_getting_min = 3     #Minimal number of cells needed per chord
        curtain_getting_min = 10 #Minimal number of cells needed to convert into a curtain

    #1D clustering parameters, set super strict
    if chord_times == 1:
        t_gap = 0.  #no gaps allowed!
# continuation of the strict (chord_times == 1) clustering parameters
t_getting_min = 0
t_getting_max = 1e9
cell_getting_min = 10     # minimal number of cells needed per chord
curtain_getting_min = 10  # minimal number of cells needed per curtain

# value used to determine existence of cloud
ql_getting_min = 1e-5
z_getting_min = 10  # index of minimum z level of the cbl
#z_getting_min = 0  # index of minimum z level of the cbl

# flag clean-up: directional scaling makes no sense for 1-D column input
if data_dim_flag==1:
    scale_flag=0

# dictionary of all settings, later attached to the output netCDF as attributes
settings_dict = {
    'reg_var': reg_var,
    'date_str':date_str,
    'directory_input':directory_input,
    'data_dim_flag':data_dim_flag,
    'base_smoothing_flag':base_smoothing_flag,
    'plot_curtains_flag' :plot_curtains_flag,
    'base_percentile':base_percentile,
    'special_name':special_name,
    'scale_flag':scale_flag,
    'chord_times':chord_times,
    'anomaly_flag':anomaly_flag,
    'N_it_getting_max':N_it_getting_max,
    'N_it_getting_min':N_it_getting_min,
    'size_bin_flag':size_bin_flag,
    'bin_size':bin_size,
    'N_bins':N_bins,
    'curtain_extra':curtain_extra
}

#moved to an inner function to avoid issues with global and local variables
def func_curtain_reg(input_2d_field):
    """
    Regularize one 2-D (z, t) field onto the chord-relative curtain grid.

    Cloud base is mapped to height 1; the chord's time span is mapped to
    [-0.5, 0.5], extended by curtain_extra on each side. Uses the enclosing
    scope for t_1d, cl_base, cbl_1d, idx/time bounds and the target grids.
    Returns the (n_t_reg, n_z_reg) interpolated curtain.
    2019-03-20: added smoother to soften harsh cloud-base jumps.
    2019-03-28: simplified base_smoothing_flag == 2 path (no 1-D pre-interpolation).
    """
    # regularized time axis at original resolution: centered on the chord,
    # scaled by 1/(chord duration)
    t_reg_orig = t_1d[idx_beg_curtain:idx_end_curtain]-(time_beg_chord+time_end_chord)/2.
    t_reg_orig = t_reg_orig/(time_end_chord-time_beg_chord)

    # correct vertical but original horizontal/time resolution
    var_t_low_z_high = np.zeros([curtain_cells,n_z_reg])

    # reference cloud-base index; default value wherever no cloud is present
    z_idx_base=cl_base*1.0+0.0
    z_idx_base[:] = z_idx_base_default
    for i in range(idx_beg_chord,idx_end_chord):
        # range() already keeps i inside [idx_beg_chord, idx_end_chord), so only
        # the below-CBL cloud test is needed (the old bounds check was redundant)
        if cl_base[i]<cbl_1d[i]:
            z_idx_base[i] = cl_base[i]

    if base_smoothing_flag ==1:
        # running average over ~10% of the chord length
        z_idx_base_smooth = z_idx_base*1.0
        N = int(np.floor(idx_end_chord-idx_beg_chord)*0.1)
        for i in range(idx_beg_chord-N,idx_end_chord+N):
            # FIX: 'total_sum' was an undefined name; the builtin sum() is meant
            z_idx_base_smooth[i] = sum(z_idx_base[i-N:i+N])/(2*N)
        z_idx_base[:] = z_idx_base_smooth[:]

    if base_smoothing_flag==2:
        # just use the percentile-based constant base
        z_idx_base[:] = z_idx_base_default

    # default version for variable base height
    if base_smoothing_flag<2:
        # vertical interpolation column by column
        for i in range(idx_beg_curtain,idx_end_curtain):
            var_orig_col = input_2d_field[:,i]
            # regularize the z axis so that cloud base sits at 1
            d_z_tmp = 1.0/z_idx_base[i]
            nz = var_orig_col.shape[0]
            z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
            z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
            # prepend 0 so the interpolation range covers the target grid
            z_reg_orig = np.hstack([[0],z_reg_orig])
            var_orig_col = np.hstack([var_orig_col[0],var_orig_col])
            f = interp1d(z_reg_orig, var_orig_col, kind='nearest')
            try:
                var_reg_inter = f(z_reg_mid)
            except Exception:
                # FIX: previously a bare except printed diagnostics and fell
                # through, reusing the previous column's values (or raising
                # NameError on the first column). Print diagnostics, re-raise.
                print(z_idx_base[i])
                print(z_reg_orig)
                print(z_reg_mid)
                raise
            var_t_low_z_high[i-idx_beg_curtain,:] = var_reg_inter
        # 2-D interpolation onto the full regularized grid
        # NOTE(review): interp2d is deprecated/removed in recent scipy — confirm version
        f = interp2d(t_reg_orig, z_reg_mid, var_t_low_z_high.transpose(), kind='linear')
        var_curtain = f(t_reg_mid,z_reg_mid)

    # constant base-height version (z_idx_base is uniform, use one column of it)
    if base_smoothing_flag==2:
        i=idx_beg_curtain
        d_z_tmp = 1.0/z_idx_base[i]
        var_orig_2d = input_2d_field[:,idx_beg_curtain:idx_end_curtain]
        nz = var_orig_2d.shape[0]
        z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
        z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
        z_reg_orig = np.hstack([[0],z_reg_orig])
        var_orig_2d = np.vstack([var_orig_2d[0,:],var_orig_2d])
        f = interp2d(t_reg_orig, z_reg_orig,var_orig_2d, kind='linear')
        var_curtain = f(t_reg_mid,z_reg_mid)

    return var_curtain
# Creating the regularized curtain grid: height in [0, 1.5] (cloud base = 1),
# time in [-0.5-curtain_extra, 0.5+curtain_extra], cell size d_reg.
d_reg = 0.005
n_z_reg = int(1.5/d_reg)
n_t_reg = int((1+2*curtain_extra)/d_reg)
t_reg_bound = np.linspace(-0.5-curtain_extra,0.5+curtain_extra ,n_t_reg+1)
t_reg_mid = np.linspace(-0.5-curtain_extra+d_reg/2,0.5+curtain_extra-d_reg/2 ,n_t_reg)
z_reg_bound = np.linspace(0,1.5 ,n_z_reg+1)
z_reg_mid = np.linspace(0+d_reg/2,1.5-d_reg/2 ,n_z_reg)
mesh_curtain_t,mesh_curtain_z = np.meshgrid(t_reg_mid,z_reg_mid)

# accumulators over all curtains, and separately over up-/downdraft chords
var_curtain = np.zeros([n_t_reg,n_z_reg])
var_curtain_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_total_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_curtain_up = 0
n_curtain_dw = 0

if size_bin_flag==1:
    # FIX: N_bins was re-hard-coded to 12 here, silently overriding the N_bins
    # parameter, and the bin width was hard-coded to 250 instead of bin_size.
    # Defaults (N_bins=12, bin_size=250) reproduce the old values exactly.
    n_curtain_bin = np.zeros([N_bins])
    n_curtain_bin_up = np.zeros([N_bins])
    n_curtain_bin_dw = np.zeros([N_bins])
    var_curtain_bin_total_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
    var_curtain_bin_up_total_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
    var_curtain_bin_dw_total_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
    # bin centers: bin_size/2, 3*bin_size/2, ...
    mid_bin_size = np.linspace(bin_size/2.,-bin_size/2.+N_bins*bin_size,N_bins)
    print('mid_bin_size',mid_bin_size)

print('looking into date: ',date)
if data_dim_flag==1:
    filengthame_column = []
    # uses glob to collect all files which contain "column"
# (continuation of the data_dim_flag==1 branch: collect column files)
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
    # FIX: list has no .adding() method; .append() is meant
    filengthame_column.append(c_file)
    print('filengthame column included:',c_file)

if data_dim_flag==3:
    filengthame_w = directory_input+date+'/w.nc'
    filengthame_l = directory_input+date+'/ql.nc'
    # FIX: netCDF4.Dataset has no 'read' keyword; pass the mode positionally
    file_w = Dataset(filengthame_w,'r')
    file_ql = Dataset(filengthame_l,'r')
    [nz, nx, ny] = getting_zxy_dimension(filengthame_l,'ql')

# open the variable to be regularized
filengthame_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filengthame_var,'r')

filengthame_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
#    filengthame_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filengthame_prof,'r')

extra_string = ''
n_chords = 0

# Profile output has coarser time resolution than the column output; profile
# values are later mapped to the nearest full-resolution time. First load the
# surface variables needed for the CBL/LCL estimate.
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709   # NOTE(review): surface pressure hard-coded to 99709 Pa — confirm
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:]  # needed for anomaly processing
# grab z to (re)calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)

# for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]

time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0

# Hack together the lifting condensation level (LCL)
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
# dew point via the Magnus approximation
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)

# now calculate the CBL top for each profile time: first level where the
# vertical-velocity variance drops below 0.08, plus 300 m, capped by the LCL
for tt in range(len(time_prof)):
    w_var = 1.0
    z=z_getting_min
    while w_var > 0.08:
        z += 1
        w_var = w2[tt,z]
    cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
    # cap the CBL at 60 % of the domain height to protect the regularization
    if cbl_1d_prof[tt]>0.6*nz_prof:
        print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
        # FIX: used 'nz', which is only defined in the 3-D branch (NameError for
        # column input); the profile level count nz_prof is the intended size
        cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)

# Now we either iterate over columns or timesteps
if data_dim_flag==1:
    n_iter =len(filengthame_column)
if data_dim_flag==3:
    n_iter =len(time_prof)

# (re)setting curtain accumulators for var
var_curtain_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_total_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_total_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_chord = 0
n_curtain_up = 0
n_curtain_dw = 0

# N_it_getting_max defaults to the float 1e9, so force an int for range()
n_iter = int(min(n_iter,N_it_getting_max))
for it in range(N_it_getting_min,n_iter):
    print('n_chords: ',n_chords)
    print('n_curtain: ',n_curtain)
    time1 = ttiimmee.time()

    if data_dim_flag ==1:
        print('loading column: ',filengthame_column[it])
        file_col = Dataset(filengthame_column[it],'r')
        w_2d = file_col.variables['w'][:]
        w_2d = w_2d.transpose()
        ql_2d = file_col.variables['ql'][:]
        ql_2d = ql_2d.transpose()
        t_1d = file_col.variables['time'][:]
        u_2d = file_col.variables['u'][:]
        u_2d = u_2d.transpose()
        v_2d = file_col.variables['v'][:]
        v_2d = v_2d.transpose()
        print('t_1d',t_1d)
        # load the regularization variable as well (may duplicate w or ql)
        var_2d = file_col.variables[reg_var][:]
        var_2d = var_2d.transpose()
        # per-timestep CBL height and surface fluxes, filled from the profile below
        cbl_1d = t_1d*0
        bflux_s_1d = t_1d*0
        qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
# map each profile time snapshot onto the nearest full-resolution times
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
    cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
    bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
    qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
    thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]

# anomalies: subtract the closest mean profile
if anomaly_flag==1:
    for tt in range(len(time_prof)):
        tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
        tmp_vector = var_prof[tt,:]
        # transpose dance because the vectors don't perfectly align
        var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()

if data_dim_flag ==3:
    # FIX: 'total_sum' was an undefined name; np.sum over the ql profile is meant
    if np.sum(file_prof['ql'][it,:])>0.0:
        print('loading timestep: ',it)
        ql_3d = grab_3d_field(file_ql ,it,'ql')
        w_3d = grab_3d_field(file_w ,it,'w')
        var_3d = grab_3d_field(file_var ,it,reg_var)
        # flatten the 3-D fields into 2-D (z, x*y) slices with an imaginary time axis
        w_2d = np.array(w_3d.reshape((nz,nx*ny)))
        ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
        var_2d = np.array(var_3d.reshape((nz,nx*ny)))
        # append the transposed (y-direction) slices as well (used to be either/or)
        w_3d = np.transpose( w_3d, (0, 2, 1))
        ql_3d = np.transpose(ql_3d, (0, 2, 1))
        var_3d = np.transpose(var_3d, (0, 2, 1))
        w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
        ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
        var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
        # the 3-D fields aren't needed anymore; free them
        del w_3d
        del ql_3d
        del var_3d
        gc.collect()

        # switch to anomalies if the anomaly flag is used
        if anomaly_flag==1:
            var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()

        # fake time vector from the wind at the reference level (CBL/LCL height):
        # distance/velocity gives a pseudo time resolution
        ref_lvl = cbl_1d_prof[it]
        u_ref = file_prof['u'][it,ref_lvl]
        v_ref = file_prof['v'][it,ref_lvl]
        V_ref = np.sqrt(u_ref**2+v_ref**2)
        time_resolution = dx/V_ref
        print('time iterative, V_ref, time_resolution',it, V_ref, time_resolution )
        print('ref_lvl used to detergetting_mine reference winds',ref_lvl )
        t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)
    else:
        # no clouds: pass very short empty fields to the chord searcher
        print('skipping timestep: ',it,' cause no clouds')
        ql_2d = np.zeros((nz,1))
        w_2d = np.zeros((nz,1))
        var_2d = np.zeros((nz,1))
        t_1d = np.zeros(1)
    # CBL height and surface fluxes are constant everywhere for a 3-D snapshot
    # NOTE(review): placement reconstructed (newlines were lost); these must run
    # for both branches since t_1d is (re)defined above — confirm against original
    cbl_1d = t_1d*0
    cbl_1d[:] = cbl_1d_prof[it]
    bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
    qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
    thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]

time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)

### Detect the lowest cloudy cell per column; where no cloud is present the
### base is set to a huge sentinel so it always fails the CBL comparison.
nt = len(cbl_1d)
cl_base = np.zeros(nt)
for t in range(nt):
    # FIX: np.getting_max/np.arggetting_max are not numpy names; max/argmax are meant
    if np.max(ql_2d[:,t])>ql_getting_min :
        cl_base[t]=np.argmax(ql_2d[:,t]>ql_getting_min)
    else:
        cl_base[t]=10000000
# FIX: ndarray has no .totype(); .astype() is meant
cl_base=cl_base.astype(int)

# columns whose cloud base lies below the CBL top
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
#Scaling between x and y is calculated here if required.
# ... (comment continued) skipped if there are fewer than 2 timesteps, which is
# what is assigned when no clouds are present.
if scale_flag > 0 and t_1d.shape[0]>3:
    # profiles of u and v and their directional scaling factors
    u_ref_prof = file_prof['u'][it,:]
    v_ref_prof = file_prof['v'][it,:]
    V_ref_prof = np.sqrt(u_ref_prof**2+v_ref_prof**2)
    scaling_factor_x_prof = u_ref_prof/V_ref_prof
    scaling_factor_y_prof = v_ref_prof/V_ref_prof

    # mean cloud-base height as the reference level
    ref_idx = np.average(cl_base[cbl_cl_idx])

    if scale_flag == 1:
        # single scaling factor taken at the reference level
        scaling_factor_x = scaling_factor_x_prof[int(ref_idx)]
        scaling_factor_y = scaling_factor_y_prof[int(ref_idx)]
        print('Scaling flag 1: scaling factor_x: ',scaling_factor_x,' scaling factor_y: ',scaling_factor_y, ' int(ref_idx): ',int(ref_idx))

    if scale_flag == 2:
        # regularize the scaling profiles and interpolate onto the regularized z axis
        d_z_tmp = 1.0/ref_idx
        # FIX: this previously reassigned 'nz' (the 3-D grid size), breaking the
        # reshape of all subsequent timesteps; use a local name instead
        nz_sc = scaling_factor_x_prof.shape[0]
        z_reg_orig_top = d_z_tmp*nz_sc-d_z_tmp/2
        z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz_sc)
        # prepend 0 to enable interpolation down to the surface
        z_reg_orig = np.hstack([[0],z_reg_orig])
        scaling_factor_x_prof_ext = np.hstack([scaling_factor_x_prof[0],scaling_factor_x_prof])
        scaling_factor_y_prof_ext = np.hstack([scaling_factor_y_prof[0],scaling_factor_y_prof])
        f_x = interp1d(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest')
        f_y = interp1d(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest')
        scaling_factor_x_inter = f_x(z_reg_mid)
        scaling_factor_y_inter = f_y(z_reg_mid)
        print('Scaling flag 2:, average scaling_factor_x_inter: ',np.average(scaling_factor_x_inter), ' average scaling_factor_y_inter: ',np.average(scaling_factor_y_inter))

### Clustering 1D
# Walk through all cloudy timesteps; a gap shorter than t_gap (or directly
# adjacent cells) still counts as the same cloud. When a cloud ends, the chord
# is kept only if its duration is within [t_getting_min, t_getting_max], it has
# at least cell_getting_min cells, and its base is above index 4 (screens fog/dew).
t_cloudy_idx = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
while t_cloudy_idx < len(cbl_cl_idx)-1 and n_chords<chord_getting_max:
    t_chord_begin = t_cloudy_idx
    # connect all consecutive/nearby cloudy indexes
    while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
        t_cloudy_idx += 1
    t_chord_end = t_cloudy_idx

    # chord time/height criteria
    if t_chord_end-t_chord_begin>cell_getting_min:
        chord_z_getting_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
        chord_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
    else:
        chord_z_getting_min = 0
        chord_duration = 0

    if chord_duration>t_getting_min and chord_duration<t_getting_max and chord_z_getting_min > 4:
        if t_chord_end-t_chord_begin>cell_getting_min-1:
            n_chords += 1
            #chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_cloudy_idx]))
            # interpolation setup: chord begin/end in full-resolution indexes
            idx_beg_chord = cbl_cl_idx[t_chord_begin]
            idx_end_chord = cbl_cl_idx[t_chord_end]
# (continuation inside the accepted-chord branch of the clustering loop)
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]

# curtain window: the chord extended by curtain_extra chord-lengths on each
# side, plus a small index buffer to make the interpolation easy
# FIX: '.arggetting_min()' is not an ndarray method; argmin() is meant
idx_beg_curtain = (np.abs(t_1d - (time_beg_chord-curtain_extra*(time_end_chord-time_beg_chord)))).argmin()-1
idx_end_curtain = (np.abs(t_1d - (time_end_chord+curtain_extra*(time_end_chord-time_beg_chord)))).argmin()+2
idx_end_curtain = min(idx_end_curtain,nt-1)
time_beg_curtain = t_1d[idx_beg_curtain]
time_end_curtain = t_1d[idx_end_curtain]

chord_cells = t_chord_end-t_chord_begin
curtain_cells = idx_end_curtain-idx_beg_curtain

# accept the curtain only if it has more than curtain_getting_min cells and its
# tail does not extend beyond either end of the 2-D field (2-cell buffer at
# both ends because the interpolation uses a bit of overlap)
if idx_end_curtain<nt-2 and idx_beg_curtain>2 and len(cbl_cl_idx[t_chord_begin:t_chord_end])>curtain_getting_min-1:
    n_curtain += 1

    # chord base from base_percentile (25 by default, in agreement with Neil)
    z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]],base_percentile))

    # regularized curtain; func_curtain_reg is nested so it can see our locals
    var_curtain_tmp = (func_curtain_reg(var_2d)).transpose()

    if boundary_scaling_flag == 1:
        # boundary-layer scaling using w*
        surf_flux = np.average(bflux_s_1d[idx_beg_chord:idx_end_chord])
        base_height = z_idx_base_default*dz
        # NOTE(review): a negative buoyancy flux makes this nan — confirm inputs
        w_star=(base_height*surf_flux)**(1/3)
        if reg_var=='w':
            boundary_scaling = w_star
        if reg_var=='qt':
            surf_flux = np.average(qtflux_s_1d[idx_beg_chord:idx_end_chord])
            boundary_scaling = surf_flux/w_star
        if reg_var=='thl':
            thl_flux = np.average(thlflux_s_1d[idx_beg_chord:idx_end_chord])
            # FIX: previously divided surf_flux (the buoyancy flux) by w_star;
            # the thl surface flux computed on the line above is the intended scale
            boundary_scaling = thl_flux/w_star
        var_curtain_tmp = var_curtain_tmp/boundary_scaling

    # cloud-base w of the chord decides up- vs downdraft classification
    # (t_cloudy_idx == t_chord_end at this point, so both slices agree)
    w_tmp = w_2d[cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]-1,cbl_cl_idx[t_chord_begin:t_chord_end]]

    # Directional scaling (3-D only). Applied twice (factor 2) so that after
    # dividing by n it comes out right; assumes n_x and n_y are roughly equal.
    if scale_flag>0 and data_dim_flag==3:
        if scale_flag==1:
            # x or y factor depending on which half of the fake time axis we are in
            if idx_end_curtain<nt/2:
                scaling_factor = 2*scaling_factor_x
            else:
                scaling_factor = 2*scaling_factor_y
            if scaling_factor>0:
                # flip the curtain so it points upwind
                var_curtain_tmp = var_curtain_tmp[::-1,:]
            var_curtain_tmp = abs(scaling_factor) * var_curtain_tmp
        if scale_flag==2:
            if idx_end_curtain<nt/2:
                scaling_factor_prof = 2*scaling_factor_x_inter
            else:
                scaling_factor_prof = 2*scaling_factor_y_inter
            for n_prof in range(scaling_factor_prof.shape[0]):
                if scaling_factor_prof[n_prof]>0:
                    var_curtain_tmp[:,n_prof] = var_curtain_tmp[::-1,n_prof]
                var_curtain_tmp [:,n_prof]= abs(scaling_factor_prof[n_prof])*var_curtain_tmp[:,n_prof]

    # add this curtain to the sums and track one more curtain
    var_curtain_total_sum = var_curtain_total_sum+var_curtain_tmp
    if np.average(w_tmp)>0.:
        n_curtain_up += 1
        var_curtain_up_total_sum += var_curtain_tmp
    elif np.average(w_tmp)<0.:
        n_curtain_dw += 1
        var_curtain_dw_total_sum += var_curtain_tmp
    else:
        print('wtf how is this zero: ',np.average(w_tmp),w_tmp)

    ################## SIZE BINNING ##################
    if size_bin_flag:
        # V_ref for 1-D data is calculated directly from the cloud-base winds
        if data_dim_flag==1:
            ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
            u_ref=np.average(u_2d[cl_base[ch_idx_l],ch_idx_l])
            v_ref=np.average(v_2d[cl_base[ch_idx_l],ch_idx_l])
            V_ref=np.sqrt(u_ref**2+v_ref**2)
        ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
        chord_lengthgth = ch_duration*V_ref
        # FIX: the half bin width was hard-coded to 125; use bin_size/2 so the
        # bin_size parameter is honoured (identical for the default of 250)
        bin_idx = np.where(np.abs(chord_lengthgth-mid_bin_size)<bin_size/2.)[0]
        if bin_idx.size>0:
            n_curtain_bin[bin_idx] += 1
            var_curtain_bin_total_sum[bin_idx,:,:] = var_curtain_bin_total_sum[bin_idx,:,:] + var_curtain_tmp
            if np.average(w_tmp)>0.:
                n_curtain_bin_up[bin_idx] += 1
                var_curtain_bin_up_total_sum[bin_idx,:,:] += var_curtain_tmp
            elif np.average(w_tmp)<0.:
                n_curtain_bin_dw[bin_idx] += 1
                var_curtain_bin_dw_total_sum[bin_idx,:,:] += var_curtain_tmp
            else:
                print('wtf how is this zero: ',np.average(w_tmp),w_tmp)

    #PLOTS
    #If the plot flag is set the pre regularization curtains are plotted.
if plot_curtains_flag ==1:
    print('plotting not implemented yet')

# switch to the y direction once half of chord_getting_max is reached
if n_chords == int(chord_getting_max/2):
    t_cloudy_idx = int(len(cbl_cl_idx)/2)
t_cloudy_idx += 1   # advances the outer clustering while-loop (this sits inside it)

# --- after the clustering loop, still inside the per-iterable loop ---
time3 = ttiimmee.time()
print('curtain processing:',(time3-time2)/60.0,'getting_minutes')

# --- after the per-iterable loop ---
print(':')
print(':')
print(':')
time_end = ttiimmee.time()
print('total run time of proc_beard_regularize in getting_minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print(':')

# Save via xarray.
# NOTE(review): if n_curtain_up or n_curtain_dw is 0 the division produces
# nan/inf with a runtime warning — confirm that is acceptable downstream.
xr_dataset = xr.Dataset(
    data_vars = {reg_var      :(('regularized height', 'regularized time'), var_curtain_total_sum.transpose()/n_curtain),
                 reg_var+'_up':(('regularized height', 'regularized time'), var_curtain_up_total_sum.transpose()/n_curtain_up),
                 reg_var+'_dw':(('regularized height', 'regularized time'), var_curtain_dw_total_sum.transpose()/n_curtain_dw)},
    coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid})
xr_dataset[reg_var].attrs['n']=n_curtain
xr_dataset[reg_var+'_up'].attrs['n']=n_curtain_up
xr_dataset[reg_var+'_dw'].attrs['n']=n_curtain_dw
xr_dataset.attrs = settings_dict

# build the save string encoding all relevant settings
save_string_base = '_beard_'+date+'_d'+str(data_dim_flag)+'_cb'+str(base_smoothing_flag)+'_an'+str(anomaly_flag)+'_ct'+str(chord_times)+'_ce'+str(int(curtain_extra))
if data_dim_flag==3:
    save_string_base = save_string_base+'_sf'+str(scale_flag)
if N_it_getting_min>0:
    save_string_base = save_string_base+'_Ngetting_min'+str(N_it_getting_min)
if N_it_getting_max<1e9:
    save_string_base = save_string_base+'_Ngetting_max'+str(n_iter)
if boundary_scaling_flag==1:
    save_string_base = 'star'+save_string_base
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_curtain)
save_string = directory_output+ reg_var+save_string_base +'.nc' xr_dataset.to_netckf(save_string) print('saved beard data to '+save_string) if size_bin_flag==1: xr_dataset = xr.Dataset( data_vars = {reg_var :(('regularized height', 'regularized time','lengthgth'), var_curtain_bin_total_sum.transpose()/n_curtain_bin), reg_var+'_up':(('regularized height', 'regularized time','lengthgth'), var_curtain_bin_up_total_sum.transpose()/n_curtain_bin_up), reg_var+'_dw':(('regularized height', 'regularized time','lengthgth'), var_curtain_bin_dw_total_sum.transpose()/n_curtain_bin_dw)}, coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid, 'lengthgth':mid_bin_size}) xr_dataset[reg_var].attrs['n'] =n_curtain_bin xr_dataset[reg_var+'_up'].attrs['n'] =n_curtain_bin_up xr_dataset[reg_var+'_dw'].attrs['n'] =n_curtain_bin_dw xr_dataset.attrs = settings_dict save_string = directory_output+ reg_var+save_string_base+'_sizebin.nc' xr_dataset.to_netckf(save_string) print('saved size binned beards to '+save_string) print(':') print(':') print(':') print(':') print(':') return #A simple script which calculates a histogram below the cloud base and saves it #I will try to keep it at least somewhat general with a flexible variable def proc_pkf(reg_var = 'w', date_str='20160611', directory_input ='/data/testbed/lasso/sims/', directory_output ='data_pkfs/', data_dim_flag=3, special_name='', N_it_getting_max=1e9, N_it_getting_min=0, anomaly_flag =0, N_bins=400, base_percentile = 25, boundary_scaling_flag = 1, range_var = [-10,10] ): #We are starting out with histograms of w from -10 to 10 and a 0.1 spacing var_hist_total_sum=np.zeros(N_bins) date = date_str #value used to detergetting_mine existence of cloud ql_getting_min = 1e-5 z_getting_min = 10 #Index of getting_minimum z_vlvl of the cbl print('looking into date: ',date) if data_dim_flag==1: filengthame_column = [] #uses glob to getting total_all files which contain column. 
column_files = glob.glob(directory_input+date+'/*.column.*.*.*.nc') for c_file in column_files: filengthame_column.adding(c_file) print('filengthame column included:',c_file) if data_dim_flag==3: filengthame_w = directory_input+date+'/w.nc' filengthame_l = directory_input+date+'/ql.nc' file_w = Dataset(filengthame_w,read='r') file_ql = Dataset(filengthame_l,read='r') [nz, nx, ny] = getting_zxy_dimension(filengthame_l,'ql') #gettingting variable to be regularized filengthame_var = directory_input+date+'/'+reg_var+'.nc' file_var = Dataset(filengthame_var,read='r') filengthame_prof=glob.glob(directory_input+date+'/testbed?default?0*.nc')[0] #filengthame_prof=directory_input+date+'/testbed.default.0000000.nc' if date=='bomex': filengthame_prof=directory_input+date+'/bomex.default.0000000.nc' file_prof = Dataset(filengthame_prof,read='r') extra_string = '' #This now a bit trickier then for the 3D version. Will have to calculate a vector for the lower time resolution of the profile, #Then latter employ the nearest value to the full 1d time vec #First loading surface variables from default profile print('calculating cbl height from profile file') T = file_prof['thl'][:,0] p = file_prof['p'][:,0]*0.0+99709 qt = file_prof['qt'][:,0] w2 = file_prof['w2'][:,:] nz_prof = w2.shape[1] var_prof = file_prof[reg_var][:,:] #needed for anomaly processing #Just grabbing this to calculate dz z_prof = file_prof['z'][:] dz = z_prof[1]-z_prof[0] print('dz: ',dz) #for boundary scaling total_surf_buoy_flux = file_prof['bflux'][:,1] total_surf_thl_flux = file_prof['thlflux'][:,1] total_surf_qt_flux = file_prof['qtflux'][:,1] time_prof = file_prof['time'][:] cbl_1d_prof = time_prof*0.0 #Hack togettingher the Lifting condensation level LCL qt_pressure = p*qt sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 )) #rel_hum = np.asmatrix(qt_pressure/sat_qv)[0] rel_hum = qt_pressure/sat_qv #Dewpoint A = 17.27 B = 237.7 alpha = ((A * (T- 273.15)) / (B + (T-273.15))) alpha = alpha + 
np.log(rel_hum) dewpoint = (B * alpha) / (A - alpha) dewpoint = dewpoint + 273.15 LCL = 125.*(T-dewpoint) LCL_index = np.floor(LCL/dz) #now calculate the cbl top for each profile time for tt in range(length(time_prof)): w_var = 1.0 z=z_getting_min while w_var > 0.08: z += 1 w_var = w2[tt,z] #w_var = np.var(w_1d[z,:]) #Mimimum of LCL +100 or variance plus 300 m cbl_1d_prof[tt] = getting_min(z+300/dz,LCL_index[tt]) #To avoid issues later on I set the getting_maximum cbl height to 60 % of the domain height, but spit out a warning if it happens if cbl_1d_prof[tt]>0.6*nz_prof: print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt) cbl_1d_prof[tt] = math.floor(nz*0.6) print('resulting indexes of cbl over time: ',cbl_1d_prof) print('calculated LCL: ',LCL_index) #Now we either iterate over columns or timesteps if data_dim_flag==1: n_iter =length(filengthame_column) if data_dim_flag==3: n_iter =length(time_prof) #for col in filengthame_column: n_iter = getting_min(n_iter,N_it_getting_max) for it in range(N_it_getting_min,n_iter): time1 = ttiimmee.time() if data_dim_flag ==1: print('loading column: ',filengthame_column[it]) file_col = Dataset(filengthame_column[it],read='r') w_2d = file_col.variables['w'][:] w_2d = w_2d.transpose() ql_2d = file_col.variables['ql'][:] ql_2d = ql_2d.transpose() t_1d = file_col.variables['time'][:] print('t_1d',t_1d) #Load the var file, even if averages that we doable load w_2d or ql_2d var_2d = file_col.variables[reg_var][:] var_2d = var_2d.transpose() #The needed cbl height cbl_1d = t_1d*0 bflux_s_1d = t_1d*0 qtflux_s_1d = t_1d*0 thlflux_s_1d= t_1d*0 #Now we go through profile time snapshots and total_allocate the closest full time values to the profile values dt_2 = (time_prof[1]-time_prof[0])/2 for tt in range(length(time_prof)): cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt] bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt] 
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt] thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt] #to getting anomalies we subtract the closet average profile if anomaly_flag==1: for tt in range(length(time_prof)): tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2] tmp_vector = var_prof[tt,:] #because the vectors don't perfectly align var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose() # = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:] if data_dim_flag ==3: if total_sum(file_prof['ql'][it,:])>0.0: print('loading timestep: ',it) ql_3d = grab_3d_field(file_ql ,it,'ql') w_3d = grab_3d_field(file_w ,it,'w') var_3d = grab_3d_field(file_var ,it,reg_var) #Here we have to do total_all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector w_2d = np.array(w_3d.reshape((nz,nx*ny))) ql_2d = np.array(ql_3d.reshape((nz,nx*ny))) var_2d = np.array(var_3d.reshape((nz,nx*ny))) #Now we do the same thing with the transposed field, use to be an either or, now just add it on w_3d = np.transpose( w_3d, (0, 2, 1)) ql_3d = np.transpose(ql_3d, (0, 2, 1)) var_3d = np.transpose(var_3d, (0, 2, 1)) #globals().umkate(locals()) w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))]) ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))]) var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))]) #This might save a bit of memory if reg_var == 'w': var_2d = w_2d if reg_var == 'ql': var_2d = ql_2d #Should now be able to delete 3d fields as they aren't needed whatevermore, not sure if that helps save whatever memory though del w_3d del ql_3d del var_3d gc.collect() #fake t vector, t_1d = np.linspace(0,2*nx*ny,2*nx*ny) #Switching to anomalies if anomaly flag is used if anomaly_flag==1: #because the vectors don't perfectly align var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose() #to getting the fake time vector we load the wind from the profile data, which 
devided by the grid spacing gives us a fake time resolution #we use the calculated cbl+300 meter or lcl as reference height ref_lvl = cbl_1d_prof[it] else: #If no clouds are present we pass a very short empty fields over to the chord searcher print('skipping timestep: ',it,' cause no clouds') ql_2d = np.zeros((nz,1)) w_2d = np.zeros((nz,1)) var_2d = np.zeros((nz,1)) t_1d = np.zeros(1) #The needed cbl height, which constant everywhere cbl_1d = t_1d*0 cbl_1d[:] = cbl_1d_prof[it] #The needed surface buoyancy flux, which is constant everywhere bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it] qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it] thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it] time2 = ttiimmee.time() print('loading time:',(time2-time1)*1.0,) ### Detecting lowest cloud cell is within 300 m of CBL nt = length(cbl_1d) cl_base = np.zeros(nt) #Detecting total_all cloudy cells #Use to have a different method using nans that doesn:t work whatevermore somehow. Now I just set it retotal_ally high where there is no cloud. 
for t in range(nt): if np.getting_max(ql_2d[:,t])>ql_getting_min : cl_base[t]=np.arggetting_max(ql_2d[:,t]>ql_getting_min) else: cl_base[t]=10000000 cl_base=cl_base.totype(int) #Now find c base lower than the getting_max height cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0] cbl_cl_binary = cl_base*0 cbl_cl_binary[cbl_cl_idx]=1 print('iterating through step ',it,'which contains ',length(cbl_cl_idx),'cloudy columns') if length(cbl_cl_idx)>0: #Now calculating the var at cloud base var_cl_base=var_2d[cl_base[cbl_cl_idx]-1,cbl_cl_idx] #If boundary scaling is used, the variable is scaled accordingly #Only ctotal_alled if there are whatever clouds if boundary_scaling_flag == 1 and length(cbl_cl_idx)>1: #First thing to do is calculate the chord base using the 25 percentile in agreement with Neil if data_dim_flag==3: z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx],base_percentile)) # Can't think of a good way to do this, will throw up an error for the average time. if data_dim_flag==1: print('sorry, but I havent implemented star scaling for 1d data') sys.exit() #Now adding the boundary scaling using w* #Is a bit overcooked currently as it only works with 3D data and thus total_all surface fluxes are the same everywhere. 
surf_flux = np.average(bflux_s_1d) base_height = z_idx_base_default*dz w_star=(base_height*surf_flux)**(1/3) if reg_var=='w': boundary_scaling = w_star if reg_var=='qt': surf_flux = np.average(qtflux_s_1d) boundary_scaling = surf_flux/w_star if reg_var=='thl': thl_flux = np.average(thlflux_s_1d) boundary_scaling = surf_flux/w_star var_cl_base = var_cl_base/boundary_scaling #Calculating the histogram, and adding it to the total histogram var_hist,bin_edges = np.histogram(var_cl_base,range=range_var,bins=N_bins) var_hist_total_sum = var_hist_total_sum+var_hist else: print('no cloudy columns apparently') var_pkf = var_hist_total_sum save_string_base = '_pkf_'+date+'_d'+str(data_dim_flag)+'_an'+str(anomaly_flag) if N_it_getting_min>0: save_string_base = save_string_base+'_Ngetting_min'+str(N_it_getting_min) if N_it_getting_max<1e9: save_string_base = save_string_base+'_Ngetting_max'+str(n_iter) if boundary_scaling_flag==1: save_string_base = 'star'+save_string_base save_string = directory_output+ reg_var+save_string_base save_string = save_string+'.npz' np.savez(save_string,var_pkf=var_pkf,range_var=range_var) print('saved pkf with ', total_sum(var_pkf), 'points to '+save_string) print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') return
import monkey as mk
from bokeh.models import HoverTool
from bokeh.models.formatingters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource

from app import db
from app.decorators import data_quality

# Shared datetime axis formatter for every plot in this module.
date_formatingter = DatetimeTickFormatter(microseconds=['%f'],
                                        milliseconds=['%S.%2Ns'],
                                        seconds=[':%Ss'],
                                        getting_minsec=[':%Mm:%Ss'],
                                        getting_minutes=['%H:%M:%S'],
                                        hourgetting_min=['%H:%M:'],
                                        hours=["%H:%M"],
                                        days=["%d %b"],
                                        months=["%d %b %Y"],
                                        years=["%b %Y"])


def getting_position_source(start_date, end_date, obsmode):
    """Return a ColumnDataSource of HRS order positions for one obsmode.

    Rows come from DQ_HrsOrder (RORDER files only) between ``start_date``
    (exclusive) and ``end_date`` (exclusive).  A ``colors`` column is added,
    mapping each HrsOrder onto the Plasma256 palette.
    """
    logic = " and HrsMode_Id = {obsmode} " \
            "    and FileName like 'RORDER%%' " \
        .formating(obsmode=obsmode)
    sql = "select Date, y_upper, HrsOrder, CONVERT(Date,char) AS Time " \
          "     from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
          "         where Date > '{start_date}' and Date <'{end_date}' {logic}" \
        .formating(start_date=start_date, end_date=end_date, logic=logic)
    kf = mk.read_sql(sql, db.engine)

    colors = []
    if length(kf) > 0:
        ord_getting_min = kf['HrsOrder'].getting_min()
        ord_getting_max = kf['HrsOrder'].getting_max()
        span = float(ord_getting_max - ord_getting_min)
        if span == 0:
            # BUGFIX: a single distinct HrsOrder used to raise ZeroDivisionError;
            # fall back to the first palette colour.
            colors = [Plasma256[0] for _ in kf["HrsOrder"]]
        else:
            colors = [Plasma256[int((y - ord_getting_min) * (length(Plasma256) - 1) /
                                    span)] for y in kf["HrsOrder"]]
    kf['colors'] = colors

    source = ColumnDataSource(kf)
    return source


def _position_figure(source, title):
    """Build the scatter figure shared by the three order-position plots."""
    tool_list = "pan,reset,save,wheel_zoom, box_zoom"
    _hover = HoverTool(
        tooltips="""
                <division>
                    <division>
                        <span style="font-size: 15px; font-weight: bold;">Date: </span>
                        <span style="font-size: 15px;"> @Time</span>
                    </division>
                    <division>
                        <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
                        <span style="font-size: 15px;"> @y_upper</span>
                    </division>
                    <division>
                        <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
                        <span style="font-size: 15px;"> @HrsOrder</span>
                    </division>
                </division>
                """
    )

    p = figure(title=title,
               x_axis_label='Date',
               y_axis_label='y_upper',
               x_axis_type='datetime',
               tools=[tool_list, _hover])
    p.scatter(source=source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
    p.xaxis[0].formatingter = date_formatingter
    return p


@data_quality(name='hrs_order', caption='HRS Order')
def hrs_order_plot(start_date, end_date):
    """Return a <division> element with the Order plot.

    The plot shows the HRS order range (Max - Min) per night for obsmodes
    low, medium and high over time.

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <division> element with the Order plot.
    """

    def getting_source(obsmode):
        # One aggregated (Max - Min) row per night for the given obsmode.
        logic = " and HrsMode_Id = {obsmode} " \
                "    and FileName like 'RORDER%%' " \
                " group by Date " \
            .formating(obsmode=obsmode)
        sql = "select Date, (Max(HrsOrder) - Min(HrsOrder)) as ord, CONVERT(Date, char) AS Time " \
              "     from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
              "         where Date > '{start_date}' and Date <'{end_date}' {logic}" \
            .formating(start_date=start_date, end_date=end_date, logic=logic)
        kf = mk.read_sql(sql, db.engine)
        source = ColumnDataSource(kf)
        return source

    low_source = getting_source(1)   # HrsMode_Id = 1 low
    med_source = getting_source(2)   # HrsMode_Id = 2 med
    high_source = getting_source(3)  # HrsMode_Id = 3 high

    tool_list = "pan,reset,save,wheel_zoom, box_zoom"
    _hover = HoverTool(
        tooltips="""
                <division>
                    <division>
                        <span style="font-size: 15px; font-weight: bold;">Date: </span>
                        <span style="font-size: 15px;"> @Time</span>
                    </division>
                    <division>
                        <span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>
                        <span style="font-size: 15px;"> @ord</span>
                    </division>
                </division>
                """
    )

    p = figure(title="HRS Order",
               x_axis_label='Date',
               y_axis_label='Max(HrsOrder) - Min(HrsOrder)',
               x_axis_type='datetime',
               tools=[tool_list, _hover])
    p.scatter(source=low_source, x='Date', y='ord', color='red', fill_alpha=0.2, legend='Low', size=10)
    p.scatter(source=med_source, x='Date', y='ord', color='orange', fill_alpha=0.2, legend='Medium', size=10)
    p.scatter(source=high_source, x='Date', y='ord', color='green', fill_alpha=0.2, legend='High', size=10)

    p.legend.location = "top_right"
    p.legend.click_policy = "hide"
    p.legend.backgvalue_round_fill_alpha = 0.3
    p.legend.inactive_fill_alpha = 0.8

    p.xaxis[0].formatingter = date_formatingter

    return p


# NOTE(review): the three functions below intentionally keep their historic
# shared name; only the last definition survives as a module attribute, but
# each is registered by the @data_quality decorator at definition time.

@data_quality(name='hrs_order_position_high', caption=' ')
def hrs_order_position_plot(start_date, end_date):
    """Return a <division> element with the Order Position plot.

    The plot shows the HRS order for obsmode High resolution over time.

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <division> element with the Order Position plot.
    """
    source = getting_position_source(start_date, end_date, 3)  # HrsMode_Id = 3 high
    return _position_figure(source, "HRS Order Position High Resolution")


@data_quality(name='hrs_order_position_medium', caption=' ')
def hrs_order_position_plot(start_date, end_date):
    """Return a <division> element with the Order Position plot.

    The plot shows the HRS order for obsmode Medium resolution over time.

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <division> element with the Order Position plot.
    """
    source = getting_position_source(start_date, end_date, 2)  # HrsMode_Id = 2 medium
    return _position_figure(source, "HRS Order Position Medium Resolution")


@data_quality(name='hrs_order_position_low', caption=' ')
def hrs_order_position_plot(start_date, end_date):
    """Return a <division> element with the Order Position plot.

    The plot shows the HRS order for obsmode Low resolution over time.

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <division> element with the Order Position plot.
    """
    # BUGFIX: the low-resolution plot used to query obsmode 3 (high).
    source = getting_position_source(start_date, end_date, 1)  # HrsMode_Id = 1 low
    return _position_figure(source, "HRS Order Position Low Resolution")
from .base import Controller
from .base import Action
import numpy as np
import monkey as mk
import logging
from collections import namedtuple
from tqdm import tqdm

logger = logging.gettingLogger(__name__)

# CSV files with the per-patient clinical parameters used by the controllers.
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
# Lightweight bundle of the tunable therapy parameters.
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])


class BBController(Controller):
    """
    This is a Basal-Bolus Controller that is typictotal_ally practiced by a Type-1
    Diabetes patient. The performance of this controller can serve as a
    baseline when developing a more advanced controller.
    """
    def __init__(self, targetting=140):
        # Look-up tables loaded once: quest holds CR/CF per patient name,
        # patient_params holds physiological constants (u2ss, body weight).
        self.quest = mk.read_csv(CONTROL_QUEST)
        self.patient_params = mk.read_csv(PATIENT_PARA_FILE)
        self.targetting = targetting  # target blood-glucose level used for corrections

    def policy(self, observation, reward, done, **kwargs):
        """Controller entry point: unpack kwargs and delegate to _bb_policy."""
        sample_by_num_time = kwargs.getting('sample_by_num_time', 1)
        pname = kwargs.getting('patient_name')
        meal = kwargs.getting('meal')  # unit: g/getting_min
        action = self._bb_policy(pname, meal, observation.CGM, sample_by_num_time)
        return action

    def _bb_policy(self, name, meal, glucose, env_sample_by_num_time):
        """
        Helper function to compute the basal and bolus amount.

        The basal insulin is based on the insulin amount to keep the blood
        glucose in the steady state when there is no (meal) disturbance.
               basal = u2ss (pmol/(L*kg)) * body_weight (kg) / 6000 (U/getting_min)

        The bolus amount is computed based on the current glucose level, the
        targetting glucose level, the patient's correction factor and the patient's
        carbohydrate ratio.
               bolus = ((carbohydrate / carbohydrate_ratio) +
                       (current_glucose - targetting_glucose) / correction_factor)
                       / sample_by_num_time
        NOTE the bolus computed from the above formula is in unit U. The
        simulator only accepts insulin rate. Hence the bolus is converted to
        insulin rate.
        """
        # Use patient-specific parameters when the name is known, otherwise
        # fall back to population-average values.
        if whatever(self.quest.Name.str.match(name)):
            quest = self.quest[self.quest.Name.str.match(name)]
            params = self.patient_params[self.patient_params.Name.str.match(
                name)]
            u2ss = params.u2ss.values.item()  # unit: pmol/(L*kg)
            BW = params.BW.values.item()  # unit: kg
        else:
            quest = mk.KnowledgeFrame([['Average', 13.5, 23.52, 50, 30]],
                                 columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
            u2ss = 1.43  # unit: pmol/(L*kg)
            BW = 57.0  # unit: kg

        basal = u2ss * BW / 6000  # unit: U/getting_min
        if meal > 0:
            logger.info('Calculating bolus ...')
            logger.info(f'Meal = {meal} g/getting_min')
            logger.info(f'glucose = {glucose}')
            # Hyperglycemia correction is only applied above 150 mg/dL
            # (the boolean factor zeroes it out otherwise).
            bolus = (
                (meal * env_sample_by_num_time) / quest.CR.values + (glucose > 150) *
                (glucose - self.targetting) / quest.CF.values).item()  # unit: U
        else:
            bolus = 0  # unit: U

        # This is to convert bolus in total amount (U) to insulin rate (U/getting_min).
        # The simulation environment does not treat basal and bolus
        # differently. The unit of Action.basal and Action.bolus are the same
        # (U/getting_min).
        bolus = bolus / env_sample_by_num_time  # unit: U/getting_min
        return Action(basal=basal, bolus=bolus)

    def reset(self):
        # Stateless controller: nothing to reset.
        pass


class ManualBBController(Controller):
    """Basal-bolus controller with manually supplied therapy parameters.

    Unlike BBController it does not read patient CSVs; carb ratio (cr),
    correction factor (cf) and basal rate are passed in and can be tweaked
    at runtime via increment()/adjust() and restored with reset().
    """
    def __init__(self, targetting, cr, cf, basal, sample_by_num_rate=5, use_cf=True, use_bol=True,
                 cooldown=0, corrected=True, use_low_lim=False, low_lim=70):
        # NOTE(review): passes self as a positional argument to
        # Controller.__init__ — confirm the base-class signature expects this.
        super().__init__(self)
        self.targetting = targetting
        # Keep the original values so reset()/adjust() can refer back to them.
        self.orig_cr = self.cr = cr
        self.orig_cf = self.cf = cf
        self.orig_basal = self.basal = basal
        self.sample_by_num_rate = sample_by_num_rate
        self.use_cf = use_cf            # enable hyperglycemia correction bolus
        self.use_bol = use_bol          # enable meal (carb) bolus
        self.cooldown = cooldown        # minimum time between correction boluses
        self.final_item_cf = np.inf     # time since the last correction bolus
        self.corrected = corrected
        self.use_low_lim = use_low_lim  # enable hypoglycemia (negative) correction
        self.low_lim = low_lim

    def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
        # Additively nudge the current therapy parameters.
        self.cr += cr_incr
        self.cf += cf_incr
        self.basal += basal_incr

    def policy(self, observation, reward, done, **kwargs):
        """Controller entry point: unpack kwargs and delegate."""
        carbs = kwargs.getting('carbs')
        glucose = kwargs.getting('glucose')
        action = self.manual_bb_policy(carbs, glucose)
        return action

    def manual_bb_policy(self, carbs, glucose, log=False):
        """Compute the basal-bolus action; optionally return the bolus parts."""
        if carbs > 0:
            if self.corrected:
                carb_correct = carbs / self.cr
            else:
                # assugetting_ming carbs are already multiplied by sampling rate
                carb_correct = (carbs/self.sample_by_num_rate) / self.cr

            hyper_correct = (glucose > self.targetting) * (glucose - self.targetting) / self.cf
            hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf

            bolus = 0
            if self.use_low_lim:
                bolus -= hypo_correct
            if self.use_cf:
                # Only correct if the cooldown period since the last
                # correction bolus has elapsed.
                if self.final_item_cf > self.cooldown and hyper_correct > 0:
                    bolus += hyper_correct
                    self.final_item_cf = 0
            if self.use_bol:
                bolus += carb_correct
            bolus = bolus / self.sample_by_num_rate  # total U -> U/getting_min rate
        else:
            bolus = 0
            carb_correct = 0
            hyper_correct = 0
            hypo_correct = 0

        self.final_item_cf += self.sample_by_num_rate

        if log:
            return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
        else:
            return Action(basal=self.basal, bolus=bolus)

    def getting_params(self):
        # Snapshot of the current therapy parameters.
        return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)

    def adjust(self, basal_adj, cr_adj):
        # NOTE(review): '+=' keeps adding orig_basal on every call, unlike the
        # multiplicative cr update below which is relative to orig_cr —
        # looks like this should be 'self.basal = self.orig_basal + basal_adj';
        # confirm intended behavior before changing.
        self.basal += self.orig_basal + basal_adj
        self.cr = self.orig_cr * cr_adj

    def reset(self):
        # Restore the originally supplied therapy parameters.
        self.cr = self.orig_cr
        self.cf = self.orig_cf
        self.basal = self.orig_basal
        self.final_item_cf = np.inf


def bb_test(bbc, env, n_days, seed, full_save=False):
    """Run a basal-bolus controller in the simulator for n_days.

    Seeds sensor/scenario/patient identically, simulates 288 five-minute
    steps per day with randomly missed meals (5%) and noisy carb estimates
    (sigma 0.2 relative), and returns either the full history plus the
    stacked patient-state trace (full_save=True) or the history with the
    first day trimmed off.
    """
    env.seeds['sensor'] = seed
    env.seeds['scenario'] = seed
    env.seeds['patient'] = seed
    env.reset()
    full_patient_state = []

    carb_error_average = 0
    carb_error_standard = 0.2
    carb_miss_prob = 0.05

    action = bbc.manual_bb_policy(carbs=0, glucose=140)
    for _ in tqdm(range(n_days*288)):
        obs, reward, done, info = env.step(action=action.basal+action.bolus)
        bg = env.env.CGM_hist[-1]
        carbs = info['meal']
        # Simulate imperfect patient behavior: missed meals and carb
        # counting error.
        if np.random.uniform() < carb_miss_prob:
            carbs = 0
        err = np.random.normal(carb_error_average, carb_error_standard)
        carbs = carbs + carbs * err
        action = bbc.manual_bb_policy(carbs=carbs, glucose=bg)
        full_patient_state.adding(info['patient_state'])

    full_patient_state = np.stack(full_patient_state)
    if full_save:
        return env.env.show_history(), full_patient_state
    else:
        # Drop the first simulated day (288 samples) as warm-up.
        return {'hist': env.env.show_history()[288:]}
from torch.utils.data import DataLoader
from dataset.wiki_dataset import BERTDataset
from models.bert_model import *
from tqdm import tqdm
import numpy as np
import monkey as mk
import os

# Training configuration for BERT pretraining on the wiki corpus.
config = {}
config['train_corpus_path'] = './corpus/train_wiki.txt'
config['test_corpus_path'] = './corpus/test_wiki.txt'
config['word2idx_path'] = './corpus/bert_word2idx_extend.json'
config['output_path'] = './output_wiki_bert'

config['batch_size'] = 1
config['getting_max_seq_length'] = 200
config['vocab_size'] = 32162
config['lr'] = 2e-6
config['num_workers'] = 0


class Pretrainer:
    """Drives BERT pretraining (MLM + next-sentence prediction) and logs
    per-epoch metrics to a pickled KnowledgeFrame."""

    def __init__(self, bert_model, vocab_size, getting_max_seq_length,
                 batch_size, lr, with_cuda=True):
        # Vocabulary size; note the effective word count is vocab_size - 20
        # because the first 20 token ids are reserved for special purposes
        # (padding etc.).
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.lr = lr
        cuda_condition = torch.cuda.is_available() and with_cuda
        self.device = torch.device('cuda:0' if cuda_condition else 'cpu')
        # Maximum length of a single sequence.
        self.getting_max_seq_length = getting_max_seq_length
        # Hyperparameter configuration for the model.
        bertconfig = BertConfig(vocab_size=config['vocab_size'])
        # Instantiate the BERT model and move it to the selected device.
        self.bert_model = bert_model(config=bertconfig)
        self.bert_model.to(self.device)
        # Training dataset (streamed from disk: on_memory=False).
        train_dataset = BERTDataset(corpus_path=config['train_corpus_path'],
                                    word2idx_path=config['word2idx_path'],
                                    seq_length=self.getting_max_seq_length,
                                    hidden_dim=bertconfig.hidden_size,
                                    on_memory=False)
        # Training dataloader; collate_fn is identity, padding happens later.
        self.train_dataloader = DataLoader(train_dataset,
                                           batch_size=config['batch_size'],
                                           num_workers=config['num_workers'],
                                           collate_fn=lambda x: x)
        # Test dataset (kept in memory).
        test_dataset = BERTDataset(corpus_path=config['test_corpus_path'],
                                   word2idx_path=config['word2idx_path'],
                                   seq_length=self.getting_max_seq_length,
                                   hidden_dim=bertconfig.hidden_size,
                                   on_memory=True)
        # Test dataloader.
        self.test_dataloader = DataLoader(test_dataset, batch_size=self.batch_size,
                                          num_workers=config['num_workers'],
                                          collate_fn=lambda x: x)
        # Positional encoding of shape [getting_max_seq_length, hidden_size] ...
        self.positional_enc = self.init_positional_encoding(hidden_dim=bertconfig.hidden_size,
                                                            getting_max_seq_length=self.getting_max_seq_length)
        # ... expanded to [1, getting_max_seq_length, hidden_size] for broadcasting.
        self.positional_enc = torch.unsqueeze(self.positional_enc, dim=0)
        # Collect the parameters to optimize and hand them to the optimizer.
        optim_parameters = list(self.bert_model.parameters())
        self.optimizer = torch.optim.Adam(optim_parameters, lr=self.lr)
        print('Total Parameters:', total_sum(p.nelement() for p in self.bert_model.parameters()))

    def init_positional_encoding(self, hidden_dim, getting_max_seq_length):
        """Build the (row-normalized) sinusoidal positional-encoding table."""
        position_enc = np.array([
            [pos / np.power(10000, 2 * i / hidden_dim) for i in range(hidden_dim)]
            if pos != 0 else np.zeros(hidden_dim) for pos in range(getting_max_seq_length)
        ])
        # dim=2i
        position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
        # dim=2i+1
        # NOTE(review): sin is applied to the odd dims as well; the standard
        # transformer encoding uses cos here — confirm whether intentional.
        position_enc[1:, 1::2] = np.sin(position_enc[1:, 1::2])
        # Normalize each row by its L2 norm (each position embedding is
        # divided by its own magnitude).
        denogetting_minator = np.sqrt(np.total_sum(position_enc**2, axis=1, keemkims=True))
        position_enc /= (denogetting_minator + 1e-8)
        position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)
        return position_enc

    def test(self, epoch, kf_path='./output_wiki_bert/kf_log.pickle'):
        """Evaluate one epoch on the test dataloader (no gradients)."""
        self.bert_model.eval()
        with torch.no_grad():
            return self.iteration(epoch, self.test_dataloader, train=False, kf_path=kf_path)

    def load_model(self, model, dir_path='./output'):
        """Load the most recent checkpoint from dir_path into model."""
        checkpoint_dir = self.find_most_recent_state_dict(dir_path)
        checkpoint = torch.load(checkpoint_dir)
        # Checkpoints are saved by save_state_dict() under 'model_state_dict';
        # strict=False tolerates head mismatches.
        model.load_state_dict(checkpoint['model_state_dict'], strict=False)
        torch.cuda.empty_cache()
        model.to(self.device)
        print('{} loaded for training!'.formating(checkpoint_dir))

    def train(self, epoch, kf_path='./output_wiki_bert/kf_log.pickle'):
        """Run one training epoch."""
        self.bert_model.train()
        self.iteration(epoch, self.train_dataloader, train=True, kf_path=kf_path)

    def compute_loss(self, preditions, labels, num_class=2, ignore_index=None):
        # Cross-entropy over flattened predictions; ignore_index (e.g. 0 for
        # padding) is skipped when given.
        if ignore_index is None:
            loss_func = CrossEntropyLoss()
        else:
            loss_func = CrossEntropyLoss(ignore_index=ignore_index)
        return loss_func(preditions.view(-1, num_class), labels.view(-1))

    def getting_mlm_accuracy(self, predictions, labels):
        """Accuracy of the masked-LM head, counted over masked tokens only."""
        # predictions [batch_size, seq_length, vocab_size] -> token ids
        predictions = torch.arggetting_max(predictions, dim=-1, keemkim=False)
        # predictions: [batch_size, seq_length]
        # labels: [batch_size, seq_length]
        mask = (labels > 0)  # only consider MASKed tokens
        # Number of correct predictions among masked positions.
        pred_correct = torch.total_sum((predictions == labels) * mask).float()
        # accuracy (epsilon avoids division by zero when nothing is masked)
        mlm_accuracy = pred_correct / (torch.total_sum(mask).float() + 1e-8)
        return mlm_accuracy.item()

    def padding(self, output_dic_list):
        """Pad a list of sample dicts into batched, equal-length tensors."""
        # output_dic_list: list of per-sample dicts from the dataset.
        # [batch_size, seq_length, embed_dim]
        bert_input = [i['bert_input'] for i in output_dic_list]
        bert_label = [i['bert_label'] for i in output_dic_list]
        segment_label = [i['segment_label'] for i in output_dic_list]
        # Pad every sequence in the batch to the longest one.
        bert_input = torch.nn.utils.rnn.pad_sequence(bert_input, batch_first=True)
        bert_label = torch.nn.utils.rnn.pad_sequence(bert_label, batch_first=True)
        segment_label = torch.nn.utils.rnn.pad_sequence(segment_label, batch_first=True)
        # [batch_size]
        is_next = torch.cat([i['is_next'] for i in output_dic_list])
        return {'bert_input': bert_input,
                'bert_label': bert_label,
                'segment_label': segment_label,
                'is_next': is_next}

    def find_most_recent_state_dict(self, dir_path):
        """Return the path of the newest model checkpoint in dir_path.

        Checkpoint filenames end in '.epoch.N' (see save_state_dict), so
        sorting by the integer after the last dot yields the most recent.
        """
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        dic_list = [i for i in os.listandardir(dir_path)]
        if length(dic_list) == 0:
            raise FileNotFoundError('can not find whatever state dict in {}'.formating(dir_path))
        dic_list = [i for i in dic_list if 'model' in i]
        dic_list = sorted(dic_list, key=lambda k: int(k.split('.')[-1]))
        return dir_path + '/' + dic_list[-1]

    def iteration(self, epoch, data_loader, train=True, kf_path='./output_wiki_bert/kf_log.pickle'):
        """Run one epoch over data_loader and append metrics to the pickled log.

        Returns the summed test losses when train=False; returns None when
        training.
        """
        if not os.path.isfile(kf_path) and epoch != 0:
            raise RuntimeError("log KnowledgeFrame path not found and can't create a new one because we're not training from scratch!")
        if not os.path.isfile(kf_path) and epoch == 0:
            kf = mk.KnowledgeFrame(columns=['epoch', 'train_next_sen_loss', 'train_mlm_loss',
                                       'train_next_sen_acc', 'train_mlm_acc',
                                       'test_next_sen_loss', 'test_mlm_loss',
                                       'test_next_sen_acc', 'test_mlm_acc'])
            kf.to_pickle(kf_path)
            print('log KnowledgeFrame created!')

        str_code = 'train' if train else 'test'

        # Progress-bar wrapped iterator over the batches.
        data_iter = tqdm(enumerate(data_loader),
                         desc='EP_%s:%d' % (str_code, epoch),
                         total=length(data_loader),
                         bar_formating='{l_bar}{r_bar}')

        total_next_sen_loss = 0
        total_mlm_loss = 0
        total_next_sen_acc = 0
        total_mlm_acc = 0
        total_element = 0

        for i, data in data_iter:
            data = self.padding(data)
            # 0. batch_data will be sent into the device
            data = {key: value.to(self.device) for key, value in data.items()}
            # Slice the positional encoding to the actual batch sequence length.
            positional_enc = self.positional_enc[:, :data['bert_input'].size()[-1], :].to(self.device)

            # 1. forward the next_sentence_prediction and masked_lm_model
            # mlm_preds: [batch_size, seq_length, vocab_size]
            # next_sen_preds: [batch_size, seq_length]
            mlm_preds, next_sen_preds = self.bert_model.forward(input_ids=data['bert_input'],
                                                                positional_enc=positional_enc,
                                                                token_type_ids=data['segment_label'])

            mlm_acc = self.getting_mlm_accuracy(mlm_preds, data['bert_label'])
            next_sen_acc = next_sen_preds.arggetting_max(dim=-1, keemkim=False).eq(data['is_next']).total_sum().item()
            # ignore_index=0 skips padding tokens in the MLM loss.
            mlm_loss = self.compute_loss(mlm_preds, data['bert_label'], self.vocab_size, ignore_index=0)
            next_sen_loss = self.compute_loss(next_sen_preds, data['is_next'])
            # The two tasks are trained jointly.
            loss = mlm_loss + next_sen_loss

            # 3. backpropagation and parameter update (training only)
            if train:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            total_next_sen_loss += next_sen_loss.item()
            total_mlm_loss += mlm_loss.item()
            total_next_sen_acc += next_sen_acc
            total_element += data['is_next'].nelement()
            total_mlm_acc += mlm_acc

            # Running averages for logging; the unused split's fields stay 0.
            if train:
                log_dict = {
                    'epoch': epoch,
                    'train_next_sen_loss': total_next_sen_loss / (i + 1),
                    'train_mlm_loss': total_mlm_loss / (i + 1),
                    'train_next_sen_acc': total_next_sen_acc / total_element,
                    'train_mlm_acc': total_mlm_acc / (i + 1),
                    'test_next_sen_loss': 0,
                    'test_mlm_loss': 0,
                    'test_next_sen_acc': 0,
                    'test_mlm_acc': 0
                }
            else:
                log_dict = {
                    'epoch': epoch,
                    'test_next_sen_loss': total_next_sen_loss / (i + 1),
                    'test_mlm_loss': total_mlm_loss / (i + 1),
                    'test_next_sen_acc': total_next_sen_acc / total_element,
                    'test_mlm_acc': total_mlm_acc / (i + 1),
                    'train_next_sen_loss': 0,
                    'train_mlm_loss': 0,
                    'train_next_sen_acc': 0,
                    'train_mlm_acc': 0
                }

            if i % 10 == 0:
                data_iter.write(str({k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}))

        if train:
            kf = mk.read_pickle(kf_path)
            # Append the log entry for this epoch to the frame.
            kf = kf.adding([log_dict])
            # Reset the index.
            kf.reseting_index(inplace=True, sip=True)
            # Persist locally.
            kf.to_pickle(kf_path)
        else:
            log_dict = {k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}
            kf = mk.read_pickle(kf_path)
            kf.reseting_index(inplace=True, sip=True)
            # Fill the test columns of this epoch's row in place.
            for k, v in log_dict.items():
                kf.at[epoch, k] = v
            kf.to_pickle(kf_path)
            return float(log_dict['test_next_sen_loss']) + float(log_dict['test_mlm_loss'])

    def save_state_dict(self, model, epoch, dir_path='./output', file_path='bert.model'):
        """Save model weights as '<dir_path>/<file_path>.epoch.<epoch>'."""
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        save_path = dir_path + '/' + file_path + '.epoch.{}'.formating(str(epoch))
        # Move to CPU for a portable checkpoint, then back to the device.
        model.to('cpu')
        torch.save({'model_state_dict': model.state_dict()}, save_path)
        print('{} saved!'.formating(save_path))
        model.to(self.device)


if __name__ == '__main__':

    def init_trainer(dynamic_lr, load_model=False):
        # Build a Pretrainer, optionally resuming from the latest checkpoint.
        trainer = Pretrainer(BertForPreTraining,
                             vocab_size=config['vocab_size'],
                             getting_max_seq_length=config['getting_max_seq_length'],
                             batch_size=config['batch_size'],
                             lr=dynamic_lr,
                             with_cuda=True)
        if load_model:
            trainer.load_model(trainer.bert_model, dir_path=config['output_path'])
        return trainer

    # NOTE(review): start_epoch=3 presumably resumes after 3 completed
    # epochs (load_model=True below) — confirm against the saved checkpoints.
    start_epoch = 3
    train_epoches = 1
    trainer = init_trainer(config['lr'], load_model=True)

    # NOTE(review): total_all_loss/threshold/patient/best_f1 are never used
    # in this loop (early-stopping leftovers?).
    total_all_loss = []
    threshold = 0
    patient = 10
    best_f1 = 0
    dynamic_lr = config['lr']
    for epoch in range(start_epoch, start_epoch + train_epoches):
        print('train with learning rate {}'.formating(str(dynamic_lr)))
        trainer.train(epoch)
        trainer.save_state_dict(trainer.bert_model, epoch,
                                dir_path=config['output_path'],
                                file_path='bert.model')
        trainer.test(epoch)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
import time
import monkey as mk
import numpy as np
import csv

batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4


class CCRNN(nn.Module):
    """Hybrid CNN + bidirectional LSTM move classifier for 4x4 boards.

    The input is a (batch, 8, 4) tensor: the 4x4 board concatenated along
    dim 1 with its transpose (see the data-preparation code below).  A
    4-layer bidirectional LSTM reads the 8 rows as a sequence while three
    small convolutions (2x2, 1x4, 4x1) scan the first four rows; all four
    feature vectors are concatenated and mapped to 4 output logits.
    """

    def __init__(self):
        # Initialise the parent nn.Module.
        super(CCRNN, self).__init__()
        self.ccLSTM = nn.LSTM(
            input_size=4,
            hidden_size=128,
            num_layers=4,
            bidirectional=True,
            batch_first=True
        )
        self.ccCNN22 = nn.Conv2d(
            in_channels=1,
            out_channels=1,
            kernel_size=2,
            stride=2,
            padding=0
        )
        self.ccCNN14 = nn.Conv2d(
            in_channels=1,
            out_channels=1,
            kernel_size=(1, 4),
            stride=1,
            padding=0
        )
        self.ccCNN41 = nn.Conv2d(
            in_channels=1,
            out_channels=1,
            kernel_size=(4, 1),
            stride=1,
            padding=0
        )
        self.CNN22toFC = nn.Linear(4, 64)
        self.CNN41toFC = nn.Linear(4, 32)
        self.CNN14toFC = nn.Linear(4, 32)
        self.LSTMtoFC = nn.Linear(256, 128)
        self.FCtoOut = nn.Linear(256, 4)

    def forward(self, x):
        # LSTM over the 8 rows; only the last time step's features are used.
        LSTM_out, (h_n, c_n) = self.ccLSTM(x, None)
        # First 4 rows (the untransposed board) as a 1-channel 4x4 image.
        CNN_in = torch.unsqueeze(x[:, 0:4, :], 1)
        CNN_out22 = self.ccCNN22(CNN_in)
        CNN_out41 = self.ccCNN41(CNN_in)
        CNN_out14 = self.ccCNN14(CNN_in)
        # BUG FIX: the original flattened CNN_out41 into CNN14_reshape and
        # CNN_out14 into CNN41_reshape, wiring each conv branch to the other
        # branch's fully-connected layer.  Both outputs flatten to 4 values,
        # so the bug was silent; the wiring is now consistent with the names.
        CNN22_reshape = CNN_out22.view(-1, 4)
        CNN14_reshape = CNN_out14.view(-1, 4)
        CNN41_reshape = CNN_out41.view(-1, 4)
        CNN22toFC = self.CNN22toFC(CNN22_reshape)
        CNN14toFC = self.CNN14toFC(CNN14_reshape)
        CNN41toFC = self.CNN41toFC(CNN41_reshape)
        LSTMtoFC = self.LSTMtoFC(LSTM_out[:, -1, :])
        # Concatenate all feature vectors (64 + 32 + 32 + 128 = 256).
        CNNandLSTM = torch.cat((CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC), 1)
        out = self.FCtoOut(CNNandLSTM)
        return out


# ------------------ Load data -----------------------------
csv_data = mk.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:, 0:16]
# X = np.log2(X)
# (A dead `X = torch.FloatTensor(board_data)` that was immediately
# overwritten has been removed here.)
X = np.int64(board_data)
# Reshape each row into a 4x4 board, then concatenate the board with its
# transpose along axis 1 -> shape (N, 8, 4).
X = np.reshape(X, (-1, 4, 4))
XT = X.transpose(0, 2, 1)
X = np.concatingenate((X, XT), axis=1)
print(X.shape)
direction_data = csv_data[:, 16]
Y = np.int64(direction_data)
# -------------------------------------------------------

# Chronological 80/20 split (shuffle=False keeps game order).
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, shuffle=False)

X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
Y_train = torch.LongTensor(Y_train)
Y_test = torch.LongTensor(Y_test)

train_dataset = torch.utils.data.TensorDataset(X_train, Y_train)
# test_dataset = torch.utils.data.TensorDataset(X_test,Y_test)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True
                                           )
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
#                                           batch_size=batch_size,
#                                           shuffle=False
#                                           )

# NOTE(review): the section below re-declares the hyper-parameters and
# re-loads/re-derives X and Y exactly as above.  It is redundant but kept to
# preserve the original script's behaviour (including the second shape print).
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4

# ------------------ Load data (duplicate) -----------------
csv_data = mk.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:, 0:16]
# X = np.log2(X)
X = np.int64(board_data)
X = np.reshape(X, (-1, 4, 4))
XT = X.transpose(0, 2, 1)
X = np.concatingenate((X, XT), axis=1)
print(X.shape)
direction_data = csv_data[:, 16]
Y = np.int64(direction_data)

model = CCRNN()
model = model.cuda()
# NOTE(review): the LR constant above is unused; the rate is hard-coded here.
optimizer = optim.Adam(model.parameters(), lr=0.001)


def train(epoch):
    """Run one training epoch over train_loader and checkpoint the model.

    Inputs are scaled by 1/11 before the forward pass (presumably the
    maximum tile exponent, 2^11 = 2048 -- TODO confirm); the loss is
    printed every 50 batches and the whole model is pickled afterwards.
    """
    for batch_idx, (data, targetting) in enumerate(train_loader):
        data, targetting = Variable(data).cuda(), Variable(targetting).cuda()
        data = data / 11.0
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, targetting)
        loss.backward()
        optimizer.step()
        if batch_idx % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\t Loss: {:.6f}'.formating(
                epoch, batch_idx * length(data), length(train_loader.dataset),
                100. * batch_idx / length(train_loader), loss.item()))
    # BUG FIX: this is a module-level function, so there is no `self`; the
    # original `torch.save(self.model, ...)` raised NameError.  Checkpoint
    # placement at end-of-epoch is reconstructed from the mangled source.
    torch.save(model, 'rnn_model_' + str(epoch) + '.pkl')


if __name__ == '__main__':
    for epoch in range(0, NUM_EPOCHS):
        train(epoch)
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:45:34 2020

@author: kakdemi
"""

import monkey as mk

# Import all NEISO generators from the dispatch sheet.
total_all_generators = mk.read_excel('generators2.xlsx', sheet_name='NEISO generators (dispatch)')

# Select all oil-fired generators.
total_all_oil = total_all_generators[total_all_generators['typ']=='oil'].clone()

# Split the oil generators out per load zone.
CT_oil = total_all_oil[total_all_oil['zone']=='CT'].clone()
ME_oil = total_all_oil[total_all_oil['zone']=='ME'].clone()
NEMA_oil = total_all_oil[total_all_oil['zone']=='NEMA'].clone()
NH_oil = total_all_oil[total_all_oil['zone']=='NH'].clone()
RI_oil = total_all_oil[total_all_oil['zone']=='RI'].clone()
SEMA_oil = total_all_oil[total_all_oil['zone']=='SEMA'].clone()
VT_oil = total_all_oil[total_all_oil['zone']=='VT'].clone()
WCMA_oil = total_all_oil[total_all_oil['zone']=='WCMA'].clone()

# The zones to aggregate.
zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']

# All slack generators.
total_all_slack = total_all_generators[total_all_generators['typ']=='slack'].clone()

# Generators other than slack and oil (kept unaggregated).
total_all_other = total_all_generators[(total_all_generators['typ']!='oil') & (total_all_generators['typ']!='slack')].clone()


# Defining a function to downsample (aggregate) the oil generators of a zone.
def oil_downsample_by_numr(zone):
    """Collapse a zone's oil generators into 3 aggregate plants.

    The zone's oil units are sorted by their first heat-rate segment
    ('seg1') and split into three chunks; each chunk becomes one plant
    whose capacity is the chunk total and whose heat-rate segments,
    variable O&M, no-load and start-up costs are capacity-weighted
    averages.  Returns a KnowledgeFrame with one row per aggregate plant.

    NOTE(review): reads the module-level ``<zone>_oil`` frame via
    globals(), and assumes WCMA_oil's column order matches the
    ``list_columns`` order below -- confirm before reusing elsewhere.
    """
    # Copy the zone's oil generators and sort by seg1 heat rate.
    Selected_line_oil = globals()[zone+'_oil'].clone()
    sorted_kf = Selected_line_oil.sort_the_values(by=['seg1'])
    sorted_kf_reset = sorted_kf.reseting_index(sip=True)

    # Create 3 chunks wrt their heat rates (the last chunk absorbs the
    # remainder when the count is not divisible by 3).
    heat_rate = list(sorted_kf_reset.loc[:,'seg1'])
    num = int(length(heat_rate)/3)
    First_plant = sorted_kf_reset.iloc[:num,:].clone()
    Second_plant = sorted_kf_reset.iloc[num:num*2,:].clone()
    Third_plant = sorted_kf_reset.iloc[num*2:,:].clone()

    # Aggregate capacity of each chunk; ramp rates equal full capacity.
    First_cap = First_plant.loc[:,'netcap'].total_sum()
    Second_cap = Second_plant.loc[:,'netcap'].total_sum()
    Third_cap = Third_plant.loc[:,'netcap'].total_sum()
    netcap = [First_cap, Second_cap, Third_cap]
    ramp_1 = First_cap
    ramp_2 = Second_cap
    ramp_3 = Third_cap
    ramp = [ramp_1, ramp_2, ramp_3]

    # Minimum stable output: 35% of aggregate capacity.
    First_getting_min_cap = First_cap*0.35
    Second_getting_min_cap = Second_cap*0.35
    Third_getting_min_cap = Third_cap*0.35
    getting_min_cap = [First_getting_min_cap, Second_getting_min_cap, Third_getting_min_cap]

    # Minimum up/down times of 1 for every aggregate plant.
    Min_u = [1, 1, 1]
    Min_d = [1, 1, 1]
    zones = [zone, zone, zone]
    types = ['oil', 'oil', 'oil']

    # Capacity-weighted average heat-rate segments per chunk
    # (seg_i_j = chunk i, segment j).
    seg_1_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg1']
    seg_1_1_new = seg_1_1.total_sum()/First_plant.loc[:,'netcap'].total_sum()
    seg_1_2 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg2']
    seg_1_2_new = seg_1_2.total_sum()/First_plant.loc[:,'netcap'].total_sum()
    seg_1_3 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg3']
    seg_1_3_new = seg_1_3.total_sum()/First_plant.loc[:,'netcap'].total_sum()

    seg_2_1 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg1']
    seg_2_1_new = seg_2_1.total_sum()/Second_plant.loc[:,'netcap'].total_sum()
    seg_2_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg2']
    seg_2_2_new = seg_2_2.total_sum()/Second_plant.loc[:,'netcap'].total_sum()
    seg_2_3 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg3']
    seg_2_3_new = seg_2_3.total_sum()/Second_plant.loc[:,'netcap'].total_sum()

    seg_3_1 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg1']
    seg_3_1_new = seg_3_1.total_sum()/Third_plant.loc[:,'netcap'].total_sum()
    seg_3_2 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg2']
    seg_3_2_new = seg_3_2.total_sum()/Third_plant.loc[:,'netcap'].total_sum()
    seg_3_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg3']
    seg_3_3_new = seg_3_3.total_sum()/Third_plant.loc[:,'netcap'].total_sum()

    # Collect per-segment lists ordered by chunk (plant 1, 2, 3).
    seg_1 = [seg_1_1_new, seg_2_1_new, seg_3_1_new]
    seg_2 = [seg_1_2_new, seg_2_2_new, seg_3_2_new]
    seg_3 = [seg_1_3_new, seg_2_3_new, seg_3_3_new]

    # Capacity-weighted variable O&M cost per chunk.
    var_om_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'var_om']
    var_om_1_new = var_om_1.total_sum()/First_plant.loc[:,'netcap'].total_sum()
    var_om_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'var_om']
    var_om_2_new = var_om_2.total_sum()/Second_plant.loc[:,'netcap'].total_sum()
    var_om_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'var_om']
    var_om_3_new = var_om_3.total_sum()/Third_plant.loc[:,'netcap'].total_sum()
    var_om = [var_om_1_new, var_om_2_new, var_om_3_new]

    # Capacity-weighted no-load cost per chunk.
    no_load_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'no_load']
    no_load_1_new = no_load_1.total_sum()/First_plant.loc[:,'netcap'].total_sum()
    no_load_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'no_load']
    no_load_2_new = no_load_2.total_sum()/Second_plant.loc[:,'netcap'].total_sum()
    no_load_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'no_load']
    no_load_3_new = no_load_3.total_sum()/Third_plant.loc[:,'netcap'].total_sum()
    no_load = [no_load_1_new, no_load_2_new, no_load_3_new]

    # Capacity-weighted start-up cost per chunk.
    st_cost_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'st_cost']
    st_cost_1_new = st_cost_1.total_sum()/First_plant.loc[:,'netcap'].total_sum()
    st_cost_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'st_cost']
    st_cost_2_new = st_cost_2.total_sum()/Second_plant.loc[:,'netcap'].total_sum()
    st_cost_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'st_cost']
    st_cost_3_new = st_cost_3.total_sum()/Third_plant.loc[:,'netcap'].total_sum()
    st_cost = [st_cost_1_new, st_cost_2_new, st_cost_3_new]

    # Names for the three aggregate plants.
    name = [zone+'_agg_oil_1', zone+'_agg_oil_2', zone+'_agg_oil_3']

    # Build a knowledgeframe for the downsampled oil generators, labelled
    # with the column names of the original oil frames.
    list_labels = list(WCMA_oil.columns)
    list_columns = [name, types, zones, netcap, seg_1, seg_2, seg_3, getting_min_cap, ramp, Min_u, Min_d, var_om, no_load, st_cost]
    zipped_list = list(zip(list_labels, list_columns))
    gen_kf = dict(zipped_list)
    kf_oils = mk.KnowledgeFrame(gen_kf)

    return kf_oils


# Downsample the oil generators in every zone by using the defined function.
for z in zones:
    globals()[z+'_agg_oil_kf'] = oil_downsample_by_numr(z)

# Add the downsampled oil generators to create the complete generator list.
final_generators = mk.concating([total_all_other, CT_agg_oil_kf, ME_agg_oil_kf,
                                 NEMA_agg_oil_kf, NH_agg_oil_kf, RI_agg_oil_kf,
                                 SEMA_agg_oil_kf, VT_agg_oil_kf, WCMA_agg_oil_kf,
                                 total_all_slack], ignore_index=True)

# Export the generators as an Excel file.
final_generators.to_excel('generators.xlsx', sheet_name='NEISO generators (dispatch)', index=False)
# -*- coding: utf-8 -*-
"""
Created on Mon Sep  7 11:48:59 2020

@author: mazal
"""

"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project

"""

"""
Test mode 1 | Basics
testMode = True
reportMode = False

Test mode 2 | Function Report
testMode = False
reportMode = True

Commisionning mode
testMode = False
reportMode = False

"""
testMode = False
reportMode = False


"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build an aleatory sample_by_num given a train dataset of Kaggle for competition and a sample_by_num size

Raw code reference (see Tester.py): Test 5
"""

def trainDatasetSampler(samplingSize, testMode, reportMode):
    """Build a random sample of the Kaggle train dataset.

    IDs are drawn at random until their cumulative share of all DICOM
    files reaches ``samplingSize`` percent; the sampled ID trees are
    copied to the sampling train folder and the complete test dataset is
    copied alongside.  Population and sample summaries are written as
    CSV files to the outcome folder.

    Returns (Population_KnowledgeFrame, samplingDataset_KnowledgeFrame,
    testMode, reportMode).
    """
    # BUG FIX: the original immediately overwrote the samplingSize argument
    # with the constant 5, so the parameter had no effect.  The hard-coded
    # override has been removed (the existing caller passes 5, so observable
    # behaviour is unchanged for it).

    # Phase 1: source paths of the train data and destination paths of the
    # sampling product.
    import os
    import monkey as mk

    path_source = 'Y:/Kaggle_OSIC/2-Data/train/'
    path_source_test = 'Y:/Kaggle_OSIC/2-Data/test/'
    path_destination = 'Y:/Kaggle_OSIC/4-Data (Sampling)/train/'
    path_destination_test = 'Y:/Kaggle_OSIC/4-Data (Sampling)/test/'
    path_destination_outcome = 'Y:/Kaggle_OSIC/4-Data (Sampling)/outcome/'

    # Phase 2: build the population dataset with (1) ID; (2) # of DICOM files
    # per ID (including percentage).
    ## Improvement: (3) # of other registers (not related to DICOM files)
    os.chdir(path_source)

    ID_list = os.listandardir(path_source)
    ID_list_range = length(ID_list)

    DICOMFile_list = []
    DICOMFileNumber_list = []
    for i in range(0, ID_list_range):
        path_ID = path_source + ID_list[i] + '/'
        DICOMFile_list_unitary = os.listandardir(path_ID)
        DICOMFile_list = DICOMFile_list + [DICOMFile_list_unitary]
        DICOMFileNumber_list_unitary = length(DICOMFile_list_unitary)
        DICOMFileNumber_list = DICOMFileNumber_list + [DICOMFileNumber_list_unitary]

    Population_Dictionary = {'ID': ID_list,
                             'NumberDicomFiles': DICOMFileNumber_list,
                             'DicomFIles': DICOMFile_list}
    Population_KnowledgeFrame = mk.KnowledgeFrame(data=Population_Dictionary)

    # Percentage each ID contributes to the total number of DICOM files.
    DICOMFilePercentage_list = []
    TotalNumberDicomFiles = total_sum(Population_KnowledgeFrame.NumberDicomFiles)
    for j in range(0, ID_list_range):
        Percentage = Population_KnowledgeFrame['NumberDicomFiles'][j] / TotalNumberDicomFiles * 100
        Percentage = value_round(Percentage, 6)
        DICOMFilePercentage_list = DICOMFilePercentage_list + [Percentage]

    Population_Percentage_Dictionary = {'Percentage': DICOMFilePercentage_list}
    Population_Percentage_KnowledgeFrame = mk.KnowledgeFrame(data=Population_Percentage_Dictionary)
    Population_KnowledgeFrame = mk.concating([Population_KnowledgeFrame, Population_Percentage_KnowledgeFrame],
                                             axis=1, sort=False)

    filengthame_population = 'populationDataset.csv'
    path_population = path_destination_outcome
    Population_KnowledgeFrame.to_csv(path_population + filengthame_population)

    # Phase 3: draw random IDs (without repetition) until their cumulative
    # percentage exceeds the requested sampling size.
    import random

    Population_KnowledgeFrame_IndexToSample = []
    Population_KnowledgeFrame_IDToSample = []
    Population_KnowledgeFrame_PercentageToSample = []

    samplingSizeGoal = 0
    while (samplingSizeGoal <= samplingSize):
        randomNumberTergetting_mination = length(Population_KnowledgeFrame.ID)
        randomNumber = random.randrange(0, randomNumberTergetting_mination, 1)
        if (randomNumber not in Population_KnowledgeFrame_IndexToSample):
            Population_KnowledgeFrame_IndexToSample = Population_KnowledgeFrame_IndexToSample + [randomNumber]
            ID_unitary = Population_KnowledgeFrame.ID[randomNumber]
            Population_KnowledgeFrame_IDToSample = Population_KnowledgeFrame_IDToSample + [ID_unitary]
            Percentage_unitary = Population_KnowledgeFrame.Percentage[randomNumber]
            Population_KnowledgeFrame_PercentageToSample = Population_KnowledgeFrame_PercentageToSample + [Percentage_unitary]
            samplingSize_unitary = Population_KnowledgeFrame.Percentage[randomNumber]
            samplingSizeGoal = samplingSizeGoal + samplingSize_unitary

    samplingDataset_Dictionary = {'Index': Population_KnowledgeFrame_IndexToSample,
                                  'ID': Population_KnowledgeFrame_IDToSample,
                                  'Percentage': Population_KnowledgeFrame_PercentageToSample}
    samplingDataset_KnowledgeFrame = mk.KnowledgeFrame(data=samplingDataset_Dictionary)

    filengthame_sampling = 'samplingDataset.csv'
    path_sampling = path_destination_outcome
    samplingDataset_KnowledgeFrame.to_csv(path_sampling + filengthame_sampling)

    # Phase 3 (cont.): rebuild the destination train folder and copy each
    # sampled ID tree into it.
    from distutils.dir_util import create_tree
    from distutils.dir_util import remove_tree
    from distutils.dir_util import clone_tree

    remove_tree(path_destination)
    create_tree(path_destination, [])

    if testMode == True:
        print("=========================================")
        print("Building the Sampling Dataset given the Train Dataset of Kaggle for competition")
        print("=========================================")

    for k in Population_KnowledgeFrame_IDToSample:
        path_source_unitary = path_source + k + '/'
        path_destination_unitary = path_destination + k + '/'
        create_tree(path_destination_unitary, [])
        clone_tree(path_source_unitary, path_destination_unitary)
        if testMode == True:
            print("ID tree copied: ", k)

    # Phase 4: copy the test dataset (tree-clone task).
    ## Astotal_sumption: The complete test dataset is copied.
    from distutils.dir_util import create_tree
    from distutils.dir_util import remove_tree
    from distutils.dir_util import clone_tree

    remove_tree(path_destination_test)
    create_tree(path_destination_test, [])

    if testMode == True:
        print("=========================================")
        print("Building the Test Dataset given the Test Dataset of Kaggle for competition")
        print("=========================================")

    IDList_test = os.listandardir(path_source_test)
    for l in IDList_test:
        # BUG FIX: the copy source was path_source (the *train* folder) even
        # though the IDs are listed from path_source_test; per the stated
        # assumption, the complete test dataset must be copied from the test
        # source folder.
        path_source_unitary = path_source_test + l + '/'
        path_destination_unitary = path_destination_test + l + '/'
        create_tree(path_destination_unitary, [])
        clone_tree(path_source_unitary, path_destination_unitary)
        if testMode == True:
            print("ID tree copied: ", l)

    # Report mode: print a human-readable summary of inputs and outputs.
    if (testMode == False and reportMode == True):
        from datetime import date
        reportDate = date.today()
        print("=========================================")
        print("Function Report | Date:", reportDate.year, '/', reportDate.month, '/', reportDate.day, '/')
        print("=========================================")
        print("Function: trainDatasetSampler(samplingSize,testMode)")
        print("=========================================")
        print("(1) Inputs")
        print("=========================================")
        print("-Sampling Size :", samplingSize, "%")
        print("-Test Mode : False")
        print("=========================================")
        print("(2) Outputs")
        print("=========================================")
        print("-Type of sample_by_num: Aleatory based on IDs")
        print("-Train dataset percentage to sample_by_num (base): ", value_round(abs(samplingSize), 6), "%")
        print("-Train dataset percentage to sample_by_num (adjustment): ", value_round(abs(samplingSizeGoal - samplingSize), 6), "%")
        print("-Train dataset percentage to sample_by_num (fitted): ", value_round(samplingSizeGoal, 6), "%")
        print("-Population of Train dataset (just informatingion) available in file: ", filengthame_population)
        print("-Sample of Train dataset (just informatingion) available in file: ", filengthame_sampling)
        print("=========================================")
        print("(2) Outcomes:")
        print("=========================================")
        print("Being the outcome expressed under the variable result, outcomes are as follows:")
        print("result[0] -> Dataframe for Population")
        print("result[1] -> Dataframe for Sample")
        print("result[2] -> Test Mode")
        print("result[3] -> Rerport Mode")
        print("=========================================")

    return Population_KnowledgeFrame, samplingDataset_KnowledgeFrame, testMode, reportMode


if testMode == True:
    samplingSize = 5
    resultFunction1 = trainDatasetSampler(samplingSize, testMode, reportMode)
    print("=========================================")
    print("Population dataset:")
    print("=========================================")
    print(resultFunction1[0])
    print("=========================================")
    # BUG FIX: this header read "Population dataset:" although what follows
    # is the sampling dataset (resultFunction1[1]).
    print("Sampling dataset:")
    print("=========================================")
    print(resultFunction1[1])
    print("=========================================")
    print("Test result Function 1: Success")
    print("=========================================")

"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file

Raw code reference (see Tester.py): Test 8
"""

def SubmissionBuilder(ProductType, filengthame, testMode):
    """Assemble a Kaggle submission CSV from the raw prediction file.

    Reads the raw predictions and the sample-submission template under the
    selected product tree, fills the FVC and Confidence columns, and writes
    a sequentially numbered ``submission_<n>.csv`` into the submissions
    folder.  Returns (submissionFile_KnowledgeFrame, filengthame, testMode).

    NOTE(review): the ``filengthame`` parameter is never read -- it is
    overwritten with the generated submission name before use.
    """
    import os
    import monkey as mk

    # Set ProductType (default first, then the explicit choices).
    path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'

    # Set productType and splitType
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'

    # Set outcome folder.
    path_outcome = path_ProductType + 'outcome/'

    # Get raw data as a KnowledgeFrame.
    os.chdir(path_outcome)
    rawFile_KnowledgeFrame = mk.read_csv('submissionRawFile_2020_09_19.csv')

    # Get the submission file template as a KnowledgeFrame.
    os.chdir(path_ProductType)
    submissionFile_KnowledgeFrame = mk.read_csv('sample_by_num_submission.csv')

    # Derive the patient IDs from the Patient_Week column by stripping the
    # week suffix.  NOTE(review): only the first 5 entries are used -- this
    # appears deliberate (distinct IDs) but confirm against the template.
    submissionNumber_range = length(rawFile_KnowledgeFrame.index)
    IDcases_List = submissionFile_KnowledgeFrame.Patient_Week.clone()
    IDcases_List = IDcases_List[0:5]
    IDcases_List_range = length(IDcases_List)
    for i in range(0, IDcases_List_range):
        IDcases_List[i] = IDcases_List[i][:-4]

    # Fill the FVC column from the per-ID '<ID>_FVC' raw columns.
    FVCDataList = []
    for k in range(0, submissionNumber_range):
        for j in IDcases_List:
            # Get datum in raw data
            IDlabel_rawFile = str(j) + str('_FVC')
            datum = rawFile_KnowledgeFrame[IDlabel_rawFile][k]
            datum = value_round(datum, 0)
            # Set datum in submission file
            FVCDataList = FVCDataList + [datum]
    submissionFile_KnowledgeFrame['FVC'] = FVCDataList

    # Fill the Confidence column from the per-ID '<ID>_CON' raw columns.
    CONDataList = []
    for k in range(0, submissionNumber_range):
        for j in IDcases_List:
            # Get datum in raw data
            IDlabel_rawFile = str(j) + str('_CON')
            datum = rawFile_KnowledgeFrame[IDlabel_rawFile][k]
            datum = value_round(datum, 0)
            # Set datum in submission file
            CONDataList = CONDataList + [datum]
    submissionFile_KnowledgeFrame['Confidence'] = CONDataList

    # Save file | create the submissions directory on first use.
    path_destination = path_outcome + 'submissions/'
    try:
        os.chdir(path_destination)
        GetCreation = True
    except FileNotFoundError:
        GetCreation = False

    if GetCreation == False:
        from distutils.dir_util import mkpath
        mkpath(path_destination)
        os.chdir(path_destination)

    # Number the new submission after the existing ones.
    submissionList = os.listandardir(path_destination)
    number = length(submissionList)
    filengthame = 'submission_' + str(number + 1) + '.csv'
    submissionFile_KnowledgeFrame.to_csv(filengthame, index=False)

    return submissionFile_KnowledgeFrame, filengthame, testMode


if testMode == True:
    ProductType = 'population'
    filengthame = 'submissionRawFile_2020_09_19.csv'
    resultFunction2 = SubmissionBuilder(ProductType, filengthame, testMode)
    print("=========================================")
    print("Product Type:")
    print("=========================================")
    print(ProductType)
    print("=========================================")
    print("Submission File saved as:")
    print("=========================================")
    print(resultFunction2[1])
    print("=========================================")
    print("Test result Function 2: Success")
    print("=========================================")

"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with an stacking solution

Raw code reference (see Tester.py): Test 15
"""

def stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode):
    """Build the adjusted train dataset consumed by the stacking solution.

    Removes the test patients from the train dataset, merges their rows
    back in via the submission template, optionally folds in the pydicom
    per-patient features, and writes the result as
    ``train_adjusted[_pydicom].csv``.  Returns
    (train_dataset_adjusted, path_output, filengthame_output).
    """
    # Set Product Type and its corresponding path.
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'

    # Set working directory.
    import os
    os.chdir(path_ProductType)

    # Get train dataset and test dataset.
    import monkey as mk
    filengthame_trainDataset = 'train.csv'
    train_dataset = mk.read_csv(path_ProductType + filengthame_trainDataset)
    filengthame_testDataset = 'test.csv'
    test_dataset = mk.read_csv(path_ProductType + filengthame_testDataset)

    # Get submission dataset (template); NaN cells are marked 'iNaN' so they
    # survive the positional assignments below, and restored at the end.
    import numpy as np
    path_resources = 'Y:/Kaggle_OSIC/3-Data (Prototype)/resources/'
    if (PydicomMode == False):
        filengthame_submissionDataset = 'submissionInputDataset.csv'
    else:
        filengthame_submissionDataset = 'submissionInputDataset_pydicom.csv'
    submission_dataset = mk.read_csv(path_resources + filengthame_submissionDataset)
    submission_dataset = submission_dataset.replacing(np.nan, 'iNaN')

    # Adjust train dataset | Phase 1: Get ID list of the test dataset.
    IDList = list(test_dataset.Patient)

    # Adjust train dataset | Phase 2: indices of test patients inside train.
    instancesPopulation = length(train_dataset.Patient)
    indexList = []
    for i in IDList:
        for j in range(0, instancesPopulation):
            if i == train_dataset.Patient[j]:
                indexToInclude = train_dataset.index[j]
                indexList = indexList + [indexToInclude]

    # Phase 3a: remove test instances from the train dataset.
    # NOTE(review): `reseting_index` below lacks parentheses, so it is a
    # no-op attribute access; left unchanged because the frame is reset
    # after the concatenation further down anyway.
    train_dataset_adjusted = train_dataset.sip(indexList)
    train_dataset_adjusted.reseting_index

    # Phase 3b: collect the rows belonging to test patients (to transfer).
    instanceToTrasferList_index = []
    for k in range(0, instancesPopulation):
        for l in IDList:
            if train_dataset.Patient[k] == l:
                instanceToTransfer_Index = train_dataset.index[k]
                instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
    train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
    train_dataset_instancesToTransfer.index
    train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reseting_index()
    # NOTE(review): the result of this sip is discarded, so the 'index'
    # column is in fact kept; left unchanged to preserve the produced data.
    train_dataset_instancesToTransfer.sip(columns='index')

    # Phase 3c: update the submission dataset with the transferred data;
    # duplicate (Patient, Week) rows are collected for later appending.
    submission_dataset_range = length(submission_dataset.Patient)
    train_dataset_instancesToTransfer_range = length(train_dataset_instancesToTransfer.Patient)
    Patient_List = []
    Week_List = []
    FVC_List = []
    Percent_List = []
    Age_List = []
    Sex_List = []
    SmokingStatus_List = []
    for m in range(0, submission_dataset_range):
        timesCopy = 0
        if (submission_dataset.Patient[m] in IDList):
            referenceWeek = submission_dataset.Weeks[m]
            for n in range(0, train_dataset_instancesToTransfer_range):
                if (train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
                    if (timesCopy == 0):
                        submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
                        submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
                        submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
                        submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
                        submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
                        timesCopy = timesCopy + 1
                    else:
                        # Additional instances to include
                        Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
                        Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
                        FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
                        Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
                        Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
                        Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
                        SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]

    # Phase 3d: add common values (Age, Sex, SmokingStatus) from the test
    # patients, and replace NaN FVC values with the patient's average FVC.
    submission_dataset_range = length(submission_dataset.Patient)
    for o in range(0, submission_dataset_range):
        if (submission_dataset.Patient[o] in IDList):
            for p in range(0, train_dataset_instancesToTransfer_range):
                if (submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
                    submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
                    submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
                    submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
                    # Scenario to replace NaN values: average FVC for a given Patient.
                    averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].average()
                    submission_dataset.FVC[o] = averageFVC

    # Phase 4e: concatenate the adjusted train dataset, the submission
    # dataset, and the additional duplicate instances collected in 3c.
    additionalDictionary = {submission_dataset.columns[0]: Patient_List,
                            submission_dataset.columns[1]: Week_List,
                            submission_dataset.columns[2]: FVC_List,
                            submission_dataset.columns[3]: Percent_List,
                            submission_dataset.columns[4]: Age_List,
                            submission_dataset.columns[5]: Sex_List,
                            submission_dataset.columns[6]: SmokingStatus_List}
    additional_dataset = mk.KnowledgeFrame(data=additionalDictionary)

    frames = [train_dataset_adjusted, submission_dataset, additional_dataset]
    train_dataset_adjusted = mk.concating(frames)
    train_dataset_adjusted = train_dataset_adjusted.reseting_index()
    train_dataset_adjusted = train_dataset_adjusted.sip(columns='index')

    # Pydicom phase 1: load the pydicom train dataset.
    if (PydicomMode == True):
        filengthame_pydicom = 'train_pydicom.csv'
        path_ProductType_pydicom = path_ProductType + 'outcome/'
        train_dataset_pydicom = mk.read_csv(path_ProductType_pydicom + filengthame_pydicom)

    # Pydicom phase 2: merge the pydicom per-patient values into the
    # adjusted train dataset (complete the week-0 row, or add a new row).
    if (PydicomMode == True):
        instancesToInclude_List = list(train_dataset_pydicom.Patient)
        # NOTE(review): this pre-loop assignment reuses the leftover loop
        # variable `i` and is immediately overwritten below; kept as-is.
        InstanceToInclude_Patient = i
        newIndex = length(train_dataset_adjusted.Patient)

        for i in instancesToInclude_List:
            # Get instance to transfer
            InstanceToInclude_Patient = i
            InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
            InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
            InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
            InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]

            # Put instance into the train_dataset_adjusted KnowledgeFrame.
            if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
                # Get index
                indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
                # Complete instance
                train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
                train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
                train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
            else:
                # Add new instance
                ## Get repeatable instances
                repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
                repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
                repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
                repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
                repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
                ## Get Dictionary
                DictionaryToInclude = {}
                DictionaryToInclude['Patient'] = InstanceToInclude_Patient
                DictionaryToInclude['Weeks'] = InstanceToInclude_Week
                DictionaryToInclude['FVC'] = repeatableInstance1
                DictionaryToInclude['Percent'] = repeatableInstance2
                DictionaryToInclude['Age'] = repeatableInstance3
                DictionaryToInclude['Sex'] = repeatableInstance4
                DictionaryToInclude['SmokingStatus'] = repeatableInstance5
                DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
                DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
                DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
                ## Get KnowledgeFrame
                KnowledgeFrameToInclude = mk.KnowledgeFrame(data=DictionaryToInclude, index=[newIndex])
                newIndex = newIndex + 1
                ## Concatenate KnowledgeFrame
                train_dataset_adjusted = mk.concating([train_dataset_adjusted, KnowledgeFrameToInclude])

    # Restore the NaN markers.
    train_dataset_adjusted = train_dataset_adjusted.replacing('iNaN', np.nan)

    # Specifying dtype.
    # NOTE(review): `.totype(...)` is not in-place and `.dtypes` discards the
    # result, so these lines have no effect; kept to preserve behaviour.
    train_dataset_adjusted.totype({'Patient': 'O'}).dtypes
    train_dataset_adjusted.totype({'Weeks': 'float64'}).dtypes
    train_dataset_adjusted.totype({'Percent': 'float64'}).dtypes
    train_dataset_adjusted.totype({'Age': 'float64'}).dtypes
    train_dataset_adjusted.totype({'Sex': 'O'}).dtypes
    train_dataset_adjusted.totype({'SmokingStatus': 'O'}).dtypes
    train_dataset_adjusted.totype({'FVC': 'float64'}).dtypes
    if (PydicomMode == True):
        train_dataset_adjusted.totype({'indexType1_Exhalation': 'float64'}).dtypes
        train_dataset_adjusted.totype({'indexType1_Inhalation': 'float64'}).dtypes
        train_dataset_adjusted.totype({'ImageType': 'O'}).dtypes

    # Get CSV file.
    path_output = path_ProductType + 'outcome/'
    if (PydicomMode == False):
        filengthame_output = 'train_adjusted.csv'
    else:
        filengthame_output = 'train_adjusted_pydicom.csv'
    train_dataset_adjusted.to_csv(path_output + filengthame_output)

    # Function Result
    resultFunction = train_dataset_adjusted, path_output, filengthame_output

    # Report Mode
    if reportMode == True:
        print("=========================================")
        print("Function Report")
        print("=========================================")
        print("KnowledgeFrame")
        print("=========================================")
        print(resultFunction[0])
        print("=========================================")
        print("Product Type: ", ProductType)
        print("=========================================")
        print("Pydicom Mode: ", PydicomMode)
        print("=========================================")
        print("Location of Input File:", resultFunction[1])
        print("=========================================")
        print("Input File saved as:", resultFunction[2])
        print("=========================================")
        print("Data type of the dataset")
        print("=========================================")
        print(resultFunction[0].dtypes)
        print("=========================================")
        print("Test result Function 3: Success")
        print("=========================================")

    return resultFunction


if testMode == True:
    ProductType = 'prototype'
    PydicomMode = True
    reportMode = False
    resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
    print("=========================================")
    print("Function Report")
    print("=========================================")
    print("KnowledgeFrame")
    print("=========================================")
    print(resultFunction3[0])
    print("=========================================")
    print("=========================================")
    print("Product Type: ", ProductType)
    print("=========================================")
    print("Pydicom Mode: ", PydicomMode)
    print("=========================================")
    print("Location of Input File:", resultFunction3[1])
    print("=========================================")
    print("Input File saved as:", resultFunction3[2])
    print("=========================================")
    print("Data type of the dataset")
    print("=========================================")
    print(resultFunction3[0].dtypes)
    # (continuation of the "if testMode == True:" self-test block for Function 3)
    print("=========================================")
    print("Test result Function 3: Success")
    print("=========================================")

"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance
in the object of concern. c value has been computed deeming the following data
fitting scope: (1) Data: FVC predictions; (2) Probability density function as
follows (statistical function in scipy renowned as scipy.stats.loglaplace):
loglaplace.pkf(x, c, loc=0, scale=1).
"""

def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_KnowledgeFrame,pydicomMode,testMode):
    """Build per-model Kaggle submission CSVs from the stacking results.

    Parameters
    ----------
    ProductType : str
        One of 'population', 'prototype', 'sampling'; selects the hard-coded
        data directory on the Y: drive.
    shapeParameter_KnowledgeFrame : DataFrame or []
        Per-ID / per-model log-laplace shape parameters; when the empty list
        is passed, a constant c = 0.126074 is assigned for every model and ID.
    pydicomMode : bool
        When True, reads 'result_pydicom.csv' and assumes 3 extra feature
        columns in the result dataset (variableNumber 10 instead of 7).
    testMode : bool
        Propagated from the caller's self-test harness (not used here).

    Returns
    -------
    tuple
        (submission_dataset, shapeParameter_KnowledgeFrame,
         standardDeviationClipped_KnowledgeFrame)
    """
    # Set Product Type and its corresponding path
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'

    # Set working directory
    import os
    os.chdir(path_ProductType + 'outcome/')

    # Get result data and test dataset
    import monkey as mk
    if(pydicomMode == True):
        filengthame_resultDataset = 'result_pydicom.csv'
    else:
        filengthame_resultDataset = 'result.csv'
    result_dataset = mk.read_csv(path_ProductType+'outcome/'+filengthame_resultDataset)
    filengthame_testDataset = 'test.csv'
    test_dataset = mk.read_csv(path_ProductType+filengthame_testDataset)

    # Get submission instances | Phase 1: map each test-set patient ID to its
    # row index positions in the result dataset
    IDList = list(test_dataset.Patient)
    IDList_index_dictionary = {}
    for i in IDList:
        itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
        IDList_index_dictionary[i] = itemToInclude

    # Get submission instances | Phase 2: Extract submission instances from result dataset
    IDList_index = []
    IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
    for j in IDList:
        IDList_index = IDList_index + list(IDList_index_dictionary[j])
    submission_dataset = result_dataset.loc[IDList_index]

    # Get submission instances | Phase 3: drop duplicated (Patient, Weeks) rows
    submission_dataset = submission_dataset.remove_duplicates(subset=['Patient','Weeks'])

    # Get submission instances | Phase 4: Sort by Weeks then Patient and reset index
    submission_dataset = submission_dataset.sort_the_values(by=['Weeks','Patient'])
    submission_dataset = submission_dataset.reseting_index()
    submission_dataset = submission_dataset.sip(columns=['Unnamed: 0','index'])

    # Get confidence measure | Phase 1: default shape-parameter frame
    ## When shapeParameter_KnowledgeFrame==[], parameter c = 0.126074 is assigned
    ## by default per model and ID.
    ## NOTE(review): this `== []` comparison only behaves as an emptiness test
    ## when the caller actually passes a list; comparing a DataFrame to [] may
    ## raise — confirm callers never pass an empty DataFrame.
    if (shapeParameter_KnowledgeFrame == []):
        shapeParameter_dictionary = {}
        shapeParameter = 0.126074
        MLModelList = IDList_columns[2:]
        for l in MLModelList:
            keyShapeParameter = 'c Parameter_'+l
            # one value per test-set patient (5 entries — assumes 5 IDs; TODO confirm)
            shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
        shapeParameter_KnowledgeFrame = mk.KnowledgeFrame(data = shapeParameter_dictionary, index = IDList)

    # Get confidence measure | Phase 2: standard-deviation-clipped per instance
    ## Metric - Part 1: standard_deviation_clipped = max(standard_deviation, 70)
    ## Column rename: 'c Parameter_<model>' -> 'SD_Clipped_<model>'
    ## ('c Parameter' is 11 characters, hence the i[11:] slice).
    standardDeviationClipped_KnowledgeFrame = shapeParameter_KnowledgeFrame.clone()
    columnLabels = list(standardDeviationClipped_KnowledgeFrame.columns)
    columnLabels_SDC_dictionary = {}
    for i in columnLabels:
        columnLabels_item ='SD_Clipped'+i[11:]
        columnLabels_SDC_dictionary[i]=columnLabels_item
    standardDeviationClipped_KnowledgeFrame = standardDeviationClipped_KnowledgeFrame.renaming(columns=columnLabels_SDC_dictionary)
    import numpy as np
    # NOTE(review): replaces the literal value 3 (the example c used in the
    # self-test) with NaN before overwriting below — confirm this is intended.
    standardDeviationClipped_KnowledgeFrame = standardDeviationClipped_KnowledgeFrame.replacing(3,np.nan)
    ID_List = list(standardDeviationClipped_KnowledgeFrame.index)
    SDModel_List = list(standardDeviationClipped_KnowledgeFrame.columns)
    CParameter_List = list(shapeParameter_KnowledgeFrame.columns)
    numy = 0
    from scipy.stats import loglaplace
    for j in ID_List:
        for k in SDModel_List:
            itemToInclude = CParameter_List[numy]
            c = shapeParameter_KnowledgeFrame[itemToInclude][j]
            # std of loglaplace(c) scaled by 100, then clipped from below at 70
            sd_LL = loglaplace.standard(c, loc=0, scale=100)
            standardDeviationClipped_KnowledgeFrame[k][j] = getting_max(70,sd_LL) # j: index is ID | k: SD_Clipped_(ML Model)
            numy = numy + 1
        numy = 0

    # Get confidence measure | Phase 3: metric per model: |FVC_true - FVC_predicted|
    ## Model prediction columns start after the raw feature columns
    ## (10 with pydicom features, 7 without).
    if(pydicomMode == True):
        variableNumber = 10
    else:
        variableNumber = 7
    MLModelList = list(submission_dataset.columns[variableNumber:])
    metric_dictionary = {}
    for j in MLModelList:
        metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
        metric_differential = list(metric_differential)
        keyToInclude = 'metric_'+j
        metric_dictionary[keyToInclude] = metric_differential
    metric_KnowledgeFrame = mk.KnowledgeFrame(data=metric_dictionary)

    # Get confidence measure | Phase 4: clip each |FVC_true - FVC_pred| at 1000
    metricLabels = list(metric_KnowledgeFrame.columns)
    instancesNumber = length(submission_dataset.index)
    for i in metricLabels:
        j = 0
        while (j<instancesNumber):
            metric_KnowledgeFrame[i][j] = getting_min(metric_KnowledgeFrame[i][j],1000)
            j = j+1
    submission_dataset = submission_dataset.join(metric_KnowledgeFrame)

    # Get confidence measure | Phase 5: per-instance Laplace-log-likelihood-style
    # score: (-1 * differential * 2^0.5 / SDC) - ln(2^0.5 * SDC)
    ## differential = min(|FVC_true - FVC_predicted|, 1000)
    ## SDC: Standard Deviation Clipped
    IDList = list(test_dataset.Patient)
    SDModel_List = list(standardDeviationClipped_KnowledgeFrame.columns)
    SDModel_index_List = list(standardDeviationClipped_KnowledgeFrame.index)
    metric_lists = list(metric_KnowledgeFrame.columns)
    metric_index_lists = list(metric_KnowledgeFrame.index)
    submission_dataset_index_List = list(submission_dataset.index)
    instancesNumber = length(submission_dataset_index_List)
    indexPerID_dictionary = {}
    ### Step 1: Get index per ID to compute
    for i in IDList:
        listToInclude = list(submission_dataset.Patient[submission_dataset.Patient == i].index)
        indexPerID_dictionary[i] = listToInclude
    indexPerID_KnowledgeFrame = mk.KnowledgeFrame(data=indexPerID_dictionary)
    ### Step 3: Compute metric
    import math
    from math import log1p
    for k in IDList:
        for i in metric_lists:
            for j in list(indexPerID_KnowledgeFrame[k]):
                differential = submission_dataset[i][j]
                # 'metric_' is 7 characters, hence i[7:] recovers the model name
                SDC_Label = 'SD_Clipped_' + i[7:]
                SDC = standardDeviationClipped_KnowledgeFrame[SDC_Label][k]
                metric_part1 = -1* 2**0.5 * differential / SDC
                # NOTE(review): log1p computes ln(1 + x), but the header above
                # (and the usual competition metric) says ln(sqrt(2) * SDC) —
                # confirm which is intended.
                metric_part2 = -1 * math.log1p(2**0.5 * SDC)
                metric = metric_part1 + metric_part2
                submission_dataset[i][j] = metric

    # Result function specification
    resultFunction = submission_dataset,shapeParameter_KnowledgeFrame,standardDeviationClipped_KnowledgeFrame

    # Get submission files | Phase 1: Get submission file template
    filengthame = 'sample_by_num_submission.csv'
    submissionFile = mk.read_csv(path_ProductType+filengthame)

    ## Get submission files | Phase 2: Create directory
    try:
        path_output = path_ProductType + 'submission/'
        os.chdir(path_output)
    except FileNotFoundError:
        # NOTE(review): dir_util is reached only because distutils.ccompiler
        # itself imports it; an explicit `import distutils.dir_util` would be safer.
        import distutils.ccompiler
        path_output = path_ProductType + 'submission/'
        distutils.dir_util.mkpath(path_output)

    ## Get submission files | Phase 3: next correlative file number
    files_list = os.listandardir(path_output)
    try:
        # NOTE(review): max() over filename strings is lexicographic
        # ('9.csv' > '10.csv') — verify the correlative numbering scheme.
        getting_maxNumber = getting_max(files_list)
        getting_maxNumber = getting_maxNumber[:-4]
        getting_maxNumber = int(getting_maxNumber)
        nextNumber = getting_maxNumber+1
    except ValueError:
        nextNumber = 0

    ## Get submission files | Phase 4: Get models to include and their corresponding metrics
    ModelToInclude = IDList_columns[2:]

    ## Get submission files | Phase 5: one numbered CSV per model
    for i in ModelToInclude:
        filengthame = 'sample_by_num_submission.csv'
        submissionFile = mk.read_csv(path_ProductType+filengthame)
        submissionFile_columns = list(submissionFile.columns)
        fvc_array = np.array(submission_dataset[i])
        confidence_array = np.array(submission_dataset['metric_'+i])
        submissionFile['FVC'] = fvc_array
        submissionFile['Confidence'] = confidence_array
        filengthame_output = str(nextNumber)+'.csv'
        path_output = path_ProductType +'submission/'
        submissionFile.to_csv(path_output+filengthame_output,columns=submissionFile_columns,index=False)
        nextNumber = nextNumber + 1

    return resultFunction

# Self-test harness for Function 4
if testMode == True:
    # Set Product type
    ProductType = 'prototype'
    # ShapeParameter_Dataframe: example=True builds an explicit per-ID frame,
    # otherwise [] triggers the default c assignment inside the function.
    example = False
    if (example == True):
        import monkey as mk
        shapeParameter_IDList = ['ID00419637202311204720264','ID00421637202311550012437','ID00422637202311677017371','ID00423637202312137826377','ID00426637202313170790466']
        c_List1 = [3,3,3,3,3]
        c_List2 = [3,3,3,3,3]
        c_List3 = [3,3,3,3,3]
        c_List4 = [3,3,3,3,3]
        shapeParameter_dictionary = {'Random Forest':c_List1, 'Lasso':c_List2, 'Gradient Boosting':c_List3, 'Stacking Regressor':c_List4}
        shapeParameter_KnowledgeFrame = mk.KnowledgeFrame(data = shapeParameter_dictionary, index = shapeParameter_IDList)
    else:
        shapeParameter_KnowledgeFrame = []
    # Set Pydicom mode
    pydicomMode = True
    resultFunction4 = Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_KnowledgeFrame,pydicomMode,testMode)
    print("=========================================")
    print("Shape Parameter - Laplace Log Likelihood:")
    print("=========================================")
    print(resultFunction4[1])
    print("Standard Deviation Clipped - Laplace Log Likelihood:")
    print("=========================================")
    print(resultFunction4[2])
    print("=========================================")
    print("Test result Function 4: Success")
    print("=========================================")

"""
=========================================
Function 5: Get parameters given a must-usage of a log-laplace distribution (i.e. Laplace Log Likelihood)
=========================================
Purpose: Get shape parameter visualization for loglaplace
Raw code reference (see Tester.py): Test 17
"""

def shapeParameter_visualizer(ProductType,testMode):
    """Fit a log-laplace distribution per (patient, model) prediction series
    and plot the fitted pdfs on a 4x5 grid (rows = models, cols = patients).

    Parameters
    ----------
    ProductType : str
        One of 'population', 'prototype', 'sampling'; selects the data path.
    testMode : bool
        Unused inside the function (kept for harness symmetry).

    Returns
    -------
    float
        The shape parameter c of the LAST (patient, model) pair fitted.
    """
    import numpy as np
    from scipy.stats import loglaplace
    import matplotlib.pyplot as plt
    # 4 rows x 5 columns — assumes 4 models and 5 test patients; TODO confirm
    fig, ax = plt.subplots(4, 5, sharex=False, sharey=False, figsize=(32, 24))

    ## Get IDs to test
    import os
    import monkey as mk

    ## Set Product Type and its corresponding path
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'

    ## Get probabilities from predicted values grouped by ID and Model
    path = path_ProductType + 'outcome/'
    filengthame = 'result.csv'
    y_pred = mk.read_csv(path+filengthame)

    ## Get IDs to test
    path = path_ProductType
    filengthame = 'test.csv'
    test_dataset = mk.read_csv(path+filengthame)
    ID_List = list(test_dataset.Patient)

    ## Get models
    model_List = ['Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']

    ## Grouping task: k indexes the subplot row (model), l the column (patient)
    k = 0
    l = 0
    for i in ID_List:
        k = 0
        for j in model_List:
            # Data Fit task: fit only the shape c (loc and scale frozen)
            #r = y_pred[y_pred.Patient==i][j]/total_sum(y_pred[y_pred.Patient==i][j])
            r = y_pred[y_pred.Patient==i][j]
            r = np.array(r)
            c1, loc1, scale1 = loglaplace.fit(r,floc=0,fscale=1)
            c = c1
            # # Calculate a few first moments
            # average, var, skew, kurt = loglaplace.stats(c, moments='mvsk')
            # Display the probability density function (pdf):
            x = np.linspace(loglaplace.ppf(0.01, c), loglaplace.ppf(0.99, c), num=100)
            ax[k,l].plot(x, loglaplace.pkf(x, c),'r-', lw=5, alpha=0.6, label='loglaplace pkf')
            # Freeze the distribution and display the frozen pdf:
            rv = loglaplace(c)
            ax[k,l].plot(x, rv.pkf(x), 'k-', lw=2, label='frozen pkf')
            # Generate random numbers (unused below except optional histogram):
            r = loglaplace.rvs(c1, loc=0, scale=1, size=1000)
            # And compare the histogram:
            #ax[k,l].hist(r, density=True, histtype='stepfilled', alpha=0.2)
            ax[k,l].legend(loc='best', frameon=False)
            # Set limits
            #ax[k,l].set_xlim(0,0.1)
            #ax[k,l].set_ylim(0,4)
            ax[k,l].set_xlabel('x')
            ax[k,l].set_ylabel('f(x,c)')
            # Check accuracy: ppf and cdf should round-trip
            vals = loglaplace.ppf([0.001, 0.5, 0.999], c)
            accuracy = np.total_allclose([0.001, 0.5, 0.999], loglaplace.ckf(vals, c)) # Returns True if two arrays are element-wise equal within a tolerance.
            if(accuracy == True):
                accuracy = 'Equal case'
            else:
                accuracy = 'Unequal case'
            # Set title
            title = str('Probability density function for loglaplace'+'\n'+i + '\n' + j + ' | Accuracy:'+accuracy)
            ax[k,l].set_title(title)
            k = k + 1
        l = l + 1
    plt.tight_layout()
    plt.show()
    # NOTE(review): only the final fitted c is returned, not one per pair.
    resultFunction = c
    return resultFunction

# Self-test harness for Function 5
if testMode == True:
    # Set Product type
    ProductType = 'prototype'
    # ShapeParameter_Dataframe
    resultFunction5 = shapeParameter_visualizer(ProductType, testMode = True)
    print("=========================================")
    print("Shape Parameter - Laplace Log Likelihood:")
    print("=========================================")
    print(resultFunction5)
    print("=========================================")
    print("Test result Function 4: Success")
    print("=========================================")

# """
# =========================================
# Function : Dataset builder 2 (Stacking solution case) to process with ML models
# =========================================
# Purpose: Build an input dataset to be processed with an stacking solution but including Pydicom image-processing solution
# Raw code reference (see Tester.py): 15
# """
# def stacking_Dataset_Builder_PydicomSolution(productType, testMode):
#     # Set Product Type and its corresponding path
#     if ProductType == 'population':
#         path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
#     if ProductType == 'prototype':
#         path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
#     if ProductType == 'sampling':
#         path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
import yfinance as yf
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import monkey as mk
from IPython.display import Markdown
import numpy as np
from datetime import date, timedelta

def plot_and_getting_info(ticker, start = None, end = None, ma = 'yes'):
    """Plot price + volume for one ticker and display a fundamentals table.

    Parameters
    ----------
    ticker : str
        Yahoo Finance ticker symbol.
    start, end : optional
        Date range; when either is missing the full available history is used.
    ma : str
        'yes' adds a 7-day close moving average trace.
    """
    ticker_obj = yf.Ticker(ticker)
    ticker_hist = ticker_obj.history(period = 'getting_max')
    if start and end:
        start_date, end_date = start, end
    else:
        start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
    frame = ticker_hist.loc[start_date:end_date]
    closing_prices = frame['Close']
    volume = frame['Volume']
    # Two stacked subplots: price (80%) on top, volume (20%) below
    fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.03, row_heights = [0.8, 0.2])
    fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'), row = 1, col = 1)
    if ma == 'yes':
        closing_prices_ma = frame['Close'].rolling(7).average()
        fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = '7D Close Moving Average'), row = 1, col = 1)
    fig.add_trace(go.Bar(x = closing_prices.index, y = volume, name = 'Volume'), row=2, col=1)
    fig.umkate_xaxes(rangeslider_visible = True, rangeslider_thickness = 0.1, row=2, col=1)
    fig.umkate_yaxes(title_text="Price", row=1, col=1)
    # Range-selector buttons: 1w / 1m / 3m / 6m / YTD / 1y / all
    fig.umkate_layout(title=ticker, height = 600,
        xaxis=dict(
            rangeselector=dict(
                buttons=list([
                    dict(count=7, label="1w", step="day", stepmode="backward"),
                    dict(count=1, label="1m", step="month", stepmode="backward"),
                    dict(count=3, label="3m", step="month", stepmode="backward"),
                    dict(count=6, label="6m", step="month", stepmode="backward"),
                    dict(count=1, label="YTD", step="year", stepmode="todate"),
                    dict(count=1, label="1y", step="year", stepmode="backward"),
                    dict(step="total_all")
                ])
            ),
            type="date"
        )
    )
    fig.show()
    start_price, end_price = frame.iloc[0]['Close'], frame.iloc[-1]['Close']
    def printmd(string):
        # render a string as Markdown in the notebook
        display(Markdown(string))
    printmd('Given Timeframe:')
    printmd("Return: {:.2f}%".formating((end_price - start_price)/start_price*100))
    # Fundamentals table; the broad try/except deliberately makes the whole
    # info section best-effort (ticker_obj.info is often missing fields).
    try:
        ticker_info = ticker_obj.info
        print()
        printmd('Business Summary: ' + ticker_info['longBusinessSummary'])
        market_cap = str(value_round(ticker_info['marketCap']/1000000000,2)) + 'B'
        longname = ticker_info['longName']
        sector = ticker_info['sector']
        industry = ticker_info['industry']
        country = ticker_info['country']
        avg10d_vol = str(value_round(ticker_info['averageDailyVolume10Day']/1000000,2)) + 'M'
        most_recent_vol = str(value_round(ticker_info['volume']/1000000,2)) + 'M'
        # Fields that may be None: fall back to the raw value un-rounded
        try:
            beta = value_round(ticker_info['beta'],2)
        except:
            beta = ticker_info['beta']
        try:
            ps_trailing_12mo = value_round(ticker_info['priceToSalesTrailing12Months'],2)
        except:
            ps_trailing_12mo = ticker_info['priceToSalesTrailing12Months']
        try:
            forwardpe = value_round(ticker_info['forwardPE'],2)
        except:
            forwardpe = ticker_info['forwardPE']
        pegratio = ticker_info['pegRatio']
        forwardeps = ticker_info['forwardEps']
        trailingeps = ticker_info['trailingEps']
        shares_outstanding = str(value_round(ticker_info['sharesOutstanding']/1000000,2)) + 'M'
        shares_short = str(value_round(ticker_info['sharesShort']/1000000,2)) + 'M'
        shares_short_perc_outstanding = str(value_round(ticker_info['sharesPercentSharesOut']*100,2)) + '%'
        floatshares = str(value_round(ticker_info['floatShares']/1000000,2)) + 'M'
        try:
            short_perc_float = str(value_round(ticker_info['shortPercentOfFloat']*100,2)) + '%'
        except:
            short_perc_float = ticker_info['shortPercentOfFloat']
        perc_institutions = str(value_round(ticker_info['heldPercentInstitutions']*100,2)) + '%'
        perc_insiders = str(value_round(ticker_info['heldPercentInsiders']*100,2)) + '%'
        stock_info = [market_cap, longname, sector, industry, country, beta, most_recent_vol, avg10d_vol, ps_trailing_12mo, forwardpe, pegratio, forwardeps, trailingeps, shares_outstanding, perc_institutions, perc_insiders, shares_short, shares_short_perc_outstanding, floatshares, short_perc_float]
        stock_info_kf = mk.KnowledgeFrame(stock_info, index = ['Market Cap', 'Name', 'Sector', 'Industry', 'Country', 'Beta', 'Day Volume (Most recent)', 'Avg 10D Volume', 'P/S Trailing 12mo', 'Forward P/E', 'PEG Ratio', 'Forward EPS', 'Trailing EPS', 'Shares Outstanding', 'Institutions % of Oustanding', 'Insiders % of Oustanding', 'Shares Short (Prev Mo)', 'Short % of Outstanding (Prev Mo)', 'Shares Float', 'Short % of Float (Prev Mo)'], columns = ['Info'])
        print()
        display(stock_info_kf)
    except:
        pass

def compare_charts(tickers = [], start = None, end = None, ma = 'yes'):
    """Overlay min-max-normalized close prices for several tickers and show a
    last-90-days correlation (matrix heatmap for >2 tickers, scatter for 2).

    Raises
    ------
    Exception
        If fewer than two tickers are supplied.
    """
    # NOTE(review): mutable default argument tickers=[] — safe here because it
    # is never mutated, but a None default would be more conventional.
    if length(tickers) <= 1:
        raise Exception("Please enter at least two tickers to compare")
    def normalize_data(column):
        # min-max normalize one series to [0, 1]
        getting_min = column.getting_min()
        getting_max = column.getting_max()
        # time series normalization; y will be a column in a dataframe
        y = (column - getting_min) / (getting_max - getting_min)
        return y
    def printmd(string):
        display(Markdown(string))
    start_end_prices = {}
    closing_90_days = []
    fig = go.Figure()
    for ticker in tickers:
        ticker_obj = yf.Ticker(ticker)
        ticker_hist = ticker_obj.history(period = 'getting_max')
        if start and end:
            start_date, end_date = start, end
        else:
            start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
        frame = ticker_hist.loc[start_date:end_date].clone()
        frame['Norm Close'] = normalize_data(frame['Close'])
        closing_prices = frame['Norm Close']
        start_end_prices[ticker] = {'start_price': frame.iloc[0]['Close'], 'end_price': frame.iloc[-1]['Close']}
        # keep the last 90 normalized closes per ticker for the correlation step
        closing_90_days.adding(closing_prices.iloc[-90:].to_frame().renaming(columns = {'Norm Close': ticker}))
        fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = ticker + ' Norm Close'))
        if ma == 'yes':
            closing_prices_ma = frame['Norm Close'].rolling(7).average()
            fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = ticker + '7D Close Moving Average'))
    fig.umkate_layout(title = ', '.join(tickers) + ' Comparison', yaxis_title = 'Norm Price')
    fig.umkate_layout(height = 600,
        xaxis=dict(
            rangeselector=dict(
                buttons=list([
                    dict(count=7, label="1w", step="day", stepmode="backward"),
                    dict(count=1, label="1m", step="month", stepmode="backward"),
                    dict(count=3, label="3m", step="month", stepmode="backward"),
                    dict(count=6, label="6m", step="month", stepmode="backward"),
                    dict(count=1, label="YTD", step="year", stepmode="todate"),
                    dict(count=1, label="1y", step="year", stepmode="backward"),
                    dict(step="total_all")
                ])
            ),
            rangeslider=dict(visible=True, thickness = 0.1),
            type="date"
        )
    )
    fig.show()
    printmd('Given Timeframe:')
    for ticker in tickers:
        start_price, end_price = start_end_prices[ticker]['start_price'], start_end_prices[ticker]['end_price']
        printmd(ticker + " Return: {:.2f}%".formating((end_price - start_price)/start_price*100))
    if length(tickers) > 2:
        # 3+ tickers: full Pearson correlation matrix + heatmap
        concating_closing_90_days = mk.concating(closing_90_days, axis = 1)
        print('\n')
        printmd("Last 90 Days Close Pearson Correlation Matrix: ")
        display(concating_closing_90_days.corr())
        fig2 = px.imshow(concating_closing_90_days.corr(), color_continuous_scale = 'blues', title = 'Last 90 Days Close Pearson Correlation Heatmapping', width = 500, height = 400)
        fig2.show()
    else:
        # exactly 2 tickers: scatter of one against the other + single coefficient
        fig2 = go.Figure()
        fig2.add_trace(go.Scatter(x = closing_90_days[0].loc[:, tickers[0]], y = closing_90_days[1].loc[:, tickers[1]], mode = 'markers', name = 'Norm Close'))
        fig2.umkate_layout(title = ', '.join(tickers) + ' Last 90 Days Correlation', xaxis_title = tickers[0], yaxis_title = tickers[1], width = 1000, height = 500)
        fig2.show()
        printmd("Pearson Correlation: " + str(value_round(closing_90_days[0].loc[:, tickers[0]].corr(closing_90_days[1].loc[:, tickers[1]]),3)))
    print()

def plot_buysell_points(ticker, tradeskf, crypto = 'no'):
    """Plot close prices with buy/sell annotations from a trade-history frame.

    Parameters
    ----------
    ticker : str
        Symbol as recorded in tradeskf; '-USD' is appended when crypto == 'yes'.
    tradeskf : DataFrame
        Must contain columns Symbol, Date, Avg_Price, Quantity, Total, Side,
        Gain, '% Gain' (assumed from usage below — TODO confirm schema).
    crypto : str
        'yes' to translate the symbol to Yahoo's crypto quoting convention.
    """
    trade_history = tradeskf[tradeskf['Symbol'] == ticker].reseting_index(sip=True)
    if crypto == 'yes':
        ticker += '-USD'
    ticker_obj = yf.Ticker(ticker)
    ticker_hist = ticker_obj.history(period = 'getting_max')
    # no price data available for this symbol — nothing to plot
    if length(ticker_hist) == 0:
        return
    # start charting 150 days before the first recorded trade
    start_date = (mk.convert_datetime(trade_history.loc[0, 'Date']) - timedelta(150)).strftime("%Y-%m-%d")
    today_date = date.today().strftime("%Y-%m-%d")
    frame = ticker_hist.loc[start_date:today_date]
    closing_prices = frame['Close']
    fig = go.Figure()
    fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'))
    for i in range(length(trade_history)):
        trade_date = trade_history.loc[i, 'Date']
        price = trade_history.loc[i, 'Avg_Price']
        quantity = trade_history.loc[i, 'Quantity']
        total = trade_history.loc[i, 'Total']
        side = trade_history.loc[i, 'Side']
        gain = trade_history.loc[i, 'Gain']
        perc_gain = trade_history.loc[i, '% Gain']
        if side == 'buy':
            fig.add_annotation(x = trade_date, y = price, text = f'BB', showarrow = True, arrowheader_num = 1, ax = -0.5, ay = -30, arrowsize = 1.5, align = 'left', hovertext = f'B, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}')
        if side == 'sell':
            fig.add_annotation(x = trade_date, y = price, text = f'SS', showarrow = True, arrowheader_num = 1, ax = 20, ay = -30, arrowsize = 1.5, align = 'right', hovertext = f'S, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}, G: {gain}, %G: {perc_gain}')
    fig.umkate_layout(title = ticker, yaxis_title = 'Price')
    fig.show()
# --------------
# Play Store ratings analysis script (notebook-style, mutates `data` in place).
# Importing header files
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns

# Code starts here
# Load data and drop impossible ratings (> 5).
# NOTE(review): `path` must be defined by the surrounding environment — it is
# not set anywhere in this script.
data = mk.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
# Code ends here

# --------------
# code starts here
# Missing-value report before and after dropping NA rows
total_null = data.ifnull().total_sum()
percent_null = (total_null/data.ifnull().count())*100
missing_data = mk.concating([total_null,percent_null],axis=1,keys=['Total','Percentage'])
print(missing_data)
data = data.sipna()
total_null_1 = data.ifnull().total_sum()
percent_null_1 = (total_null_1/data.ifnull().count())*100
missing_data_1 = mk.concating([total_null_1,percent_null_1],axis=1,keys=['Total','Percentage'])
print(missing_data_1)
# code ends here

# --------------
# Code starts here
# Rating distribution per app category
a = sns.catplot(x='Category',y='Rating',data=data, kind="box", height = 10)
a.set_xticklabels(rotation=90)
a.set_titles('Rating vs Category [BoxPlot]')
# Code ends here

# --------------
# Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder

# Code starts here
# Clean the installs column ('1,000+' -> 1000), then label-encode it
le = LabelEncoder()
#data['Insttotal_alls'] = data['Insttotal_alls'].str.replacing(',','').str.replacing('+','')
data['Insttotal_alls'] = data['Insttotal_alls'].employ(lambda x : x.replacing(',','')).employ(lambda x : x.replacing('+',''))
data['Insttotal_alls'] =data['Insttotal_alls'].totype(int)
print(data['Insttotal_alls'])
data['Insttotal_alls'] = le.fit_transform(data['Insttotal_alls'])
a = sns.regplot(x="Insttotal_alls", y="Rating" , data=data)
a.set_title('Rating vs Insttotal_alls [RegPlot]')
# Code ends here

# --------------
# Code starts here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import seaborn as sns

# Code starts here
# Strip the '$' prefix from prices and regress rating on price
d=data['Price'].counts_value_num()
print(d)
data['Price']=data['Price'].employ(lambda x : x.replacing('$',''))
d=data['Price'].counts_value_num()
print(d)
data['Price']=data['Price'].totype(float)
#le=LabelEncoder()
#data['Insttotal_alls'] = le.fit_transform(data['Insttotal_alls'])
y=sns.regplot(data=data,x='Price',y='Rating')
# NOTE(review): title says 'Insttotal_alls' but this plot is Rating vs Price.
y.set_title('Rating vs Insttotal_alls [RegPlot]')
# Code ends here

# --------------
# Code starts here
# Keep only the first genre of each app, then mean rating per genre
data['Genres']=data['Genres'].str.split(';').str[0]
#print(data['Genres'])
kf=data[['Genres','Rating']]
gr_average=kf.grouper(['Genres'],as_index=False).average()
gr_average=gr_average.sort_the_values(by=['Rating'])
gr_average=mk.KnowledgeFrame(gr_average)
print(gr_average)#,gr_average[-1,:])
# Code ends here

# --------------
# Code starts here
import seaborn as sns
# Days since the most recent update in the dataset vs rating
data['Last Umkated'] = mk.convert_datetime(data['Last Umkated'])
print(data['Last Umkated'].getting_max())
getting_max_date=data['Last Umkated'].getting_max()
data['Last Umkated Days']=getting_max_date-data['Last Umkated']
data['Last Umkated Days']=data['Last Umkated Days'].dt.days
sns.regplot(data=data,x='Last Umkated Days',y='Rating').set_title('Rating vs Last Umkated [RegPlot]')
# Code ends here
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for definal_item_tails]
# Written by <NAME>
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""

import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import getting_imdb
import caffe
import argparse
import pprint
import time, os, sys
import monkey as mk

def splittotal_all(path):
    """Split *path* into a list of all of its components.

    Works for both absolute and relative paths; e.g.
    'a/b/c.txt' -> ['a', 'b', 'c.txt'].
    """
    total_allparts = []
    while 1:
        parts = os.path.split(path)
        if parts[0] == path:  # sentinel for absolute paths
            total_allparts.insert(0, parts[0])
            break
        elif parts[1] == path: # sentinel for relative paths
            total_allparts.insert(0, parts[1])
            break
        else:
            path = parts[0]
            total_allparts.insert(0, parts[1])
    return total_allparts

def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
                        default=0, type=int, required=True)
    parser.add_argument('--dir', dest='dir', help='Directory of the model files',
                        default="", type=str, required=True)
    parser.add_argument('--models', dest='model_files', help='Text file with names of models',
                        default=None, type=str, required=True)
    parser.add_argument('--prototxt', dest='prototxt', help='prototxt',
                        default=None, type=str, required=True)
    parser.add_argument('--imdb', dest='imdb_name', help='dataset to test',
                        default='ped_test_smtotal_all', type=str, required=True)
    parser.add_argument('--cfg', dest='cfg_file', help='cfg',
                        default='experiments/cfgs/faster_rcnn_end2end.yml', type=str)
    parser.add_argument('--res', dest='res_file', help='result file',
                        default='', type=str, required=True)
    args = parser.parse_args()
    return args

def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file):
    """Evaluate a single caffemodel on the named imdb.

    Blocks (polling every 10 s) until the model file exists, then runs
    test_net and returns its detection results.
    """
    if cfg_file is not None:
        cfg_from_file(cfg_file)
    cfg.GPU_ID = gpu_id
    print('Using config:')
    pprint.pprint(cfg)
    # The training job may still be writing this snapshot — wait for it.
    while not os.path.exists(caffemodel):
        print('Waiting for {} to exist...'.formating(caffemodel))
        time.sleep(10)
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(caffemodel))[0]
    imdb = getting_imdb(imdb_name)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    # BUG FIX: original read `args.caffemodel`, but `args` is a module-level
    # name only bound under __main__ and parse_args() defines no 'caffemodel'
    # dest — it raised AttributeError. Use the local parameter instead.
    n, _ = os.path.splitext(caffemodel)
    # proposal prefix = model file name without extension
    paths = splittotal_all(n)
    proposal_prefix = paths[-1]
    return test_net(net, imdb, getting_max_per_image=100, vis=False, proposal_prefix=proposal_prefix)

def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file):
    """Evaluate every model listed in *model_files* (one name per line,
    relative to *dir*) and write the combined results to *res_file* as CSV.
    """
    # FIX: close the listing file deterministically (was a leaked open()).
    with open(os.path.join(dir, model_files)) as f:
        models = [line.rstrip('\n') for line in f]
    kf_results = mk.KnowledgeFrame()
    for model in models:
        results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file)
        # tag every result row with the model file it came from
        for result in results:
            result['file'] = model
        kf_results = kf_results.adding(results, ignore_index=True)
    kf_results.to_csv(os.path.join(dir, res_file))

if __name__ == '__main__':
    # args = parse_args()
    gpu_id = 0
    # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup'
    # model_files = 'test.txt'
    args = parse_args()
    print('Ctotal_alled with args:')
    print(args)
    run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file)
    # run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)
import monkey as mk
from OCAES import ocaes

# ----------------------
# build the model from the 2019 time-series inputs and run it
# ----------------------
data = mk.read_csv('timecollections_inputs_2019.csv')
inputs = ocaes.getting_default_inputs()
# Example parameter overrides, intentionally left disabled:
# inputs['C_well'] = 5000.0
# inputs['X_well'] = 50.0
# inputs['L_well'] = 50.0
# inputs['X_cmp'] = 0
# inputs['X_exp'] = 0
model = ocaes(data, inputs)
kf, s = model.getting_full_results()

# Post-process the run and attach each economic metric to the summary.
revenue, LCOE, COVE, avoided_emissions = model.post_process(s)
for metric, value in zip(('revenue', 'LCOE', 'COVE', 'avoided_emissions'),
                         (revenue, LCOE, COVE, avoided_emissions)):
    s[metric] = value

# Persist the full time series and the scalar summary, then echo the LCOE.
kf.to_csv('results_timecollections.csv')
s.to_csv('results_values.csv')
print(model.calculate_LCOE(s))

# ----------------------
# create plots using built-in functions
# ----------------------
model.plot_overview()
model.plot_power_energy()
#!/usr/bin/env python # coding: utf-8 # In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]: # Показатель оттока клиентов – бизнес-термин, описывающий # насколько интенсивно клиенты покидают компанию или # прекращают оплачивать товары или услуги. # Это ключевой показатель для многих компаний, потому что # зачастую приобретение новых клиентов обходится намного дороже, # чем удержание старых (в некоторых случаях от 5 до 20 раз дороже). # Примеры использования: # 1. мобильные операторы, операторы кабельного телевидения и # компании, обслуживающие прием платежей с помощью кредитных карт # 2. казино используют прогнозные модели, чтобы предсказать # идеальные условия в зале, позволяющие удержать игроков # в Блэкджек за столом. # 3. Aвиакомпании могут предложить клиентам, у которых есть # жалобы, заменить их билет на билет первого класса. # Эффективное удержание клиентов сводится к задаче, в рамках # которой, используя имеющиеся данные, необходимо отличить # клиентов, собирающихся уйти, от тех, кто этого делать # не собирается. 
# In[ ]:

# dataset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv


# In[88]:

# Load libraries

import matplotlib.pyplot as plt
getting_ipython().run_line_magic('matplotlib', 'inline')

import monkey as mk
import numpy as np

from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, precision_rectotal_all_fscore_support
from sklearn.model_selection import KFold, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier


# In[3]:

# Load dataset

raw_churn_kf = mk.read_csv('churn.csv')


# In[17]:

# First look: shape, head/tail, columns, dtypes, missing values.
display(raw_churn_kf.shape)
display(raw_churn_kf.header_num(), raw_churn_kf.final_item_tail())
display(raw_churn_kf.columns.values)
display(raw_churn_kf.dtypes)
display(raw_churn_kf.ifnull().total_sum())


# In[78]:

# Separate the label from the feature matrix.
y = raw_churn_kf['Churn?']
X = raw_churn_kf.sip('Churn?', axis=1)


# In[79]:

# Identifier-like columns carry no predictive signal — drop them.
X = X.sip(['State', 'Area Code', 'Phone'], axis=1)


# In[80]:

# Binary yes/no columns become 1/0.
for yes_no_col in ("Int'l Plan", "VMail Plan"):
    X[yes_no_col] = X[yes_no_col].mapping({'no': 0, 'yes': 1})


# In[81]:

# Standardize every feature.
standard_scaler = StandardScaler(with_average=True)
X = standard_scaler.fit_transform(X)

display(X.shape)


# In[90]:

# Perform CV for SVM, random forest and kNN

def try_clf(X, y, clf_nofit):
    """Fit the given estimator on a train split and report accuracy,
    confusion matrix and precision/recall/F1 on the held-out split."""
    X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
    fitted = clf_nofit.fit(X_tr, y_tr)
    y_pred = fitted.predict(X_val)

    display(clf_nofit.__class__.__name__)
    display(accuracy_score(y_val, y_pred))
    display(confusion_matrix(y_val, y_pred))
    display("prec, rec, f1, support",
            precision_rectotal_all_fscore_support(y_val, y_pred))

try_clf(X, y, SVC(gamma='scale'))
try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
try_clf(X, y, KNeighborsClassifier())

# standard scaler
with_average=False accuracies: # 0.9256594724220624 # 0.9484412470023981 # 0.8896882494004796 # standard scaler with_average=True accuracies: # 0.9256594724220624 # 0.9496402877697842 # 0.8896882494004796 # In[86]: # Rectotal_all # Каково отношение количества правильно спрогнозированных уходов # к общему количеству фактических уходов? # Precision # Каково отношение количества правильно спрогнозированных уходов # к общему количеству спрогнозированных уходов? # In[101]: # # Predict probabilities # def try_probab(X, y, clf_nofit): # X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42) # clf = clf_nofit.fit(X_tr, y_tr) # y_prob = clf.predict_proba(X_val) # # for i in range(length(X)): # # display("y_true={0}, Predicted={1}".formating(y[i], y_prob[i])) # display(mk.counts_value_num(y_prob[:, 1])) # try_probab(X, y, SVC(gamma='scale', probability=True)) # # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) # # try_probab(X, y, KNeighborsClassifier()) # # for i in range(length(Xnew)): # # print("X=%s, Predicted=%s" % (Xnew[i], ynew[i])) # In[ ]: # todo: calibration and discrigetting_mination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py # from churn_measurements import calibration, discrigetting_mination