code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Bed Instability 2D -- Gadal et al. 2019 # # Here, we recompute some of the results corresponding to the two-dimensional dune instability present in: # # <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). Incipient bedforms in a bidirectional wind regime. Journal of Fluid Mechanics, 862, 490-516. # import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import to_rgba from PyDune.math import tand, cosd, sind from PyDune.physics.dune import bedinstability_2D as BI # ## Celerity and growth rate under a unidirectional wind # # We fix needed paremeters: # # # parameter space exploration (k -- alpha) k, alpha = np.linspace(0.001, 0.6, 2000), np.linspace(-90, 90, 181) K, ALPHA = np.meshgrid(k, alpha) # constant parameters A0, B0 = 3.5, 2 r, mu, delta = 2.5, tand(35), 0 # We choose an expression for the hydrodynamics coefficients: # # # + def Ax(k, alpha, A0=A0): return A0*cosd(alpha)**2 def Bx(k, alpha, B0=B0): return B0*cosd(alpha)**2 def Ay(k, alpha, A0=B0): return 0.5*A0*cosd(alpha)*sind(alpha) def By(k, alpha, B0=B0): return 0.5*B0*cosd(alpha)*sind(alpha) # - # We compute the non-dimensional growth rate and celerity: # # # + SIGMA = BI.temporal_growth_rate(K, ALPHA, Ax, Ay, Bx, By, r, mu, delta) CELERITY = BI.temporal_celerity(K, ALPHA, Ax, Ay, Bx, By, r, mu, delta) fig, axarr = plt.subplots(1, 2, constrained_layout=True, sharex=True, sharey=True) cf = axarr[0].contourf(K, ALPHA, SIGMA, 200) cb = fig.colorbar(cf, label=r'$\sigma$', location='top', ax=axarr[0], ticks=np.linspace(-1.5, 0.5, 5)*1e-1) cb.ax.ticklabel_format(axis='x', style='sci', scilimits=(0.1, 9)) axarr[0].plot(k[SIGMA.argmax(axis=1)], alpha, 'k--') # cf = axarr[1].contourf(K, ALPHA, CELERITY, 200) cb = fig.colorbar(cf, label=r'$c$', 
location='top', ax=axarr[1], ticks=np.linspace(0, 1.8, 7)) cb.ax.ticklabel_format(axis='x', style='sci', scilimits=(0.1, 9)) axarr[0].set_xlabel('Wavenumber, $k$') axarr[1].set_xlabel('Wavenumber, $k$') axarr[0].set_ylabel(r'Orientation, $\alpha$ [deg.]') # - # ## Growth rate under a bidirectional wind # # # + N = np.array([1, 2]) theta = np.array([70, 90, 110]) SIGMAS = BI.growth_rate_bidi(K[..., None, None], ALPHA[..., None, None], Ax, Ay, Bx, By, r, mu, delta, theta[None, None, :, None], N[None, None, None, :]) print('The shape of SIGMAS is {}'.format(SIGMAS.shape)) vmax, vmin = SIGMAS.max(), SIGMAS.min() fig, axarr = plt.subplots(2, 3, constrained_layout=True, sharex=True, sharey=True) for i, n in enumerate(N): for j, th in enumerate(theta): ax = axarr[i, j] cf = ax.contourf(K, ALPHA, SIGMAS[..., j, i], 200, vmin=vmin, vmax=vmax) ax.plot(k[SIGMAS[..., j, i].argmax(axis=1)], alpha, 'k--') ax.text(0.05, 0.95, r'$N = {}$, $\theta = {}$'.format(n, th), ha='left', va='top', transform=ax.transAxes, bbox=dict(facecolor=to_rgba('wheat', 0.8), edgecolor='black', boxstyle='round')) fig.colorbar(cf, ax=axarr, location='top', label=r'$\sigma$') for ax in axarr[:, 0].flatten(): ax.set_ylabel(r'$\alpha$') for ax in axarr[-1, :].flatten(): ax.set_xlabel(r'k') plt.show()
docs/_downloads/58e3032208bb8361fc308c10ee38ff33/plot_bed_instability_2D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + deletable=true editable=true import numpy as np import matplotlib.pyplot as plt import pandas as pd import sklearn from scipy import stats, optimize from sklearn.preprocessing import Imputer, StandardScaler, MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso, LinearRegression, Ridge from sklearn.pipeline import Pipeline from sklearn.base import clone from itertools import combinations from sklearn.metrics import explained_variance_score, r2_score, median_absolute_error, mean_squared_error from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import cross_val_score from sklearn.model_selection import StratifiedKFold from sklearn.feature_selection import RFECV print('The scikit-learn version is {}.'.format(sklearn.__version__)) print('The pandas version is {}.'.format(pd.__version__)) print('The numpy version is {}.'.format(np.__version__)) # + deletable=true editable=true goal_features = ['murders', 'murdPerPop', 'rapes', 'rapesPerPop', 'robberies','robbbPerPop', 'assaults', 'assaultPerPop', 'burglaries', 'burglPerPop', 'larcenies', 'larcPerPop', 'autoTheft', 'autoTheftPerPop', 'arsons', 'arsonsPerPop', 'violentPerPop', 'nonViolPerPop'] non_predictive_features = ['communityname', 'state', 'countyCode', 'communityCode', 'fold'] # + deletable=true editable=true df = pd.read_csv('../datasets/UnnormalizedCrimeData.csv'); df = df.replace('?',np.NAN) features = [x for x in df.columns if x not in goal_features and x not in non_predictive_features] len(features) # + deletable=true editable=true def drop_rows_with_null_goal_feature(old_df, feature): new_df = old_df.dropna(subset=[feature]) return new_df # + deletable=true 
editable=true class GradientBoostRegression: def __init__(self, df, goal_feature, loss_function): self.goal_df = drop_rows_with_null_goal_feature(df, goal_feature) self.goal_df[[goal_feature]] = self.goal_df[[goal_feature]].apply(pd.to_numeric) self.goal_df = self.goal_df[self.goal_df[goal_feature] <= self.goal_df[goal_feature].quantile(0.98)] imr = Imputer(missing_values='NaN', strategy='mean', axis=0) imr = imr.fit(self.goal_df[features]) imputed_data = imr.transform(self.goal_df[features]); self.df_X_train, self.df_X_test, self.df_y_train, self.df_y_test = \ train_test_split(imputed_data, self.goal_df[goal_feature], test_size=0.10) self.estimator = GradientBoostingRegressor(loss=loss_function) def train(self): self.estimator.fit(self.df_X_train, self.df_y_train) mse = mean_squared_error(self.df_y_test, self.estimator.predict(self.df_X_test)) print("MSE: %.4f" % mse) def plot_deviance(self): test_score = np.zeros((100,), dtype=np.float64) for i, y_pred in enumerate(self.estimator.staged_predict(self.df_X_test)): test_score[i] = self.estimator.loss_(self.df_y_test, y_pred) plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) plt.title('Deviance') plt.plot(np.arange(100) + 1, self.estimator.train_score_, 'b-', label='Training Set Deviance') plt.plot(np.arange(100) + 1, test_score, 'r-', label='Test Set Deviance') plt.legend(loc='upper right') plt.xlabel('Boosting Iterations') plt.ylabel('Deviance') plt.show() def plot_feature_importance(self): plt.figure(figsize=(12, 6)) feature_importance = self.estimator.feature_importances_ feature_importance = 100.0 * (feature_importance / feature_importance.max()) sorted_index = np.argsort(feature_importance)[::-1][:10] pos = np.arange(sorted_index.shape[0]) + .5 plt.subplot(1, 2, 2) plt.barh(pos, feature_importance[sorted_index], align='center') plt.yticks(pos, self.goal_df.columns[sorted_index]) plt.xlabel('Relative Importance') plt.title('Top 10 Features') plt.show() # + deletable=true editable=true murdersRegressor = 
GradientBoostRegression(df, 'murders', 'ls') murdersRegressor.train() murdersRegressor.plot_deviance() # - murdersRegressor.plot_feature_importance() # + deletable=true editable=true rapesRegressor = GradientBoostRegression(df, 'rapes', 'ls') rapesRegressor.train() rapesRegressor.plot_deviance() rapesRegressor.plot_feature_importance() # + deletable=true editable=true robberiesRegressor = GradientBoostRegression(df, 'robberies', 'huber') robberiesRegressor.train() robberiesRegressor.plot_deviance() robberiesRegressor.plot_feature_importance() # + deletable=true editable=true assaultsRegressor = GradientBoostRegression(df, 'assaults', 'huber') assaultsRegressor.train() assaultsRegressor.plot_deviance() assaultsRegressor.plot_feature_importance() # + deletable=true editable=true
exploratory_data_analysis/Gradient_Boosting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # QA Sentiment Analysis: Critical Thinking 6.3.3 # # Naive Bayes # ### By <NAME> # # Introduction # Question: "If I wanted to correlate class rank with SAT scores for a group of 50 individuals, which correlation coefficient would I use?" # Answer: "Because class ranks are an ordinal scale of measurement and SAT scores are measured on an interval/ ratio scale, you would have to convert SAT scores to an ordinal scale and use the Spearman rank-order correlation coefficient." # *** # # Importing Packages import numpy as np import pandas as pd import os from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import (f1_score,precision_score,recall_score, confusion_matrix) from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import RandomizedSearchCV, train_test_split # for hyperparameter tuning # *** # # Loading and Preprocessing the Data CTC_6_3_3 = pd.read_excel("/Users/jeffreyblack/Desktop/NLPProject/QA_CTC.xlsx", sheet_name = 'CTC_6_3_3') CTC_6_3_3 X_train, X_test, y_train, y_test = train_test_split(CTC_6_3_3['Answers'] , CTC_6_3_3['Grade'], test_size=0.20, random_state=42) # *** # # Feature Extraction # ### Convert reviews into vectors using the bag-of-words model # Note: I did not remove stop-words def extract_features(x_train, x_test): # This function extracts document features for input documents, x # Source: # https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer vectorizer = TfidfVectorizer(max_features=10000, ngram_range = (1,3)) train = vectorizer.fit_transform(x_train) test = vectorizer.transform(x_test) test.toarray() 
print((vectorizer.get_feature_names())) return train, test # Calling the TF-IDF Vectorizer to extract the features for the training and test predictors. feats_train, feats_test = extract_features(X_train, X_test) # training and test set features # *** # # Model Training: Naive Bayes # ### Fit the training data using Multinomial Naive Bayes classifier def build_NB_classifier(x, y): # This function builds a Multinomial Naive Bayes classifier with input (x,y): # Source: # https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html clf = MultinomialNB() clf.fit(x, y) return clf nb_clf = build_NB_classifier(feats_train, y_train) # ## Hyperparameter Tuning # I decided to use Random Search Cross Validation in Scikit-Learn to determine the best hyperparameters needed for tuning the Naive Bayes classifier model. The RandomizedSearchCV allowed me to define a grid of hyperparameter randes and randomly sample from the grid, while performing K-fold cross validation with each combination of values. # Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). alpha = [0, 1.0] # Whether to learn class prior probabilities or not. If false, a uniform prior will be used. fit_prior = [True, False] # Prior probabilities of the classes. If specified the priors are not adjusted according to the data. 
class_prior = [None, [0.05, 0.95],[0.1, 0.9],[0.2, 0.8],[0.25, 0.85], [0.3, 0.7],[0.35, 0.75], [0.4, 0.6],[0.45, 0.65]] # Create the random grid random_grid = {'alpha': alpha, 'fit_prior': fit_prior, 'class_prior': class_prior} print(random_grid) # Use the random grid to search for best hyperparameters # First create the base model to tune nb = MultinomialNB() # Random search of parameters, using 3 fold cross validation, # search across 100 different combinations, and use all available cores nb_random = RandomizedSearchCV(estimator = nb, param_distributions = random_grid, cv=3, scoring='f1_weighted', n_iter=1000, return_train_score = True) # Fit the random search model nb_random.fit(feats_train, y_train) # finding the best parameters nb_random.best_params_ # Using the output above, I tuned the Multinomial Naive Bayes classifier below. def build_NB_classifier_tuned(x, y): # This function builds a Multinomial Naive Bayes classifier with input (x,y): # Source: # https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html clf = MultinomialNB(fit_prior = True, class_prior = None, alpha = 0) clf.fit(x, y) return clf nb_clf_tuned = build_NB_classifier_tuned(feats_train, y_train) # *** # # Model Evaluation Functions # I used 3 evaluation metrics: recall, precision, and F1-score. I also used a confusion matrix to visualize false-positive, false-negative, true-positive, and true-negative. 
def recall_evaluator(x, y_truth, clf): # Function to evalute model performance, using recall: # Source: # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html#sklearn.metrics.recall_score result = 0.0 result = recall_score(y_true = y_truth, y_pred = clf.predict(x), average='weighted') return result def precision_evaluator(x, y_truth, clf): # Function to evalute model performance, using precision: # Source: # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html#sklearn.metrics.precision_score result = 0.0 result = precision_score(y_true = y_truth, y_pred = clf.predict(x), average='weighted') return result def f1_evaluator(x, y_truth, clf): # Function to evalute model performance, using F1-score: # Source: # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score result = 0.0 result = f1_score(y_true = y_truth, y_pred = clf.predict(x), average='weighted') return result # *** # ## Summary Results of Naive Bayes # ### Original model evaluation: recall_nb_score = recall_evaluator(feats_test, y_test, nb_clf) precision_nb_score = precision_evaluator(feats_test, y_test, nb_clf) f1_nb_score = f1_evaluator(feats_test, y_test, nb_clf) pred_nb = nb_clf.predict(feats_test) print('Naive Bayes Recall: ', recall_nb_score) print('Naive Bayes Precision: ', precision_nb_score) print('Naive Bayes F1: ', f1_nb_score) print("Confusion Matrix for Naive Bayes Classifier:") print(confusion_matrix(y_test, pred_nb)) # ### After hyperparameter tuning: recall_nb_tuned_score = recall_evaluator(feats_test, y_test, nb_clf_tuned) precision_nb_tuned_score = precision_evaluator(feats_test, y_test, nb_clf_tuned) f1_nb_tuned_score = f1_evaluator(feats_test, y_test, nb_clf_tuned) pred_nb_tuned = nb_clf_tuned.predict(feats_test) print('Naive Bayes Recall: ', recall_nb_tuned_score) print('Naive Bayes Precision: ', precision_nb_tuned_score) print('Naive Bayes F1: ', f1_nb_tuned_score) 
print("Confusion Matrix for Naive Bayes Classifier:") print(confusion_matrix(y_test, pred_nb_tuned))
NaiveBayes/NB_6_3_3_QA_Sentiment_Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv-wmdecomp # language: python # name: venv-wmdecomp # --- # + import gensim import numpy as np import pandas as pd from wmdecompose.utils import * from gensim.models import Word2Vec from gensim.models import KeyedVectors from gensim.models.callbacks import CallbackAny2Vec from gensim.test.utils import get_tmpfile from nltk.corpus import stopwords from nltk.tokenize.toktok import ToktokTokenizer # - # ## 1. Load Data and Pretrained Embeddings # + # %%time PATH = "../data/yelp_dataset/" yelp_data = [] r_dtypes = {"review_id":str, "user_id":str, "business_id":str, "stars": np.int32, "date":str, "text":str, "useful": np.int32, "funny": np.int32, "cool": np.int32} drop = ['review_id', 'user_id','useful', 'funny', 'cool'] #query = "date >= '2017-12-01'" with open(f"{PATH}yelp_academic_dataset_review.json", "r") as f: reader = pd.read_json(f, orient="records", lines=True, dtype=r_dtypes, chunksize=1000) for chunk in reader: reduced_chunk = chunk.drop(columns=drop) #.query(query) yelp_data.append(reduced_chunk) yelp_data = pd.concat(yelp_data, ignore_index=True) # - stopword_list=stopwords.words('english') yelp_data.shape yelp_business = pd.read_json(f"{PATH}yelp_academic_dataset_business.json", orient="records", lines=True) yelp_business.shape yelp_business = yelp_business[yelp_business.city.isin(["Portland", "Atlanta"])] yelp_business.shape yelp_merged = yelp_data.merge(yelp_business, on='business_id') yelp_merged.shape # + # %%time sentences = yelp_merged.text.astype('str').tolist() tokenizer = ToktokTokenizer() # + # %%time sentences_clean=[remove_stopwords(r, stopword_list, tokenizer) for r in sentences] # + # %%capture --no-display # %%time sentences_clean=pd.Series(sentences_clean).apply(denoise_text) sentences_clean=sentences_clean.apply(remove_special_characters) 
sentences_clean=sentences_clean.apply(simple_lemmatizer) # + # %%time sentences_clean=[remove_stopwords(r, stopword_list, tokenizer) for r in sentences_clean] # - sentences_clean[0] # + # %%time sentences_tokenized = [w.lower() for w in sentences_clean] sentences_tokenized = [tokenizer.tokenize(i) for i in sentences_tokenized] # - print("Loading GoogleNews Vectors") # %time model = KeyedVectors.load_word2vec_format('../embeddings/GoogleNews-vectors-negative300.bin.gz', binary=True) # ## 2. Phrase Data PHRASING = True MIN = 500 THRESHOLD = 200 # + # %%time if PHRASING: sentences_phrased = get_phrases(sentences_tokenized, min_count = MIN, threshold = THRESHOLD) sentences_training = sentences_phrased else: sentences_training = sentences_tokenized # - print(sentences_training[0]) print(sentences_training[1]) print(sentences_training[2]) # ## 3. Finetune Embeddings # ### 3.1. Initialize Loggers # + class EpochLogger(CallbackAny2Vec): '''Callback to log information about training''' def __init__(self): self.epoch = 1 def on_epoch_begin(self, model): print(f"Epoch {self.epoch} starting.") def on_epoch_end(self, model): print(f"Epoch {self.epoch} ended.") self.epoch += 1 class LossLogger(CallbackAny2Vec): '''Output loss at each epoch''' def __init__(self): self.epoch = 1 self.losses = [] def on_epoch_begin(self, model): print(f'Epoch: {self.epoch}', end='\t') def on_epoch_end(self, model): loss = model.get_latest_training_loss() if self.epoch != 1: previous_loss = self.losses[self.epoch-2] else: previous_loss = 0 self.losses.append(loss) difference = loss-previous_loss print(f' Loss: {loss} Difference: {difference}') self.epoch += 1 # - epoch_logger = EpochLogger() loss_logger = LossLogger() # ### 3.2. 
Initialize Model SIZE = model.vector_size WINDOW = 10 EPOCHS = 4 MIN_COUNT = 2 SG = 1 HS = 0 SEED = 42 LOSS = True ALPHA = 0.01 # + # %%time model_ft = Word2Vec(vector_size= SIZE, window = WINDOW, min_count= MIN_COUNT, epochs=EPOCHS, sg = SG, hs = HS, seed = SEED) model_ft.build_vocab(sentences_training) total_examples = model_ft.corpus_count model_ft.build_vocab([list(model.key_to_index.keys())], update=True) # - # ### 3.3. Train Word2Vec outfile = "../embeddings/yelp_w2v" # + # %%time model_ft.train(sentences_training, total_examples=total_examples, epochs=model_ft.epochs, callbacks=[loss_logger], compute_loss=LOSS, start_alpha = ALPHA) model_ft.wv.save_word2vec_format(f"{outfile}.txt", binary=False) # - # ### 3.4. Load Finetuned Vectors and Test ft_vectors = gensim.models.keyedvectors.KeyedVectors.load_word2vec_format(f"{outfile}.txt") ft_vectors.distance("citizen", "kane") model.distance("citizen", "kane") ft_vectors.distance("lord", "ring") model.distance("lord", "ring") ft_vectors.get_vector("citizen_kane")[:10]
paper/notebooks/finetune_w2v_Yelp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/M-Angeles/Linear-Algebra-58020/blob/main/Applications_of_Linear_System.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="EivP1PK6uSX3" # ##Systems of Linear Equations # + [markdown] id="iNObYl1quWfs" # ##Systems of Linear Equations can be solved with arrays and Numpy # + colab={"base_uri": "https://localhost:8080/"} id="VeU3fAfmuPun" outputId="7899061a-7455-4a02-e621-01c6b0e34f49" import numpy as np from scipy.linalg import solve A = np.array([[4,5],[3,-2]]) print(A) B = np.array([[7],[11]]) print(B) inv_A = np.linalg.inv(A) print(inv_A) X = np.linalg.inv(A).dot(B) print(X) # + colab={"base_uri": "https://localhost:8080/"} id="-Sne2patwB0j" outputId="09efae2b-ba71-47e3-87f0-0ac604a73aa5" inv_A = np.linalg.inv(A) print(inv_A) X = np.dot(inv_A,B) print(X) # + colab={"base_uri": "https://localhost:8080/"} id="VfmxwkUKwWWE" outputId="aaf8cf6c-f32d-469f-dafe-b81ceaecfea7" X = solve(A,B) print(X)
Applications_of_Linear_System.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from keras.applications.resnet50 import ResNet50 from keras.preprocessing import image from keras.applications.resnet50 import preprocess_input, decode_predictions import numpy as np import h5py import os from operator import itemgetter from collections import Counter import matplotlib.pyplot as plt import matplotlib from shutil import copy2 FOLDER = '../results' # + model = ResNet50(weights='imagenet') preds = [] for filename in os.listdir(FOLDER): if filename.endswith('.jpg'): img_path = os.path.join(FOLDER, filename) img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) x_preds = model.predict(x) x_pred = decode_predictions(x_preds, top=5)[0][0] preds.append((filename, x_pred[1], x_pred[2])) else: continue # - # Count the occurences of each class within the predictions cnt = Counter() preds_sorted = sorted(preds, key=itemgetter(2), reverse=True) for i in preds_sorted: cnt[i[1]] += 1 print(cnt) # Only include classes that have more than 20 occurences into the chart print(cnt) cnt_clean = Counter() for i in cnt: if cnt[i] < 20: cnt_clean['other'] +=1 del cnt_clean[i] else: cnt_clean[i] = cnt[i] print(cnt_clean) matplotlib.font_manager._rebuild() # + # Font matplotlib.rc('font',**{'serif':['Times New Roman'], 'family':'serif', 'size':24}) # Data to plot labels = cnt_clean.keys() sizes = cnt_clean.values() # Plot fig = plt.figure(figsize=(8,8), dpi=300) plt.pie(sizes, autopct='%1.1f%%') plt.legend(labels, loc=('upper right'), shadow=True, bbox_to_anchor=(1.2, 1.2)) plt.tight_layout() plt.axis('equal') plt.show() # - fig.savefig('plt.png', bbox_inches='tight', transparent=True, dpi=300)
4-nsfw/classify.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc="true" # # Table of Contents # <p><div class="lev1 toc-item"><a href="#Goodness-of-fit" data-toc-modified-id="Goodness-of-fit-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Goodness-of-fit</a></div> # - # # Goodness-of-fit # + import numpy as np import pandas as pd import scipy.stats as st import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline sns.set('notebook') from scipy.optimize import curve_fit # - f(x, y, 1) def f(x, y, deg): print(deg) p = np.polyfit(x, y, deg) xp = np.array( [[xi**d for d in range(deg,-1,-1)] for xi in x]).transpose() p = p.reshape(1,-1) print(xp,p) return (p@xp).ravel() N=20 x = np.linspace(0,1,N) y = x**2 +x/2 - 1 + np.random.normal(0,.03,N) plt.figure(figsize=(10,10)) plt.plot(x,y,'.',markersize=10) plt.plot(x,f(x,y,1)) sns.lmplot(x='',y='',data=fits)
notebooks/exploration/Goodness of fit.ipynb
# --- # jupyter: # jupytext: # cell_metadata_filter: collapsed,code_folding # cell_metadata_json: true # formats: ipynb,py:percent # notebook_metadata_filter: all # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # language_info: # codemirror_mode: # name: ipython # version: 3 # file_extension: .py # mimetype: text/x-python # name: python # nbconvert_exporter: python # pygments_lexer: ipython3 # version: 3.8.5 # latex_envs: # LaTeX_envs_menu_present: true # autoclose: false # autocomplete: false # bibliofile: biblio.bib # cite_by: apalike # current_citInitial: 1 # eqLabelWithNumbers: true # eqNumInitial: 1 # hotkeys: # equation: Ctrl-E # itemize: Ctrl-I # labels_anchors: false # latex_user_defs: false # report_style_numbering: false # user_envs_cfg: false # toc: # base_numbering: 1 # nav_menu: {} # number_sections: true # sideBar: true # skip_h1_title: false # title_cell: Table of Contents # title_sidebar: Contents # toc_cell: false # toc_position: {} # toc_section_display: true # toc_window_display: false # --- # %% [markdown] # # The Life Cycle Model: Theory vs Data # # [![badge](https://img.shields.io/badge/Launch%20using%20-Econ--ARK-blue)](https://econ-ark.org/materials/lifecyclemodeltheoryvsdata#launch) # # National registry data on income and wealth from Scandinavian countries (esp. Norway) have recently become available (with a lot of security) to some (lucky!) researchers. These data offer a uniquely powerful tool for testing (and improving) our models of consumption and saving behavior over the life cycle. # # This notebook is an example of how to construct a life cycle model with the HARK toolkit that makes predictions that can be compared to the raw data statistics=. # # For example, some papers have tabulated information about the **growth rate** of assets at different ages over the life cycle. 
# # The default parameters of the HARK life cycle model have not been optmized to match features of the Norwegian data; a first step in a real "structural" estimation would be to use Norwegian calibrate the inputs to the model (like the profile of income, and the magnitude of income shocks, over the life cycle), and then to find the values of parameters like the time preference rate that allow the model to fit the data best. (See [SolvingMicroDSOPs](https://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) for how this can be done, and search for the corresponding HARK content using [our documentation](https://hark.readthedocs.io)). # %% {"code_folding": []} # Initial imports and notebook setup, click arrow to show import HARK.ConsumptionSaving.ConsIndShockModel as cShksModl # The consumption-saving micro model from HARK.utilities import plot_funcs_der, plot_funcs # Some tools import pandas as pd import numpy as np # %% # --------------------------------------------------------------------------------- # - Define all of the model parameters for SolvingMicroDSOPs and ConsumerExamples - # --------------------------------------------------------------------------------- exp_nest = 3 # Number of times to "exponentially nest" when constructing a_grid aXtraMin = 0.001 # Minimum end-of-period "assets above minimum" value aXtraMax = 20 # Maximum end-of-period "assets above minimum" value aXtraHuge = None # A very large value of assets to add to the grid, not used aXtraExtra = None # Some other value of assets to add to the grid, not used aXtraCount = 8 # Number of points in the grid of "assets above minimum" BoroCnstArt = 0.0 # Artificial borrowing constraint; imposed minimum level of end-of period assets CubicBool = True # Use cubic spline interpolation when True, linear interpolation when False vFuncBool = False # Whether to calculate the value function during solution Rfree = 1.03 # Interest factor on assets PermShkCount = 7 # Number of points in discrete approximation to 
permanent income shocks TranShkCount = 7 # Number of points in discrete approximation to transitory income shocks UnempPrb = 0.005 # Probability of unemployment while working UnempPrbRet = 0.000 # Probability of "unemployment" while retired IncUnemp = 0.0 # Unemployment benefits replacement rate IncUnempRet = 0.0 # "Unemployment" benefits when retired final_age = 90 # Age at which the problem ends (die with certainty) retirement_age = 65 # Age at which the consumer retires initial_age = 25 # Age at which the consumer enters the model TT = final_age - initial_age # Total number of periods in the model retirement_t = retirement_age - initial_age - 1 CRRA_start = 4.0 # Initial guess of the coefficient of relative risk aversion during estimation (rho) DiscFacAdj_start = 0.99 # Initial guess of the adjustment to the discount factor during estimation (beth) DiscFacAdj_bound = [0.0001,15.0] # Bounds for beth; if violated, objective function returns "penalty value" CRRA_bound = [0.0001,15.0] # Bounds for rho; if violated, objective function returns "penalty value" # Expected growth rates of permanent income over the lifecycle, starting from age 25 PermGroFac = [ 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.025, 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 1.01 , 0.7 , # <-- This represents retirement 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. 
] # Age-varying discount factors over the lifecycle, lifted from Cagetti (2003) DiscFac_timevary = [1.064914 , 1.057997 , 1.051422 , 1.045179 , 1.039259 , 1.033653 , 1.028352 , 1.023348 , 1.018632 , 1.014198 , 1.010037 , 1.006143 , 1.002509 , 0.9991282, 0.9959943, 0.9931012, 0.9904431, 0.9880143, 0.9858095, 0.9838233, 0.9820506, 0.9804866, 0.9791264, 0.9779656, 0.9769995, 0.9762239, 0.9756346, 0.9752274, 0.9749984, 0.9749437, 0.9750595, 0.9753422, 0.9757881, 0.9763936, 0.9771553, 0.9780698, 0.9791338, 0.9803439, 0.981697 , 0.8287214, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111, 0.9902111] # Survival probabilities over the lifecycle, starting from age 25 LivPrb = [ 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. 
, # <-- automatic survival to age 65 0.98438596, 0.98438596, 0.98438596, 0.98438596, 0.98438596, 0.97567062, 0.97567062, 0.97567062, 0.97567062, 0.97567062, 0.96207901, 0.96207901, 0.96207901, 0.96207901, 0.96207901, 0.93721595, 0.93721595, 0.93721595, 0.93721595, 0.93721595, 0.63095734, 0.63095734, 0.63095734, 0.63095734, 0.63095734] # Standard deviations of permanent income shocks by age, starting from age 25 PermShkStd = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.0, 0.0, 0.0, # <-- no permanent income shocks after retirement 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Standard deviations of transitory income shocks by age, starting from age 25 TranShkStd = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.0, 0.0, 0.0, # <-- no transitory income shocs after retirement 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Age groups for the estimation: calculate average wealth-to-permanent income ratio # for consumers within each of these age groups, compare actual to simulated data empirical_cohort_age_groups = [[ 26,27,28,29,30 ], [ 31,32,33,34,35 ], [ 36,37,38,39,40 ], [ 41,42,43,44,45 ], [ 46,47,48,49,50 ], [ 51,52,53,54,55 ], [ 56,57,58,59,60 ]] initial_wealth_income_ratio_vals = [0.17, 0.5, 0.83] # Three point discrete distribution of initial w initial_wealth_income_ratio_probs = [0.33333, 0.33333, 0.33334] # Equiprobable discrete distribution of initial w num_agents = 10000 # Number of agents to simulate bootstrap_size = 50 # Number of re-estimations to do during bootstrap seed = 31382 # Just an integer to seed the estimation 
# Dictionary that can be passed to ConsumerType to instantiate
init_consumer_objects = {"CRRA":CRRA_start,
                         "Rfree":Rfree,
                         "PermGroFac":PermGroFac,
                         "BoroCnstArt":BoroCnstArt,
                         "PermShkStd":PermShkStd,
                         "PermShkCount":PermShkCount,
                         "TranShkStd":TranShkStd,
                         "TranShkCount":TranShkCount,
                         "T_cycle":TT,
                         "UnempPrb":UnempPrb,
                         "UnempPrbRet":UnempPrbRet,
                         "T_retire":retirement_t,
                         "T_age":TT+1,
                         "IncUnemp":IncUnemp,
                         "IncUnempRet":IncUnempRet,
                         "aXtraMin":aXtraMin,
                         "aXtraMax":aXtraMax,
                         "aXtraCount":aXtraCount,
                         "aXtraExtra":[aXtraExtra,aXtraHuge],
                         "aXtraNestFac":exp_nest,
                         "LivPrb":LivPrb,
                         "DiscFac":DiscFac_timevary,
                         'AgentCount':num_agents,
                         'seed':seed,
                         'tax_rate':0.0,
                         'vFuncBool':vFuncBool,
                         'CubicBool':CubicBool
                        }

# %% {"code_folding": []}
# Set up default values for CRRA, DiscFac, and simulation variables in the dictionary
init_consumer_objects["CRRA"]= 2.00            # Default coefficient of relative risk aversion (rho)
init_consumer_objects["DiscFac"]= 0.97         # Default intertemporal discount factor (beta)
init_consumer_objects["PermGroFacAgg"]= 1.0    # Aggregate permanent income growth factor
init_consumer_objects["aNrmInitMean"]= -10.0   # Mean of log initial assets
init_consumer_objects["aNrmInitStd"]= 1.0      # Standard deviation of log initial assets
init_consumer_objects["pLvlInitMean"]= 0.0     # Mean of log initial permanent income
init_consumer_objects["pLvlInitStd"]= 0.0      # Standard deviation of log initial permanent income

# %%
# Make an instance of a lifecycle consumer to be used for estimation
LifeCyclePop = cShksModl.IndShockConsumerType(**init_consumer_objects)

# %% {"code_folding": []}
# Solve and simulate the model (ignore the "warning" message)
LifeCyclePop.solve()                            # Obtain consumption rules by age
LifeCyclePop.unpack_cFunc()                     # Expose the consumption rules

# Which variables do we want to track
LifeCyclePop.track_vars = ['aNrm','aLvl','pLvl','mNrm','cNrm','TranShk']

LifeCyclePop.T_sim = 120                        # Nobody lives to be older than 145 years (=25+120)
LifeCyclePop.initialize_sim()                   # Construct the age-25 distribution of income and assets
LifeCyclePop.simulate()                         # Simulate a population behaving according to this model

# %% {"code_folding": []}
# Plot the consumption functions during working life
print('Consumption as a function of market resources while working:')
mMin = min([LifeCyclePop.solution[t].mNrmMin for t in range(LifeCyclePop.T_cycle)])
plot_funcs(LifeCyclePop.cFunc[:LifeCyclePop.T_retire],mMin,5)

# %% {"code_folding": []}
# Define the saving rate function
def savRteFunc(SomeType, m, t):
    """
    Compute the saving rate out of total (capital plus labor) income at age t.

    Parameters:
    ----------
        SomeType: Agent type that has been solved and simulated.
        m: normalized market resources of agent
        t: age of agent (from starting in the workforce)

    Returns:
    --------
        savRte: float
    """
    inc = (SomeType.Rfree -1.)*(m-1.)+1. # Normalized by permanent labor income
    cns = SomeType.solution[t].cFunc(m)  # Consumption (normalized)
    sav = inc - cns                      # Flow of saving this period
    savRte = sav / inc                   # Saving Rate
    return savRte

# %% {"code_folding": []}
# Create a matrix gathering useful data:
# 't_now', 'aNrm_hist', 'cNrm_hist', employment-status in date t and date t-1,
# aLvlGro_hist, Saving rate

w, h = 1, LifeCyclePop.T_cycle
giant_list = [[0 for x in range(w)] for y in range(h)]
savRte_list = []

import warnings
warnings.filterwarnings("ignore") # Suppress some disturbing but harmless warnings

for t in range(1,LifeCyclePop.T_cycle+1):
    #aLvlGro[0] = 0 # set the first growth rate to 0, since there is no data for period 0

    # Log growth rate of the LEVEL of assets a*p between t-1 and t.
    # NOTE: fixed parenthesization -- the previous code divided by aNrm[t-1]
    # but then MULTIPLIED by pLvl[t-1], i.e. log(a_t*p_t / a_{t-1} * p_{t-1})
    # instead of the intended ratio log((a_t*p_t) / (a_{t-1}*p_{t-1})).
    aLvlGroNow = np.log((LifeCyclePop.history['aNrm'][t]  *LifeCyclePop.history['pLvl'][t])/
                        (LifeCyclePop.history['aNrm'][t-1]*LifeCyclePop.history['pLvl'][t-1])) # (10000,)

    # Call the saving rate function defined above
    savRte = savRteFunc(LifeCyclePop, LifeCyclePop.history['mNrm'][t] , t)
    savRte_list.append(savRte) # Add this period's saving rate to the list

    # Create elements of matrix list
    matrix_list = [0 for number in range(7)]
    matrix_list[0] = t
    matrix_list[1] = LifeCyclePop.history['aNrm'][t]
    matrix_list[2] = LifeCyclePop.history['cNrm'][t]
    matrix_list[3] = LifeCyclePop.history['TranShk'][t]
    matrix_list[4] = LifeCyclePop.history['TranShk'][t-1]
    matrix_list[5] = aLvlGroNow
    matrix_list[6] = savRte

    giant_list[t-1] = matrix_list

# %% {"code_folding": []}
# Construct the level of assets A from a*p where a is the ratio to permanent income p
# Remember 41 is "years after entering workforce" (=age 25); 66 is the year right after retirement
LifeCyclePop.history['aLvl'] = LifeCyclePop.history['aNrm']*LifeCyclePop.history['pLvl']
aGro41=LifeCyclePop.history['aLvl'][41]/LifeCyclePop.history['aLvl'][40]
aGro41NoU=aGro41[aGro41[:]>0.2] # Throw out extreme outliers; don't want growth rates relative to 0 income!

# %% {"code_folding": []}
# Plot the (truncated) distribution of growth rates of wealth between age 65 and 66 (=25 + 41)
from matplotlib import pyplot as plt
n, bins, patches = plt.hist(aGro41NoU,50,density=True)

# %%
# put your solution here

# %%
# put your answer here

# %%
# put your answer here

# %%
# put your solution here

# %%
# put your solution here

# %% [markdown]
# # Saving Rates and Lifetime Income Growth
#
# We are interested in how income growth over the lifetime of the agent affects their saving rate and asset ratio $a=A/P$.

# %%
# Split realized labor income (pLvl * TranShk) into the first and second twenty
# working years, and measure lifetime income growth as their ratio.
cumulative_income_first_half  = np.sum(LifeCyclePop.history['pLvl'][0:20,:]*LifeCyclePop.history['TranShk'][0:20,:],0)
cumulative_income_second_half = np.sum(LifeCyclePop.history['pLvl'][20:40,:]*LifeCyclePop.history['TranShk'][20:40,:],0)
lifetime_growth = cumulative_income_second_half/cumulative_income_first_half

t=39  # last working year (age 64 = 25 + 39)
vigntiles = pd.qcut(lifetime_growth,20,labels=False)  # assign each agent to a lifetime-growth vigntile
savRte = savRteFunc(LifeCyclePop, LifeCyclePop.history['mNrm'][t] , t)
assetsByVigtile = np.zeros(20)
assetsNrmByVigtile = np.zeros(20)
savRteByVigtile = np.zeros(20)
for i in range(20):
    savRteByVigtile[i]   = np.mean(savRte[vigntiles==i])
    assetsByVigtile[i]   = np.mean(LifeCyclePop.history['aLvl'][t][vigntiles==i])
    assetsNrmByVigtile[i] = np.mean(LifeCyclePop.history['aNrm'][t][vigntiles==i])
plt.plot(np.array(range(20)), savRteByVigtile)
plt.title("Saving Rate at age 65, by Vigntile of Lifetime Income Growth")
plt.xlabel("Vigntile of Lifetime Income Growth")
plt.ylabel("Savings Rate")
plt.figure()
plt.plot(np.array(range(20)), assetsByVigtile)
plt.title("Assets at age 65, by Vigntile of Lifetime Income Growth")
plt.xlabel("Vigntile of Lifetime Income Growth")
plt.ylabel("Assets")
plt.figure()
plt.plot(np.array(range(20)), assetsNrmByVigtile)
plt.title("Normalized Assets at age 65, by Vigntile of Lifetime Income Growth")
plt.xlabel("Vigntile of Lifetime Income Growth")
plt.ylabel("Normalized Assets")
notebooks/LifeCycleModelTheoryVsData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="x0DJqotopcyb" """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell. try: import nemo.collections.asr as nemo_asr except ModuleNotFoundError: # ! python -m pip install --upgrade git+https://github.com/NVIDIA/NeMo.git@main#egg=nemo_toolkit[all] # + id="CH7yR7cSwPKr" import json import os import wget from IPython.display import Audio import numpy as np import scipy.io.wavfile as wav # ! pip install pandas # optional # ! pip install plotly from plotly import graph_objects as go # + [markdown] id="xXRARM8XtK_g" # # Introduction # End-to-end Automatic Speech Recognition (ASR) systems surpassed traditional systems in performance but require large amounts of labeled data for training. # # This tutorial will show how to use a pre-trained with Connectionist Temporal Classification (CTC) ASR model, such as [QuartzNet Model](https://arxiv.org/abs/1910.10261) to split long audio files and the corresponding transcripts into shorter fragments that are suitable for an ASR model training. # # We're going to use [ctc-segmentation](https://github.com/lumaku/ctc-segmentation) Python package based on the algorithm described in [CTC-Segmentation of Large Corpora for German End-to-end Speech Recognition](https://arxiv.org/pdf/2007.09127.pdf). # + id="8FAZKakrIyGI" # ! 
pip install ctc_segmentation==1.1.0 # ! pip install num2words # ! apt-get install -y ffmpeg # + id="UD-OuFmEOX3T" # If you're running the notebook locally, update the TOOLS_DIR path below # In Colab, a few required scripts will be downloaded from NeMo github TOOLS_DIR = '<UPDATE_PATH_TO_NeMo_root>/tools/ctc_segmentation/scripts' if 'google.colab' in str(get_ipython()): TOOLS_DIR = 'scripts/' os.makedirs(TOOLS_DIR, exist_ok=True) required_files = ['prepare_data.py', 'normalization_helpers.py', 'run_ctc_segmentation.py', 'verify_segments.py', 'cut_audio.py', 'process_manifests.py', 'utils.py'] for file in required_files: if not os.path.exists(os.path.join(TOOLS_DIR, file)): file_path = 'https://raw.githubusercontent.com/NVIDIA/NeMo/main/tools/ctc_segmentation/' + TOOLS_DIR + file print(file_path) wget.download(file_path, TOOLS_DIR) elif not os.path.exists(TOOLS_DIR): raise ValueError(f'update path to NeMo root directory') # + [markdown] id="S1DZk-inQGTI" # `TOOLS_DIR` should now contain scripts that we are going to need in the next steps, all necessary scripts could be found [here](https://github.com/NVIDIA/NeMo/tree/main/tools/ctc_segmentation/scripts). # + id="1C9DdMfvRFM-" print(TOOLS_DIR) # ! ls -l $TOOLS_DIR # + [markdown] id="XUEncnqTIzF6" # # Data Download # First, let's download an audio file from [https://librivox.org/](https://librivox.org/). # + id="bkeKX2I_tIgV" ## create data directory and download an audio file WORK_DIR = 'WORK_DIR' DATA_DIR = WORK_DIR + '/DATA' os.makedirs(DATA_DIR, exist_ok=True) audio_file = 'childrensshortworks019_06acarriersdog_am_128kb.mp3' if not os.path.exists(os.path.join(DATA_DIR, audio_file)): print('Downloading audio file') wget.download('http://archive.org/download/childrens_short_works_vol_019_1310_librivox/' + audio_file, DATA_DIR) # + [markdown] id="-_XE9MkKuAA7" # Next, we need to get the corresponding transcript. 
# # Note, the text file and the audio file should have the same base name, for example, an audio file `example.wav` or `example.mp3` should have corresponding text data stored under `example.txt` file. # + id="3NSz3Qb7pzOe" # text source: http://www.gutenberg.org/cache/epub/24263/pg24263.txt text = """ A carrier on his way to a market town had occasion to stop at some houses by the road side, in the way of his business, leaving his cart and horse upon the public road, under the protection of a passenger and a trusty dog. Upon his return he missed a led horse, belonging to a gentleman in the neighbourhood, which he had tied to the end of the cart, and likewise one of the female passengers. On inquiry he was informed that during his absence the female, who had been anxious to try the mettle of the pony, had mounted it, and that the animal had set off at full speed. The carrier expressed much anxiety for the safety of the young woman, casting at the same time an expressive look at his dog. Oscar observed his master's eye, and aware of its meaning, instantly set off in pursuit of the pony, which coming up with soon after, he made a sudden spring, seized the bridle, and held the animal fast. Several people having observed the circumstance, and the perilous situation of the girl, came to relieve her. Oscar, however, notwithstanding their repeated endeavours, would not quit his hold, and the pony was actually led into the stable with the dog, till such time as the carrier should arrive. Upon the carrier entering the stable, Oscar wagged his tail in token of satisfaction, and immediately relinquished the bridle to his master. 
""" with open(os.path.join(DATA_DIR, audio_file.replace('mp3', 'txt')), 'w') as f: f.write(text) # + [markdown] id="yyUE_t4vw2et" # The `DATA_DIR` should now contain both audio and text files: # + id="VXrTzTyIpzE8" # !ls -l $DATA_DIR # + [markdown] id="FWqlbSryw_WL" # Listen to the audio: # + id="1vC2DHawIGt8" Audio(os.path.join(DATA_DIR, audio_file)) # + [markdown] id="RMT5lkPYzZHK" # As one probably noticed, the audio file contains a prologue and an epilogue that are missing in the corresponding text. The segmentation algorithm could handle extra audio fragments at the end and the beginning of the audio, but prolonged untranscribed audio segments in the middle of the file could deteriorate segmentation results. That's why to improve the segmentation quality, it is recommended to normalize text, so that transcript contains spoken equivalents of abbreviations and numbers. # # # Prepare Text and Audio # # We're going to use `prepare_data.py` script to prepare both text and audio data for segmentation. # # Text preprocessing: # * the text will be split into sentences and stored under '$OUTPUT_DIR/processed/*.txt' where each sentence is going to start with a new line (we're going to find alignments for these sentences in the next steps) # * out-of-vocabulary words will be removed based on pre-trained ASR model vocabulary, (optionally) text will be changed to lowercase # * sentences for alignment with the original punctuation and capitalization will be stored under `$OUTPUT_DIR/processed/*_with_punct.txt` # * numbers will be normalized in a naive way to replace, for example, `12` with `one two`. Such normalization is usually enough for proper segmentation but to build a high-quality training dataset, all out-vocabulary symbols should be replaced with their actual spoken representations. 
# # Audio preprocessing: # * `.mp3` files will be converted to `.wav` files # * audio files will be resampled to use the same sampling rate as was used to pre-train the ASR model we're using for alignment # * stereo tracks will be converted to mono # * since librivox.org audio contains relatively long prologues, we're also cutting a few seconds from the beginning of the audio files (optional step, see `--cut_prefix` argument). In some cases, if an audio contains a very long untranscribed prologue, increasing `--cut_prefix` value might help improve segmentation quality. # # # The `prepare_data.py` will preprocess all `.txt` files found in the `--in_text=$DATA_DIR` and all `.mp3` files located at `--audio_dir=$DATA_DIR`. # # + id="u4zjeVVv-UXR" MODEL = 'QuartzNet15x5Base-En' OUTPUT_DIR = WORK_DIR + '/output' # ! python $TOOLS_DIR/prepare_data.py \ # --in_text=$DATA_DIR \ # --output_dir=$OUTPUT_DIR/processed/ \ # --language='eng' \ # --cut_prefix=3 \ # --model=$MODEL \ # --audio_dir=$DATA_DIR # + [markdown] id="kmDTCuTLH7pm" # The following three files should be generated and stored at the `$OUTPUT_DIR/processed` folder: # * childrensshortworks019_06acarriersdog_am_128kb.txt # * childrensshortworks019_06acarriersdog_am_128kb.wav # * childrensshortworks019_06acarriersdog_am_128kb_with_punct.txt # + id="6R7OKAsYH9p0" # ! ls -l $OUTPUT_DIR/processed # + [markdown] id="bIvKBwRcH_9W" # The `.txt` file without punctuation contains preprocessed text phrases that we're going to align within the audio file. Here, we split the text into sentences. Each line should contain a text snippet for alignment. 
# + id="74GLpMgoICmk" with open(os.path.join(OUTPUT_DIR, 'processed', audio_file.replace('.mp3', '.txt')), 'r') as f: for line in f: print (line) # + [markdown] id="QrvZAjeoR9U1" # # Run CTC-Segmentation # # In this step, we're going to use the [`ctc-segmentation`](https://github.com/lumaku/ctc-segmentation) to find the start and end time stamps for the segments we created during the previous step. # # # As described in the [CTC-Segmentation of Large Corpora for German End-to-end Speech Recognition](https://arxiv.org/pdf/2007.09127.pdf), the algorithm is relying on a CTC-based ASR model to extract utterance segments with exact time-wise alignments. For this tutorial, we're using a pre-trained 'QuartzNet15x5Base-En' model. # + id="xyKtaqAd-Tvk" WINDOW = 8000 # ! python $TOOLS_DIR/run_ctc_segmentation.py \ # --output_dir=$OUTPUT_DIR \ # --data=$OUTPUT_DIR/processed \ # --model=$MODEL \ # --window_len=$WINDOW \ # --no_parallel # + [markdown] id="wY27__e3HmhH" # `WINDOW` parameter might need to be adjusted depending on the length of the utterance one wants to align, the default value should work in most cases. # # Let's take a look at the generated alignments. # The expected output for our audio sample with 'QuartzNet15x5Base-En' model looks like this: # # ``` # <PATH_TO>/processed/childrensshortworks019_06acarriersdog_am_128kb.wav # 16.03 32.39 -4.5911999284929115 | a carrier on ... a trusty dog. | ... # 33.31 45.01 -0.22886803973405373 | upon his ... passengers. | ... # 46.17 58.57 -0.3523662826061572 | on inquiry ... at full speed. | ... # 59.75 69.43 -0.04128918756038118 | the carrier ... dog. | ... # 69.93 85.31 -0.3595261826390344 | oscar observed ... animal fast. | ... # 85.95 93.43 -0.04447770533708611 | several people ... relieve her. | ... # 93.61 105.95 -0.07326174931639003 | oscar however ... arrive. | ... # 106.65 116.91 -0.14680841514778062 | upon the carrier ... his master. | ... 
# ``` # # Details of the file content: # - the first line of the file contains the path to the original audio file # - all subsequent lines contain: # * the first number is the start of the segment (in seconds) # * the second one is the end of the segment (in seconds) # * the third value - alignment confidence score (in log space) # * text fragments corresponding to the timestamps # * original text without pre-processing # + id="ktBAsfJRVCwI" alignment_file = str(WINDOW) + '_' + audio_file.replace('.mp3', '_segments.txt') # ! cat $OUTPUT_DIR/segments/$alignment_file # + [markdown] id="xCwEFefHZz1C" # Finally, we're going to split the original audio file into segments based on the found alignments. We're going to create three subsets and three corresponding manifests: # * high scored clips (segments with the segmentation score above the threshold value, default threshold value = -5) # * low scored clips (segments with the segmentation score below the threshold) # * deleted segments (segments that were excluded during the alignment. For example, in our sample audio file, the prologue and epilogue that don't have the corresponding transcript were excluded. Oftentimes, deleted files also contain such things as clapping, music, or hard breathing. # # The alignment score values depend on the pre-trained model quality and the dataset, the `THRESHOLD` parameter might be worth adjusting based on the analysis of the low/high scored clips. # # Also note, that the `OFFSET` parameter is something one might want to experiment with since timestamps have a delay (offset) depending on the model. # # + id="6YM64RPlitPL" OFFSET = 0 THRESHOLD = -5 # ! 
python $TOOLS_DIR/cut_audio.py \ # --output_dir=$OUTPUT_DIR \ # --model=$MODEL \ # --alignment=$OUTPUT_DIR/segments/ \ # --threshold=$THRESHOLD \ # --offset=$OFFSET # + [markdown] id="QoyS0T8AZxcx" # `manifests` folder should be created under `OUTPUT_DIR`, and it should contain # corresponding manifests for the three groups of clips described above: # + id="1UaSIflBZwaV" # ! ls -l $OUTPUT_DIR/manifests # + id="F-nPT8z_IVD-" def plot_signal(signal, sample_rate): """ Plot the signal in time domain """ fig_signal = go.Figure( go.Scatter(x=np.arange(signal.shape[0])/sample_rate, y=signal, line={'color': 'green'}, name='Waveform', hovertemplate='Time: %{x:.2f} s<br>Amplitude: %{y:.2f}<br><extra></extra>'), layout={ 'height': 200, 'xaxis': {'title': 'Time, s'}, 'yaxis': {'title': 'Amplitude'}, 'title': 'Audio Signal', 'margin': dict(l=0, r=0, t=40, b=0, pad=0), } ) fig_signal.show() def display_samples(manifest): """ Display audio and reference text.""" with open(manifest, 'r') as f: for line in f: sample = json.loads(line) sample_rate, signal = wav.read(sample['audio_filepath']) plot_signal(signal, sample_rate) display(Audio(sample['audio_filepath'])) display('Reference text: ' + sample['text_no_preprocessing']) display('ASR transcript: ' + sample['transcript']) print('\n' + '-' * 110) # + [markdown] id="S69UFA30ZvxV" # Let's examine the high scored segments we obtained. # # The `Reference text` in the next cell represents the original text without pre-processing, while `ASR transcript` is an ASR model prediction with greedy decoding. Also notice, that `ASR transcript` in some cases contains errors that could decrease the alignment score, but usually it doesn’t hurt the quality of the aligned segments. # + id="Q45uBtsHIaAD" high_score_manifest = str(WINDOW) + '_' + audio_file.replace('.mp3', '_high_score_manifest.json') display_samples(os.path.join(OUTPUT_DIR, 'manifests', high_score_manifest)) # + pycharm={"name": "#%%\n"} # ! 
cat $OUTPUT_DIR/manifests/$high_score_manifest # + id="3ZXRj6Yy5vpL" # Multiple files alignment Up until now, we were processing only one file at a time, but to create a large dataset processing of multiple files simultaneously could help speed up things considerably. Let's download another audio file and corresponding text. # + [markdown] id="yivXpD25T4Ir" # # Multiple files alignment # # Up until now, we were processing only one file at a time, but to create a large dataset processing of multiple files simultaneously could help speed up things considerably. # # Let's download another audio file and corresponding text. # + id="KRc9yMjPXPgj" # https://librivox.org/frost-to-night-by-edith-m-thomas/ audio_file_2 = 'frosttonight_thomas_bk_128kb.mp3' if not os.path.exists(os.path.join(DATA_DIR, audio_file_2)): print('Downloading audio file') wget.download('http://www.archive.org/download/frost_to-night_1710.poem_librivox/frosttonight_thomas_bk_128kb.mp3', DATA_DIR) # text source: text source: https://www.bartleby.com/267/151.html text = """ APPLE-GREEN west and an orange bar, And the crystal eye of a lone, one star … And, “Child, take the shears and cut what you will, Frost to-night—so clear and dead-still.” Then, I sally forth, half sad, half proud, And I come to the velvet, imperial crowd, The wine-red, the gold, the crimson, the pied,— The dahlias that reign by the garden-side. The dahlias I might not touch till to-night! A gleam of the shears in the fading light, And I gathered them all,—the splendid throng, And in one great sheaf I bore them along. . . . . . . In my garden of Life with its all-late flowers I heed a Voice in the shrinking hours: “Frost to-night—so clear and dead-still” … Half sad, half proud, my arms I fill. """ with open(os.path.join(DATA_DIR, audio_file_2.replace('mp3', 'txt')), 'w') as f: f.write(text) # + [markdown] id="YhLj6hZaFP_S" # `DATA_DIR` should now contain two .mp3 files and two .txt files: # + id="wpwWgZ5InuQX" # ! 
ls -l $DATA_DIR # + id="hlxG3bOSnHZR" Audio(os.path.join(DATA_DIR, audio_file_2)) # + [markdown] id="3ftilXu-5tzT" # Finally, we need to download a script to perform all the above steps starting from the text and audio preprocessing to segmentation and manifest creation in a single step. # + id="KSwsrkbru1s5" if 'google.colab' in str(get_ipython()) and not os.path.exists('run_sample.sh'): wget.download('https://raw.githubusercontent.com/NVIDIA/NeMo/main/tools/ctc_segmentation/run_sample.sh', '.') # + id="hJ-GcvOP9Sjt" # ! ls -l # + [markdown] id="nYXNvBDsHMEu" # Next, we're going to execute `run_sample.sh` script to find alignment for two audio/text samples. By default, if the alignment is not found for an initial WINDOW size, the initial window size will be doubled a few times to re-attempt alignment. # # `run_sample.sh` applies two initial WINDOW sizes, 8000 and 12000, and then adds segments that were similarly aligned with two window sizes to `verified_segments` folder. This could be useful to reduce the amount of manual work while checking the alignment quality. # + id="hRFAl0gO92bp" OUTPUT_DIR_2 = os.path.join(WORK_DIR, 'output_multiple_files') # ! bash $TOOLS_DIR/../run_sample.sh \ # --MODEL_NAME_OR_PATH=$MODEL \ # --DATA_DIR=$DATA_DIR \ # --OUTPUT_DIR=$OUTPUT_DIR_2 \ # --SCRIPTS_DIR=$TOOLS_DIR \ # --CUT_PREFIX=3 \ # --MIN_SCORE=$THRESHOLD # + [markdown] id="zzJTwKq2Kl9U" # High scored manifests for the data samples were aggregated to the `all_manifest.json` under `OUTPUT_DIR_2`. # + id="nacE_iQ2_85L" display_samples(os.path.join(OUTPUT_DIR_2, 'all_manifest.json')) # + [markdown] id="lcvT3P2lQ_GS" # # Next Steps # # Check out [NeMo Speech Data Explorer tool](https://github.com/NVIDIA/NeMo/tree/main/tools/speech_data_explorer#speech-data-explorer) to interactively evaluate the aligned segments. # + [markdown] id="GYylwvTX2VSF" # # References # Kürzinger, Ludwig, et al. 
["CTC-Segmentation of Large Corpora for German End-to-End Speech Recognition."](https://arxiv.org/abs/2007.09127) International Conference on Speech and Computer. Springer, Cham, 2020.
tutorials/tools/CTC_Segmentation_Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Jericohm/daa_2021_1/blob/master/23Octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="YIaTfSSjzQ7N" outputId="f378042c-b6e4-4102-cb25-6897b7b605a4" colab={"base_uri": "https://localhost:8080/", "height": 357}
from time import time


def ejemplo2(n):
    """Two nested n-loops: performs and returns n*n increments (O(n^2))."""
    contador = 0
    start_time = time()
    for i in range(n):
        for j in range(n):
            contador += 1
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return contador


def ejemplo3(n):
    """Fixed 100-iteration loop: always returns x - n, i.e. n itself."""
    start_time = time()
    x = n * 2
    y = 0
    for m in range(100):
        y = x - n
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return y


def ejemplo4(n):
    """Constant-time arithmetic: a handful of operations, no loops (O(1))."""
    start_time = time()
    x = 3 * 3.1416 + n
    y = x + 3 * 3 - n
    z = x + y
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return z


def ejemplo5(x):
    """Single loop: accumulates 0 + 1 + ... + (x-1) on top of 10 (O(n))."""
    start_time = time()
    n = 10
    for j in range(0, x, 1):
        n = j + n
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return n


def ejemplo6(n):
    """Builds an n*n*n cube of ones and sums every entry (O(n^3))."""
    start_time = time()
    data = [[[1 for x in range(n)] for x in range(n)] for x in range(n)]
    suma = 0
    for d in range(n):
        for r in range(n):
            for c in range(n):
                suma += data[d][r][c]
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return suma


def ejemplo7(n):
    """Triple loop with a constant middle loop: 25*n*n increments (O(n^2))."""
    start_time = time()
    count = 0
    for i in range(n):
        for j in range(25):
            for k in range(n):
                count += 1
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return count


def ejemplo7_2(n):
    """Like ejemplo7 but with two sequential inner n-loops: 50*n*n increments."""
    start_time = time()
    count = 0
    for i in range(n):
        for j in range(25):
            for k in range(n):
                count += 1
            for k in range(n):
                count += 1
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return count
# 1 + 25n^2 +25n^2


def ejemplo8(numeros):
    """Visits each element of the list once; returns the last one visited (O(n))."""
    # numeros es una lista (arreglo en c)
    start_time = time()
    total = 0
    for index in range(len(numeros)):
        total = numeros[index]
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return total
# 1 + n


def ejemplo9(n):
    """Two sequential n-loops, each bumping the counter once per pass (O(n))."""
    start_time = time()
    contador = 0
    basura = 0
    for i in range(n):
        contador += 1
    for j in range(n):
        contador += 1
        basura = basura + contador
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return contador
# 3 + 2n


def ejemplo10(n):
    """Triangular nested loop: counts 1 + 2 + ... + n increments (O(n^2))."""
    start_time = time()
    count = 0
    for i in range(n):
        for j in range(i + 1):
            count += 1
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return count
# 1 + n(n+1)/2


def main():
    """Run every example once with a small fixed input and print each result."""
    vuelta = 1
    h = 15
    print("Ejemplo 2: ", ejemplo2(h))
    print("Ejemplo 3: ", ejemplo3(h))
    print("Ejemplo 4: ", ejemplo4(h))
    print("Ejemplo 5: ", ejemplo5(h))
    print("Ejemplo 6: ", ejemplo6(h))
    print("Ejemplo 7: ", ejemplo7(h))
    print("Ejemplo 7_2: ", ejemplo7_2(h))
    print("Ejemplo 8: ", ejemplo8([1, 2, 3, 4, 5, 6]))
    print("Ejemplo 9: ", ejemplo9(h))
    print("Ejemplo 10: ", ejemplo10(h))
    vuelta += 1


main()
23Octubre.ipynb
// # Automatic generation of Notebook using PyCropML // This notebook implements a crop model. // ### Domain Class EnergybalanceAuxiliary // + #include "EnergybalanceAuxiliary.h" EnergybalanceAuxiliary::EnergybalanceAuxiliary() { } float EnergybalanceAuxiliary::getminTair() {return this-> minTair; } float EnergybalanceAuxiliary::getmaxTair() {return this-> maxTair; } float EnergybalanceAuxiliary::getsolarRadiation() {return this-> solarRadiation; } float EnergybalanceAuxiliary::getvaporPressure() {return this-> vaporPressure; } float EnergybalanceAuxiliary::getextraSolarRadiation() {return this-> extraSolarRadiation; } float EnergybalanceAuxiliary::gethslope() {return this-> hslope; } float EnergybalanceAuxiliary::getplantHeight() {return this-> plantHeight; } float EnergybalanceAuxiliary::getwind() {return this-> wind; } float EnergybalanceAuxiliary::getdeficitOnTopLayers() {return this-> deficitOnTopLayers; } float EnergybalanceAuxiliary::getVPDair() {return this-> VPDair; } float EnergybalanceAuxiliary::getnetRadiation() {return this-> netRadiation; } float EnergybalanceAuxiliary::getnetOutGoingLongWaveRadiation() {return this-> netOutGoingLongWaveRadiation; } float EnergybalanceAuxiliary::getnetRadiationEquivalentEvaporation() {return this-> netRadiationEquivalentEvaporation; } float EnergybalanceAuxiliary::getenergyLimitedEvaporation() {return this-> energyLimitedEvaporation; } float EnergybalanceAuxiliary::getsoilEvaporation() {return this-> soilEvaporation; } void EnergybalanceAuxiliary::setminTair(float _minTair) { this->minTair = _minTair; } void EnergybalanceAuxiliary::setmaxTair(float _maxTair) { this->maxTair = _maxTair; } void EnergybalanceAuxiliary::setsolarRadiation(float _solarRadiation) { this->solarRadiation = _solarRadiation; } void EnergybalanceAuxiliary::setvaporPressure(float _vaporPressure) { this->vaporPressure = _vaporPressure; } void EnergybalanceAuxiliary::setextraSolarRadiation(float _extraSolarRadiation) { this->extraSolarRadiation = 
_extraSolarRadiation; } void EnergybalanceAuxiliary::sethslope(float _hslope) { this->hslope = _hslope; } void EnergybalanceAuxiliary::setplantHeight(float _plantHeight) { this->plantHeight = _plantHeight; } void EnergybalanceAuxiliary::setwind(float _wind) { this->wind = _wind; } void EnergybalanceAuxiliary::setdeficitOnTopLayers(float _deficitOnTopLayers) { this->deficitOnTopLayers = _deficitOnTopLayers; } void EnergybalanceAuxiliary::setVPDair(float _VPDair) { this->VPDair = _VPDair; } void EnergybalanceAuxiliary::setnetRadiation(float _netRadiation) { this->netRadiation = _netRadiation; } void EnergybalanceAuxiliary::setnetOutGoingLongWaveRadiation(float _netOutGoingLongWaveRadiation) { this->netOutGoingLongWaveRadiation = _netOutGoingLongWaveRadiation; } void EnergybalanceAuxiliary::setnetRadiationEquivalentEvaporation(float _netRadiationEquivalentEvaporation) { this->netRadiationEquivalentEvaporation = _netRadiationEquivalentEvaporation; } void EnergybalanceAuxiliary::setenergyLimitedEvaporation(float _energyLimitedEvaporation) { this->energyLimitedEvaporation = _energyLimitedEvaporation; } void EnergybalanceAuxiliary::setsoilEvaporation(float _soilEvaporation) { this->soilEvaporation = _soilEvaporation; } // - // ### Domain Class EnergybalanceRate // + #include "EnergybalanceRate.h" EnergybalanceRate::EnergybalanceRate() { } float EnergybalanceRate::getevapoTranspirationPriestlyTaylor() {return this-> evapoTranspirationPriestlyTaylor; } float EnergybalanceRate::getevapoTranspirationPenman() {return this-> evapoTranspirationPenman; } float EnergybalanceRate::getevapoTranspiration() {return this-> evapoTranspiration; } float EnergybalanceRate::getpotentialTranspiration() {return this-> potentialTranspiration; } float EnergybalanceRate::getsoilHeatFlux() {return this-> soilHeatFlux; } float EnergybalanceRate::getcropHeatFlux() {return this-> cropHeatFlux; } void EnergybalanceRate::setevapoTranspirationPriestlyTaylor(float _evapoTranspirationPriestlyTaylor) { 
this->evapoTranspirationPriestlyTaylor = _evapoTranspirationPriestlyTaylor; } void EnergybalanceRate::setevapoTranspirationPenman(float _evapoTranspirationPenman) { this->evapoTranspirationPenman = _evapoTranspirationPenman; } void EnergybalanceRate::setevapoTranspiration(float _evapoTranspiration) { this->evapoTranspiration = _evapoTranspiration; } void EnergybalanceRate::setpotentialTranspiration(float _potentialTranspiration) { this->potentialTranspiration = _potentialTranspiration; } void EnergybalanceRate::setsoilHeatFlux(float _soilHeatFlux) { this->soilHeatFlux = _soilHeatFlux; } void EnergybalanceRate::setcropHeatFlux(float _cropHeatFlux) { this->cropHeatFlux = _cropHeatFlux; } // - // ### Domain Class EnergybalanceState // + #include "EnergybalanceState.h" EnergybalanceState::EnergybalanceState() { } float EnergybalanceState::getdiffusionLimitedEvaporation() {return this-> diffusionLimitedEvaporation; } float EnergybalanceState::getconductance() {return this-> conductance; } float EnergybalanceState::getminCanopyTemperature() {return this-> minCanopyTemperature; } float EnergybalanceState::getmaxCanopyTemperature() {return this-> maxCanopyTemperature; } void EnergybalanceState::setdiffusionLimitedEvaporation(float _diffusionLimitedEvaporation) { this->diffusionLimitedEvaporation = _diffusionLimitedEvaporation; } void EnergybalanceState::setconductance(float _conductance) { this->conductance = _conductance; } void EnergybalanceState::setminCanopyTemperature(float _minCanopyTemperature) { this->minCanopyTemperature = _minCanopyTemperature; } void EnergybalanceState::setmaxCanopyTemperature(float _maxCanopyTemperature) { this->maxCanopyTemperature = _maxCanopyTemperature; } // - // ### Model Netradiationequivalentevaporation // + #define _USE_MATH_DEFINES #include <cmath> #include <iostream> # include<vector> # include<string> # include<numeric> # include<algorithm> # include<array> #include <map> # include <tuple> #include 
"Netradiationequivalentevaporation.h" using namespace std; Netradiationequivalentevaporation::Netradiationequivalentevaporation() { } float Netradiationequivalentevaporation::getlambdaV() {return this-> lambdaV; } void Netradiationequivalentevaporation::setlambdaV(float _lambdaV) { this->lambdaV = _lambdaV; } void Netradiationequivalentevaporation::Calculate_Model(EnergybalanceState& s, EnergybalanceState& s1, EnergybalanceRate& r, EnergybalanceAuxiliary& a) { //- Name: NetRadiationEquivalentEvaporation -Version: 1.0, -Time step: 1 //- Description: // * Title: NetRadiationEquivalentEvaporation Model // * Author: <NAME> // * Reference: Modelling energy balance in the wheat crop model SiriusQuality2: // Evapotranspiration and canopy and soil temperature calculations // * Institution: INRA/LEPSE Montpellier // * Abstract: It is given by dividing net radiation by latent heat of vaporization of water //- inputs: // * name: lambdaV // ** description : latent heat of vaporization of water // ** parametercategory : constant // ** datatype : DOUBLE // ** default : 2.454 // ** min : 0 // ** max : 10 // ** unit : MJ kg-1 // ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547 // ** inputtype : parameter // * name: netRadiation // ** description : net radiation // ** variablecategory : state // ** datatype : DOUBLE // ** default : 1.566 // ** min : 0 // ** max : 5000 // ** unit : MJ m-2 d-1 // ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547 // ** inputtype : variable //- outputs: // * name: netRadiationEquivalentEvaporation // ** variablecategory : auxiliary // ** description : net Radiation in Equivalent Evaporation // ** datatype : DOUBLE // ** min : 0 // ** max : 5000 // ** unit : g m-2 d-1 // ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547 float netRadiation = s.getnetRadiation(); float netRadiationEquivalentEvaporation; netRadiationEquivalentEvaporation = netRadiation / lambdaV * 1000.0f; 
a.setnetRadiationEquivalentEvaporation(netRadiationEquivalentEvaporation); } // - class Test { EnergybalanceState s = new EnergybalanceState(); EnergybalanceState s1 = new EnergybalanceState(); EnergybalanceRate r = new EnergybalanceRate(); EnergybalanceAuxiliary a = new EnergybalanceAuxiliary(); Netradiationequivalentevaporation mod = new Netradiationequivalentevaporation(); //first); //test1 public void test1() { s.netRadiation = 1.566D; mod.lambdaV = 2.454D; mod.Calculate_netradiationequivalentevaporation(s,s1, r, a); //netRadiationEquivalentEvaporation: 638.142; Console.WriteLine("netRadiationEquivalentEvaporation estimated :"); Console.WriteLine(a.netRadiationEquivalentEvaporation); } } Test t = new Test(); t.test1();
test/Models/energybalance_pkg/test/cpp/Netradiationequivalentevaporation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Queue

# +
class Queue:
    """FIFO queue backed by a plain Python list.

    Note: dequeue() uses list.pop(0), which is O(n) per call — fine for a
    teaching example, but collections.deque is the O(1) alternative.
    """

    def __init__(self):
        # underlying storage; front of the queue is index 0
        self.items = []

    def __str__(self):
        # space-separated rendering of the queue contents, front first
        values = [str(x) for x in self.items]
        return ' '.join(values)

    def isEmpty(self):
        # True when the queue holds no elements
        if len(self.items) == 0:
            return True
        else:
            return False

    def enqueue(self, value):
        # append at the back of the list (tail of the queue)
        self.items.append(value)
        return "The element is inserted at the end of Queue"

    def dequeue(self):
        # remove and return the front element, or a message when empty
        # NOTE(review): the message text has a typo ("Thre") — kept as-is,
        # it is a runtime string callers/printouts may depend on
        if self.isEmpty() is True:
            return "Thre is no element in the Queue"
        else:
            return self.items.pop(0)

    def peek(self):
        # return (without removing) the front element, or a message when empty
        if self.isEmpty() is True:
            return "Thre is no element in the Queue"
        else:
            return self.items[0]

    def delete(self):
        # discard the whole queue
        # NOTE(review): this sets items to None, so any later enqueue/peek
        # would raise; a fresh [] would keep the object usable — TODO confirm intent
        self.items = None


# demo: exercise each operation once
customQueue = Queue()
print("----1. Check the emptiness----")
print(customQueue.isEmpty())
print("----2. Enqueue----")
customQueue.enqueue(1)
customQueue.enqueue(2)
customQueue.enqueue(3)
print(customQueue)
print("----3. Dequeue----")
print(customQueue.dequeue())
print(customQueue)
print("----4. Peek----")
print(customQueue.peek())
print("----5. Delete----")
customQueue.delete()
# -

# ## [1] Circular Queue

# +
class Queue:
    """Fixed-capacity circular (ring-buffer) queue.

    `start` is the index of the front element and `top` the index of the
    back element; both are -1 while the queue is empty. Indices wrap to 0
    when they pass maxSize - 1.
    """

    def __init__(self, maxSize):
        # pre-allocated slot list of fixed capacity
        self.items = maxSize*[None]
        self.maxSize = maxSize
        self.start = -1   # front index (-1 == empty)
        self.top = -1     # back index (-1 == empty)

    def __str__(self):
        # render the raw slot array (unused slots show as None)
        values = [str(x) for x in self.items]
        return ' '.join(values)

    def isFull(self):
        # full when the back index is immediately behind the front index
        if self.top + 1 == self.start:  # wrapped case: start is after top
            return True
        elif self.start == 0 and self.top == self.maxSize-1:  # unwrapped case
            return True
        else:
            return False

    def isEmpty(self):
        # top == -1 is the empty sentinel set by __init__/dequeue/delete
        if self.top == -1:
            return True
        else:
            return False

    def enqueue(self, value):
        # insert at the back, wrapping the back index when it hits capacity
        if self.isFull() == True:
            return "The queue is full"
        else:
            if self.top + 1 == self.maxSize:  # back index wraps to slot 0
                self.top = 0
            else:
                self.top += 1
            if self.start == -1:  # first element: front now exists
                self.start = 0
            self.items[self.top] = value
            return "The element is inserted at the end of Queue"

    def dequeue(self):
        # remove and return the front element, advancing (and wrapping) start
        if self.isEmpty() == True:
            return "The queue is empty"
        else:
            firstElement = self.items[self.start]
            start = self.start
            if self.start == self.top:  # removing the only element: reset to empty
                self.start = -1
                self.top = -1
            elif self.start + 1 == self.maxSize:  # front index wraps to slot 0
                self.start = 0
            else:
                self.start += 1
            self.items[start] = None  # clear the vacated slot
            return firstElement

    def peek(self):
        # return the front element without removing it
        if self.isEmpty():
            return "Queue is empty"
        else:
            return self.items[self.start]

    def delete(self):
        # reset to the freshly-constructed empty state (keeps capacity)
        self.items = self.maxSize * [None]
        self.top = -1
        self.start = -1


# demo: fill to capacity, remove one, then reset
customQueue = Queue(3)
print("----1. Enqueue----")
customQueue.enqueue(1)
customQueue.enqueue(2)
customQueue.enqueue(3)
print(customQueue)
print(customQueue.isFull())
print("----2. Dequeue----")
print(customQueue.dequeue())
print(customQueue)
print("----3. Delete----")
customQueue.delete()
print(customQueue)
# -

# ## [2] Queue using linked list

# +
class Node:
    """Singly-linked node holding one queue value."""

    def __init__(self, value=None):
        self.value = value
        self.next = None

    def __str__(self):
        return str(self.value)


class LinkedList:
    """Minimal singly linked list with head/tail pointers (O(1) at both ends)."""

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self):
        # yield each Node (not its value) from head to tail
        node = self.head
        while node != None:
            yield node
            node = node.next


class Queue:
    """FIFO queue on top of LinkedList: enqueue at tail, dequeue at head — both O(1)."""

    def __init__(self):
        self.linkedList = LinkedList()

    def __str__(self):
        # str(node) renders the node's value, so this prints the values front first
        values = [str(x) for x in self.linkedList]
        return ' '.join(values)

    def enqueue(self, value):
        # append a new node at the tail
        newNode = Node(value)
        if self.linkedList.head == None:  # empty list: node becomes head and tail
            self.linkedList.head = newNode
            self.linkedList.tail = newNode
        else:
            self.linkedList.tail.next = newNode
            self.linkedList.tail = newNode

    def isEmpty(self):
        if self.linkedList.head == None:
            return True
        else:
            return False

    def dequeue(self):
        # detach and return the head *node* (not its value), or a message when empty
        if self.isEmpty() == True:
            return "There is not any node in the Queue"
        else:
            tempNode = self.linkedList.head
            if self.linkedList.head == self.linkedList.tail:  # last element removed
                self.linkedList.head = None
                self.linkedList.tail = None
            else:
                self.linkedList.head = self.linkedList.head.next
            return tempNode

    def peek(self):
        # return the front *value* without removing the node
        if self.isEmpty() == True:
            return "There is not any node in the Queue"
        else:
            return self.linkedList.head.value

    def delete(self):
        # drop all nodes by clearing head and tail
        if self.isEmpty() == True:
            return "There is not any node in the Queue"
        else:
            self.linkedList.head = None
            self.linkedList.tail = None


# demo: same operation sequence as above, on the linked-list variant
custQueue = Queue()
print("----1. enqueue----")
custQueue.enqueue(1)
custQueue.enqueue(2)
custQueue.enqueue(3)
print(custQueue)
print("----2. dequeue----")
print(custQueue.dequeue())
print(custQueue)
print("----3. peek----")
print(custQueue.peek())
print("----4. delete----")
print(custQueue.delete())
# -

# ## Python deque Modules

# +
from collections import deque

# bounded deque: when full, appending on the right silently drops from the left
customQueue = deque(maxlen=3)
print(customQueue)
customQueue.append(1)
customQueue.append(2)
customQueue.append(3)
customQueue.append(4)  # capacity 3: the oldest element (1) is discarded
print(customQueue)
print(customQueue.popleft())  # O(1) removal from the front
print(customQueue)
print(customQueue.clear())  # clear() returns None
print(customQueue)
# -

# ## Python Queue Modules

# +
import queue as q

# thread-safe FIFO queue with a hard capacity of 3 (put() blocks when full)
customQueue = q.Queue(maxsize=3)
print(customQueue.empty())
customQueue.put(1)
customQueue.put(2)
customQueue.put(3)
print(customQueue.full())
print(customQueue.get())
print(customQueue.qsize())
# -

# ## Multiprocessing Module
# process-safe queue; note this import shadows any earlier `Queue` name
from multiprocessing import Queue
11_Queue.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: py37 (Python3) # language: python # name: py37 # --- # # ## *Data Science Unit 4 Sprint 3 Assignment 1* # # # Recurrent Neural Networks and Long Short Term Memory (LSTM) # # ![Monkey at a typewriter](https://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Chimpanzee_seated_at_typewriter.jpg/603px-Chimpanzee_seated_at_typewriter.jpg) # # It is said that [**infinite monkeys typing for an infinite amount of time**](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of <NAME>. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM. # # We will focus specifically on Shakespeare's Sonnets in order to improve our model's ability to learn from the data. # + import random import sys import os import requests import pandas as pd import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.callbacks import LambdaCallback from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Embedding, Bidirectional from tensorflow.keras.layers import LSTM # %matplotlib inline # a custom data prep class that we'll be using from data_cleaning_toolkit_class import data_cleaning_toolkit # - # ### Use request to pull data from a URL # # [**Read through the request documentation**](https://requests.readthedocs.io/en/master/user/quickstart/#make-a-request) in order to learn how to download the Shakespeare Sonnets from the Gutenberg website. # # **Protip:** Do not over think it. 
# + deletable=false nbgrader={"cell_type": "code", "checksum": "6ac79c2e9a53d747ebf8fb41f4b39340", "grade": false, "grade_id": "cell-b8ececfad1f60557", "locked": false, "schema_version": 3, "solution": true, "task": false} # download all of Shakespears Sonnets from the Project Gutenberg website # here's the link for the sonnets url_shakespeare_sonnets = "https://www.gutenberg.org/cache/epub/1041/pg1041.txt" # use request and the url to download all of the sonnets - save the result to `r` # YOUR CODE HERE raise NotImplementedError() # + deletable=false nbgrader={"cell_type": "code", "checksum": "4ab4f4f14188a9f3703d43d223bfa150", "grade": false, "grade_id": "cell-0cd0c8509bc8e8cf", "locked": false, "schema_version": 3, "solution": true, "task": false} # move the downloaded text out of the request object - save the result to `raw_text_data` # hint: take at look at the attributes of `r` # YOUR CODE HERE raise NotImplementedError() # - # check the data type of `raw_text_data` type(raw_text_data) # ### Data Cleaning # as usual, we are tasked with cleaning up messy data # Question: Do you see any characters that we could use to split up the text? raw_text_data[:3000] # + deletable=false nbgrader={"cell_type": "code", "checksum": "13b66e41cc64459f0757f6f53a78e08f", "grade": false, "grade_id": "cell-916f742d2cea299a", "locked": false, "schema_version": 3, "solution": true, "task": false} # split the text into lines and save the result to `split_data` # YOUR CODE HERE raise NotImplementedError() # - # we need to drop all the boilder plate text (i.e. 
titles and descriptions) as well as white spaces # so that we are left with only the sonnets themselves split_data[:20] # **Use list index slicing in order to remove the titles and descriptions so we are only left with the sonnets.** # # + deletable=false nbgrader={"cell_type": "code", "checksum": "00ead0a1024ff72116c24f6b473c1aac", "grade": false, "grade_id": "cell-1f388b88b0eec24a", "locked": false, "schema_version": 3, "solution": true, "task": false} # sonnets exists between these indicies # titles and descriptions exist outside of these indicies # use index slicing to isolate the sonnet lines - save the result to `sonnets` # YOUR CODE HERE raise NotImplementedError() # - # notice how all non-sonnet lines have far less characters than the actual sonnet lines? # well, let's use that observation to filter out all the non-sonnet lines sonnets[200:240] # + deletable=false nbgrader={"cell_type": "code", "checksum": "649cf52260448a5faf539ad6b6e8e6e8", "grade": false, "grade_id": "cell-84c4b3cf1f3c032a", "locked": false, "schema_version": 3, "solution": true, "task": false} # any string with less than n_chars characters will be filtered out - save results to `filtered_sonnets` # YOUR CODE HERE raise NotImplementedError() # - # ok - much better! # but we still need to remove all the punctuation and case normalize the text filtered_sonnets # ### Use custom data cleaning tool # # Use one of the methods in `data_cleaning_toolkit` to clean your data. # # There is an example of this in the guided project. 
# + deletable=false nbgrader={"cell_type": "code", "checksum": "a722083a29139936744ff9a341e1c9a3", "grade": false, "grade_id": "cell-775c14b456d8a724", "locked": false, "schema_version": 3, "solution": true, "task": false} # instantiate the data_cleaning_toolkit class - save result to `dctk` # YOUR CODE HERE raise NotImplementedError() # + deletable=false nbgrader={"cell_type": "code", "checksum": "ab91e612cd08068f3a36172979157d5d", "grade": false, "grade_id": "cell-684010b6a7360876", "locked": false, "schema_version": 3, "solution": true, "task": false} # use data_cleaning_toolkit to remove punctuation and to case normalize - save results to `clean_sonnets` # YOUR CODE HERE raise NotImplementedError() # - # much better! clean_sonnets # ### Use your data tool to create character sequences # # We'll need the `create_char_sequenes` method for this task. However this method requires a parameter call `maxlen` which is responsible for setting the maximum sequence length. # # So what would be a good sequence length, exactly? # # In order to answer that question, let's do some statistics! 
# + deletable=false nbgrader={"cell_type": "code", "checksum": "1deebea2ada0a7dc7d2eb08295ee1e2b", "grade": false, "grade_id": "cell-9ebdaa2654dd29ab", "locked": false, "schema_version": 3, "solution": true, "task": false} def calc_stats(corpus): """ Calculates statisics on the length of every line in the sonnets """ # write a list comprehension that calculates each sonnets line length - save the results to `doc_lens` # use numpy to calcualte and return the mean, median, std, max, min of the doc lens - all in one line of code # YOUR CODE HERE raise NotImplementedError() # - # sonnet line length statistics mean ,med, std, max_, min_ = calc_stats(clean_sonnets) mean, med, std, max_, min_ # + deletable=false nbgrader={"cell_type": "code", "checksum": "690957e46b6f2f32c1f17756d8ceab5b", "grade": false, "grade_id": "cell-35185e26897aad7e", "locked": false, "schema_version": 3, "solution": true, "task": false} # using the results of the sonnet line length statistics, use your judgement and select a value for maxlen # use .create_char_sequences() to create sequences # YOUR CODE HERE raise NotImplementedError() # - # Take a look at the `data_cleaning_toolkit_class.py` file. # # In the first 4 lines of code in the `create_char_sequences` method, class attributes `n_features` and `unique_chars` are created. Let's call them in the cells below. # number of input features for our LSTM model dctk.n_features # unique charactes that appear in our sonnets dctk.unique_chars # ## Time for Questions # # ---- # **Question 1:** # # Why are the `number of unique characters` (i.e. **dctk.unique_chars**) and the `number of model input features` (i.e. **dctk.n_features**) the same? # # **Hint:** The model that we will shortly be building here is very similar to the text generation model that we built in the guided project. # **Answer 1:** # # Write your answer here # # **Question 2:** # # Take a look at the print out of `dctk.unique_chars` one more time. Notice that there is a white space. 
# # Why is it desirable to have a white space as a possible character to predict? # **Answer 2:** # # Write your answer here # ---- # ### Use our data tool to create X and Y splits # # You'll need the `create_X_and_Y` method for this task. # TODO: provide a walk through of data_cleaning_toolkit with unit tests that check for understanding X, y = dctk.create_X_and_Y() # ![](https://miro.medium.com/max/891/0*jGB1CGQ9HdeUwlgB) # notice that our input matrix isn't actually a matrix - it's a rank 3 tensor X.shape # In $X$.shape we see three numbers (*n1*, *n2*, *n3*). What do these numbers mean? # # Well, *n1* tells us the number of samples that we have. But what about the other two? # first index returns a signle sample, which we can see is a sequence first_sample_index = 0 X[first_sample_index] # Notice that each sequence (i.e. $X[i]$ where $i$ is some index value) is `maxlen` long and has `dctk.n_features` number of features. Let's try to better understand this shape. # each sequence is maxlen long and has dctk.n_features number of features X[first_sample_index].shape # **Each row corresponds to a character vector** and there are `maxlen` number of character vectors. # # **Each column corresponds to a unique character** and there are `dctk.n_features` number of features. # # let's index for a single character vector first_char_vect_index = 0 X[first_sample_index][first_char_vect_index] # Notice that there is a single `TRUE` value and all the rest of the values are `FALSE`. # # This is a one-hot encoding for which character appears at each index within a sequence. Specifically, the cell above is looking at the first character in the sequence. # # Only a single character can appear as the first character in a sequence, so there will necessarily be a single `TRUE` value and the rest will be `FALSE`. # # Let's say that `TRUE` appears in the $ith$ index; by $ith$ index we simply mean some index in the general case. 
# How can we find out which character that actually corresponds to?
#
# To answer this question, we need to use the character-to-integer look up dictionaries.

# take a look at the index to character dictionary
# if a TRUE appears in the 0th index of a character vector,
# then we know that whatever char you see below next to the 0th key
# is the character that that character vector is encoding for
dctk.int_char

# +
# let's look at an example to tie it all together
seq_len_counter = 0

# index for a single sample
for seq_of_char_vects in X[first_sample_index]:
    # get index with max value, which will be the one TRUE value
    index_with_TRUE_val = np.argmax(seq_of_char_vects)
    print(dctk.int_char[index_with_TRUE_val])
    seq_len_counter += 1

print("Sequence length: {}".format(seq_len_counter))
# -

# ## Time for Questions
#
# ----
# **Question 1:**
#
# In your own words, how would you describe the numbers from the shape print out of `X.shape` to a fellow classmate?
#
# **Answer 1:**
#
# Write your answer here
# ----
#
# ### Build a Text Generation model
#
# Now that we have prepped our data (and understood that process) let's finally build out our character generation model, similar to what we did in the guided project.

# +
def sample(preds, temperature=1.0):
    """
    Helper function to sample an index from a probability array.

    `preds` is a 1-D probability distribution over characters; `temperature`
    rescales it in log space (values < 1 sharpen it, values > 1 flatten it)
    before one index is drawn at random from the rescaled distribution.
    """
    # convert preds to a float64 array
    preds = np.asarray(preds).astype('float64')
    # scale in log space by the temperature
    # NOTE(review): a zero entry gives log(0) = -inf here; it keeps
    # probability 0 after re-normalisation, but numpy may emit a warning
    preds = np.log(preds) / temperature
    # exponentiate values
    exp_preds = np.exp(preds)
    # this equation should look familiar to you (hint: it's an activation function)
    # — it is a softmax over the temperature-scaled log probabilities
    preds = exp_preds / np.sum(exp_preds)
    # draw one sample from a multinomial distribution (one trial)
    probas = np.random.multinomial(1, preds, 1)
    # argmax of the resulting one-hot draw is the sampled index
    return np.argmax(probas)


def on_epoch_end(epoch, _):
    """
    Function invoked at end of each epoch. Prints the text generated by our model.

    Relies on module-level globals: `text` (the joined corpus), `dctk`
    (the fitted data-cleaning toolkit) and the trained `model`.
    """
    print()
    print('----- Generating text after Epoch: %d' % epoch)

    # randomly pick a starting index
    # will be used to take a random sequence of chars from `text`
    start_index = random.randint(0, len(text) - dctk.maxlen - 1)

    # this is our seed string (i.e. input sequence into the model)
    generated = ''

    # start the sentence at index `start_index` and include the next `dctk.maxlen` chars
    sentence = text[start_index: start_index + dctk.maxlen]

    # add to generated
    generated += sentence

    print('----- Generating with seed: "' + sentence + '"')
    sys.stdout.write(generated)

    # use model to predict what the next 40 chars should be that follow the seed string
    for i in range(40):
        # shape of a single sample in a rank 3 tensor
        x_dims = (1, dctk.maxlen, dctk.n_features)

        # create an array of zeros with shape x_dims
        # recall that python considers zeros and boolean FALSE as the same
        x_pred = np.zeros(x_dims)

        # create a seq vector for our randomly selected sequence
        # i.e. create a numerical (one-hot) encoding for each char in the sequence
        for t, char in enumerate(sentence):
            # for sample 0, seq index t and character `char` encode a 1 (same as TRUE)
            x_pred[0, t, dctk.char_int[char]] = 1

        # pass the seq vector into the model to predict the next char's distribution
        preds = model.predict(x_pred, verbose=0)[0]

        # use the sample helper function to get the index for the next char
        next_index = sample(preds)

        # use the look-up dict to get the next char
        next_char = dctk.int_char[next_index]

        # append the next char, sliding the window one char forward
        sentence = sentence[1:] + next_char

        sys.stdout.write(next_char)
        sys.stdout.flush()
    print()
# -

# need this for on_epoch_end()
text = " ".join(clean_sonnets)

# create callback object that will print out text generation at the end of each epoch
# use for real-time monitoring of model performance
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)

# ----
# ### Train Model
#
# Build a text generation model using LSTMs.
Feel free to reference the model used in the guided project. # # It is recommended that you train this model to at least 50 epochs (but more if you're computer can handle it). # # You are free to change up the architecture as you wish. # # Just in case you have difficultly training a model, there is a pre-trained model saved to a file called `trained_text_gen_model.h5` that you can load in (the same way that you learned how to load in Keras models in Sprint 2 Module 4). # + deletable=false nbgrader={"cell_type": "code", "checksum": "e17312b57e17284124ce562dff81b00d", "grade": false, "grade_id": "cell-f34be90367fd9071", "locked": false, "schema_version": 3, "solution": true, "task": false} # build text generation model layer by layer # fit model # YOUR CODE HERE raise NotImplementedError() # - # save trained model to file model.save("trained_text_gen_model.h5") # ### Let's play with our trained model # # Now that we have a trained model that, though far from perfect, is able to generate actual English words, we can take a look at the predictions to continue to learn more about how a text generation model works. # # We can also take this as an opportunity to unpack the `def on_epoch_end` function to better understand how it works. # this is our joinned clean sonnet data text # randomly pick a starting index # will be used to take a random sequence of chars from `text` # run this cell a few times and you'll see `start_index` is random start_index = random.randint(0, len(text) - dctk.maxlen - 1) start_index # + # next use the randomly selected starting index to sample a sequence from the `text` # this is our seed string (i.e. input seqeunce into the model) generated = '' # start the sentence at index `start_index` and include the next` dctk.maxlen` number of chars sentence = text[start_index: start_index + dctk.maxlen] # add to generated generated += sentence generated # - # this block of code let's us know what the seed string is # i.e. 
the input seqeunce into the model print('----- Generating with seed: "' + sentence + '"') sys.stdout.write(generated) # use model to predict what the next 40 chars should be that follow the seed string for i in range(40): # shape of a single sample in a rank 3 tensor x_dims = (1, dctk.maxlen, dctk.n_features) # create an array of zeros with shape x_dims # recall that python considers zeros and boolean FALSE as the same x_pred = np.zeros(x_dims) # create a seq vector for our randomly select sequence # i.e. create a numerical encoding for each char in the sequence for t, char in enumerate(sentence): # for sample 0 in seq index t and character `char` encode a 1 (which is the same as a TRUE) x_pred[0, t, dctk.char_int[char]] = 1 # next, take the seq vector and pass into model to get a prediction of what the next char should be preds = model.predict(x_pred, verbose=0)[0] # use the sample helper function to get index for next char next_index = sample(preds) # use look up dict to get next char next_char = dctk.int_char[next_index] # append next char to sequence sentence = sentence[1:] + next_char # this is the seed string generated # these are the 40 chars that the model thinks should come after the seed stirng sentence # how put it all together generated + sentence # + [markdown] colab_type="text" id="zE4a4O7Bp5x1" # # Resources and Stretch Goals # + [markdown] colab_type="text" id="uT3UV3gap9H6" # ## Stretch goals: # - Refine the training and generation of text to be able to ask for different genres/styles of Shakespearean text (e.g. plays versus sonnets) # - Train a classification model that takes text and returns which work of Shakespeare it is most likely to be from # - Make it more performant! Many possible routes here - lean on Keras, optimize the code, and/or use more resources (AWS, etc.) 
# - Revisit the news example from class, and improve it - use categories or tags to refine the model/generation, or train a news classifier # - Run on bigger, better data # # ## Resources: # - [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) - a seminal writeup demonstrating a simple but effective character-level NLP RNN # - [Simple NumPy implementation of RNN](https://github.com/JY-Yoon/RNN-Implementation-using-NumPy/blob/master/RNN%20Implementation%20using%20NumPy.ipynb) - Python 3 version of the code from "Unreasonable Effectiveness" # - [TensorFlow RNN Tutorial](https://github.com/tensorflow/models/tree/master/tutorials/rnn) - code for training a RNN on the Penn Tree Bank language dataset # - [4 part tutorial on RNN](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/) - relates RNN to the vanishing gradient problem, and provides example implementation # - [RNN training tips and tricks](https://github.com/karpathy/char-rnn#tips-and-tricks) - some rules of thumb for parameterizing and training your RNN
module1-rnn-and-lstm/LS_DS_431_RNN_and_LSTM_Assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import patsy
from scipy.stats.distributions import chi2

# %matplotlib inline
# -

# Citation/innovation frames for legal opinions (ops) and DBLP scientific articles.
df_ops = pd.read_csv("../data/frames/acl/ops.final", sep=",")
df_dblp = pd.read_csv("../data/frames/acl/dblp.final", sep=",")

# **Univariate plots**

# +
FROM = 1980
TO = 2010

# Citations by *maximum* innovation quantile, side by side for both corpora.
#df_ops["logdeg"] = np.log1p(df_ops["indeg"])
sns.set_context("paper")
fig, ax = plt.subplots(1, 2, figsize=(6, 1.8), sharey=False)
sns.barplot(y="indeg", x="max_prog_q",
            data=df_ops[(df_ops["year"] > FROM) & (df_ops["year"] <= TO)],
            ax=ax[0], color='green', order=["<25", "<50", "<75", "<100"])
ax[0].set_title("Legal opinions")
ax[0].set_ylabel("Number of citations")
ax[0].set_xlabel("Max innovation quantile")
ax[0].set_xticklabels(["Q1", "Q2", "Q3", "Q4"])

#df_dblp["logdeg"] = np.log1p(df_dblp["indeg"])
sns.barplot(y="indeg", x="max_prog_q",
            data=df_dblp[(df_dblp["year"] > FROM) & (df_dblp["year"] <= TO)],
            ax=ax[1], color='brown', order=["<25", "<50", "<75", "<100"])
ax[1].set_title("Scientific articles")
ax[1].set_ylabel("Number of citations")
ax[1].set_xlabel("Max innovation quantile")
ax[1].set_xticklabels(["Q1", "Q2", "Q3", "Q4"])

plt.subplots_adjust(wspace=.3)
plt.axis('tight')
plt.savefig('../figs/acl/cites-by-max-progressiveness.pdf', bbox_inches='tight')

# +
FROM = 1980
TO = 2010

# Same plot, but binned by the *number* of innovations quantile.
#df_ops["logdeg"] = np.log1p(df_ops["indeg"])
sns.set_context("paper")
fig, ax = plt.subplots(1, 2, figsize=(6, 1.8), sharey=False)
sns.barplot(y="indeg", x="nprog50_q",
            data=df_ops[(df_ops["year"] > FROM) & (df_ops["year"] <= TO)],
            ax=ax[0], color='green', order=["<25", "<50", "<75", "<100"])
ax[0].set_title("Legal opinions")
ax[0].set_ylabel("Number of citations")
ax[0].set_xlabel("Num innovations quantile")
ax[0].set_xticklabels(["Q1", "Q2", "Q3", "Q4"])

#df_dblp["logdeg"] = np.log1p(df_dblp["indeg"])
sns.barplot(y="indeg", x="nprog50_q",
            data=df_dblp[(df_dblp["year"] > FROM) & (df_dblp["year"] <= TO)],
            ax=ax[1], color='brown', order=["<25", "<50", "<75", "<100"])
ax[1].set_title("Scientific articles")
ax[1].set_ylabel("Number of citations")
ax[1].set_xlabel("Num innovations quantile")
ax[1].set_xticklabels(["Q1", "Q2", "Q3", "Q4"])

plt.subplots_adjust(wspace=.3)
plt.axis('tight')
plt.savefig('../figs/acl/cites-by-num-progressiveness.pdf', bbox_inches='tight')
# -

# **Multivariate regressions**

# +
# Nested model sequence: baseline covariates, then one innovation predictor at a
# time (continuous max_prog / nprog50, or their quartile-dummy versions).
formulas_ops = ["indeg ~ age + outdeg + ntokens + bows"]
formulas_ops.append(formulas_ops[0]+" + max_prog")
# y ~ C(Location, Treatment(reference='China'))
formulas_ops.append(formulas_ops[0]+" + C(max_prog_q, Treatment(reference='<25'))")
formulas_ops.append(formulas_ops[0]+" + nprog50")
formulas_ops.append(formulas_ops[0]+" + C(nprog50_q, Treatment(reference='<25'))")

formulas_dblp = ["indeg ~ age + outdeg + nauthors + ntokens + bows"]
formulas_dblp.append(formulas_dblp[0]+" + max_prog")
formulas_dblp.append(formulas_dblp[0]+" + C(max_prog_q, Treatment(reference='<25'))")
formulas_dblp.append(formulas_dblp[0]+" + nprog50")
formulas_dblp.append(formulas_dblp[0]+" + C(nprog50_q, Treatment(reference='<25'))")
# -

def runAllRegressions(df, formulas):
    """Fit a Poisson GLM for each patsy formula and return the fitted results."""
    results = list()
    for formula in formulas:
        Y, X = patsy.dmatrices(formula, data=df)
        results.append(sm.GLM(Y, X, family=sm.families.Poisson()).fit())
    return results

# +
results_ops = runAllRegressions(df_ops, formulas_ops)
lls_ops = [result.llf for result in results_ops]
results_dblp = runAllRegressions(df_dblp, formulas_dblp)
lls_dblp = [result.llf for result in results_dblp]
print(lls_ops)
print(lls_dblp)
# -

def goodness_of_fit_tests(lls, formulas):
    """Pairwise likelihood-ratio tests between the fitted (nested) models.

    lls      -- log-likelihoods, one per formula, in the same order
    formulas -- the patsy formulas used to fit the models
    """
    def _n_params(formula):
        # Each plain term contributes one coefficient; a categorical quartile
        # term C(..., Treatment(reference='<25')) expands into 3 dummies.
        return sum(3 if term.strip().startswith("C(")
                   else 1 for term in formula.split("+"))

    for i, ll_0 in enumerate(lls):
        for j, ll_1 in enumerate(lls[:i]):
            # BUG FIX: the LR statistic is 2*(ll_full - ll_reduced); the factor
            # of 2 was missing.  The old dof correction (`if i == 3: dof += 2`)
            # also fired for the single-parameter nprog50 model instead of the
            # categorical models (indices 2 and 4); we now count parameters
            # directly from the formulas.
            chi2_score = 2 * (ll_0 - ll_1)
            dof = _n_params(formulas[i]) - _n_params(formulas[j])
            print(i, j, f'\\chi^2_{dof} = {chi2_score:.2f}, p < {chi2.sf(chi2_score, dof):.3e}')

goodness_of_fit_tests(lls_ops, formulas_ops)

# **Write the results as Latex tables**

# +
def get_latex_table_string(model_results, coeff):
    """One LaTeX table row of coefficient estimates for `coeff` across models.

    `coeff` is either a single exog name or a (name_a, name_b) pair of
    alternative names (e.g. the max_prog / nprog50 variants of one predictor).
    """
    def get_latex_coeff_table_string(c, model_result):
        return "{0:.3f}".format(round(model_result.params[model_result.model.exog_names.index(c)], 3))

    def get_latex_coeff_pair_table_string(c1, c2, model_result):
        if c1 in model_result.model.exog_names:
            return get_latex_coeff_table_string(c1, model_result)
        else:
            return get_latex_coeff_table_string(c2, model_result)

    # Human-readable row labels (duplicated in get_latex_stderror_string below).
    goodnames = {
        "Intercept": "Constant",
        ("C(max_prog_q, Treatment(reference='<25'))[T.<50]", "C(nprog50_q, Treatment(reference='<25'))[T.<50]"): "Prog. Q2",
        ("C(max_prog_q, Treatment(reference='<25'))[T.<75]", "C(nprog50_q, Treatment(reference='<25'))[T.<75]"): "Prog. Q3",
        ("C(max_prog_q, Treatment(reference='<25'))[T.<100]", "C(nprog50_q, Treatment(reference='<25'))[T.<100]"): "Prog. Q4",
        "age": "Age",
        "outdeg": "Outdegree",
        "bows": "BoWs",
        "ntokens": "Length",
        "nauthors": "No. of Authors",
        ("max_prog", "nprog50"): "Prog."
    }

    if type(coeff) == tuple:
        c1, c2 = coeff
        cols = [" " if c1 not in model_result.model.exog_names and c2 not in model_result.model.exog_names
                else get_latex_coeff_pair_table_string(c1, c2, model_result)
                for model_result in model_results]
    else:
        cols = [" " if coeff not in model_result.model.exog_names
                else get_latex_coeff_table_string(coeff, model_result)
                for model_result in model_results]

    return "&".join([goodnames[coeff]] + cols) + "\\\\"

def get_latex_stderror_string(model_results, coeff):
    """Matching LaTeX row of standard errors (in parentheses) for `coeff`."""
    def get_latex_coeff_stderror_string(c, model_result):
        return "({0:.3f})".format(round(model_result.bse[model_result.model.exog_names.index(c)], 3))

    def get_latex_coeff_pair_stderror_string(c1, c2, model_result):
        if c1 in model_result.model.exog_names:
            return get_latex_coeff_stderror_string(c1, model_result)
        else:
            return get_latex_coeff_stderror_string(c2, model_result)

    goodnames = {
        "Intercept": "Constant",
        ("C(max_prog_q, Treatment(reference='<25'))[T.<50]", "C(nprog50_q, Treatment(reference='<25'))[T.<50]"): "Prog. Q2",
        ("C(max_prog_q, Treatment(reference='<25'))[T.<75]", "C(nprog50_q, Treatment(reference='<25'))[T.<75]"): "Prog. Q3",
        ("C(max_prog_q, Treatment(reference='<25'))[T.<100]", "C(nprog50_q, Treatment(reference='<25'))[T.<100]"): "Prog. Q4",
        "age": "Age",
        "outdeg": "Outdegree",
        "bows": "BoWs",
        "ntokens": "Length",
        "nauthors": "No. of Authors",
        ("max_prog", "nprog50"): "Prog."
    }

    if type(coeff) == tuple:
        c1, c2 = coeff
        cols = [" " if c1 not in model_result.model.exog_names and c2 not in model_result.model.exog_names
                else get_latex_coeff_pair_stderror_string(c1, c2, model_result)
                for model_result in model_results]
    else:
        cols = [" " if coeff not in model_result.model.exog_names
                else get_latex_coeff_stderror_string(coeff, model_result)
                #else "({0:.3f})".format(round(model_result.bse[model_result.model.exog_names.index(coeff)], 3))
                for model_result in model_results]
    #cols = list (map (lambda x:"({0})".format (x), cols))
    return "&".join([" "] + cols) + "\\\\[4pt]"

def get_latex_ll_string(model_results):
    """Final LaTeX row: rounded log-likelihood of every model."""
    cols = ["{0}".format(str(int(round(model_result.llf)))) for model_result in model_results]
    return "&".join(["Log Lik."] + cols)
# -

def results_to_latex(model_results, coeffs_order, header_format="lrrrrr"):
    """Print a full LaTeX tabular of the regression results (M1..M5)."""
    # header part
    #print ("\\begin{tabular}{{0}}".format (header_format))
    print("\\begin{tabular}" + "{" + "{0}".format(header_format) + "}")
    print("\\toprule\n")
    print("& & Models & & & \\\\\n")
    print("\midrule\n")
    print("Predictors & M1 & M2 & M3 & M4 & M5\\\\\n")
    print(" & & \\textsc{max-innov} & \\textsc{max-innov} & \\textsc{num-innovs} & \\textsc{num-innovs}\\\\\n")
    print("\\toprule\n")

    # core part: one estimate row + one std-error row per coefficient
    for coeff in coeffs_order:
        print(get_latex_table_string(model_results, coeff))
        print(get_latex_stderror_string(model_results, coeff))

    print(get_latex_ll_string(model_results))
    print("\\\\")
    print("\\bottomrule\n")
    print("\\end{tabular}\n")

#lp{1.1cm}p{1.1cm}p{1.1cm}p{1.1cm}p{1.1cm}
results_to_latex(results_ops,
                 ["Intercept", "outdeg", "age", "ntokens", "bows",
                  ("max_prog", "nprog50"),
                  ("C(max_prog_q, Treatment(reference='<25'))[T.<50]", "C(nprog50_q, Treatment(reference='<25'))[T.<50]"),
                  ("C(max_prog_q, Treatment(reference='<25'))[T.<75]", "C(nprog50_q, Treatment(reference='<25'))[T.<75]"),
                  ("C(max_prog_q, Treatment(reference='<25'))[T.<100]", "C(nprog50_q, Treatment(reference='<25'))[T.<100]")],
                 header_format="lp{1.1cm}p{1.1cm}p{1.1cm}p{1.1cm}p{1.1cm}")

results_to_latex(results_dblp,
                 ["Intercept", "outdeg", "nauthors", "age", "ntokens", "bows",
                  ("max_prog", "nprog50"),
                  ("C(max_prog_q, Treatment(reference='<25'))[T.<50]", "C(nprog50_q, Treatment(reference='<25'))[T.<50]"),
                  ("C(max_prog_q, Treatment(reference='<25'))[T.<75]", "C(nprog50_q, Treatment(reference='<25'))[T.<75]"),
                  ("C(max_prog_q, Treatment(reference='<25'))[T.<100]", "C(nprog50_q, Treatment(reference='<25'))[T.<100]")])

results_to_latex(results_dblp,
                 ["Intercept", "outdeg", "nauthors", "age", "ntokens", "bows",
                  "max_prog",
                  "C(max_prog_q, Treatment(reference='<25'))[T.<50]",
                  "C(max_prog_q, Treatment(reference='<25'))[T.<75]",
                  "C(max_prog_q, Treatment(reference='<25'))[T.<100]"])
notebooks/acl.results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sproboticworks/ml-course/blob/master/IMDB%20Movie%20Reviews%20Sentiment%20Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="JHwlcRtzTlSW" colab_type="text"
# # Import Packages

# + id="VL8kfmtETgEa" colab_type="code" colab={}
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow_datasets as tfds

# + [markdown] id="uheteGtLTp9o" colab_type="text"
# # Load IMDB dataset

# + id="dr5zWVVP1Atj" colab_type="code" colab={}
imdb, info = tfds.load("imdb_reviews", with_info=True, as_supervised=True)

# + id="sJaKEdePj6cB" colab_type="code" colab={}
train_data, test_data = imdb['train'], imdb['test']

# + id="hZBGLm841RDW" colab_type="code" colab={}
training_sentences = []
training_labels = []

testing_sentences = []
testing_labels = []

# BUG FIX: str(s.numpy()) stringified the raw bytes as the literal "b'...'",
# prefixing every review with spurious characters; decode the bytes instead.
for s, l in train_data:
    training_sentences.append(s.numpy().decode('utf8'))
    training_labels.append(l.numpy())

for s, l in test_data:
    testing_sentences.append(s.numpy().decode('utf8'))
    testing_labels.append(l.numpy())

# + id="kRNXvuzej1HE" colab_type="code" colab={}
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)

# + [markdown] id="gg-C6gWdU0VM" colab_type="text"
# # Tokenization

# + id="kQwdFIUqUiip" colab_type="code" colab={}
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = "<OOV>"

# + id="AKNWbQrun2NC" colab_type="code" colab={}
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index

# + id="puocklgen3z8" colab_type="code" colab={}
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen=max_length, truncating=trunc_type)

testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
# CONSISTENCY FIX: truncate the test sequences the same way as the training
# sequences (the truncating argument was missing here).
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, truncating=trunc_type)

# + id="Q8hhfTRg1w5_" colab_type="code" colab={}
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

def decode_review(text):
    """Map a padded integer sequence back to words ('?' for unknown ids)."""
    return ' '.join([reverse_word_index.get(i, '?') for i in text])

print(decode_review(padded[0]))
print(training_sentences[0])

# + [markdown] id="tgOvioZS13zH" colab_type="text"
# # Build Model

# + id="UnWxwnnI178Z" colab_type="code" colab={}
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Flatten(),
    #tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()

# + id="Y-1TTly23H2A" colab_type="code" colab={}
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# + [markdown] id="JnuvoRLm1--a" colab_type="text"
# # Train Model

# + id="NPHZeXum172s" colab_type="code" colab={}
num_epochs = 10
history = model.fit(padded, training_labels_final, epochs=num_epochs,
                    validation_data=(testing_padded, testing_labels_final))

# + [markdown] id="nLkk7PhjEikV" colab_type="text"
# # Visualize the training graph

# + id="eqyKczj6Eh-L" colab_type="code" colab={}
import matplotlib.pyplot as plt

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(num_epochs)

plt.figure(figsize=(10, 6))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
#plt.savefig('./foo.png')
plt.show()

# + [markdown] id="71TjjoBK2Ezl" colab_type="text"
# # Download Embedding files

# + id="1WsXa1W-2Htm" colab_type="code" colab={}
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape)  # shape: (vocab_size, embedding_dim)

# + id="Z-2GMraw2PUM" colab_type="code" colab={}
import io

out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
# Row 0 is reserved for padding, so start at index 1.
for word_num in range(1, vocab_size):
    word = reverse_word_index[word_num]
    embeddings = weights[word_num]
    out_m.write(word + "\n")
    out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()

# + id="qcCvRjsH2R1o" colab_type="code" colab={}
try:
    from google.colab import files
except ImportError:
    pass
else:
    files.download('vecs.tsv')
    files.download('meta.tsv')

# + [markdown] id="SQsmwquESbDy" colab_type="text"
# # Predicting Sentiment in new Reviews

# + id="mxGS7jqBSflm" colab_type="code" colab={}
# Use the model to predict a review
fake_reviews = ['I love this phone', 'I hate spaghetti',
                'Everything was cold',
                'Everything was hot exactly as I wanted',
                'Everything was green',
                'the host seated us immediately',
                'they gave us free chocolate cake',
                'not sure about the wilted flowers on the table',
                'only works when I stand on tippy toes',
                'does not work when I stand on my head']

print(fake_reviews)

# Create the sequences
padding_type = 'post'
sample_sequences = tokenizer.texts_to_sequences(fake_reviews)
fakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length)

print('\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\n')

classes = model.predict(fakes_padded)

# The closer the class is to 1, the more positive the review is deemed to be
for x in range(len(fake_reviews)):
    print(fake_reviews[x])
    print(classes[x])
    print('\n')

# Try adding reviews of your own
# Add some negative words (such as "not") to the good reviews and see what happens
# For example:
# they gave us free chocolate cake and did not charge us
IMDB Movie Reviews Sentiment Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # Numerical Stability and Initialization # :label:`sec_numerical_stability` # # # Thus far, every model that we have implemented # required that we initialize its parameters # according to some pre-specified distribution. # Until now, we took the initialization scheme for granted, # glossing over the details of how these choices are made. # You might have even gotten the impression that these choices # are not especially important. # To the contrary, the choice of initialization scheme # plays a significant role in neural network learning, # and it can be crucial for maintaining numerical stability. # Moreover, these choices can be tied up in interesting ways # with the choice of the nonlinear activation function. # Which function we choose and how we initialize parameters # can determine how quickly our optimization algorithm converges. # Poor choices here can cause us to encounter # exploding or vanishing gradients while training. # In this section, we delve into these topics with greater detail # and discuss some useful heuristics # that you will find useful # throughout your career in deep learning. # # # ## Vanishing and Exploding Gradients # # Consider a deep network with $L$ layers, # input $\mathbf{x}$ and output $\mathbf{o}$. 
# With each layer $l$ defined by a transformation $f_l$ # parameterized by weights $\mathbf{W}^{(l)}$, # whose hidden variable is $\mathbf{h}^{(l)}$ (let $\mathbf{h}^{(0)} = \mathbf{x}$), # our network can be expressed as: # # $$\mathbf{h}^{(l)} = f_l (\mathbf{h}^{(l-1)}) \text{ and thus } \mathbf{o} = f_L \circ \ldots \circ f_1(\mathbf{x}).$$ # # If all the hidden variables and the input are vectors, # we can write the gradient of $\mathbf{o}$ with respect to # any set of parameters $\mathbf{W}^{(l)}$ as follows: # # $$\partial_{\mathbf{W}^{(l)}} \mathbf{o} = \underbrace{\partial_{\mathbf{h}^{(L-1)}} \mathbf{h}^{(L)}}_{ \mathbf{M}^{(L)} \stackrel{\mathrm{def}}{=}} \cdot \ldots \cdot \underbrace{\partial_{\mathbf{h}^{(l)}} \mathbf{h}^{(l+1)}}_{ \mathbf{M}^{(l+1)} \stackrel{\mathrm{def}}{=}} \underbrace{\partial_{\mathbf{W}^{(l)}} \mathbf{h}^{(l)}}_{ \mathbf{v}^{(l)} \stackrel{\mathrm{def}}{=}}.$$ # # In other words, this gradient is # the product of $L-l$ matrices # $\mathbf{M}^{(L)} \cdot \ldots \cdot \mathbf{M}^{(l+1)}$ # and the gradient vector $\mathbf{v}^{(l)}$. # Thus we are susceptible to the same # problems of numerical underflow that often crop up # when multiplying together too many probabilities. # When dealing with probabilities, a common trick is to # switch into log-space, i.e., shifting # pressure from the mantissa to the exponent # of the numerical representation. # Unfortunately, our problem above is more serious: # initially the matrices $\mathbf{M}^{(l)}$ may have a wide variety of eigenvalues. # They might be small or large, and # their product might be *very large* or *very small*. # # The risks posed by unstable gradients # go beyond numerical representation. # Gradients of unpredictable magnitude # also threaten the stability of our optimization algorithms. 
# We may be facing parameter updates that are either # (i) excessively large, destroying our model # (the *exploding gradient* problem); # or (ii) excessively small # (the *vanishing gradient* problem), # rendering learning impossible as parameters # hardly move on each update. # # # ### (**Vanishing Gradients**) # # One frequent culprit causing the vanishing gradient problem # is the choice of the activation function $\sigma$ # that is appended following each layer's linear operations. # Historically, the sigmoid function # $1/(1 + \exp(-x))$ (introduced in :numref:`sec_mlp`) # was popular because it resembles a thresholding function. # Since early artificial neural networks were inspired # by biological neural networks, # the idea of neurons that fire either *fully* or *not at all* # (like biological neurons) seemed appealing. # Let us take a closer look at the sigmoid # to see why it can cause vanishing gradients. # # + origin_pos=3 tab=["tensorflow"] # %matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) # + [markdown] origin_pos=4 # As you can see, (**the sigmoid's gradient vanishes # both when its inputs are large and when they are small**). # Moreover, when backpropagating through many layers, # unless we are in the Goldilocks zone, where # the inputs to many of the sigmoids are close to zero, # the gradients of the overall product may vanish. # When our network boasts many layers, # unless we are careful, the gradient # will likely be cut off at some layer. # Indeed, this problem used to plague deep network training. # Consequently, ReLUs, which are more stable # (but less neurally plausible), # have emerged as the default choice for practitioners. 
# # # ### [**Exploding Gradients**] # # The opposite problem, when gradients explode, # can be similarly vexing. # To illustrate this a bit better, # we draw 100 Gaussian random matrices # and multiply them with some initial matrix. # For the scale that we picked # (the choice of the variance $\sigma^2=1$), # the matrix product explodes. # When this happens due to the initialization # of a deep network, we have no chance of getting # a gradient descent optimizer to converge. # # + origin_pos=7 tab=["tensorflow"] M = tf.random.normal((4, 4)) print('a single matrix \n', M) for i in range(100): M = tf.matmul(M, tf.random.normal((4, 4))) print('after multiplying 100 matrices\n', M.numpy()) # + [markdown] origin_pos=8 # ### Breaking the Symmetry # # Another problem in neural network design # is the symmetry inherent in their parametrization. # Assume that we have a simple MLP # with one hidden layer and two units. # In this case, we could permute the weights $\mathbf{W}^{(1)}$ # of the first layer and likewise permute # the weights of the output layer # to obtain the same function. # There is nothing special differentiating # the first hidden unit vs. the second hidden unit. # In other words, we have permutation symmetry # among the hidden units of each layer. # # This is more than just a theoretical nuisance. # Consider the aforementioned one-hidden-layer MLP # with two hidden units. # For illustration, # suppose that the output layer transforms the two hidden units into only one output unit. # Imagine what would happen if we initialized # all of the parameters of the hidden layer # as $\mathbf{W}^{(1)} = c$ for some constant $c$. # In this case, during forward propagation # either hidden unit takes the same inputs and parameters, # producing the same activation, # which is fed to the output unit. # During backpropagation, # differentiating the output unit with respect to parameters $\mathbf{W}^{(1)}$ gives a gradient whose elements all take the same value. 
# Thus, after gradient-based iteration (e.g., minibatch stochastic gradient descent), # all the elements of $\mathbf{W}^{(1)}$ still take the same value. # Such iterations would # never *break the symmetry* on its own # and we might never be able to realize # the network's expressive power. # The hidden layer would behave # as if it had only a single unit. # Note that while minibatch stochastic gradient descent would not break this symmetry, # dropout regularization would! # # # ## Parameter Initialization # # One way of addressing---or at least mitigating---the # issues raised above is through careful initialization. # Additional care during optimization # and suitable regularization can further enhance stability. # # # ### Default Initialization # # In the previous sections, e.g., in :numref:`sec_linear_concise`, # we used a normal distribution # to initialize the values of our weights. # If we do not specify the initialization method, the framework will # use a default random initialization method, which often works well in practice # for moderate problem sizes. # # # # # # # ### Xavier Initialization # :label:`subsec_xavier` # # Let us look at the scale distribution of # an output (e.g., a hidden variable) $o_{i}$ for some fully-connected layer # *without nonlinearities*. # With $n_\mathrm{in}$ inputs $x_j$ # and their associated weights $w_{ij}$ for this layer, # an output is given by # # $$o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j.$$ # # The weights $w_{ij}$ are all drawn # independently from the same distribution. # Furthermore, let us assume that this distribution # has zero mean and variance $\sigma^2$. # Note that this does not mean that the distribution has to be Gaussian, # just that the mean and variance need to exist. # For now, let us assume that the inputs to the layer $x_j$ # also have zero mean and variance $\gamma^2$ # and that they are independent of $w_{ij}$ and independent of each other. 
# In this case, we can compute the mean and variance of $o_i$ as follows: # # $$ # \begin{aligned} # E[o_i] & = \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\&= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] \\&= 0, \\ # \mathrm{Var}[o_i] & = E[o_i^2] - (E[o_i])^2 \\ # & = \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ # & = \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ # & = n_\mathrm{in} \sigma^2 \gamma^2. # \end{aligned} # $$ # # One way to keep the variance fixed # is to set $n_\mathrm{in} \sigma^2 = 1$. # Now consider backpropagation. # There we face a similar problem, # albeit with gradients being propagated from the layers closer to the output. # Using the same reasoning as for forward propagation, # we see that the gradients' variance can blow up # unless $n_\mathrm{out} \sigma^2 = 1$, # where $n_\mathrm{out}$ is the number of outputs of this layer. # This leaves us in a dilemma: # we cannot possibly satisfy both conditions simultaneously. # Instead, we simply try to satisfy: # # $$ # \begin{aligned} # \frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently } # \sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}. # \end{aligned} # $$ # # This is the reasoning underlying the now-standard # and practically beneficial *Xavier initialization*, # named after the first author of its creators :cite:`Glorot.Bengio.2010`. # Typically, the Xavier initialization # samples weights from a Gaussian distribution # with zero mean and variance # $\sigma^2 = \frac{2}{n_\mathrm{in} + n_\mathrm{out}}$. # We can also adapt Xavier's intuition to # choose the variance when sampling weights # from a uniform distribution. # Note that the uniform distribution $U(-a, a)$ has variance $\frac{a^2}{3}$. 
# Plugging $\frac{a^2}{3}$ into our condition on $\sigma^2$ # yields the suggestion to initialize according to # # $$U\left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right).$$ # # Though the assumption for nonexistence of nonlinearities # in the above mathematical reasoning # can be easily violated in neural networks, # the Xavier initialization method # turns out to work well in practice. # # # ### Beyond # # The reasoning above barely scratches the surface # of modern approaches to parameter initialization. # A deep learning framework often implements over a dozen different heuristics. # Moreover, parameter initialization continues to be # a hot area of fundamental research in deep learning. # Among these are heuristics specialized for # tied (shared) parameters, super-resolution, # sequence models, and other situations. # For instance, # Xiao et al. demonstrated the possibility of training # 10000-layer neural networks without architectural tricks # by using a carefully-designed initialization method :cite:`Xiao.Bahri.Sohl-Dickstein.ea.2018`. # # If the topic interests you we suggest # a deep dive into this module's offerings, # reading the papers that proposed and analyzed each heuristic, # and then exploring the latest publications on the topic. # Perhaps you will stumble across or even invent # a clever idea and contribute an implementation to deep learning frameworks. # # # ## Summary # # * Vanishing and exploding gradients are common issues in deep networks. Great care in parameter initialization is required to ensure that gradients and parameters remain well controlled. # * Initialization heuristics are needed to ensure that the initial gradients are neither too large nor too small. # * ReLU activation functions mitigate the vanishing gradient problem. This can accelerate convergence. # * Random initialization is key to ensure that symmetry is broken before optimization. 
# * Xavier initialization suggests that, for each layer, variance of any output is not affected by the number of inputs, and variance of any gradient is not affected by the number of outputs. # # ## Exercises # # 1. Can you design other cases where a neural network might exhibit symmetry requiring breaking besides the permutation symmetry in an MLP's layers? # 1. Can we initialize all weight parameters in linear regression or in softmax regression to the same value? # 1. Look up analytic bounds on the eigenvalues of the product of two matrices. What does this tell you about ensuring that gradients are well conditioned? # 1. If we know that some terms diverge, can we fix this after the fact? Look at the paper on layer-wise adaptive rate scaling for inspiration :cite:`You.Gitman.Ginsburg.2017`. # # + [markdown] origin_pos=11 tab=["tensorflow"] # [Discussions](https://discuss.d2l.ai/t/235) #
scripts/d21-en/tensorflow/chapter_multilayer-perceptrons/numerical-stability-and-init.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <div>
# <img src="figures/svtLogo.png"/>
# </div>
# <h1><center>Mathematical Optimization for Engineers</center></h1>
# <h2><center>Lab 12 - Direct single shooting</center></h2>

# In this exercise, we want to solve the van der Pol oscillator optimal control problem using the direct single shooting approach.
# The problem without state constraints is as follows:
# $\begin{align}
# \min_{u(t)} \int_{t_{0}}^{t_{f}} x_1^2(t) + x_2^{2}(t) + u(t)^2 dt\\
# s.t. \dot{x} = f(x(t),u(t))\\
# x(t_{0}) = x_{0} \\
# u_{lb} \leq u(t) \leq u_{ub}\\
# \end{align}
# $
#
# For direct single shooting, the differential equations are solved by an integration scheme, which is needed to be set-up.
#

# +
import numpy as np
from scipy.integrate import odeint, solve_ivp
from scipy.optimize import fmin, fmin_bfgs, Bounds, minimize, NonlinearConstraint
from matplotlib import pyplot as plt
from matplotlib import animation
# -

def eval_rhs(t, x, u, p):
    """Right-hand side f(t, x, u, p) of the van der Pol ODE (to be filled in)."""
    # your code
    return f

def objective_ode_integration(p, func, x0, finalTime, output_state, is_path_constraint, doPlot):
    # inputs are as follows
    # p = parameter vector of control discretization
    # func: right-hand side of ODE f(t, x, u, p)
    # x0 = initial condition
    # finalTime = tN
    # output_state = index of the state used as objective / constraint value
    # is_path_constraint = if True, return the trajectory of output_state at
    #                      every interval end; if False, only its final value
    # doPlot = boolean variable, if true, plot solution
    N = len(p)  # Number of discretization intervals
    h = finalTime / N  # length of a single interval
    n_x = len(x0)  # number of states
    x0_cur = x0
    y_all = np.array([x0]).transpose()
    t_all = np.array([0])
    output = list()
    for i in range(N):  # loop over discretization intervals
        # solve initial value problem
        # your code

        # update initial conditions
        # your code

        y_all = np.concatenate((y_all, sol.y), axis=1)
        t_all = np.concatenate((t_all, sol.t), axis=0)
        # BUG FIX: `y` was undefined here; the integrator solution is `sol.y`
        # (consistent with the two concatenate calls above).
        output.append(sol.y[output_state, -1])

    if doPlot:
        y_all = np.array(y_all)
        plt.subplot(221)
        plt.plot(t_all, y_all[0, :])
        plt.title('x1')
        plt.subplot(222)
        plt.plot(t_all, y_all[1, :])
        plt.title('x2')
        plt.subplot(223)
        plt.plot(t_all, y_all[2, :])
        plt.title('x3')
        #
        plt.subplot(224)
        plt.plot(np.linspace(0, finalTime, N), p)
        plt.title('u')
        plt.tight_layout()
        plt.show()

    if not is_path_constraint:
        output = output[-1]
    else:
        # constraint hard coded here to allow for
        # constraint definition with arguments
        # in scipy, c(x) >= 0
        # your code here
        pass  # placeholder until the path constraint is implemented
    return output

objective_ode_integration([1.0, 0.5, 0.2, 0.4], eval_rhs, [0, 1, 0], 5, [], [], True)

# +
finalTime = 5
N = 25  # Number of discretization intervals for the control
doPlot = False
p0 = 1.0*np.ones(N)  # initial guess for optimization
x0 = [0, 1, 0]
bnds = Bounds(-0.3, 1.0)

# define objective
output_state = 2  # for objective
is_path_constraint = False

# call optimization with arguments
# 'eval_rhs, x0, finalTime, output_state, is_path_constraint, doPlot'
# your code here

# call ODE integration to plot trajectories with optimal solution of controls
# your code here
# -

# ## Adding state path constraint
# After solving the problem without state constraints, we want to include the path constraint on the state $x_1$:
# $ \begin{align}
# x_1(t) \geq - 0.4 \forall t \in [t_{0},t_{f}]
# \end{align}$
# For this task, we must add this constraint to the optimization problem. One way is to define the constraint function as constraint dictionary in Python.

# +
# in scipy, the inequality constraints are defined as
# c(x) >= 0
cons = ({'type': 'ineq',
         'fun': objective_ode_integration,
         'args': (eval_rhs, x0, finalTime, 0, True, False)
         })

output_state = 2  # for objective
is_path_constraint = False

# call optimization with arguments
# 'eval_rhs, x0, finalTime, output_state, is_path_constraint, doPlot'
# your code here

# call ODE integration to plot trajectories with optimal solution of controls
# your code here
# -

# ## What could be made more efficient for this optimization?
EngineeringOptimization/GitLab/Lab12.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Covariance and Correlation

# Covariance measures how two variables vary in tandem from their means.
#
# For example, let's say we work for an e-commerce company, and they are interested in finding a correlation between page speed (how fast each web page renders for a customer) and how much a customer spends.
#
# numpy offers covariance methods, but we'll do it the "hard way" to show what happens under the hood. Basically we treat each variable as a vector of deviations from the mean, and compute the "dot product" of both vectors. Geometrically this can be thought of as the angle between the two vectors in a high-dimensional space, but you can just think of it as a measure of similarity between the two variables.
#
# First, let's just make page speed and purchase amount totally random and independent of each other; a very small covariance will result as there is no real correlation:

# +
# %matplotlib inline

import numpy as np
from pylab import *


def de_mean(x):
    """Return x as a list of deviations from its mean."""
    xmean = mean(x)
    return [xi - xmean for xi in x]


def covariance(x, y):
    """Sample covariance of x and y (normalized by n-1)."""
    n = len(x)
    return dot(de_mean(x), de_mean(y)) / (n - 1)


pageSpeeds = np.random.normal(3.0, 1.0, 1000)
purchaseAmount = np.random.normal(50.0, 10.0, 1000)

scatter(pageSpeeds, purchaseAmount)

covariance(pageSpeeds, purchaseAmount)
# -

np.cov(pageSpeeds, purchaseAmount)

# Now we'll make our fabricated purchase amounts an actual function of page speed, making a very real correlation. The negative value indicates an inverse relationship; pages that render in less time result in more money spent:

# +
purchaseAmount = np.random.normal(50.0, 10.0, 1000) / pageSpeeds

scatter(pageSpeeds, purchaseAmount)

covariance(pageSpeeds, purchaseAmount)
# -

np.cov(pageSpeeds, purchaseAmount)

# But, what does this value mean? Covariance is sensitive to the units used in the variables, which makes it difficult to interpret. Correlation normalizes everything by their standard deviations, giving you an easier to understand value that ranges from -1 (for a perfect inverse correlation) to 1 (for a perfect positive correlation):


# +
def correlation(x, y):
    """Pearson correlation of x and y.

    Uses the *sample* standard deviation (ddof=1) so the normalization is
    consistent with covariance(), which divides by n-1. With the default
    ddof=0 (population std) the result would be off by a factor of n/(n-1)
    relative to np.corrcoef.
    """
    stddevx = x.std(ddof=1)
    stddevy = y.std(ddof=1)
    # In real life you'd check for divide by zero here
    return covariance(x, y) / stddevx / stddevy


correlation(pageSpeeds, purchaseAmount)
# -

# numpy can do all this for you with numpy.corrcoef. It returns a matrix of the correlation coefficients between every combination of the arrays passed in:

np.corrcoef(pageSpeeds, purchaseAmount)

# (An earlier version of this notebook didn't match np.corrcoef exactly — not because of floating-point precision, but because covariance() divides by n-1 while std() defaults to the population formula, ddof=0. Using ddof=1 in correlation() makes both use the sample normalization, so the values now agree up to ordinary floating-point rounding.)
#
# We can force a perfect correlation by fabricating a totally linear relationship (again, it's not exactly -1 just due to precision errors, but it's close enough to tell us there's a really good correlation here):

# +
purchaseAmount = 100 - pageSpeeds * 3

scatter(pageSpeeds, purchaseAmount)

correlation(pageSpeeds, purchaseAmount)
# -

# Remember, correlation does not imply causality!

# ## Activity
# numpy also has a numpy.cov function that can compute Covariance for you. Try using it for the pageSpeeds and purchaseAmounts data above. Interpret its results, and compare it to the results from our own covariance function above.
CovarianceCorrelation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Quick tour of basic PyTorch: numpy interop, tensor math, autograd
# Variables, and common activation functions.

import torch
import numpy as np

# numpy -> torch and back: from_numpy/.numpy() share memory with the array
np_data = np.arange(6).reshape((2, 3))
np_data

torch_data = torch.from_numpy(np_data)
torch_data

torch2array = torch_data.numpy()

# abs
data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)
tensor

np.sin(data)

torch.mean(tensor)

# # matrix operation

# +
data = [[1,2],[3,4]]
tensor = torch.FloatTensor(data)
data = np.array(data)

# matrix-matrix product (2x2 @ 2x2)
torch.mm(tensor, tensor)
#tensor.dot(tensor)
# -

# # Variable

# +
from torch.autograd import Variable

tensor = torch.FloatTensor([[1,2], [3,4]])
# requires_grad=True so gradients are accumulated into variable.grad
variable = Variable(tensor, requires_grad=True)

print(tensor)
print(variable)
# -

t_out = torch.mean(tensor*tensor)
v_out = torch.mean(variable*variable)
print( '\nt_out',t_out, '\nv_out',v_out )

v_out.backward()    # v_out = 1/4 * sum(var*var)

# d(v_out)/d(var) = var/2
print(variable.grad)

print(variable.data)

print(variable.data.numpy())

# # Activation

# +
# relu/tanh/sigmoid/softplus
# -

import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

x = torch.linspace(-5, 5, 200)
x = Variable(x)
x_np = x.data.numpy()

y_relu = F.relu(x).data.numpy()
# fixed: this previously called F.relu(x) again, so y_sigmoid held ReLU
# values instead of the sigmoid curve
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = F.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()

# %matplotlib inline
plt.plot(x_np, y_relu)
plt.show()
torch_basic/numpy_vs_pytorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # %load_ext autoreload # # %autoreload 2 import pandas as pd import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset from torch.autograd import Variable from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.metrics import roc_auc_score from gensim.models.keyedvectors import KeyedVectors import matplotlib.pyplot as plt from embedding import load_emb_vectors, build_emb_weight from loadData import create_dataset_obj, collate_func from model import create_emb_layer, two_stage_RNN, test_model from preprocess import tokenize_dataset, all_tokens_list, build_vocab, token2index_dataset from importlib import reload # reload(loadData) # from loadData import create_dataset_obj, collate_func RANDOM_STATE = 42 # - device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) # ### Get pre-trained embeddings # encode the pretrained embedding to text file model = KeyedVectors.load_word2vec_format('/home/hb1500/Plated/vocab.bin', binary=True) model.save_word2vec_format('pretrained_embd.txt', binary=False) fname = '../../data/glove.6B.50d.txt' words_emb_dict = load_emb_vectors(fname) # ### Load Cleaned Data steps = ['step_one','step_two', 'step_three', 'step_four', 'step_five', 'step_six'] steps_aug = ['step_one_sp', 'step_two_sp', 'step_three_sp', 'step_four_sp', 'step_five_sp', 'step_six_sp'] tags = ['tag_cuisine_indian', 'tag_cuisine_nordic', 'tag_cuisine_european', 'tag_cuisine_asian', 'tag_cuisine_mexican', 'tag_cuisine_latin-american', 'tag_cuisine_french', 'tag_cuisine_italian', 'tag_cuisine_african', 'tag_cuisine_mediterranean', 'tag_cuisine_american', 'tag_cuisine_middle-eastern'] data_with_aug = 
pd.read_csv('../data/recipe_data_with_aug.csv', index_col=0) data_with_aug_tags = data_with_aug[steps+steps_aug+tags] print(data_with_aug_tags.columns) # ### Tokenization # + print('Processing original instruction data') # tokenize each steps on original datasets steps_token = [] for step in steps: steps_token.append(step+'_token') data_with_aug_tags[step+'_token'] = tokenize_dataset(data_with_aug_tags[step]) print(step, 'has been tokenized.') # tokenize each steps on augmented datasets print('Processing augmented instruction data') steps_aug_token = [] for step in steps_aug: steps_aug_token.append(step+'_token') data_with_aug_tags[step+'_token'] = tokenize_dataset(data_with_aug_tags[step]) print(step, 'has been tokenized.') # - data_with_aug_tags = data_with_aug_tags[steps_token+steps_aug_token+tags] data_with_aug_tags.columns # # Split train and test sets train, test_data = train_test_split(data_with_aug_tags, test_size=0.1, random_state=RANDOM_STATE) test_data = test_data[steps_token+tags] #train_data, val_data, train_tags, val_tags = train_test_split(X_train, y_train, test_size=0.1, random_state=RANDOM_STATE) aug2ori_colname = dict(zip(steps_aug_token+tags, steps_token+tags)) # Cross validation for train and validation # + rnn_types = { 'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU } params = dict( rnn1_type = 'gru', rnn2_type = 'gru', bi = True, hidden_dim1 = 30, hidden_dim2 = 30, num_classes = 1, num_epochs = 10, batch_size = 50, learning_rate = 0.01, step_max_descent = 3, add_data_aug = True, cuda_on = True ) # - predicted_tags = 'tag_cuisine_american' kf = KFold(n_splits=5, shuffle=True, random_state=RANDOM_STATE) k = 1 val_auc_kf = [] for train_index, val_index in kf.split(train): print('===================== This is the Kfold {} ====================='.format(k)) k += 1 val_data = train[steps_token+tags].iloc[val_index] train_data = train.iloc[train_index] if params['add_data_aug']: ##### add augmentation to training set by index ##### train_org = 
train_data[steps_token+tags] train_aug = train_data[steps_aug_token+tags] train_aug.rename(index=str, columns=aug2ori_colname, inplace=True) # concatenate dfs train_data = pd.concat([train_org, train_aug], axis=0, ignore_index=False) ##### add augmentation to training set by index ##### else: train_data = train_data[steps_token+tags] #print(len(train_data), len(train_data.dropna())) #look up train_targets = list(train_data[predicted_tags]) val_targets = list(val_data[predicted_tags]) test_targets = list(test_data[predicted_tags]) train_X = train_data[steps_token] val_X = val_data[steps_token] test_X = test_data[steps_token] all_train_tokens = all_tokens_list(train_X) max_vocab_size = len(list(set(all_train_tokens))) token2id, id2token = build_vocab(all_train_tokens, max_vocab_size) emb_weight = build_emb_weight(words_emb_dict, id2token) train_data_indices = token2index_dataset(train_X, token2id) val_data_indices = token2index_dataset(val_X, token2id) test_data_indices = token2index_dataset(test_X, token2id) # batchify datasets: batch_size = params['batch_size'] max_sent_len = np.array([94, 86, 87, 90, 98, 91]) train_loader, val_loader, test_loader = create_dataset_obj(train_data_indices, val_data_indices, test_data_indices, train_targets, val_targets, test_targets, batch_size, max_sent_len, collate_func) val_auc, val_acc = model_train(params, emb_weight, train_loader, val_loader, test_loader) val_auc_kf.append(val_auc) def model_train(params, emb_weight, train_loader, val_loader, test_loader): rnn1_type = params['rnn1_type'] rnn_1 = rnn_types[rnn1_type] rnn2_type = params['rnn2_type'] rnn_2 = rnn_types[rnn2_type] bi = params['bi'] hidden_dim1 = params['hidden_dim1'] hidden_dim2 = params['hidden_dim2'] num_classes = params['num_classes'] batch_size = params['batch_size'] cuda_on = params['cuda_on'] weights_matrix = torch.from_numpy(emb_weight) model = two_stage_RNN(rnn_1, hidden_dim1, bi, rnn_2, hidden_dim2, batch_size, cuda_on, weights_matrix, num_classes) 
model_parameters = filter(lambda p: p.requires_grad, model.parameters()) print('The number of train parameters', sum([np.prod(p.size()) for p in model_parameters])) model = model.to(device) #parameter for training learning_rate = params['learning_rate'] num_epochs = params['num_epochs'] # number epoch to train # Criterion and Optimizer criterion = nn.BCEWithLogitsLoss() #torch.nn.BCELoss(); torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) train_loss_list = [] train_AUC_list = [] val_AUC_list = [] train_ACC_list = [] val_ACC_list = [] max_val_auc = 0 step_max_descent = params['step_max_descent'] for epoch in range(num_epochs): for i, (steps_batch, lengths_batch, labels_batch) in enumerate(train_loader): for step_id in range(6): lengths_batch[step_id] = lengths_batch[step_id].to(device) steps_batch[step_id] = steps_batch[step_id].to(device) model.train() optimizer.zero_grad() outputs = model(steps_batch, lengths_batch) loss = criterion(outputs, labels_batch.view(-1,1).float().to(device)) train_loss_list.append(loss.item()) loss.backward() optimizer.step() # validate every 10 step if i % 10 == 0: val_auc, val_acc = test_model(val_loader, model) print('{}/{}, Step:{}/{}, TrainLoss:{:.6f}, ValAUC:{:.6f} ValAcc:{:.6f}'.format( epoch+1, num_epochs, i+1, len(train_loader), loss, val_auc, val_acc)) train_auc, train_acc = test_model(train_loader, model) train_AUC_list.append(train_auc) val_AUC_list.append(val_auc) train_ACC_list.append(train_acc) val_ACC_list.append(val_acc) # early stop if max_val_auc < val_auc: max_val_auc = val_auc step_num_descent = 0 else: step_num_descent += 1 if step_max_descent == step_num_descent: print('early stop!') break print('Epoch: [{}/{}], trainAUC: {:.6f}, trainAcc: {:.6f}'.format(epoch+1, num_epochs, train_auc, train_acc)) print('Epoch: [{}/{}], ValAUC: {:.6f}, ValAcc: {:.6f}'.format(epoch+1, num_epochs, val_auc, val_acc)) if step_max_descent == step_num_descent: break #return train_loss_list, 
train_AUC_list, val_AUC_list, train_ACC_list, val_ACC_list val_auc_mean = np.mean(val_AUC_list[-step_max_descent*2+1:]) val_acc_mean = np.mean(val_ACC_list[-step_max_descent*2+1:]) return val_auc_mean, val_acc_mean # All tokens from training set # form all tokens list all_train_tokens = all_tokens_list(train_data) # Let's decide which tag to predict for trail data_cuisine_tags.iloc[:,1:].sum()/data_cuisine_tags.iloc[:,1:].shape[0] # Choose tag: tag_cuisine_american, which 27.3525% are 1 # ### Build vocabulary and indexing len(list(set(all_train_tokens))) token_counter = Counter(all_train_tokens) # token_counter.most_common emb_weight = build_emb_weight(words_emb_dict, id2token) sum(np.sum(emb_weight,1)==0)/emb_weight.shape[0] # Reconstruct data strcuture for datasets # tag_cuisine_indian 0.023525 85% auc # tag_cuisine_nordic 0.000399 # tag_cuisine_european 0.012360 # tag_cuisine_asian 0.182217 98% auc # tag_cuisine_mexican 0.013557 # tag_cuisine_latin-american 0.094896 90% auc # tag_cuisine_french 0.077352 72% auc # tag_cuisine_italian 0.233254 80% auc # tag_cuisine_african 0.003987 # tag_cuisine_mediterranean 0.076555 88% auc # tag_cuisine_american 0.273525 80% auc # tag_cuisine_middle-eastern 0.046252 87% auc # test model logits_all = [] labels_all = [] model.eval() for steps_batch, lengths_batch, labels_batch in test_loader: for step_id in range(6): lengths_batch[step_id] = lengths_batch[step_id].to(device) steps_batch[step_id] = steps_batch[step_id].to(devi) logits = model(steps_batch, lengths_batch) logits_all.extend(list(logits.cpu().detach().numpy())) labels_all.extend(list(labels_batch.numpy())) logits_all = np.array(logits_all) labels_all = np.array(labels_all) auc = roc_auc_score(labels_all, logits_all) predicts = (logits_all > 0.5).astype(int) acc = np.mean(predicts==labels_all) auc from sklearn import metrics fpr, tpr, thresholds = metrics.roc_curve(labels_all, logits_all, pos_label=1) plt.plot(fpr, tpr) for p in model.parameters(): if p.requires_grad: 
print(p.size())
src/.ipynb_checkpoints/Two_Stage_LSTM_cuisine_tags_CV_main-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import scrapy import requests from scrapy.http import TextResponse url = "http://job.incruit.com/entry/searchjob.asp?rgn2=11&jobty=1&occ1=150&page=1" req = requests.get(url) response = TextResponse(req.url, body = req.text, encoding = 'utf-8') response links = response.xpath('//*[@id="incruit_contents"]/div[3]/div[1]/div[2]/table/tbody/tr/td[1]/div/span[1]/a/@href').extract() links = response.xpath('//*[@id="incruit_contents"]/div[3]/div[1]/div[2]/table/tbody/tr[1]/td[1]/div/span[1]/a/@href').extract() links # + # !rm -rf incruit # !scrapy startproject incruit # - # !tree incruit # + # %%writefile incruit/incruit/items.py import scrapy class IncruitItem(scrapy.Item): title = scrapy.Field() content = scrapy.Field() employment_form = scrapy.Field() status = scrapy.Field() # + # %%writefile incruit/incruit/spiders/spider.py import scrapy import requests from incruit.items import IncruitItem from scrapy.http import TextResponse class Spider(scrapy.Spider): name = "Week_Incruit" allow_domain = ["https://www.incruit.com/"] start_urls = ["http://job.incruit.com/entry/searchjob.asp?ct=12&ty=1&cd=1"] def parse(self, response): for i in range(1,10): url = url = "http://job.incruit.com/entry/searchjob.asp?jobty=4&jobty=1&occ1=150&rgn2=11&page={}".format(i) req = requests.get(url) response = TextResponse(req.url, body = req.text, encoding = 'utf-8') links = response.xpath('//*[@id="incruit_contents"]/div[3]/div[1]/div[2]/table/tbody/tr/td[1]/div/span[1]/a/@href').extract() for link in links: yield scrapy.Request(link, callback = self.page_content) def page_content(self, response): item = IncruitItem() item["title"] = response.xpath('//*[@id="content"]/div[2]/h1/span/text()').extract() item["content"] = 
response.xpath('//*[@id="content"]/div[2]/h1/strong/text()').extract() item["employment_form"] = response.xpath('//*[@id="content"]/div[3]/div[1]/div[2]/div[3]/dl[1]/dd/div/div[1]/em/text()').extract() item["status"] = response.xpath('//*[@id="content"]/div[3]/div[1]/div[2]/div[1]/dl[1]/dd/div/div/text()').extract() yield item # - # %%writefile run.sh # cd incruit scrapy crawl Week_Incruit -o Week_Incruit.csv # !chmod +x run.sh # ! ./run.sh # !ls incruit/ df = pd.read_csv("incruit/Week_Incruit.csv") df['status'] = df['status'].apply(lambda x: str(x).replace('\t','')) df['status'] = df['status'].apply(lambda x: str(x).replace('\t','')) df
practice/incruit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Evaluate a pickled logistic-regression classifier on a held-out batch:
# load the artifacts, score the positive class, and plot the ROC curve.

# +
import pickle
import numpy as np


# +
def _unpickle(path):
    # Load a single pickled object from disk.
    with open(path, "rb") as handle:
        return pickle.load(handle)


clf = _unpickle("logistic_regression_clf.pickle")
test_data = _unpickle("test_data_batch.pickle")
test_label = _unpickle("test_data_label.pickle")
# -

# Probability of the positive class for every test example.
scores = clf.predict_proba(test_data)[:, 1]

from sklearn import metrics
from matplotlib import pyplot as plt

fpr, tpr, _ = metrics.roc_curve(test_label, scores)
auc = metrics.roc_auc_score(test_label, scores)

# ROC curve, with the AUC shown in the legend (lower-right corner).
plt.plot(fpr, tpr, label="data 1, auc="+str(auc))
plt.legend(loc=4)
plt.show()
notebooks/Model Evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="fCqNDHM0EPpq" colab_type="code" outputId="a23a5eaf-8a78-458a-9c61-faad1b495fc5" colab={"base_uri": "https://localhost:8080/", "height": 51} import numpy as np import cv2 import matplotlib.pyplot as plt print('Versão da OpenCV: ', cv2.__version__, end='\n\n') # + id="Tzp9Blk-FAiV" colab_type="code" outputId="0f5b64ca-2ee4-4940-d65b-fd286451d8e8" colab={"base_uri": "https://localhost:8080/", "height": 286} img = cv2.imread('/content/ed7a20a362adb57310f4bec8fcd28720.jpg', cv2.IMREAD_GRAYSCALE) plt.imshow(img) # + id="qaM-6cHIFaFo" colab_type="code" outputId="8f947ee9-1f26-4eef-85e0-20a153cae142" colab={"base_uri": "https://localhost:8080/", "height": 34} img.shape # + id="DSzYtLpwW1y1" colab_type="code" colab={} def simple_subsampling(img, n): lista_imagens = [] for i in range(n+1): img = img[1::2, 1::2] lista_imagens.append(img) return lista_imagens # + id="CKqxdwSjYGC2" colab_type="code" colab={} lista_imagens = simple_subsampling(img, 4) # + id="yKKYAztAYQNz" colab_type="code" outputId="fbccf579-f1dd-49ac-ca57-a82fc73b1373" colab={"base_uri": "https://localhost:8080/", "height": 34} lista_imagens[3].shape # + id="mYVSAiBEZDXU" colab_type="code" outputId="3b0f7ce9-7e56-4621-e7ab-348bfbdb2efd" colab={"base_uri": "https://localhost:8080/", "height": 1000} plt.figure(dpi=100) plt.gray() plt.subplots(figsize=(20, 10)) plt.imshow(img) plt.subplots(figsize=(10, 5)) plt.imshow(lista_imagens[0]) plt.subplots(figsize=(5, 2.5)) plt.imshow(lista_imagens[1]) plt.subplots(figsize=(2.5, 1.75)) plt.imshow(lista_imagens[2]) plt.subplots(figsize=(1.5, 0.75)) plt.imshow(lista_imagens[3]) plt.show() # + [markdown] id="VxtRWVSskaQq" colab_type="text" # https://gist.github.com/KeremTurgutlu/68feb119c9dd148285be2e247267a203 # + id="2oVis1QOfxEw" colab_type="code" colab={} def 
nn_interpolate(A, new_size): """Vectorized Nearest Neighbor Interpolation""" old_size = A.shape row_ratio, col_ratio = np.array(new_size)/np.array(old_size) # row wise interpolation row_idx = (np.ceil(range(1, 1 + int(old_size[0]*row_ratio))/row_ratio) - 1).astype(int) # column wise interpolation col_idx = (np.ceil(range(1, 1 + int(old_size[1]*col_ratio))/col_ratio) - 1).astype(int) final_matrix = A[:, row_idx][col_idx, :] return final_matrix # + id="GjF9oslVklIw" colab_type="code" colab={} teste = nn_interpolate(lista_imagens[0], 512) # + id="edodVQXkO5EO" colab_type="code" outputId="abf92845-f4fd-44a8-8c52-542982f2ef9f" colab={"base_uri": "https://localhost:8080/", "height": 538} plt.figure() plt.imshow(teste) plt.show() plt.imsave('fig1tets.png', teste) plt.imshow(lista_imagens[0]) # plt.show() # plt.savefig('fig2teste.png', lista_imagens[0]) # + id="2Tl6jOuuoNhL" colab_type="code" outputId="ae6243fe-8d1f-41f1-bdb7-dba380aac1b2" colab={"base_uri": "https://localhost:8080/", "height": 269} teste1 = lista_imagens[0] plt.figure() plt.imshow(teste1) plt.show() plt.imsave('fig2tets.png', teste1) # + id="NZ8jy9z1FvC3" colab_type="code" outputId="0c9236f8-13af-4fd3-c39d-d1a271344d4d" colab={"base_uri": "https://localhost:8080/", "height": 34} teste1.shape # + id="aHqlDjc4kYO1" colab_type="code" colab={} import numpy as np from scipy import ndimage def GetBilinearPixel(imArr, posX, posY): out = [] #Get integer and fractional parts of numbers modXi = int(posX) modYi = int(posY) modXf = posX - modXi modYf = posY - modYi modXiPlusOneLim = min(modXi+1,imArr.shape[1]-1) modYiPlusOneLim = min(modYi+1,imArr.shape[0]-1) #Get pixels in four corners # for chan in range(imArr.shape[2]): bl = imArr[modYi, modXi, 1] br = imArr[modYi, modXiPlusOneLim, 1] tl = imArr[modYiPlusOneLim, modXi, 1] tr = imArr[modYiPlusOneLim, modXiPlusOneLim, 1] #Calculate interpolation b = modXf * br + (1. - modXf) * bl t = modXf * tr + (1. - modXf) * tl pxf = modYf * t + (1. 
- modYf) * b out.append(int(pxf+0.5)) return out # + id="ixaPxOl3-uz_" colab_type="code" colab={} # Bilinear interpolation def bilinear_interpolate(image): image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB) (h, w, channels) = image.shape h2 = 512 w2 = 512 temp = np.zeros((h2, w2, 3), np.uint8) x_ratio = float((w - 1)) / w2; y_ratio = float((h - 1)) / h2; for i in range(1, h2 - 1): for j in range(1 ,w2 - 1): x = int(x_ratio * j) y = int(y_ratio * i) x_diff = (x_ratio * j) - x y_diff = (y_ratio * i) - y a = image[x, y] & 0xFF b = image[x + 1, y] & 0xFF c = image[x, y + 1] & 0xFF d = image[x + 1, y + 1] & 0xFF blue = a[0] * (1 - x_diff) * (1 - y_diff) + b[0] * (x_diff) * (1-y_diff) + c[0] * y_diff * (1 - x_diff) + d[0] * (x_diff * y_diff) green = a[1] * (1 - x_diff) * (1 - y_diff) + b[1] * (x_diff) * (1-y_diff) + c[1] * y_diff * (1 - x_diff) + d[1] * (x_diff * y_diff) red = a[2] * (1 - x_diff) * (1 - y_diff) + b[2] * (x_diff) * (1-y_diff) + c[2] * y_diff * (1 - x_diff) + d[2] * (x_diff * y_diff) temp[j, i] = (blue, green, red) return cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY) # + id="SwrW3Pqa-yqt" colab_type="code" colab={} testep = bilinear_interpolate(img) # + id="eG0wfNCJ-5hO" colab_type="code" outputId="06f6069c-1ed3-4c50-97c9-d94bd9579a8e" colab={"base_uri": "https://localhost:8080/", "height": 286} # gray = cv2.cvtColor(testep, cv2.COLOR_BGR2GRAY) plt.imshow(testep) # + id="EfwxWWnsERYe" colab_type="code" colab={} testem = cv2.cvtColor(testep, cv2.IMREAD_GRAYSCALE).shape # + id="4zhIxxcZEse0" colab_type="code" outputId="ea4956a1-eeb7-4846-d345-faa0621eb355" colab={"base_uri": "https://localhost:8080/", "height": 34} gray.shape # + id="HtmPnwN2E9I1" colab_type="code" outputId="8716d15f-9d72-4113-c915-9e681a09a95a" colab={"base_uri": "https://localhost:8080/", "height": 34} img.shape # + id="aSrk7tf-f_T0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1248911d-c687-46d3-a12c-ea068cc372f6" img = 
cv2.imread('/content/ed7a20a362adb57310f4bec8fcd28720.jpg', cv2.IMREAD_GRAYSCALE) print(img.shape) # + id="FLvFXf0AmiAh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 337} outputId="1f1bcd2c-22b2-4ae2-ad77-d1fba080e4b1" from pylab import imread,imshow,figure,show,subplot from numpy import reshape,uint8,flipud from sklearn.cluster import MiniBatchKMeans image = imread('/content/xray.jpg') print(image.shape) # Extract width & height of image (HEIGHT, WIDTH) = image.shape[:2] # Convert image to L, A, B color space # image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) # Reshape the image to a feature vector image = image.reshape((image.shape[0] * image.shape[1], 1)) # Apply MiniBatchKMeans and then create the quantized image based on the predictions clt = MiniBatchKMeans(n_clusters = 8) labels = clt.fit_predict(image) print(labels) quant = clt.cluster_centers_.astype("uint8")[labels] # reshape the feature vectors to images quant = quant.reshape((HEIGHT, WIDTH)) image = image.reshape((HEIGHT, WIDTH)) # convert from L, A, B to RGB # quant = cv2.cvtColor(quant, cv2.COLOR_LAB2BGR) # image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR) # quant = cv2.cvtColor(quant, cv2.COLOR_BGR2GRAY) # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # print(type(quant)) # print(type(image)) # Display images print(quant.shape) plt.imshow(image) plt.gray() plt.imshow(quant) cv2.waitKey(0) print("Program successfully terminated") # + id="cr1pP8FOmnFA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 850} outputId="ac3e2d9a-0415-44d5-f524-21e9e38c0cf7" img # + id="QGyS2Z8ynHQp" colab_type="code" colab={}
PROJETO_2/experimento_2 (1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="7qmyIyQ3PgNu" # # Toy example to demonstrate the importance of the repulsive term in the energy distance # This notebook reproduces Figure 1 from *A Spectral Energy Distance for Parallel Speech Synthesis* ([https://arxiv.org/abs/2008.01160](https://arxiv.org/abs/2008.01160)). In this paper we use a spectrogram-based generalization of the *Energy Distance* ([wikipedia](https://en.wikipedia.org/wiki/Energy_distance)), which is a proper scoring rule for fitting generative models. The squared energy distance is given by $D^{2}[p|q] = 2\mathbb{E}_{\mathbf{x} \sim p, \mathbf{y} \sim q}||\mathbf{x} - \mathbf{y}||_{2} - \mathbb{E}_{\mathbf{x},\mathbf{x'} \sim p}||\mathbf{x} - \mathbf{x'}||_{2} - \mathbb{E}_{\mathbf{y},\mathbf{y'} \sim q}||\mathbf{y} - \mathbf{y'}||_{2}$. When $p$ is our data distribution and $q$ our model distribution this simplifies to a training loss given by $L[q] = 2\mathbb{E}_{\mathbf{x} \sim p, \mathbf{y} \sim q}||\mathbf{x} - \mathbf{y}||_{2} - \mathbb{E}_{\mathbf{y},\mathbf{y'} \sim q}||\mathbf{y} - \mathbf{y'}||_{2}$. The first term here *attracts* the model samples $\mathbf{y}$ towards the data samples $\mathbf{x}$, while the second term *repels* independent model samples $\mathbf{y}, \mathbf{y'}$ away from each other. In this notebook we estimate 2 simple toy models with and without using this repulsive term to demonstrate its importance. 
# + [markdown] colab_type="text" id="llATO9tJkgP-"
# ## Imports

# + colab={} colab_type="code" id="l7vXnbkgV8Nu"
import numpy as np
from scipy.optimize import minimize
import functools
import matplotlib.pyplot as plt
import palettable

# + [markdown] colab_type="text" id="ZJOc7HKryw6R"
# ## This is the energy distance loss

# + colab={} colab_type="code" id="y4PObq1bywUs"
def loss(param, sample_from_param_fun, real_data, repulsive_term = True):
    """Monte-Carlo estimate of the energy-distance training loss.

    Inputs:
      param: parameters of a generative model
      sample_from_param_fun: function that produces a set of samples from
        the model for given parameters (one sample per row)
      real_data: training data, same shape as the model samples
      repulsive_term: whether to include the repulsive (sample-vs-sample)
        term in the loss or not

    Output:
      A scalar loss that can be minimized to fit our model to the data.
    """
    fake = sample_from_param_fun(param)

    # Attractive term: Euclidean distances between matched (real, fake) rows.
    attract = np.sqrt(np.sum(np.square(fake - real_data), axis=1))
    total = 2. * np.mean(attract)

    if repulsive_term:
        # Repulsive term: distances between pairs of independently generated
        # model samples, matched up via a fixed-seed random permutation so
        # the pairing is deterministic across calls.
        perm = np.random.RandomState(seed=100).permutation(fake.shape[0])
        repel = np.sqrt(np.sum(np.square(fake - fake[perm]), axis=1))
        total -= np.mean(repel)

    return total


# + [markdown] colab_type="text" id="r0xW2K7gcsPB"
# ## Fitting a high dimensional Gaussian using energy distance, with and without using a repulsive term
# We fit a high dimensional Gaussian model to training data generated from a distribution in the same model class. We show samples from the model trained by minimizing the energy distance (blue) or the more commonly used loss without repulsive term (green), and compare to samples from the training data (red). Samples from the energy distance trained model are representative of the data, and all sampled points lie close to training examples. Samples from the model trained without repulsive term are not typical of training data.
# + colab={} colab_type="code" id="zFecwWAzcpwY" n = 10000 dim = 100 def sample_from_param(param, z): mu = param[:-1] log_sigma = param[-1] sigma = np.exp(log_sigma) mu = np.reshape(mu, [1, dim]) return mu + sigma * z z_optim = np.random.normal(size=(n, dim)) sample_from_param_partial = functools.partial(sample_from_param, z=z_optim) # + colab={} colab_type="code" id="dNmuQ-EDylDU" # real data real_param = np.zeros(dim+1) real_data = sample_from_param(real_param, np.random.normal(size=(n, dim))) # + colab={} colab_type="code" id="FqYpb1QoeGqY" # with energy distance res = minimize(loss, np.zeros(dim + 1), args=(sample_from_param_partial, real_data, True), method='BFGS', tol=1e-10) sample_ged = sample_from_param_partial(res.x) # + colab={} colab_type="code" id="jp1dkkpjf0pL" # without repulsive res = minimize(loss, np.zeros(dim + 1), args=(sample_from_param_partial, real_data, False), method='BFGS', tol=1e-10) sample_naive = sample_from_param_partial(res.x) # + colab={"height": 287} colab_type="code" executionInfo={"elapsed": 459, "status": "ok", "timestamp": 1596626729566, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="MczrHWsUfmo1" outputId="1ae3b17a-781c-4697-af45-c724b5d2a37a" def data_to_xy(sample): sample = sample[:100] x = np.sqrt(np.mean(np.square(sample), axis=1)) y = np.mean(sample, axis=1) return (x,y) data = (data_to_xy(real_data), data_to_xy(sample_ged), data_to_xy(sample_naive)) colors = palettable.colorbrewer.qualitative.Set1_3.mpl_colors groups = ("Training data", "Energy distance", "No repulsive term") fig = plt.figure() ax = fig.add_subplot(1, 1, 1) for data, color, group in zip(data, colors, groups): x, y = data ax.scatter(x, y, alpha=0.8, c=color, edgecolors='none', s=30, label=group) plt.legend(loc='best', fontsize=14) plt.xlabel('Sample norm', fontsize=14) plt.ylabel('Sample mean', fontsize=14) plt.show() # + [markdown] colab_type="text" id="Cc67BgeFwhra" # ## Fitting a mixture of 3 Gaussians in 2d # We fit a 
# mixture of 3 Gaussians in 2d to training data generated from a distribution in the
# same model class. We show samples from the model trained by minimizing the energy
# distance (blue) or the more commonly used loss without repulsive term (green), and
# compare to samples from the training data (red). Samples from the energy distance
# trained model are representative of the data, and all sampled points lie close to
# training examples. Samples from the model trained without repulsive term are not
# typical of training data.

# + colab={} colab_type="code" id="gSpmXaFiwe_r"
n = 10000


def sample_from_param(param, z, perm):
    """Draw samples from a mixture of 3 isotropic 2-d Gaussians.

    param: flat array of 9 values, 3 per component: (mean_x, mean_y, log_sigma).
    z:     list of 3 standard-normal arrays, each of shape (n, 2) (fixed noise,
           so the map param -> samples is deterministic and differentiable-ish
           for the optimizer).
    perm:  permutation of length 3*n used to shuffle the stacked components.
    Returns an array of shape (3*n, 2).
    """
    params = np.split(param, 3)
    means = [np.reshape(p[:2], [1, 2]) for p in params]
    sigmas = [np.exp(p[2]) for p in params]  # log-parameterized scale keeps sigma > 0
    samples = [m + s * zi for m, s, zi in zip(means, sigmas, z)]
    samples = np.concatenate(samples, axis=0)[perm]
    return samples


# Fixed noise and permutation shared by all optimizer evaluations.
z_optim = np.split(np.random.normal(size=(n, 6)), 3, axis=1)
perm_optim = np.random.permutation(3 * n)
sample_from_param_partial = functools.partial(sample_from_param, z=z_optim, perm=perm_optim)

# + colab={} colab_type="code" id="22R7StpEzhtu"
# real data: two components at x = +/-10 and one wide component (sigma implicit in
# the noise; here sqrt(300) is the y-mean of the third component).
real_param = np.array([-10., 0., 0., 10., 0., 0., 0., np.sqrt(300.), 0.])
z_real = np.split(np.random.normal(size=(n, 6)), 3, axis=1)
perm_real = np.random.permutation(3 * n)
real_data = sample_from_param(real_param, z=z_real, perm=perm_real)

# + colab={} colab_type="code" id="Qd6A9x96zht1"
# with energy distance (repulsive term enabled via the last `args` flag)
res = minimize(loss, np.zeros(9), args=(sample_from_param_partial, real_data, True),
               method='BFGS', tol=1e-10)
sample_ged = sample_from_param_partial(res.x)

# + colab={} colab_type="code" id="2ArgfWpnzht6"
# without repulsive
res = minimize(loss, np.zeros(9), args=(sample_from_param_partial, real_data, False),
               method='BFGS', tol=1e-10)
sample_naive = sample_from_param_partial(res.x)

# + colab={"height": 287} colab_type="code" executionInfo={"elapsed": 827, "status": "ok", "timestamp": 1596626738524, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="67wC5OQBzht-" outputId="35e8b4ee-<PASSWORD>"
def data_to_xy(sample):
    """Take the first 100 samples and split the (100, 2) array into x/y columns."""
    sample = sample[:100]
    x, y = np.split(sample, 2, axis=1)
    return (x, y)


data = (data_to_xy(real_data), data_to_xy(sample_ged), data_to_xy(sample_naive))
colors = palettable.colorbrewer.qualitative.Set1_3.mpl_colors
groups = ("Training data", "Energy distance", "No repulsive term")

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# FIX: the loop variable used to be named `data`, shadowing (and rebinding) the
# tuple being iterated; renamed to `xy` to avoid the hazard.
for xy, color, group in zip(data, colors, groups):
    x, y = xy
    ax.scatter(x, y, alpha=0.8, c=color, edgecolors='none', s=30, label=group)
plt.legend(loc='best', fontsize=14)
plt.xlabel('$x_1$', fontsize=14)
plt.ylabel('$x_2$', fontsize=14)
plt.show()

# + [markdown] colab_type="text" id="s05OY9pkklSM"
# ## Copyright
#
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ged_tts/toy_example/toy_ged.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from keras.callbacks import ModelCheckpoint from keras import backend as K from keras import optimizers from keras.layers import Dense from keras.layers import Dense, Dropout from keras.models import Sequential from keras.wrappers.scikit_learn import KerasClassifier from pandas import ExcelFile from pandas import ExcelWriter from PIL import Image from scipy import ndimage from scipy.stats import randint as sp_randint from sklearn.base import BaseEstimator from sklearn.base import TransformerMixin from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.feature_selection import SelectFromModel from sklearn import datasets from sklearn import metrics from sklearn import pipeline from sklearn.metrics import roc_auc_score, roc_curve from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.model_selection import PredefinedSplit from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer from sklearn.preprocessing import Imputer from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler from sklearn.utils import resample from tensorflow.python.framework import ops import h5py import keras import matplotlib.pyplot as plt import numpy as np import openpyxl import pandas as pd import scipy import tensorflow as tf import xlsxwriter # %load_ext autoreload # %matplotlib inline import numpy as np from keras import layers from keras.layers import Input, Add, Dense, 
Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D from keras.models import Model, load_model from keras.preprocessing import image from keras.utils import layer_utils from keras.utils.data_utils import get_file from keras.applications.imagenet_utils import preprocess_input import pydot from IPython.display import SVG from keras.utils.vis_utils import model_to_dot from keras.utils import plot_model from keras.initializers import glorot_uniform import scipy.misc from matplotlib.pyplot import imshow # %matplotlib inline import keras.backend as K K.set_image_data_format('channels_last') K.set_learning_phase(1) # + from __future__ import print_function import rdkit from rdkit import Chem from rdkit.Chem import AllChem import pandas as pd import numpy as np from matplotlib import pyplot as plt # %matplotlib inline print("RDKit: %s"%rdkit.__version__) # - import keras from sklearn.utils import shuffle from keras.models import Sequential, Model from keras.layers import Conv2D, MaxPooling2D, Input, GlobalMaxPooling2D from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ReduceLROnPlateau print("Keras: %s"%keras.__version__) data = pd.read_excel(r'IGC50.xlsx') data["mol"] = data["smiles"].apply(Chem.MolFromSmiles) def chemcepterize_mol(mol, embed=20.0, res=0.5): dims = int(embed*2/res) #print(dims) #print(mol) #print(",,,,,,,,,,,,,,,,,,,,,,") cmol = Chem.Mol(mol.ToBinary()) #print(cmol) #print(",,,,,,,,,,,,,,,,,,,,,,") cmol.ComputeGasteigerCharges() AllChem.Compute2DCoords(cmol) coords = cmol.GetConformer(0).GetPositions() #print(coords) #print(",,,,,,,,,,,,,,,,,,,,,,") vect = np.zeros((dims,dims,4)) #Bonds first for i,bond in enumerate(mol.GetBonds()): bondorder = bond.GetBondTypeAsDouble() bidx = bond.GetBeginAtomIdx() eidx = bond.GetEndAtomIdx() bcoords = coords[bidx] ecoords = 
coords[eidx] frac = np.linspace(0,1,int(1/res*2)) # for f in frac: c = (f*bcoords + (1-f)*ecoords) idx = int(round((c[0] + embed)/res)) idy = int(round((c[1]+ embed)/res)) #Save in the vector first channel vect[ idx , idy ,0] = bondorder #Atom Layers for i,atom in enumerate(cmol.GetAtoms()): idx = int(round((coords[i][0] + embed)/res)) idy = int(round((coords[i][1]+ embed)/res)) #Atomic number vect[ idx , idy, 1] = atom.GetAtomicNum() #Gasteiger Charges charge = atom.GetProp("_GasteigerCharge") vect[ idx , idy, 3] = charge #Hybridization hyptype = atom.GetHybridization().real vect[ idx , idy, 2] = hyptype return vect # + mol = data["mol"][104] v = chemcepterize_mol(mol, embed=12, res=0.2) print(v.shape) plt.imshow(v[:,:,:3]) # - def vectorize(mol): return chemcepterize_mol(mol, embed=12,res=0.5) data["molimage"] = data["mol"].apply(vectorize) plt.imshow(data["molimage"][150][:,:,:3]) X_train = np.array(list(data["molimage"][data["split"]==1])) X_test = np.array(list(data["molimage"][data["split"]==0])) print(X_train.shape) print(X_test.shape) assay = "Activity" Y_train = data[assay][data["split"]==1].values.reshape(-1,1) Y_test = data[assay][data["split"]==0].values.reshape(-1,1) print("number of training examples = " + str(X_train.shape[0])) print("number of test examples = " + str(X_test.shape[0])) print("X_train shape: " + str(X_train.shape)) print("Y_train shape: " + str(Y_train.shape)) print("X_test shape: " + str(X_test.shape)) print("Y_test shape: " + str(Y_test.shape)) input_shape = X_train.shape[1:] print(input_shape) def identity_block(X, f, filters, stage, block): """ Implementation of the identity block as defined in Figure 3 Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) f -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path stage -- integer, used to name the layers, depending on their position in the 
network block -- string/character, used to name the layers, depending on their position in the network Returns: X -- output of the identity block, tensor of shape (n_H, n_W, n_C) """ # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value. You'll need this later to add back to the main path. X_shortcut = X # First component of main path X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X) X = Activation('relu')(X) ### START CODE HERE ### # Second component of main path (≈3 lines) X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X) X = Activation('relu')(X) # Third component of main path (≈2 lines) X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X) # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines) X = Add()([X, X_shortcut]) X = Activation('relu')(X) ### END CODE HERE ### return X # + # GRADED FUNCTION: convolutional_block def convolutional_block(X, f, filters, stage, block, s=2): """ Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) f -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path stage -- integer, used to name the layers, depending on their position in the network block -- string/character, used to name the layers, depending on their position in the network s -- 
Integer, specifying the stride to be used Returns: X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C) """ # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value X_shortcut = X ##### MAIN PATH ##### # First component of main path X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X) X = Activation('relu')(X) ### START CODE HERE ### # Second component of main path (≈3 lines) X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X) X = Activation('relu')(X) # Third component of main path (≈2 lines) X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X) ##### SHORTCUT PATH #### (≈2 lines) X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut) X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut) # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines) X = Add()([X, X_shortcut]) X = Activation('relu')(X) ### END CODE HERE ### return X # - def ResNet50(input_shape=input_shape): """ Implementation of the popular ResNet50 the following architecture: CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3 -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER Arguments: input_shape -- shape of the images of the dataset classes -- integer, 
number of classes Returns: model -- a Model() instance in Keras """ # Define the input as a tensor with shape input_shape X_input = Input(input_shape) # Zero-Padding X = ZeroPadding2D((1, 1))(X_input) # Stage 1 X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name='bn_conv1')(X) X = Activation('relu')(X) X = MaxPooling2D((3, 3), strides=(2, 2))(X) # Stage 2 X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1) X = identity_block(X, 3, [64, 64, 256], stage=2, block='b') X = identity_block(X, 3, [64, 64, 256], stage=2, block='c') ### START CODE HERE ### # Stage 3 (≈4 lines) X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2) X = identity_block(X, 3, [128, 128, 512], stage=3, block='b') X = identity_block(X, 3, [128, 128, 512], stage=3, block='c') X = identity_block(X, 3, [128, 128, 512], stage=3, block='d') # Stage 4 (≈6 lines) X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2) X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f') # Stage 5 (≈3 lines) X = X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2) X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b') X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c') # AVGPOOL (≈1 line). 
Use "X = AveragePooling2D(...)(X)" X = AveragePooling2D(pool_size=(2, 2), padding='same')(X) ### END CODE HERE ### # output layer X = Flatten()(X) #X = Dense(10, activation='relu', kernel_initializer=glorot_uniform(seed=0))(X) #X = Dropout(0.1)(X) X = Dense(1, activation='linear', kernel_initializer=glorot_uniform(seed=0))(X) # Create model model = Model(inputs=X_input, outputs=X, name='ResNet50') return model model = ResNet50(input_shape=input_shape) # + from keras.preprocessing.image import ImageDataGenerator generator = ImageDataGenerator(rotation_range=180, width_shift_range=0.1,height_shift_range=0.1, fill_mode="constant",cval = 0, horizontal_flip=True, vertical_flip=True,data_format='channels_last', ) # - def coeff_determination(y_true, y_pred): from keras import backend as K SS_res = K.sum(K.square( y_true-y_pred )) SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) ) return ( 1 - SS_res/(SS_tot + K.epsilon()) ) # + def get_lr_metric(optimizer): def lr(y_true, y_pred): return optimizer.lr return lr # - optimizer = Adam(lr=0.00025) lr_metric = get_lr_metric(optimizer) model.compile(loss="mse", optimizer=optimizer, metrics=[coeff_determination, lr_metric]) # + #Concatenate for longer epochs Xt = np.concatenate([X_train]*50, axis=0) yt = np.concatenate([Y_train]*50, axis=0) batch_size=128 g = generator.flow(Xt, yt, batch_size=batch_size, shuffle=True) steps_per_epoch = 10000/batch_size callbacks_list = [ ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-15, verbose=1, mode='auto',cooldown=0), ModelCheckpoint(filepath="weights.best.hdf5", monitor='val_loss', save_best_only=True, verbose=1, mode='auto') ] history =model.fit_generator(g, steps_per_epoch=len(Xt)//batch_size, epochs=150, validation_data=(X_test,Y_test), callbacks=callbacks_list) # + hist = history.history plt.figure(figsize=(10, 8)) for label in ['val_coeff_determination','coeff_determination']: plt.subplot(221) plt.plot(hist[label], label = label) plt.legend() 
plt.xlabel("Epochs") plt.ylabel("coeff_determination") for label in ['val_loss','loss']: plt.subplot(222) plt.plot(hist[label], label = label) plt.legend() plt.xlabel("Epochs") plt.ylabel("loss") plt.subplot(223) plt.plot( hist['lr'],hist['val_coeff_determination'] ) plt.legend() plt.xlabel("lr") plt.ylabel("val_coeff_determination") plt.subplot(224) plt.plot( hist['lr'],hist['val_loss'] ) plt.legend() plt.xlabel("lr") plt.ylabel("val_loss") plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25, wspace=0.35) # - model_json = model.to_json() with open("model.json", "w") as json_file: json_file.write(model_json) from keras.models import model_from_json # load json and create model json_file = open('model.json', 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights("weights.best.hdf5") print("Loaded model from disk") print(" ")
resnet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:anaconda]
#     language: python
#     name: conda-env-anaconda-py
# ---

# # Convert FIRE data
#
# This script will convert the FIRE .hdf5 files into .json data files that can be read into Firefly.
#
# FIREreader is the class that will allow you to read in files within a directory, create the dictionary, and write out the json files

# +
# %load_ext autoreload
# %autoreload 2

from dataParser import FIREreader
import numpy as np
# -

# ### Set the defaults and create the .json files

# Configure the reader for snapshot 50 of this simulation: gas (PartType0) and
# stars (PartType4), decimated by 10x and 100x respectively, exporting density
# and velocities (velocities linear, density log10).
reader = FIREreader(
    snapdir = "/Users/agurvich/research/snaps/Control_G4_20/",
    snapnum = 50,
    ptypes=['PartType0','PartType4'],
    UInames=['Gas','Stars'],
    dec_factors=[10,100],
    returnKeys=['Density','Velocities'],
    doMags=[0,0],
    doLogs=[1,0],
    filterFlags=[1,0], ## it will automatically filter on magVelocities, so you need to put 0 for Velocities
    JSONdir='isolatedGalaxy_s50')

reader.loadData()
reader.dumpToJSON()

# ## Make an options file to use as a preset

# +
# update a few of the options, here to start by only showing the high-velocity
# outflows in Gas, as vectors
reader.options['center'] = np.array([-0.11233689678565528, -2.3536859975959175, 0.020126853973307934])
reader.options['camera'] = np.array([12.012246024501222, 16.51869122052115, 1.722756246574182])

reader.options['sizeMult']['Gas'] = 0.4
reader.options['showVel']['Gas'] = True
reader.options['velType']['Gas'] = 'arrow'
reader.options['maxVrange'] = 1000

# Note: if you want to define the filterVals or filterLims above
# (i.e. to define them before executing reader.run() and after definining reader.addFilter),
# you would first need to execute reader.defineFilterKeys()
# (reader.defineFilterKeys() is executed within reader.run() )
reader.options['filterVals']['Gas']['magVelocities'] = [500, 35000]

reader.options['showParts']['Stars'] = False

# This creates a file named velocityPreset.json within the data directory
# that can now be loaded as a preset from within Firefly
# -

reader.options.listKeys()

reader.options.outputToJSON(reader.JSONdir, "velocityPreset.json")
data/convertFIREdata_simple.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''MMCD'': conda)'
#     language: python
#     name: python3
# ---

# # Calculation of Free Energy of Two Basins of the Muller-Brown Potential

# +
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import torch
torch.set_default_dtype(torch.double)
import math
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
from sys import exit
import pickle
import scipy.integrate as integrate
import os
# -

# ## Definitions and Potential Energy Contour Plot
# The function, $\text{compute\_Muller\_potential(beta, x)}$ take in a constant
# $\beta = k_BT$ and a two dimensional coordinate $x = (x_1, x_2)$ and returns the
# potential energy at that point. Below we plot a contour plot of the potential
# energy surface.

# +
def compute_Muller_potential(beta, x):
    """Vectorized Muller-Brown potential scaled by beta.

    x is a tensor whose last dimension is (x1, x2); the sum over the four
    Gaussian terms is broadcast over any leading batch dimensions.
    """
    A = (-200., -100., -170., 15.)
    b = (0., 0., 11., 0.6)
    # (a_i, c_i) coefficient pairs for the quadratic form of each term.
    ac = (x.new_tensor([-1.0, -10.0]), x.new_tensor([-1.0, -10.0]),
          x.new_tensor([-6.5, -6.5]), x.new_tensor([0.7, 0.7]))
    # Center of each Gaussian term.
    x0 = (x.new_tensor([ 1.0, 0.0]), x.new_tensor([ 0.0, 0.5]),
          x.new_tensor([-0.5, 1.5]), x.new_tensor([-1.0, 1.0]))
    U = 0
    for i in range(4):
        diff = x - x0[i]
        U = U + A[i]*torch.exp(torch.sum(ac[i]*diff**2, -1) + b[i]*torch.prod(diff, -1))
    return beta*U


def generate_grid(x1_min, x1_max, x2_min, x2_max, ndim1, ndim2):
    """Return an (ndim1*ndim2, 2) tensor of grid points covering the box."""
    x1 = torch.linspace(x1_min, x1_max, steps=ndim1)
    x2 = torch.linspace(x2_min, x2_max, steps=ndim2)
    grid_x1, grid_x2 = torch.meshgrid(x1, x2)
    grid = torch.stack([grid_x1, grid_x2], dim = -1)
    x = grid.reshape((-1, 2))
    return x


x1_min, x1_max = -1.5, 1.0
x2_min, x2_max = -0.5, 2.0
ndim = 100
x = generate_grid(x1_min, x1_max, x2_min, x2_max, ndim, ndim)
U = compute_Muller_potential(1, x)
U = U.reshape(ndim, ndim)
U = U.T  # transpose so axis 0 is x2 for plotting
# -

fig = plt.figure(0)
fig.clf()
plt.contourf(U, levels = 30, extent = (x1_min, x1_max, x2_min, x2_max), cmap = cm.viridis_r)
plt.xlabel(r"$x_1$", fontsize = 24)
plt.ylabel(r"$x_2$", fontsize = 24)
plt.colorbar()
plt.tight_layout()


def compute_Muller_potential_point(beta, r):
    """Compute the Muller potential at a single point r = (x, y), scaled by beta.

    Scalar/numpy twin of compute_Muller_potential, used by the quadrature code.
    """
    x = r[0]
    y = r[1]
    A = (-200., -100., -170., 15.)
    a = (-1, -1, -6.5, 0.7)
    b = (0., 0., 11., 0.6)
    c = (-10, -10, -6.5, 0.7)
    x0 = (1, 0, -0.5, -1)
    y0 = (0, 0.5, 1.5, 1)
    result = 0
    for k in range(4):
        result += A[k]*np.exp(a[k]*(x-x0[k])**2 + b[k]*(x-x0[k])*(y-y0[k]) + c[k]*(y-y0[k])**2)
    return beta*result


# +
# Cache of normalization constants keyed by beta (quadrature is expensive).
partition_function = dict()
beta = 0.05

def p_x_y(x, y, beta = 0.05):
    """Return the Boltzmann probability density at (x, y) for the given beta.

    The partition function for each beta is computed once by 2-d quadrature
    over the plotting box and memoized in `partition_function`.
    """
    b_U_r = compute_Muller_potential_point(beta, (x, y))
    try:
        q = partition_function[beta]
    except KeyError:  # FIX: was a bare `except:`, which would also hide real errors
        q = integrate.nquad(lambda x, y: np.exp(-compute_Muller_potential_point(beta, (x , y))),
                            [[-1.5, 1], [-0.5, 2]])
        partition_function[beta] = q
    return np.exp(-b_U_r)/q[0]


total = integrate.nquad(p_x_y, [[-1.5, 1], [-0.5, 2]])        # should be ~1
basin1_q = integrate.nquad(p_x_y, [[-1.5, 0], [0.55, 2]])     # probability of basin A
basin2_q = integrate.nquad(p_x_y, [[-0.8, 1], [-0.5, 0.8]])   # probability of basin B
print(total)
print(-np.log(basin1_q[0]/basin2_q[0]))
print(-np.log(basin2_q[0]/basin1_q[0]))
print(-(1/beta)*np.log(basin1_q[0]/basin2_q[0]))
# -

# ### Some Absolute Free Energy Calculations

def exp_beta_e(x, y):
    """Unnormalized Boltzmann weight exp(-beta*U(x, y)) at fixed beta = 0.05."""
    beta = 0.05
    A = (-200., -100., -170., 15.)
    a = (-1, -1, -6.5, 0.7)
    b = (0., 0., 11., 0.6)
    c = (-10, -10, -6.5, 0.7)
    x0 = (1, 0, -0.5, -1)
    y0 = (0, 0.5, 1.5, 1)
    result = 0
    for k in range(4):
        result += A[k]*np.exp(a[k]*(x-x0[k])**2 + b[k]*(x-x0[k])*(y-y0[k]) + c[k]*(y-y0[k])**2)
    return np.exp(-beta*result)


# +
Z = integrate.nquad(exp_beta_e, [[-1.5, 1], [-0.5, 2]])[0]  # [0] retrieves value
print("Z total: ", Z)  # total region

Z_A = integrate.nquad(exp_beta_e, [[-1.5, 0], [0.55, 2]])[0]
print("Z_A: ", Z_A)
print("F_A: ", 1/beta*np.log(Z_A))  # region A

Z_B = integrate.nquad(exp_beta_e, [[-0.8, 1], [-0.5, 0.8]])[0]
print("Z_B: ", Z_B)
print("F_B: ", 1/beta*np.log(Z_B))  # region B

print((1/beta)*np.log(Z_A/Z_B))
# -

# ### Self-test
# We make sure that the two functions give the same energy for a point.

print(compute_Muller_potential(0.05, torch.tensor([(0.75, 0.75), (1, 1), (1, 1)])))
print(compute_Muller_potential_point(0.05, torch.tensor([(0.75, 0.75), (1,1)])))

torch.tensor([(0.75, 0.75), (1, 1), (1, 1)]).size()

# # Compute Free Energy Difference by Direct Counting From TREMC

'''d = os.path.abspath('')
with open('x_record_alpha_{:.3f}.pkl'.format(beta), 'rb') as file_handle:
    data = pickle.load(file_handle)
xp = data['x_record'][:, -1, :]
#xp = x_record[:, -1, :]
plt.scatter(xp[:,0], xp[:,1], s = 0.1)'''  # code for loading saved dataset

# +
# Temperature-replica-exchange Monte Carlo over the full box: 10 replicas with
# inverse temperatures `alphas`, Metropolis moves within each replica and
# neighbor swaps every 10 steps; samples recorded after a 20000-step burn-in.
alpha = 0.05
x1_min, x1_max = -1.5, 1
x2_min, x2_max = -0.5, 2.0
num_reps = 10
alphas = torch.linspace(0.001, alpha, num_reps)
num_steps = 300000
x_record = []
accept_rate = 0

x = torch.stack((x1_min + torch.rand(num_reps)*(x1_max - x1_min),
                 x2_min + torch.rand(num_reps)*(x2_max - x2_min)), dim = -1)
energy = compute_Muller_potential(1.0, x)

for k in range(num_steps):
    if (k + 1) % 50000 == 0:
        print("idx of steps: {}".format(k))

    ## sampling within each replica
    delta_x = torch.normal(0, 1, size = (num_reps, 2))*0.5
    x_p = x + delta_x
    energy_p = compute_Muller_potential(1.0, x_p)

    ## accept based on energy
    accept_prop = torch.exp(-alphas*(energy_p - energy))
    accept_flag = torch.rand(num_reps) < accept_prop

    ## considering the bounding effects
    accept_flag = accept_flag & torch.all(x_p > x_p.new_tensor([x1_min, x2_min]), -1) \
                              & torch.all(x_p < x_p.new_tensor([x1_max, x2_max]), -1)

    x_p[~accept_flag] = x[~accept_flag]
    energy_p[~accept_flag] = energy[~accept_flag]
    x = x_p
    energy = energy_p

    ## calculate overall accept rate (running mean)
    accept_rate = accept_rate + (accept_flag.float() - accept_rate)/(k+1)

    ## exchange between neighboring replicas
    if k % 10 == 0:
        for i in range(1, num_reps):
            accept_prop = torch.exp((alphas[i] - alphas[i-1])*(energy[i] - energy[i-1]))
            accept_flag = torch.rand(1) < accept_prop
            if accept_flag.item():
                # Clone on both reads and writes: rows of x/energy are views, so
                # a plain swap would alias (see the demonstration cell below).
                tmp = x[i].clone()
                x[i] = x[i-1].clone()
                x[i-1] = tmp.clone()

                tmp = energy[i].clone()
                energy[i] = energy[i-1].clone()
                energy[i-1] = tmp.clone()

    if k >= 20000:
        x_record.append(x.clone().numpy())

x_record = np.array(x_record)
# -

# ### Demonstrate the clone() is Needed For Swapping

test = torch.tensor([[1,2], [3,4]])
print(test)
# swap code (intentionally broken: without clone(), the second row is
# overwritten before it can be copied into the first)
tmp = test[1]
test[1] = test[0]
test[0] = test[1]
# shows code after swapping
print(test)

# ### Free energy by direct counting

# +
xp = x_record[:, -1, :]  # replica at the target inverse temperature alpha
basin1_N = 0
basin2_N = 0
for (x1, x2) in xp:
    if (-1.5 < x1 < 0) and (0.55 < x2 < 2):
        basin1_N += 1
    if (-0.8 < x1 < 1) and (-0.5 < x2 < 0.8):
        basin2_N += 1
print(np.log(basin1_N/basin2_N))
print(np.log(basin2_N/basin1_N))
print(-(1/beta)*np.log(basin1_N/basin2_N))

# +
# Compare the sampled marginals of replica 9 against quadrature of the exact PDF.
x_record = np.array(x_record)
xp = x_record[:, 9, :]
x1, x2 = xp[:,0], xp[:,1]

def p_x_9(x):
    """Marginal density of x1 at the default beta (integrate out x2)."""
    p_x = integrate.quad(lambda y: p_x_y(x, y), -0.5, 2)
    return p_x

def p_y_9(y):
    """Marginal density of x2 at the default beta (integrate out x1)."""
    p_y = integrate.quad(lambda x: p_x_y(x, y), -1.5, 1)
    return p_y

plt.hist(x1, density=True, bins = 200)
lines = np.linspace(-1.5, 1, 1000)
x1_curve = [p_x_9(l)[0] for l in lines]
plt.plot(lines, x1_curve)
plt.show()

plt.hist(x2, density=True, bins = 200)
lines = np.linspace(-0.5, 2.0, 1000)
x2_curve = [p_y_9(l)[0] for l in lines]
plt.plot(lines, x2_curve)
plt.show()
# -

print(alphas)

# +
# Same check for replica 8 at its own inverse temperature alphas[8].
x_record = np.array(x_record)
xp = x_record[:, 8, :]
x1, x2 = xp[:,0], xp[:,1]

def p_x_8(x):
    """Marginal density of x1 at beta = alphas[8]."""
    p_x = integrate.quad(lambda y: p_x_y(x, y, beta = float(alphas[8])), -0.5, 2)
    return p_x

def p_y_8(y):
    """Marginal density of x2 at beta = alphas[8]."""
    p_y = integrate.quad(lambda x: p_x_y(x, y, beta = float(alphas[8])), -1.5, 1)
    return p_y

plt.hist(x1, density=True, bins = 200)
lines = np.linspace(-1.5, 1, 1000)
x1_curve = [p_x_8(l)[0] for l in lines]
plt.plot(lines, x1_curve)
plt.show()

plt.hist(x2, density=True, bins = 200)
lines = np.linspace(-0.5, 2.0, 1000)
x2_curve = [p_y_8(l)[0] for l in lines]
plt.plot(lines, x2_curve)
plt.show()
# -

# ## Comparison of PDF for $\beta = 0.05$ and $\beta = 0.0446$

# +
betas = alphas
pdf_dict = dict()
for beta in betas:
    beta = float(beta)
    # FIX (late-binding closure): `beta` must be bound as a default argument,
    # otherwise every stored function would capture the loop variable and use
    # only its final value.
    def p_x_given_beta(x, beta=beta):
        p_x = integrate.quad(lambda y: p_x_y(x, y, beta), -0.5, 2)
        return p_x
    pdf_dict[beta] = p_x_given_beta
# -

# ### $x_1$ Comparison

# +
xaxis = np.linspace(-1.5, 1, 1000)
for beta in betas:
    f = pdf_dict[float(beta)]
    curve = [f(x)[0] for x in xaxis]
    plt.plot(xaxis, curve)
plt.show()
# -

# ## Save Data

with open('samples_beta_%.3f.pkl'%beta, 'wb') as file_handle:
    pickle.dump({'beta_lst': betas, 'x_record': x_record}, file_handle)

# ## Generate Separate Datasets Used in A and B

# ### Region A

# +
# Rerun the same TREMC sampler restricted to the bounding box of basin A.
Abounds = [[-1.5, 0], [0.55, 2]]
alpha = 0.05
x1_min, x1_max = -1.5, 0
x2_min, x2_max = 0.55, 2
num_reps = 10
alphas = torch.linspace(0.001, alpha, num_reps)
num_steps = 320000
x_record = []
accept_rate = 0

x = torch.stack((x1_min + torch.rand(num_reps)*(x1_max - x1_min),
                 x2_min + torch.rand(num_reps)*(x2_max - x2_min)), dim = -1)
energy = compute_Muller_potential(1.0, x)

for k in range(num_steps):
    if (k + 1) % 50000 == 0:
        print("idx of steps: {}".format(k))

    ## sampling within each replica
    delta_x = torch.normal(0, 1, size = (num_reps, 2))*0.5
    x_p = x + delta_x
    energy_p = compute_Muller_potential(1.0, x_p)

    ## accept based on energy
    accept_prop = torch.exp(-alphas*(energy_p - energy))
    accept_flag = torch.rand(num_reps) < accept_prop

    ## considering the bounding effects
    accept_flag = accept_flag & torch.all(x_p > x_p.new_tensor([x1_min, x2_min]), -1) \
                              & torch.all(x_p < x_p.new_tensor([x1_max, x2_max]), -1)

    x_p[~accept_flag] = x[~accept_flag]
    energy_p[~accept_flag] = energy[~accept_flag]
    x = x_p
    energy = energy_p

    ## calculate overall accept rate
    accept_rate = accept_rate + (accept_flag.float() - accept_rate)/(k+1)

    ## exchange between neighboring replicas
    if k % 10 == 0:
        for i in range(1, num_reps):
            accept_prop = torch.exp((alphas[i] - alphas[i-1])*(energy[i] - energy[i-1]))
            accept_flag = torch.rand(1) < accept_prop
            if accept_flag.item():
                tmp = x[i].clone()
                x[i] = x[i-1].clone()
                x[i-1] = tmp.clone()

                tmp = energy[i].clone()
                energy[i] = energy[i-1].clone()
                energy[i-1] = tmp.clone()

    if k >= 20000:
        x_record.append(x.clone().numpy())

x_record_A = np.array(x_record)
# -

with open('Asamples_beta_%.3f.pkl'%beta, 'wb') as file_handle:
    pickle.dump({'beta_lst': alphas, 'x_record': x_record_A}, file_handle)

# ### Region B

# +
# Same sampler restricted to the bounding box of basin B.
Bbounds = [[-0.8, 1], [-0.5, 0.8]]
alpha = 0.05
x1_min, x1_max = -0.8, 1
x2_min, x2_max = -0.5, 0.8
num_reps = 10
alphas = torch.linspace(0.001, alpha, num_reps)
num_steps = 320000
x_record = []
accept_rate = 0

x = torch.stack((x1_min + torch.rand(num_reps)*(x1_max - x1_min),
                 x2_min + torch.rand(num_reps)*(x2_max - x2_min)), dim = -1)
energy = compute_Muller_potential(1.0, x)

for k in range(num_steps):
    if (k + 1) % 50000 == 0:
        print("idx of steps: {}".format(k))

    ## sampling within each replica
    delta_x = torch.normal(0, 1, size = (num_reps, 2))*0.5
    x_p = x + delta_x
    energy_p = compute_Muller_potential(1.0, x_p)

    ## accept based on energy
    accept_prop = torch.exp(-alphas*(energy_p - energy))
    accept_flag = torch.rand(num_reps) < accept_prop

    ## considering the bounding effects
    accept_flag = accept_flag & torch.all(x_p > x_p.new_tensor([x1_min, x2_min]), -1) \
                              & torch.all(x_p < x_p.new_tensor([x1_max, x2_max]), -1)

    x_p[~accept_flag] = x[~accept_flag]
    energy_p[~accept_flag] = energy[~accept_flag]
    x = x_p
    energy = energy_p

    ## calculate overall accept rate
    accept_rate = accept_rate + (accept_flag.float() - accept_rate)/(k+1)

    ## exchange between neighboring replicas
    if k % 10 == 0:
        for i in range(1, num_reps):
            accept_prop = torch.exp((alphas[i] - alphas[i-1])*(energy[i] - energy[i-1]))
            accept_flag = torch.rand(1) < accept_prop
            if accept_flag.item():
                tmp = x[i].clone()
                x[i] = x[i-1].clone()
                x[i-1] = tmp.clone()

                tmp = energy[i].clone()
                energy[i] = energy[i-1].clone()
                energy[i-1] = tmp.clone()

    if k >= 20000:
        x_record.append(x.clone().numpy())

x_record_B = np.array(x_record)
# -

with open('Bsamples_beta_%.3f.pkl'%beta, 'wb') as file_handle:
    pickle.dump({'beta_lst': alphas, 'x_record': x_record_B}, file_handle)
my_experiments/free_energy_on_muller_potential.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:pylatte_env]
#     language: python
#     name: conda-env-pylatte_env-py
# ---

# # This notebook processes single-level CAFE v1 atmospheric daily data for building into climatologies. Only the first 2 years of the forecasts from the period 2003-2015 are used.
# Temporary files are written to `tmp_fldr` and are later combined using `combine_CAFE_fcst_v1_atmos_climatology.ipynb`. Note that this script, and `build_CAFE_fcst_v1_atmos_climatology_multi_level.ipynb` should be run before the combine code is run

# #### IMPORTANT: If adapting this code to save other climatologies, be careful to only include full years, as pyLatte will compute monthly climatologies from the saved daily climatologies

# + code_folding=[0]
# Import packages -----
import pandas as pd
import xarray as xr
import numpy as np
from pylatte import utils
from ipywidgets import FloatProgress
# -

# #### Initialise

# + code_folding=[0]
# Location of forecast data -----
fcst_folder = '/OSM/CBR/OA_DCFP/data/model_output/CAFE/forecasts/v1/'
fcst_filename = 'atmos_daily*'

# Mapping between the CAFE model variable names and the standardised
# pyLatte names used in the saved climatology files -----
fields = pd.DataFrame(
    {'name_CAFE': ['lwflx', 'shflx', 'tau_x', 'tau_y', 't_ref', 'q_ref',
                   'u_ref', 'v_ref', 't_ref_min', 't_ref_max', 't_surf',
                   'ps', 'slp', 'h500', 'precip', 'lwdn_sfc', 'lwup_sfc',
                   'olr', 'swdn_sfc', 'swup_sfc', 'swup_toa',
                   'high_cld_amt', 'low_cld_amt', 'mid_cld_amt',
                   'tot_cld_amt'],
     'name_std':  ['lwf', 'shf', 'tau_x', 'tau_y', 't_ref', 'q_ref',
                   'u_ref', 'v_ref', 't_ref_min', 't_ref_max', 't_s',
                   'p_s', 'slp', 'h500', 'precip', 'lwf_dn_s', 'lwf_up_s',
                   'olwr', 'swf_dn_s', 'swf_up_s', 'swf_up_toa',
                   'high_cld_amt', 'low_cld_amt', 'mid_cld_amt',
                   'tot_cld_amt']}
)
name_dict = fields.set_index('name_CAFE').to_dict()['name_std']
fields
# -

# #### Load one 366 day long year to provide time array

# + code_folding=[0]
# Use 2016, mn1, ensemble 1 -----
path = fcst_folder + '/yr2016/mn1/OUTPUT.1/' + fcst_filename + '.nc'
dataset = xr.open_mfdataset(path, autoclose=True)
time_use = dataset.time[:366]
# -

# #### Save each year and variable separately due to memory considerations

# Temporary folder location -----
tmp_fldr = '/OSM/CBR/OA_DCFP/data/intermediate_products/pylatte_climatologies/tmp/'

# + code_folding=[]
# Loop over all forecast years and variables -----
years = range(2003, 2016)
months = range(1, 13)
ensembles = range(1, 12)

for year in years:
    print(year)
    print('----------')

    for idx, variable in enumerate(fields['name_CAFE']):
        print(variable)

        # First see if file already exists -----
        savename = ('cafe.fcst.v1.atmos.' + fields['name_std'][idx] + '.' +
                    str(year) + '.clim.nc')
        try:
            xr.open_dataset(tmp_fldr + savename, autoclose=True)
        except OSError:
            # File does not exist yet -> build the climatology for this
            # year/variable. (A bare `except:` here would also hide real
            # errors such as KeyboardInterrupt; OSError matches the
            # missing-file case only.)
            fcst_list = []
            for month in months:
                ens_list = []
                ens = []
                empty = True
                for ie, ensemble in enumerate(ensembles):
                    path = (fcst_folder + '/yr' + str(year) + '/mn' + str(month) +
                            '/OUTPUT.' + str(ensemble) + '/' + fcst_filename + '.nc')

                    # Try to stack ensembles into a list -----
                    try:
                        dataset = xr.open_mfdataset(path, autoclose=True)[variable]

                        # Truncate to 2 year forecasts -----
                        n_trunc = min(731, len(dataset.time))
                        dataset = dataset.isel(time=range(n_trunc))

                        # Standardise staggered-grid dimension names -----
                        if 'latb' in dataset.dims:
                            dataset = dataset.rename({'latb': 'lat_2',
                                                      'lonb': 'lon_2'})

                        ens_list.append(dataset.rename(fields['name_std'][idx]))
                        ens.append(ie + 1)
                        empty = False
                    except OSError:
                        # Forecast member does not exist on disk -----
                        pass

                # Concatenate ensembles -----
                if not empty:
                    ens_object = xr.concat(ens_list, dim='ensemble')
                    ens_object['ensemble'] = ens
                    # Stack concatenated ensembles into a list for each
                    # month in a year -----
                    fcst_list.append(ens_object)

            # Concatenate all months within year -----
            ds = xr.concat(fcst_list, dim='time')

            # Rechunk for chunksizes of at least 1,000,000 elements -----
            ds = utils.prune(
                ds.chunk(chunks={'ensemble': len(ds.ensemble),
                                 'time': len(ds.time)}).squeeze())

            # Make month_day array of month-day strings (e.g. '02-29') -----
            m = np.array([str(i).zfill(2) + '-' for i in ds.time.dt.month.values])
            d = np.array([str(i).zfill(2) for i in ds.time.dt.day.values])
            md = np.core.defchararray.add(m, d)

            # Replace time array with month_day array and groupby -----
            ds['time'] = md
            ds_clim = ds.groupby('time').mean(dim=['time', 'ensemble'],
                                              keep_attrs=True)

            # Fill time with presaved time -----
            ds_clim['time'] = time_use
            ds_clim.time.attrs['long_name'] = 'time'
            ds_clim.time.attrs['cartesian_axis'] = 'T'
            ds_clim.time.attrs['calendar_type'] = 'JULIAN'
            ds_clim.time.attrs['bounds'] = 'time_bounds'

            # Save and delete -----
            with utils.timer():
                ds_clim.to_netcdf(
                    path=tmp_fldr + savename, mode='w',
                    encoding={'time': {'dtype': 'float',
                                       'calendar': 'JULIAN',
                                       'units': 'days since 0001-01-01 00:00:00'}})
            del ds, ds_clim
# -
bin/support/old/build_CAFE_fcst_v1_atmos_climatology_single_level.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## The 2D diffusion equation on GPUs, in minutes
#
# This notebook implements, for a given initial density profile, a solver for the 2D diffusion equation using an explicit finite difference scheme with 'do-nothing' conditions on the boundaries (and hence will not provide a reasonable solution once the profile has diffused to a boundary).

# A few imports used throughout the notebook
import numpy as np
from devito import *
import matplotlib.pyplot as plt
# %matplotlib inline

# ### Solver implementation
#
# First, build a Cartesian `Grid` describing the computational domain:

nx, ny = 100, 100
grid = Grid(shape=(nx, ny))

# The density lives in a `TimeFunction` -- a scalar, discrete function holding space- and time-varying data. The diffusion coefficient is a `Constant`.

u = TimeFunction(name='u', grid=grid, space_order=2, save=200)
c = Constant(name='c')

# Write down the 2D diffusion equation:

eqn = Eq(u.dt, c * u.laplace)

# Derive the time-marching update from the PDE -- at each timestep we solve for `u` at `t+1`, written `u.forward` in Devito:

step = Eq(u.forward, solve(eqn, u.forward))

# Now let Devito generate code for the solver:

op = Operator([step])

# Before running the `Operator` we set the initial density: a "ring" of constant density centred in the domain.

x_coords = np.linspace(0., 1., nx, dtype=np.float32)
y_coords = np.linspace(0., 1., ny, dtype=np.float32)
xx, yy = np.meshgrid(x_coords, y_coords)
r = (xx - .5)**2. + (yy - .5)**2.

# Inserting the ring
u.data[0, np.logical_and(.05 <= r, r <= .1)] = 1.

# Time to execute the `Operator`. We use a diffusion coefficient of 0.5 and a carefully chosen `dt`. Unless told otherwise, the simulation advances 199 timesteps, as dictated by the definition of `u` (i.e. `save=200` holds the initial data plus 199 new timesteps).

# + tags=["nbval-ignore-output"]
stats = op.apply(dt=5e-05, c=0.5)
# -

# ### Initial conditions and snapshots every 40 timesteps

# + tags=["nbval-ignore-output"]
plt.rcParams['figure.figsize'] = (20, 20)
for panel in range(1, 6):
    plt.subplot(1, 6, panel)
    plt.imshow(u.data[(panel - 1) * 40])
plt.show()
# -

# ### GPU-parallel solver
#
# Generating a GPU implementation of the same solver is straightforward!

# +
# Instead of `platform=nvidiaX`, you may run your Python code with
# the environment variable `DEVITO_PLATFORM=nvidiaX`

# We also need the `gpu-fit` option to tell Devito that `u` will definitely
# fit in the GPU memory. This is necessary every time a TimeFunction with
# `save != None` is used. Otherwise, Devito could generate code such that
# `u` gets streamed between the CPU and the GPU, but for this advanced
# feature you will need `devitopro`.
op = Operator([step], platform='nvidiaX', opt=('advanced', {'gpu-fit': u}))
# -

# **That's it!** It runs exactly as before

# +
# Uncomment and run only if Devito was installed with GPU support.
# stats = op.apply(dt=5e-05, c=0.5)
# -

# We should see a big performance difference between the two runs. Inspecting `op` shows what Devito generated for the GPU

print(op)
examples/performance/01_gpu.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # %matplotlib notebook
# # %matplotlib inline

import numpy as np
import pickle
np.random.seed(123)
import collections, copy, pickle
from importlib import reload
from dateutil.parser import parse
import scipy.linalg
# FIX: ttest_ind below requires scipy.stats to be imported explicitly;
# importing scipy.linalg alone does not load the stats submodule.
import scipy.stats
import pandas as pd
import sklearn
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 14
# rcParams['text.usetex'] = True
from IPython.display import HTML

# +
from mlxtend.frequent_patterns import apriori
import sklearn.ensemble
import sklearn.svm
import sklearn.tree
import sklearn.linear_model
import sklearn.neighbors
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
import sklearn.metrics
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
# -

import util.plot
import util.string
# FIX: util.data.select_most_common is called below but util.data was never
# imported.
import util.data

# +
# https://github.com/pbloem/machine-learning/blob/master/worksheets/Worksheet%202%2C%20Sklearn.ipynb
# -

# data = pd.read_csv('ODI-2019-clean.csv', sep=';')
fn = 'ODI-2019-clean.pkl'
# load (old) data from disk
with open(fn, 'rb') as f:
    data = pickle.load(f)

# +
# data.head()
# -

# ## Categorical models
# https://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing
#
# Discretization
#
# https://scikit-learn.org/stable/modules/preprocessing.html#preprocessing-categorical-features
#
# https://scikit-learn.org/stable/auto_examples/preprocessing/plot_discretization_strategies.html#sphx-glr-auto-examples-preprocessing-plot-discretization-strategies-py
#
# Strategies:
# - 'uniform': The discretization is uniform in each feature, which means that the bin widths are constant in each dimension.
# - 'quantile': The discretization is done on the quantiled values, which means that each bin has approximately the same number of samples.
#     - this causes outliers to be grouped together
# - 'kmeans': The discretization is based on the centroids of a KMeans clustering procedure.


# Simple namespace holding the fitted discretizers/encoders so they can be
# reused to transform new samples later.
class Encoders:
    pass


E = Encoders()
E.discretizers = {}
E.encoders = {}

# Collapse rare study programmes into a single 'Other' category -----
key = 'Other'
# reload(util.data)
most_common = util.data.select_most_common(data.Programme, n=8, key=key)
value = np.array(list(most_common.values()))
# note that pd.where different than np.where
keys = most_common.keys()
data.Programme.where(data.Programme.isin(keys), key, inplace=True)


def discretize(data, k, n_bins=5):
    """Discretize numerical column `k` of `data` in-place into `n_bins`
    k-means bins, imputing NaNs with the column median. The fitted
    discretizer is stored in E.discretizers[k] and a LaTeX summary of the
    bin edges is printed."""
    X = data[k]
    avg = np.nanmedian(X)
    X = np.where(np.isnan(X), avg, X)
    X = X.reshape(-1, 1)
    bins = np.repeat(n_bins, X.shape[1])  # e.g. [5,3] for 2 features
    # encode to integers
    est = preprocessing.KBinsDiscretizer(n_bins=bins, encode='ordinal',
                                         strategy='kmeans')
    est.fit(X)
    data[k + ' bin'] = est.transform(X)
    E.discretizers[k] = est
    s = ''
    for st in [round(a, 3) for a in est.bin_edges_[0]]:
        if k == 'Year':
            st = int(round(st))
        s += str(st) + ', '
    print('%s: $\\{%s\\}$\n' % (k, s[:-2]))


numerical = ['Year', 'Money', 'Neighbours', 'Stress level', 'Bedtime']
for k in numerical:
    discretize(data, k)


# +
def init_encoder(columns):
    """Fit a one-hot encoder on `columns`, store it in E.encoders['x'] and
    return the encoded (sparse) feature matrix."""
    E.encoders['x'] = preprocessing.OneHotEncoder()
    enc = E.encoders['x']
    enc.fit(columns)
    return enc.transform(columns)


categorical = ['ML', 'IR', 'Stat', 'DB', 'Gender', 'Chocolate', 'Stand Up',
               'Programme']
y = 'ML'
categorical.remove(y)
keys = [k + ' bin' for k in numerical] + categorical
X_enc = init_encoder(data[keys])
E.encoders['x'].categories_


# +
def init_label_encoder(column):
    """Fit a label encoder on the target `column`, store it in
    E.encoders['y'] and return the encoded labels."""
    E.encoders['y'] = preprocessing.LabelEncoder()
    enc = E.encoders['y']
    enc.fit(column)
    return enc.transform(column)


Y_enc = init_label_encoder(data[y])
E.encoders['y'].classes_
# -

X_enc.shape, Y_enc.shape

x_train, x_test, y_train, y_test = train_test_split(X_enc, Y_enc,
                                                    test_size=0.5)
x_train.shape, y_train.shape

np.round(np.sqrt([31.301, 24.298, 81.092, 28.444]), 3)


def cross_validation(model_func, x_train, y_train, k=None, results=None, v=0):
    """Run 5-fold cross-validation of `model_func` on the training data.

    Scores (accuracy and ROC AUC per fold) are stored in `results[k]` when
    both `k` and `results` are given; verbose accuracy stats are printed
    when `v` is truthy."""
    # Train for 5 folds, returing ROC AUC. You can also try 'accuracy' as a scorer
    n_folds = 5
    scores_acc = cross_val_score(model_func, x_train, y_train, cv=n_folds,
                                 scoring='accuracy')
    scores_roc = cross_val_score(model_func, x_train, y_train, cv=n_folds,
                                 scoring='roc_auc')
    if results is not None:
        results[k] = (scores_acc, scores_roc)
    if v:
        print('scores per fold ', scores_acc)
        print('  mean score    ', np.mean(scores_acc))
        print('  standard dev. ', np.std(scores_acc))


# +
models = {
    'Logit': sklearn.linear_model.LogisticRegression(solver='liblinear',
                                                     multi_class='ovr'),
    # 'SGD': sklearn.linear_model.SGDClassifier(loss="hinge", penalty="l2", max_iter=1000, tol=1e-3),
    # 'SVC auto': sklearn.svm.SVC(gamma='auto'),
    'SVC': sklearn.svm.SVC(kernel='linear'),
    # 'SVC polynomial': sklearn.svm.SVC(kernel='poly', gamma='auto', degree=4),
    'Decision Tree': sklearn.tree.DecisionTreeClassifier(),
    'KNN 5': sklearn.neighbors.KNeighborsClassifier(n_neighbors=5),
    # 'KNN 10': sklearn.neighbors.KNeighborsClassifier(n_neighbors=10),
    'Ensemble Random Forest': sklearn.ensemble.RandomForestClassifier(n_estimators=100),
    # 'Ensemble Bagging': sklearn.ensemble.BaggingClassifier(n_estimators=100)
}

results = {}
for k, m in models.items():
    print(k)
    cross_validation(m, x_train, y_train, k, results)
# -

# Print a LaTeX results table and report the model with the best mean
# accuracy -----
print('Model & Mean Acc & Std Acc & Mean ROC & Std ROC \\\\ \n\\hline')
best_k = ''
best_mean = 0
for k, (scores_acc, scores_roc) in results.items():
    if np.mean(scores_acc) > best_mean:
        best_mean = np.mean(scores_acc)
        best_k = k
    print('%s & %0.4f & %0.4f & %0.4f & %0.4f\\\\' %
          (k, np.mean(scores_acc), np.std(scores_acc),
           np.mean(scores_roc), np.std(scores_roc)))
print('\nbest acc:', best_k, round(best_mean, 4))

# Report the model with the best mean ROC AUC. (FIX: the original cell also
# printed a LaTeX table header here without any table rows following it.)
best_k = ''
best_mean = 0
for k, (scores_acc, scores_roc) in results.items():
    if np.mean(scores_roc) > best_mean:
        best_mean = np.mean(scores_roc)
        best_k = k
print('\nbest roc:', best_k, round(best_mean, 4))

# +
# reinit models
models = {
    'Logit': sklearn.linear_model.LogisticRegression(solver='liblinear',
                                                     multi_class='ovr'),
    # 'SGD': sklearn.linear_model.SGDClassifier(loss="hinge", penalty="l2", max_iter=1000, tol=1e-3),
    # 'SVC auto': sklearn.svm.SVC(gamma='auto'),
    'SVC': sklearn.svm.SVC(kernel='linear'),
    # 'SVC polynomial': sklearn.svm.SVC(kernel='poly', gamma='auto', degree=4),
    'Decision Tree': sklearn.tree.DecisionTreeClassifier(),
    'KNN 5': sklearn.neighbors.KNeighborsClassifier(n_neighbors=5),
    # 'KNN 10': sklearn.neighbors.KNeighborsClassifier(n_neighbors=10),
    'Ensemble Random Forest': sklearn.ensemble.RandomForestClassifier(n_estimators=100),
    # 'Ensemble Bagging': sklearn.ensemble.BaggingClassifier(n_estimators=100)
}

# train best model on whole dataset
model = models[best_k]
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
for v in [sklearn.metrics.accuracy_score(y_test, y_pred),
          sklearn.metrics.roc_auc_score(y_test, y_pred)]:
    print(round(v, 4))
# -

best_k = 'Ensemble Random Forest'
model = models[best_k]

# Pairwise t-tests of each model's fold scores against the chosen best model
for k, v in results.items():
    if k != best_k:
        i = 0
        s, p = scipy.stats.ttest_ind(v[i], results[best_k][i])
        print(k, s, p, p < 0.05)

# Both the acc and roc are not always significant

# +
# Human-readable names for the one-hot encoded feature columns -----
subkeys = []
for i, k in enumerate(keys):
    for v in E.encoders['x'].categories_[i]:
        subkeys.append(k + '_' + str(v))

assert len(subkeys) == pd.DataFrame(X_enc.toarray()).shape[1]
# -

# model.fit(X_enc, Y_enc)
indices = np.argsort(model.feature_importances_)
indices = np.flip(indices)
n = 3
print('best features: indices, values')
indices[:n], model.feature_importances_[indices[:n]]

for i in indices[:3]:
    vec = np.zeros(X_enc.shape[1])
    vec[i] = 1
    print(subkeys[i])

# # Association rules
#

# FIX: pd.SparseDataFrame was removed in pandas 1.0; use a dense boolean
# frame instead (equivalent input for mlxtend's apriori).
data_enc = pd.DataFrame(X_enc.toarray(), columns=subkeys, dtype=bool)
data_enc.head()

# http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/
frequent_itemsets = apriori(data_enc, min_support=0.6, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x))
frequent_itemsets

frequent_itemsets[(frequent_itemsets['length'] >= 3) &
                  (frequent_itemsets['support'] >= 0.6)]

frequent_itemsets[(frequent_itemsets['length'] >= 2) &
                  (frequent_itemsets['support'] >= 0.7)]
A1/Task-1-Predictions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dcpatton/Structured-Data/blob/main/target_encoding_cms_claims.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="4lD2FpLu1k7Q"
# # Objective
# This notebook is a proof of concept. It shows an example of Target Encoding categorical data.

# + id="tTUDyFIWuFSl"
import tensorflow as tf
import pandas as pd
pd.set_option('display.max_rows', 999)
pd.set_option('max_info_columns', 200)
import numpy as np
import random

# Fix all RNG seeds for reproducibility
seed = 52
tf.random.set_seed(seed)
random.seed(seed)
tf.__version__

# + [markdown] id="qoU9uh6quLxF"
# # Get the Data
#
# The data can be downloaded from https://www.cms.gov/Research-Statistics-Data-and-Systems/Downloadable-Public-Use-Files/SynPUFs/DE_Syn_PUF. Here I retrieve it from my GCP storage.

# + id="TTPdpbVpubUr"
from google.colab import auth
auth.authenticate_user()

# + id="b8qAY3CEuoGW"
# !curl https://sdk.cloud.google.com >/dev/null

# + id="9nMKkv7Rur56"
# !gcloud init --skip-diagnostics --account='<EMAIL>' --project='cms-de1'

# + id="XO-Te8HpurZm"
# !gsutil cp gs://de-synpuf/*.zip .

# + [markdown] id="2qMeNzARumSr"
# # EDA
#
# Examining the inpatient claims and beneficiary summary files.

# + id="R7Fu9c_pyr9T"
# !unzip 176549_DE1_0_2008_to_2010_Inpatient_Claims_Sample_1.zip

# + id="nYdviYhyur2P"
claims_df = pd.read_csv('DE1_0_2008_to_2010_Inpatient_Claims_Sample_1.csv',
                        parse_dates=['CLM_FROM_DT', 'CLM_THRU_DT'])

# + id="hh9M-PrqvmB4"
claims_df.head()

# + id="8fcAqm9eJRRk"
claims_df.DESYNPUF_ID.nunique()

# + id="YMmJG8afwAHv"
claims_df.info()

# + id="MJVQ8c-kwAPQ"
claims_df.isna().sum()

# + id="tgzyp-LUFdsr"
# !unzip 176541_DE1_0_2008_Beneficiary_Summary_File_Sample_1.zip
# !unzip 176600_DE1_0_2009_Beneficiary_Summary_File_Sample_1.zip
# !unzip 176537_DE1_0_2010_Beneficiary_Summary_File_Sample_20.zip

# + id="DWA0LuK0FPE_"
summary_2008_df = pd.read_csv('DE1_0_2008_Beneficiary_Summary_File_Sample_1.csv',
                              parse_dates=['BENE_BIRTH_DT'])
summary_2009_df = pd.read_csv('DE1_0_2009_Beneficiary_Summary_File_Sample_1.csv',
                              parse_dates=['BENE_BIRTH_DT'])
summary_2010_df = pd.read_csv('DE1_0_2010_Beneficiary_Summary_File_Sample_20.csv',
                              parse_dates=['BENE_BIRTH_DT'])

# + id="kfA8hLJ0FlP6"
summary_2008_df.head()

# + id="ARjhREFlFuM5"
summary_2008_df.info()

# + id="SKh-87n8FuVb"
summary_2008_df.isna().sum()

# + id="0hfuaaXJIxT1"
summary_2008_df.shape

# + id="qcBpdZIpI36-"
summary_2008_df.DESYNPUF_ID.nunique()

# + [markdown] id="oovAWjKSutR5"
# Combining the beneficiary data into a single dataframe.

# + id="WEnr5G4LerDK"
# FIX: the original merged summary_2009_df with itself, silently dropping
# the 2008 beneficiary file. Merge 2008 with 2009, then with 2010.
summary_df = pd.merge(summary_2008_df, summary_2009_df, how='outer')
summary_df = pd.merge(summary_df, summary_2010_df, how='outer')

# + id="t_h4ysXxRCAE"
summary_df.shape

# + id="ODG3yBO0fKui"
summary_df.DESYNPUF_ID.nunique()

# + id="vsSLFn4cdepy"
summary_df.DESYNPUF_ID.value_counts(ascending=False)

# + id="6BFt90wD3mjT"
summary_df.info()

# + [markdown] id="3n5ST2SGSMhF"
# # Preprocess Data

# + id="TZvhTb9dSLV_"
claims_df.shape

# + id="caqP9GUwXsZt"
claims_df['ADMTNG_ICD9_DGNS_CD'].isna().sum()

# + id="F8uQstumWcW2"
# drop all lines with SEGMENT=2 because they contain no diagnosis codes nor procedure codes
claims_df = claims_df[claims_df['SEGMENT'] == 1]

# + id="OIhnAl98TCzC"
claims_df['ADMTNG_ICD9_DGNS_CD'].isna().sum()

# + id="3eL7InGFXto5"
# set missing admitting diagnosis codes to first diagnosis code
missing_df = claims_df[claims_df['ADMTNG_ICD9_DGNS_CD'].isna()]
for idx, row in missing_df.iterrows():
    claims_df.at[idx, 'ADMTNG_ICD9_DGNS_CD'] = row.ICD9_DGNS_CD_1

# + id="3DALIov8aJaa"
claims_df['ADMTNG_ICD9_DGNS_CD'].isna().sum()

# + id="e4qg1xS-UKYc"
claims_df[claims_df['ADMTNG_ICD9_DGNS_CD'].isna()]

# + id="Q3iwGqt7ntMZ"
claims_df.at[26530, 'ADMTNG_ICD9_DGNS_CD'] = '8020'  # set it to ICD9_DGNS_CD_2 value

# + id="VQVhxtdfoFOA"
claims_df['ADMTNG_ICD9_DGNS_CD'].isna().sum()

# + id="5scFBi2gsYue"
claims_df.CLM_ADMSN_DT.isna().sum()

# + id="FS3p6ECqsjBB"
claims_df.NCH_BENE_DSCHRG_DT.isna().sum()

# + id="ocHcwEUYszWK"
claims_df.CLM_UTLZTN_DAY_CNT.isna().sum()

# + id="wtYec0jxs5A8"
claims_df.CLM_UTLZTN_DAY_CNT.value_counts()

# + id="lAzQ_lRkuLR9"
claims_sub_df = claims_df[['DESYNPUF_ID', 'ADMTNG_ICD9_DGNS_CD',
                           'CLM_UTLZTN_DAY_CNT']]
claims_sub_df.isna().sum()

# + id="JY9F1zTRxH7m"
summary_sub_df = summary_df[['DESYNPUF_ID', 'BENE_BIRTH_DT',
                             'BENE_SEX_IDENT_CD', 'SP_ALZHDMTA', 'SP_CHF',
                             'SP_CHRNKIDN', 'SP_CNCR', 'SP_COPD',
                             'SP_DEPRESSN', 'SP_DIABETES', 'SP_ISCHMCHT',
                             'SP_OSTEOPRS', 'SP_RA_OA', 'SP_STRKETIA']]
summary_sub_df.isna().sum()

# + id="CvjZemdnyLGU"
data_df = claims_sub_df.merge(summary_sub_df, on='DESYNPUF_ID')
data_df.info()

# + id="f0239PxbBSfG"
# Convert date of birth into an (approximate) age feature
data_df['year'] = pd.DatetimeIndex(data_df['BENE_BIRTH_DT']).year
data_df['age'] = 2020 - data_df['year']
data_df.drop(['year', 'BENE_BIRTH_DT'], axis='columns', inplace=True)

# + [markdown] id="RlAUKR6RvIms"
# CLM_UTLZTN_DAY_CNT will be the target to predict and ADMTNG_ICD9_DGNS_CD will be the high cardinality categorical column we encode. Renaming them for convenience.

# + id="H-1x_nfhznh1"
data_df = data_df.rename(columns={"CLM_UTLZTN_DAY_CNT": "target",
                                  "ADMTNG_ICD9_DGNS_CD": "diagnosis"})
data_df.head()

# + id="Oz_SNxtDzIek"
# Note the high cardinality
data_df.diagnosis.nunique()

# + [markdown] id="BALmKHAQPqe3"
# We will target encode these 2316 unique values in the diagnosis column.

# + id="0_aF0sD4Ggd-"
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(data_df, test_size=0.2, random_state=seed)

# + id="URBxLnVPHYkL"
y_train = train_df.target.values
y_test = test_df.target.values

# + id="ru0E6YRcGJIN"
# !pip install category_encoders -q

# + id="N8KWgEoEFvTA"
from category_encoders import TargetEncoder
encoder = TargetEncoder(cols=['diagnosis'])
# Fit on the training split only, so no target information leaks from the
# test set into the encoding
train_df = encoder.fit_transform(train_df, y_train)
test_df = encoder.transform(test_df)

# + id="O1D4CyboPCal"
train_df.sample(10)

# + id="JnT42U3MH4vJ"
x_train = train_df.drop(['target', 'DESYNPUF_ID'], axis='columns').values
x_test = test_df.drop(['target', 'DESYNPUF_ID'], axis='columns').values

# + [markdown] id="tWNwEJ3oIZPb"
# # The Model

# + id="E86Ys9UGIXo8"
tf.keras.backend.clear_session()
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input, Model

# FIX: shape must be a 1-tuple; `(x_train.shape[1])` is just an int.
inp = Input(shape=(x_train.shape[1],))
x = Dense(512, activation='relu')(inp)
x = Dense(256, activation='relu')(x)
x = Dense(128, activation='relu')(x)
x = Dense(64, activation='relu')(x)
out = Dense(1)(x)
model = Model(inputs=[inp], outputs=[out])
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()

# + [markdown] id="suKSU8XYvi-s"
# # Training
#
# Since the data is synthetic we should not expect any kind of meaning in our results. But just running 10 epochs to demonstrate how to accomplish this with real data.

# + id="DYUTVPhoJT6v"
history = model.fit(x_train, y_train, epochs=10,
                    validation_data=(x_test, y_test))
target_encoding_cms_claims.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Trends # # ### Trend 1 # - Capomulin and Ramicane had the largest reduction in Tumor Volume and the highest survival rate # # ### Trend 2 # - Capomulin and Ramican also had the smallest increase of Metastatic Sites however Stelasyn seemed to have a comparable increase as well # # ### Trend 3 # - Zoniferol seems to possibly stabilizing Survival Rate after day 35 and Propriva might reduce Metastatic sites after 40 days # # + # Dependencies and Setup # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np # Hide warning messages in notebook import warnings warnings.filterwarnings('ignore') # File to Load (Remember to Change These) mouse_drug_data_to_load = "Input Files/02-Homework_05-Matplotlib_Pymaceuticals_data_mouse_drug_data.csv" clinical_trial_data_to_load = "Input Files/02-Homework_05-Matplotlib_Pymaceuticals_data_clinicaltrial_data.csv" # Read the Mouse and Drug Data and the Clinical Trial Data mouse_drug = pd.read_csv(mouse_drug_data_to_load) trial_data = pd.read_csv(clinical_trial_data_to_load) # Combine the data into a single dataset combined_data = pd.merge(trial_data,mouse_drug,how='left', on='Mouse ID') # Display the data table for preview combined_data.head() # - # ## Tumor Response to Treatment # + # Store the Mean Tumor Volume Data Grouped by Drug and Timepoint means = combined_data.groupby(['Drug','Timepoint']).mean().reset_index() # Convert to DataFrame mean_tumor = means[['Drug','Timepoint','Tumor Volume (mm3)']] # Preview DataFrame mean_tumor.head(15) # + # Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint std_error = combined_data.groupby(['Drug','Timepoint']).sem().reset_index() # Convert to DataFrame std_error = std_error.rename(columns={"Tumor Volume (mm3)":"Tumor 
# NOTE(review): this chunk begins mid-statement in the original file.  The two
# lines below reconstruct the truncated step that builds `std_error` (a
# .sem()/.rename over `combined_data`, mirroring the metastatic cell further
# down) -- TODO confirm against the full notebook.
std_error = combined_data.groupby(['Drug', 'Timepoint']).sem().reset_index()
std_error = std_error.rename(
    columns={"Tumor Volume (mm3)": "Tumor Volume Std Error"})
std_error_tumor = std_error[['Drug', 'Timepoint', 'Tumor Volume Std Error']]

# Preview DataFrame
std_error_tumor.head(15)

# Minor data munging: one column per drug, indexed by timepoint.
mean_tumor_by_drug = mean_tumor.pivot(columns='Drug', index='Timepoint',
                                      values='Tumor Volume (mm3)')
mean_tumor_by_drug.head(10)

# Re-format the tumor volume standard errors the same way.
std_error_tumor_by_drug = std_error_tumor.pivot(
    columns='Drug', index='Timepoint', values='Tumor Volume Std Error')
std_error_tumor_by_drug.head(15)

# One (drug, matplotlib format string) pair per plotted treatment.  Defining
# the pairs once removes the copy-pasted errorbar calls that caused the
# original bug where Infubinol's means were paired with Ceftamin's errors.
DRUG_FORMATS = [('Capomulin', 'ro:'), ('Infubinol', 'bo:'),
                ('Ketapril', 'go:'), ('Placebo', 'co:'),
                ('Propriva', 'mo:'), ('Ramicane', 'yo:'),
                ('Stelasyn', 'ko:'), ('Zoniferol', 'o:')]
TIMEPOINT_LABELS = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]

# + Generate the tumor response plot (with error bars)
x_axis = np.arange(0, len(mean_tumor_by_drug), 1)
fig, plot = plt.subplots(figsize=(8, 6))
for drug, fmt in DRUG_FORMATS:
    # BUG FIX: each drug now uses its OWN standard errors (the original
    # plotted Infubinol with Ceftamin's errors).  `label=` makes the legend
    # below actually show entries.
    plot.errorbar(x_axis, mean_tumor_by_drug[drug],
                  std_error_tumor_by_drug[drug], fmt=fmt, label=drug)
plot.set_xlim(-.5, len(mean_tumor_by_drug) - .5)
plt.xticks(x_axis, TIMEPOINT_LABELS)
plot.set_xlabel("Treatment Duration (Days)")
plot.set_ylabel("Tumor Volume (mm3)")
plt.title("Tumor Response to Treatment")
plt.grid()
plt.legend(loc="best")

# Save the Figure
plt.savefig("Figures/Tumor_response_to_treatment.png")
# -

# ## Metastatic Response to Treatment

# + Mean metastatic site counts grouped by drug and timepoint
met_means = combined_data.groupby(['Drug', 'Timepoint']).mean().reset_index()
met_mean = met_means[['Drug', 'Timepoint', 'Metastatic Sites']]
met_mean.head(15)
# -

# + Standard errors of the metastatic site counts
met_std_error = combined_data.groupby(['Drug', 'Timepoint']).sem().reset_index()
met_std_error = met_std_error.rename(
    columns={"Metastatic Sites": "Metastatic Sites Std Error"})
std_error_met = met_std_error[['Drug', 'Timepoint',
                               'Metastatic Sites Std Error']]
std_error_met.head(15)
# -

# Re-format the mean metastatic sites data frame.
met_mean_by_drug = met_mean.pivot(columns='Drug', index='Timepoint',
                                  values='Metastatic Sites')
met_mean_by_drug.head()

# Re-format the metastatic site standard errors.
std_error_met_by_drug = std_error_met.pivot(
    columns='Drug', index='Timepoint', values='Metastatic Sites Std Error')
std_error_met_by_drug.head(15)

# + Generate the metastatic spread plot (with error bars)
x_axis2 = np.arange(0, len(met_mean_by_drug), 1)
fig, plot = plt.subplots(figsize=(8, 6))
for drug, fmt in DRUG_FORMATS:
    # BUG FIX: same Infubinol/Ceftamin mix-up as the tumor plot, fixed here.
    plot.errorbar(x_axis2, met_mean_by_drug[drug],
                  std_error_met_by_drug[drug], fmt=fmt, label=drug)
plot.set_xlim(-.5, len(met_mean_by_drug) - .5)
# BUG FIX: ticks now use x_axis2 (the original reused x_axis; same values,
# but only by coincidence).
plt.xticks(x_axis2, TIMEPOINT_LABELS)
plot.set_xlabel("Treatment Duration (Days)")
plot.set_ylabel("Metastatic Sites")  # typo fix: was "Metatstatic Sites"
plt.title("Metastatic Spread During Treatment")
plt.grid()
plt.legend(loc="best")

# Save the Figure
plt.savefig("Figures/metastatic_spread.png")
# -

# ## Survival Rates

# + Count surviving mice per drug/timepoint (any fully-populated column works
# as the count metric; 'Mouse ID' is used here).
mice_count = combined_data.groupby(['Drug', 'Timepoint']).count().reset_index()
mice_count = mice_count.rename(columns={"Mouse ID": "Mouse Count"})
mice_count = mice_count[['Drug', 'Timepoint', 'Mouse Count']]
mice_count.head(15)
# -

# + Re-format the mouse count data frame
mouse_count_by_drug = mice_count.pivot(columns='Drug', index='Timepoint',
                                       values='Mouse Count')
mouse_count_by_drug.head(15)
# -

# + Convert raw counts to percent of each drug's starting (timepoint 0)
# cohort.  Vectorised division replaces the original nested iloc loops.
mouse_count_by_drug = 100 * mouse_count_by_drug / mouse_count_by_drug.iloc[0]

# Generate the survival plot (accounting for percentages)
x_axis3 = np.arange(0, len(mouse_count_by_drug), 1)
fig, plot = plt.subplots(figsize=(8, 6))
for drug, fmt in DRUG_FORMATS:
    # BUG FIX: the original plotted against x_axis2 instead of x_axis3 and
    # then called set_xlim/set_xlabel on the `plot` axes left over from the
    # PREVIOUS figure, so those settings never reached this chart.
    plot.plot(x_axis3, mouse_count_by_drug[drug], fmt, label=drug)
plot.set_xlim(-.5, len(mouse_count_by_drug) - .5)
plt.xticks(x_axis3, TIMEPOINT_LABELS)
plot.set_xlabel("Treatment Duration (Days)")
plot.set_ylabel("Survival Rate (%)")
plt.title("Survival Percent During Treatment")  # typo fix: was "Survial"
plt.grid()
plt.legend(loc="best")

# Save the Figure
plt.savefig("Figures/mouse_survival_rate.png")
# -

# ## Summary Bar Graph

# + Calculate the percent change in mean tumor volume between day 0 and
# day 45 for each drug.
drugs = mouse_count_by_drug.columns
percent_change = []
changes = []
for drug in drugs:
    drug_df = mean_tumor.loc[mean_tumor['Drug'] == drug]
    start_value = drug_df.loc[drug_df['Timepoint'] == 0,
                              'Tumor Volume (mm3)'].sum()
    end_value = drug_df.loc[drug_df['Timepoint'] == 45,
                            'Tumor Volume (mm3)'].sum()
    change = ((end_value - start_value) / start_value) * 100
    # Tumor growth (change > 0) means the drug had no effect.
    changes.append("No Effect" if change > 0 else "Positive Effect")
    percent_change.append(change)

# Display the data to confirm
vol_change = pd.DataFrame({'Drug': drugs, 'Percent Change': percent_change,
                           'Change': changes})
vol_change
# -

# + Splice the data between passing (tumor shrank) and failing (tumor grew)
# drugs so the bars can be grouped and coloured accordingly.
changes = vol_change['Percent Change']
positive_changes, positive_labels = [], []
negative_changes, negative_labels = [], []
for i in range(len(changes)):
    test_value = changes[i]
    if test_value > 0:
        negative_changes.append(test_value)
        # BUG FIX: the original indexed an undefined `labels` list here.
        negative_labels.append(drugs[i])
    else:
        positive_changes.append(test_value)
        positive_labels.append(drugs[i])

label = positive_labels + negative_labels
changes = positive_changes + negative_changes
positive_index_list = np.arange(0, len(positive_changes), 1)
x_axis5 = np.arange(0, len(changes), 1)

# Orient widths.  Add labels, tick marks, etc.
plt.figure(figsize=(8, 6))
barlist = plt.bar(x_axis5, changes, width=.9, color='r', alpha=1,
                  align="edge")
for j in positive_index_list:
    barlist[j].set_color('g')
# BUG FIX: the original passed an undefined `x_axis_total` to plt.xticks.
plt.xticks(x_axis5, label, rotation=45)
plt.ylabel("Tumor Volume Change (%)")
plt.title("Tumor Percent Change Over 45 Day Treatment")
plt.grid()

# Annotate each bar with its formatted percent change.
fmted_labels = ["{:.0f}%".format(changes[i]) for i in range(len(x_axis5))]
for i in range(len(x_axis5)):
    plt.text(x=x_axis5[i] + .15, y=np.sign(changes[i]) * 4,
             s=fmted_labels[i], horizontalalignment='left', size=12,
             color="white")

# Save the Figure
plt.savefig("Figures/tumor_change.png")

# Show the Figure
# BUG FIX: the original called fig.show() on a stale Figure object.
plt.show()
# -
# ---- file boundary: Homework 5.ipynb (next notebook in this concatenated dump) ----
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Starbucks Capstone Challenge # # ## Introduction # # This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. # # Not all users receive the same offer, and that is the challenge to solve with this data set. # # Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products. # # Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement. # # You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. # # Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. 
# # ## Example # # To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer. # # However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. # # ## Cleaning # # This makes data cleaning especially important and tricky. # # You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. # # ## Final Advice # # Because this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. 
You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A). # # Data Sets # # The data is contained in three files: # # * portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.) # * profile.json - demographic data for each customer # * transcript.json - records for transactions, offers received, offers viewed, and offers completed # # Here is the schema and explanation of each variable in the files: # # **portfolio.json** # * id (string) - offer id # * offer_type (string) - type of offer ie BOGO, discount, informational # * difficulty (int) - minimum required spend to complete an offer # * reward (int) - reward given for completing an offer # * duration (int) - time for offer to be open, in days # * channels (list of strings) # # **profile.json** # * age (int) - age of the customer # * became_member_on (int) - date when customer created an app account # * gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F) # * id (str) - customer id # * income (float) - customer's income # # **transcript.json** # * event (str) - record description (ie transaction, offer received, offer viewed, etc.) # * person (str) - customer id # * time (int) - time in hours since start of test. The data begins at time t=0 # * value - (dict of strings) - either an offer id or transaction amount depending on the record # # **Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the termnal from the orange icon in the top left of this notebook. 
# # You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal: # # <img src="pic1.png"/> # # Then you will want to run the above command: # # <img src="pic2.png"/> # # Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors. import pandas as pd import numpy as np import math import json import datetime import pickle import matplotlib.pyplot as plt import seaborn as sns import re import os from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import accuracy_score, f1_score from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import fbeta_score, make_scorer from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from joblib import dump, load from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer # %matplotlib inline from datetime import datetime import progressbar # read in the json files portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True) # # Python version used to perform analysis import sys print(sys.version) # # Data Exploration and transformation # ## Portolio Data # ### Exploratory Data Analysis (EDA) on Portfolio data # - Offer record attributes # * id (string) - Offer id # * offer_type (string) - String that describes the offer type # * bogo (Buy One Get One Free) # * discount # * informational # * difficulty (int) - Minimum amount a customer must spend to complete an offer # * reward (int) - Reward given 
# for completing an offer
#  * duration (int) - Offer duration [days]
#  * channels (list of strings)
# - EDA conclusions
#   - Channels variable is multi-label (can split using sklearn's
#     MultiLabelBinarizer)
#   - Need to rename 'id' variable to 'offerid'
#   - Need to One Hot Encode (OHE) the 'offer_type' variable

portfolio.head()

# ### Cleaning the portfolio data
# 1. Change the name of the 'id' column to 'offerid'
# 2. Change the name of the 'duration' column to 'durationdays'
# 3. Remove underscores from column names
# 4. One hot encode the 'offertype' column
# 5. One hot encode the 'channels' columns
# 6. Replace the 'offertype' and 'channels' columns with their respective
#    one hot encoded values


def clean_portfolio(data_dir="./data"):
    """Load and transform the offer portfolio data.

    Renames 'id' -> 'offerid' and 'duration' -> 'durationdays', strips
    underscores from the remaining column names, and replaces the
    'offertype' and 'channels' columns with one-hot-encoded equivalents.

    INPUT:
        data_dir: (optional) path to the directory holding portfolio.json

    OUTPUT:
        portfolio: DataFrame of offer ids and per-offer metadata
    """
    portfolio = pd.read_json(os.path.join(data_dir, 'portfolio.json'),
                             orient='records', lines=True)

    # BUG FIX: the original renamed columns by mutating the array returned by
    # DataFrame.columns.values in place and then reusing a stale alias of that
    # array, which only worked by accident.  rename() is explicit and
    # order-independent.
    portfolio = portfolio.rename(columns={'id': 'offerid',
                                          'duration': 'durationdays'})
    # Remove underscores from column names ('offer_type' -> 'offertype').
    portfolio.columns = [re.sub('_', '', elem) for elem in portfolio.columns]

    # Desired output column ordering: offer metadata first, then OHE columns.
    column_ordering = ['offerid', 'difficulty', 'durationdays', 'reward']

    # One hot encode the 'offertype' column.
    offertype_df = pd.get_dummies(portfolio['offertype'])
    column_ordering.extend(offertype_df.columns.values)

    # One hot encode the multi-label 'channels' column.
    ml_binarizerobj = MultiLabelBinarizer()
    ml_binarizerobj.fit(portfolio['channels'])
    channels_df = pd.DataFrame(
        ml_binarizerobj.transform(portfolio['channels']),
        columns=ml_binarizerobj.classes_)
    column_ordering.extend(channels_df.columns.values)

    # Replace the categorical columns with their one hot encodings.
    portfolio = pd.concat([portfolio, offertype_df, channels_df], axis=1)
    portfolio = portfolio.drop(columns=['offertype', 'channels'])

    # Return the "cleaned" portfolio data.
    return portfolio[column_ordering]


def update_column_name(dataframe, old_column_name, new_column_name):
    """Return the DataFrame's column names with one name replaced.

    INPUT:
        dataframe: Pandas DataFrame whose column names are read
        old_column_name: String storing the column name to replace
        new_column_name: String storing the replacement name

    OUTPUT:
        column_names: np.array of updated column names.  NOTE: a copy is
            returned; assign the result back to `df.columns`.  (The original
            mutated the live .columns.values array as a side effect, which
            callers should not rely on.)
    """
    column_names = dataframe.columns.values.copy()
    column_names[column_names == old_column_name] = new_column_name
    return column_names


portfolio = clean_portfolio()
portfolio.head()

# ## Profile data
# ### Exploratory Data Analysis (EDA) on Customer profile data
# - Customer profile record attributes
#  * age (int) -
# Customer age
#  * became_member_on (int) - Date when customer created an app account
#  * gender (str) - Customer gender (note some entries contain 'O' for other
#    rather than M or F)
#  * id (str) - Customer id
#  * income (float) - Customer's income
# - Customer profile EDA conclusions
#   - Gender and income have ~ 13% missing data
#   - Age is 118 when income is NaN
#   - Gender is not specified for ~ 1.5% of the data
#   - Year that a customer became a rewards member is not uniformly
#     distributed - potential customer differentiator
#   - Month that a customer became a rewards member is approximately
#     uniformly distributed - probably not a useful feature to include

# #### Print the first five rows of the customer profile data
profile.head()

# #### Determine if any customer profile attributes contain missing values
profile.isnull().sum(axis=0) * 100 / profile.shape[0]

# #### Compute customer gender attribute distribution
gender_counts = profile['gender'].value_counts()
gender_counts *= 100 / gender_counts.sum()
gender_counts

# #### Compute summary customer profile attribute statistics when customer
# income is not specified
profile[profile['income'].isnull()].describe()

# #### Compute summary customer profile attribute statistics when customer
# income is specified
profile[profile['income'].notnull()].describe()


# #### Evaluate what year a customer became a rewards member
def convert_to_datetime(elem):
    """Convert a %Y%m%d-formatted value to a datetime object.

    INPUT:
        elem: int or str date in %Y%m%d form (e.g. 20170823); non-strings
            are stringified before parsing

    OUTPUT:
        Datetime object for that date.
    """
    return datetime.strptime(str(elem), '%Y%m%d')


# +
became_member_on = profile['became_member_on'].apply(convert_to_datetime)

# Membership start *year* distribution, as a percentage of all customers.
start_year = became_member_on.apply(lambda elem: elem.year).value_counts()
start_year *= 100 / start_year.sum()
start_year
# -

# #### Evaluate which month a customer became a rewards member
start_month = became_member_on.apply(lambda elem: elem.month).value_counts()
start_month *= 100 / start_month.sum()
start_month

# ###
# Clean the customer profile data
# 1. Remove customers with missing income data
# 2. Remove customer profiles where the gender attribute is missing
# 3. Change the name of the 'id' column to 'customerid'
# 4. Transform the 'became_member_on' column to a datetime object
# 5. One hot encode a customer's membership start year
# 6. One hot encode a customer's age range
# 7. Transform a customer's gender from a character to a number


def clean_profile(data_dir="./data"):
    """Load and transform the customer demographic data.

    Drops profiles with missing income or an 'O' (other) gender, renames
    'id' to 'customerid', and replaces the age / membership-date columns
    with one-hot-encoded age ranges and membership start years.

    INPUT:
        data_dir: (optional) path to the directory holding profile.json

    OUTPUT:
        profile: DataFrame of cleaned customer demographics
        gender_integer_map: dict mapping gender string -> encoded integer
    """
    # BUG FIX: the original ignored data_dir and hard-coded 'data/profile.json'.
    profile = pd.read_json(os.path.join(data_dir, 'profile.json'),
                           orient='records', lines=True)

    # Remove customers with N/A income data.
    profile = profile[profile['income'].notnull()]

    # Remove customers whose gender is 'O' (other / unspecified).
    profile = profile[profile['gender'] != 'O']
    profile = profile.reset_index(drop=True)

    # Change the name of the 'id' column to 'customerid'.
    profile.columns = update_column_name(profile, 'id', 'customerid')

    # Desired output DataFrame column ordering.
    column_ordering = ['customerid', 'gender', 'income']

    # Transform the 'became_member_on' column to a datetime object.
    profile['became_member_on'] = \
        profile['became_member_on'].apply(convert_to_datetime)

    # One hot encode a customer's membership start year.
    profile['membershipstartyear'] = \
        profile['became_member_on'].apply(lambda elem: elem.year)
    membershipstartyear_df = pd.get_dummies(profile['membershipstartyear'])
    column_ordering.extend(membershipstartyear_df.columns.values)

    # One hot encode a customer's age range (decade-wide bins).
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and behaves identically here.
    min_age_limit = int(np.floor(profile['age'].min() / 10) * 10)
    max_age_limit = int(np.ceil(profile['age'].max() / 10) * 10)
    profile['agerange'] = pd.cut(profile['age'],
                                 range(min_age_limit, max_age_limit + 10, 10),
                                 right=False)
    profile['agerange'] = profile['agerange'].astype('str')
    agerange_df = pd.get_dummies(profile['agerange'])
    column_ordering.extend(agerange_df.columns.values)

    # Transform a customer's gender from a character to a number, remembering
    # the mapping so it can be reversed later.
    binarizerobj = LabelBinarizer()
    profile['gender'] = binarizerobj.fit_transform(profile['gender'])
    gender_integer_map = {
        elem: binarizerobj.transform([elem])[0, 0]
        for elem in binarizerobj.classes_}

    # Append the one hot encoded age range and membership start year variables
    # and drop the now-redundant source columns.
    profile = pd.concat([profile, agerange_df, membershipstartyear_df], axis=1)
    profile = profile.drop(columns=['age', 'agerange', 'became_member_on',
                                    'membershipstartyear'])

    # Return a DataFrame with "clean" customer profile data.
    return profile[column_ordering], gender_integer_map


# +
(profile, gender_integer_map) = clean_profile()
print("Number of user profiles: %d" % (profile.shape[0]))
# -

# #### Print the first five rows of the preprocessed (i.e. clean) customer
# profile data
# profile.head()

# #### Print the mapping of customer gender string to an integer value
print(gender_integer_map)

# #### Plot Income Distribution as a Function of Gender
# Results suggest that the minimum and maximum income for both male and
# female customers is approximately the same. However, male customer income
# is slightly biased towards lower values compared to female customer income.
# +
male_customers = profile[profile['gender'] == 1]
female_customers = profile[profile['gender'] == 0]

current_palette = sns.color_palette()
sns.set(font_scale=1.5)
sns.set_style('white')

fig, ax = plt.subplots(figsize=(10, 4), nrows=1, ncols=2,
                       sharex=True, sharey=True)

plt.sca(ax[0])
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11; histplot /
# displot are the replacements if this environment is ever upgraded.
sns.distplot(male_customers['income'] / 1000, color=current_palette[1])
plt.xlabel('Income in K')
plt.ylabel('Percentage')
plt.title('Male Customer Income')

plt.sca(ax[1])
sns.distplot(female_customers['income'] / 1000, color=current_palette[0])
plt.xlabel('Income in K')
plt.ylabel('Percentage')
plt.title('Female Customer Income')
plt.tight_layout()
# -


# #### Evaluate Membership Start Year Statistics
# The results suggest that most customers recently joined the Starbucks
# rewards program. These results also suggest that there are more male
# customers than female customers.
def reverse_ohe(dataframe_row, is_ohe_column):
    """Reverse a categorical one hot encoding for a single row.

    INPUT:
        dataframe_row: Pandas Series holding one DataFrame row
        is_ohe_column: list of booleans marking which columns belong to the
            one hot encoding of the categorical variable

    OUTPUT:
        String value of the categorical variable (the name of the set
        one-hot column in this row).
    """
    set_column = np.argwhere(dataframe_row[is_ohe_column])
    return str(dataframe_row.index.values[is_ohe_column][set_column][0, 0])


def initialize_membership_date(profile, gender_integer_map):
    """Build membership start year counts broken down by customer gender.

    INPUT:
        profile: DataFrame of customer profiles (with OHE start-year columns)
        gender_integer_map: dict mapping gender string -> encoded integer

    OUTPUT:
        membership_date: DataFrame with startyear / gender / count columns
    """
    # Membership start year OHE columns are the ones with integer names.
    is_ohe_column = [type(elem) == int for elem in profile.columns]

    membership_date = profile[['gender']].copy()
    membership_date['gender'] = genderint_to_string(membership_date,
                                                    gender_integer_map)
    membership_date['startyear'] = profile.apply(
        lambda row: reverse_ohe(row, is_ohe_column), axis=1)

    membership_date = membership_date.groupby(['startyear', 'gender']).size()
    membership_date = membership_date.reset_index()
    membership_date.columns = ['startyear', 'gender', 'count']
    return membership_date


def genderint_to_string(dataframe, gender_integer_map):
    """Transform gender encoded as an integer back to a string.

    INPUT:
        dataframe: DataFrame that includes gender encoded as an integer
        gender_integer_map: dict mapping gender string -> encoded integer

    OUTPUT:
        Series encoding gender as 'Male' / 'Female'.
    """
    # Invert the mapping: encoded integer -> human-readable gender string.
    integer_gender_map = {
        code: ('Male' if name == 'M' else 'Female')
        for name, code in gender_integer_map.items()}
    return dataframe['gender'].apply(lambda code: integer_gender_map[code])


# +
membership_date = initialize_membership_date(profile, gender_integer_map)

sns.barplot(x='startyear', y='count', hue='gender', data=membership_date)
plt.xlabel('Membership Start Year')
plt.ylabel('Count')
# -

# #### Compute customer gender distribution
membership_date.groupby('gender')['count'].sum()

# #### Evaluate Age Range Statistics
# These results suggest that the average customer age is between 50 and 60
# years old.
#
def init_agerange(profile, gender_integer_map):
    """Build customer age range counts broken down by gender.

    INPUT:
        profile: DataFrame of customer profiles (with OHE age range columns)
        gender_integer_map: dict mapping gender string -> encoded integer

    OUTPUT:
        age_range: DataFrame with agerange / gender / count columns, ordered
            so the '[100, 110)' style bins come after the two-digit bins.
    """
    # Age range OHE columns look like '[10, 20)'.
    # BUG FIX: raw strings for the regex patterns -- '\[' and '\)' are
    # invalid escape sequences in ordinary string literals and raise
    # SyntaxWarning on recent Pythons.
    pattern_obj = re.compile(r'^\[[0-9]+, [0-9]+\)')
    is_ohe_column = [pattern_obj.match(str(elem)) is not None
                     for elem in profile.columns]

    age_range = profile[['gender']].copy()
    age_range['gender'] = genderint_to_string(age_range, gender_integer_map)
    age_range['agerange'] = \
        profile.apply(lambda elem: reverse_ohe(elem, is_ohe_column), axis=1)

    age_range = age_range.groupby(['agerange', 'gender']).size()
    age_range = age_range.reset_index()
    age_range.columns = ['agerange', 'gender', 'count']

    # Move the 100+ age bins to the end (a plain lexicographic sort places
    # '[100, ...' before '[20, ...').
    is_above_100 = re.compile(r'\[10[0-9]+.*')
    h_transform = lambda elem: is_above_100.match(elem) is not None
    above_100 = age_range['agerange'].apply(h_transform)
    return pd.concat([age_range[above_100 == False], age_range[above_100]])


# +
age_range = init_agerange(profile, gender_integer_map)

plt.figure(figsize=(10, 4))
sns.barplot(x='agerange', y='count', hue='gender', data=age_range)
plt.xlabel('Age Range')
plt.ylabel('Count')
plt.xticks(rotation=90)
# -

# ## Transaction data EDA
# - Customer transaction record attributes
#  * event (str) - Record description (i.e. transaction, offer received,
#    offer viewed, etc.)
#  * person (str) - Customer id
#  * time (int) - Time in hours. The data begins at time t=0
#  * value - (dict of strings) - Either an offer id or transaction amount
#    depending on the record
# - Customer transaction data EDA conclusions
#   - Need to separate offer and customer purchase data
#   - Results suggest ~ 45 % of the events are customers purchases and ~ 55%
#     of the events describe customer offers

# ### Print the first five rows of the transaction data
transcript.head()

# #### Compute the event type distribution
event_counts = transcript['event'].value_counts()
event_counts = pd.DataFrame(list(zip(event_counts.index.values, event_counts)),
                            columns=['event', 'count'])
event_counts

# #### Compute the percentage of customer transaction and offer events
# +
total_transactions = event_counts['count'].sum()

# NOTE(review): iloc[0] assumes 'transaction' is the most frequent event
# (value_counts sorts by frequency) -- verify that holds for this data.
percentage_transactions = 100 * event_counts.iloc[0]['count'] / total_transactions
percentage_offers = 100 * event_counts.iloc[1:]['count'].sum() / total_transactions

print("Percentage of customer transaction events: %.1f %%" % (percentage_transactions))
print("Percentage of customer offer events: %.1f %%" % (percentage_offers))
# -

# ### Clean the transaction data
# 1. Change the name of the 'person' column to 'customerid'
# 2. Remove customer id's that are not in the customer profile DataFrame
# 3. Convert time variable units from hours to days
# 4. Change the name of the 'time' column to 'timedays'
# 5. Create a DataFrame that describes offers
#    - Create an offerid column
#    - Parse the offer event type (i.e. 'received', 'viewed', or 'completed')
#    - One hot encode customer offer events
# 6.
# Create a DataFrame that describes customer transaction events
#    - Parse customer transaction values
def clean_transcript(profile, data_dir='./data'):
    """Split the event log into offer events and purchase transactions.

    INPUT:
        profile: DataFrame of (cleaned) customer demographics; events for
            customers not present in this frame are dropped
        data_dir: (optional) path to the directory holding transcript.json

    OUTPUT:
        offer_data: DataFrame of offer received/viewed/completed events
        transaction: DataFrame of customer purchase transactions
    """
    transcript = pd.read_json(os.path.join(data_dir, 'transcript.json'),
                              orient='records', lines=True)

    # Change the name of the 'person' column to 'customerid'.
    transcript.columns = update_column_name(transcript, 'person', 'customerid')

    # Remove customer ids that are not in the customer profile DataFrame.
    select_data = transcript['customerid'].isin(profile['customerid'])
    # .copy() so the in-place edits below do not hit a view of the original
    # frame (avoids pandas SettingWithCopyWarning).
    transcript = transcript[select_data].copy()

    percent_removed = 100 * (1 - select_data.sum() / select_data.shape[0])
    print("Percentage of transactions removed: %.2f %%" % percent_removed)

    # Convert from hours to days and rename 'time' to 'timedays'.
    transcript['time'] /= 24.0
    transcript.columns = update_column_name(transcript, 'time', 'timedays')

    # Select customer offer events ('offer received' / 'offer viewed' /
    # 'offer completed').
    pattern_obj = re.compile('^offer (?:received|viewed|completed)')
    h_is_offer = lambda elem: pattern_obj.match(elem) is not None
    is_offer = transcript['event'].apply(h_is_offer)

    offer_data = transcript[is_offer].copy()
    offer_data = offer_data.reset_index(drop=True)

    # Desired output DataFrame column ordering.
    column_order = ['offerid', 'customerid', 'timedays']

    # Create an offerid column (the single entry of the 'value' dict).
    offer_data['offerid'] = offer_data['value'].apply(
        lambda elem: list(elem.values())[0])

    # Reduce 'offer received' -> 'received', etc.
    pattern_obj = re.compile('^offer ([a-z]+$)')
    h_transform = lambda elem: pattern_obj.match(elem).groups(1)[0]
    offer_data['event'] = offer_data['event'].apply(h_transform)

    # One hot encode customer offer events.
    event_df = pd.get_dummies(offer_data['event'])
    column_order.extend(event_df.columns.values)

    # Create a DataFrame that describes customer offer events.
    offer_data = pd.concat([offer_data, event_df], axis=1)
    # BUG FIX: the original called offer_data.drop(...) without assigning the
    # result, a silent no-op (masked by the column selection below, but
    # misleading to readers).
    offer_data = offer_data.drop(columns=['event', 'value'])
    offer_data = offer_data[column_order]

    # Select customer transaction (purchase) events.
    transaction = transcript[is_offer == False].copy()
    transaction = transaction.reset_index(drop=True)

    # Transform customer transaction event values (the single entry of the
    # 'value' dict holds the purchase amount).
    transaction['amount'] = transaction['value'].apply(
        lambda elem: list(elem.values())[0])

    # Create a DataFrame that describes customer transactions.
    transaction = transaction.drop(columns=['event', 'value'])
    transaction = transaction[['customerid', 'timedays', 'amount']]

    return offer_data, transaction


offer_data, transaction = clean_transcript(profile)

# #### Print the first five rows of the transformed customer offer data
offer_data.head()

# #### Print the first five rows of the transformed customer transaction data
transaction.head()

# # Combining transaction, demographic and offer data
# For each customer, apply the following algorithm:
# 1. Select a customer's profile
# 2. Select offer data for a specific customer
# 3. Select transactions for a specific customer
# 4. Initialize DataFrames that describe when a customer receives, views,
#    and completes an offer
# 5.
Iterate over each offer a customer receives # - Initialize the current offer id # - Look-up a description of the current offer # - Initialize the time period when an offer is valid # - Initialize a Boolean array that select customer transactions that fall within the valid offer time window # - Initialize a Boolean array that selects a description of when a customer completes an offer (this array may not contain any True values) # - Initialize a Boolean array that selects a description of when a customer views an offer (this array may not contain any True values) # - Determine whether the current offer was successful # - For an offer to be successful a customer has to view and complete it # - Select customer transactions that occurred within the current offer valid time window # - Initialize a dictionary that describes the current customer offer # - Update a list of dictionaries that describes the effectiveness of offers to a specific customer # # Once all customer transactions have been evaluated, convert the resulting list of dictionaries into a pandas DataFrame. def create_offeranalysis_dataset(profile, portfolio, offer_data, transaction): """ Creates an analytic dataset from the following Starbucks challenge datasets: * portfolio.json - Contains offer ids and meta data (duration, type, etc.) * profile.json - demographic data for each customer * transcript.json - records for transactions, offers received, offers viewed, and offers completed INPUT: profile: DataFrame that contains demographic data for each customer portfolio: Contains offer ids and meta data (duration, type, etc.) 
offer_data: DataFrame that describes customer offer data transaction: DataFrame that describes customer transactions OUTPUT: clean_data: DataFrame that characterizes the effectiveness of customer offers""" clean_data = [] customerid_list = offer_data['customerid'].unique() widgets=[' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') '] for idx in progressbar.progressbar(range(len(customerid_list)), widgets=widgets): clean_data.extend(create_combined_records(customerid_list[idx], portfolio, profile, offer_data, transaction)) clean_data = pd.DataFrame(clean_data) # Initialize a list that describes the desired output DataFrame # column ordering column_ordering = ['time', 'offerid', 'customerid', 'totalamount', 'offersuccessful', 'difficulty', 'durationdays', 'reward', 'bogo', 'discount', 'informational', 'email', 'mobile', 'social', 'web', 'gender', 'income', 2013, 2014, 2015, 2016, 2017, 2018, '[10, 20)', '[20, 30)', '[30, 40)', '[40, 50)', '[50, 60)', '[60, 70)', '[70, 80)', '[80, 90)', '[90, 100)', '[100, 110)'] clean_data = clean_data[column_ordering] clean_data = clean_data.sort_values('time') return clean_data.reset_index(drop=True) def create_combined_records(customer_id, portfolio, profile, offer_data, transaction): """ Creates a list of dictionaries that describes the effectiveness of offers to a specific customer INPUT: customer_id: String that refers to a specific customer profile: DataFrame that contains demographic data for each customer portfolio: DataFrame containing offer ids and meta data about each offer (duration, type, etc.) 
offer_data: DataFrame that describes customer offer data transaction: DataFrame that describes customer transactions OUTPUT: rows: List of dictionaries that describes the effectiveness of offers to a specific customer """ # Select a customer's profile cur_customer = profile[profile['customerid'] == customer_id] # Select offer data for a specific customer select_offer_data = offer_data['customerid'] == customer_id customer_offer_data = offer_data[select_offer_data] customer_offer_data = customer_offer_data.drop(columns='customerid') customer_offer_data = customer_offer_data.reset_index(drop=True) # Select transactions for a specific customer select_transaction = transaction['customerid'] == customer_id customer_transaction_data = transaction[select_transaction] customer_transaction_data =\ customer_transaction_data.drop(columns='customerid') customer_transaction_data =\ customer_transaction_data.reset_index(drop=True) # Initialize DataFrames that describe when a customer receives, # views, and completes an offer event_type = ['completed', 'received', 'viewed'] offer_received =\ customer_offer_data[customer_offer_data['received'] == 1] offer_received = offer_received.drop(columns=event_type) offer_received = offer_received.reset_index(drop=True) offer_viewed =\ customer_offer_data[customer_offer_data['viewed'] == 1] offer_viewed = offer_viewed.drop(columns=event_type) offer_viewed = offer_viewed.reset_index(drop=True) offer_completed =\ customer_offer_data[customer_offer_data['completed'] == 1] offer_completed = offer_completed.drop(columns=event_type) offer_completed = offer_completed.reset_index(drop=True) # Iterate over each offer a customer receives rows = [] for idx in range(offer_received.shape[0]): # Initialize the current offer id cur_offer_id = offer_received.iloc[idx]['offerid'] # Look-up a description of the current offer cur_offer = portfolio.loc[portfolio['offerid'] == cur_offer_id] durationdays = cur_offer['durationdays'].values[0] # Initialize the time 
period when an offer is valid cur_offer_startime = offer_received.iloc[idx]['timedays'] cur_offer_endtime =\ offer_received.iloc[idx]['timedays'] + durationdays # Initialize a boolean array that select customer transcations that # fall within the valid offer time window select_transaction =\ np.logical_and(customer_transaction_data['timedays'] >= cur_offer_startime, customer_transaction_data['timedays'] <= cur_offer_endtime) # Initialize a boolean array that selects a description of when a # customer completes an offer (this array may not contain any True # values) select_offer_completed =\ np.logical_and(offer_completed['timedays'] >= cur_offer_startime, offer_completed['timedays'] <= cur_offer_endtime) # Initialize a boolean array that selects a description of when a # customer views an offer (this array may not contain any True # values) select_offer_viewed =\ np.logical_and(offer_viewed['timedays'] >= cur_offer_startime, offer_viewed['timedays'] <= cur_offer_endtime) # Determine whether the current offer was successful cur_offer_successful =\ select_offer_completed.sum() > 0 and select_offer_viewed.sum() > 0 # Select customer transcations that occurred within the current offer # valid time window cur_offer_transactions = customer_transaction_data[select_transaction] # Initialize a dictionary that describes the current customer offer cur_row = {'offerid': cur_offer_id, 'customerid': customer_id, 'time': cur_offer_startime, 'offersuccessful': int(cur_offer_successful), 'totalamount': cur_offer_transactions['amount'].sum()} cur_row.update(cur_offer.iloc[0,1:].to_dict()) cur_row.update(cur_customer.iloc[0,1:].to_dict()) # Update a list of dictionaries that describes the effectiveness of # offers to a specific customer rows.append(cur_row) return rows # + clean_data_csvfile = "./data/clean_data.csv" if os.path.exists(clean_data_csvfile): clean_data = pd.read_csv(clean_data_csvfile) else: clean_data = create_offeranalysis_dataset(profile, portfolio, offer_data, 
transaction) clean_data.to_csv(clean_data_csvfile, index=False) clean_data = clean_data.drop(columns=['time', 'customerid', 'email', 'informational']) column_ordering = ['offerid', 'totalamount'] column_ordering.extend([elem for elem in clean_data.columns if elem not in column_ordering]) clean_data = clean_data[column_ordering] clean_data.head() # - clean_data.columns # # Spliting the data into training and testing data # # + random_state = 0 class_label_name = 'offersuccessful' variables = clean_data.drop(columns=[class_label_name]) class_label = clean_data.filter([class_label_name]) (X_train, X_test, y_train, y_test) = train_test_split(variables.values, class_label.values, test_size=0.2, random_state=random_state) variable_names = variables.columns[2:] offerid_train = X_train[:, 0] totalamount_train = X_train[:, 1] X_train = X_train[:, 2:].astype('float64') offerid_test = X_test[:, 0] totalamount_test = X_test[:, 1] X_test = X_test[:, 2:].astype('float64') y_train = y_train.ravel() y_test = y_test.ravel() # - # ## Data attributes print(variable_names) # # Predictive Modeling # ## Evaluate naive predictor performance # + naive_predictor_accuracy = accuracy_score(y_train,np.ones(len(y_train))) naive_predictor_f1score = f1_score(y_train, np.ones(len(y_train))) print("Naive predictor accuracy: %.3f" % (naive_predictor_accuracy)) print("Naive predictor f1-score: %.3f" % (naive_predictor_f1score)) # - # ## Constructing Logistic Regression model # # - Results suggest that a logistic regression model's accuracy and f1-score is better than the naive predictor # - Accuracy # - Naive predictor: 0.471 # - Logistic regression: 0.722 # - F1-score # - Naive predictor: 0.640 # - Logistic regression: 0.716 # + model_dir = "./models" if not os.path.exists(model_dir): os.mkdir(model_dir) lr_model_path = os.path.join(model_dir, 'lr_clf.joblib') scorer = make_scorer(fbeta_score, beta=0.5) if os.path.exists(lr_model_path): lr_random = load(lr_model_path) else: lr_clf = 
LogisticRegression(random_state=random_state, solver='liblinear') random_grid = {'penalty': ['l1', 'l2'], 'C': [1.0, 0.1, 0.01]} lr_random = RandomizedSearchCV(estimator = lr_clf, param_distributions = random_grid, scoring=scorer, n_iter = 6, cv = 3, verbose=2, random_state=random_state, n_jobs = 3) lr_random.fit(X_train, y_train) dump(lr_random, lr_model_path) # - # #### Define model performance evaluation function def evaluate_model_performance(clf, X_train, y_train): """ Prints a model's accuracy and F1-score INPUT: clf: Model object X_train: Training data matrix y_train: Expected model output vector OUTPUT: clf_accuracy: Model accuracy clf_f1_score: Model F1-score""" class_name = re.sub("[<>']", '', str(clf.__class__)) class_name = class_name.split(' ')[1] class_name = class_name.split('.')[-1] y_pred_rf = clf.predict(X_train) clf_accuracy = accuracy_score(y_train, y_pred_rf) clf_f1_score = f1_score(y_train, y_pred_rf) print("%s model accuracy: %.3f" % (class_name, clf_accuracy)) print("%s model f1-score: %.3f" % (class_name, clf_f1_score)) return clf_accuracy, clf_f1_score # #### Evaluate Logistic Regression Model Performance evaluate_model_performance(lr_random.best_estimator_, X_train, y_train) # ## Construct Random Forest Model # - Resuls suggest that a random forest model's accuracy and f1-score is better than the naive predictor # - Accuracy # - Naive predictor: 0.471 # - Random forest: 0.742 # - F1-score # - Naive predictor: 0.640 # - Random forest: 0.735 # + rf_model_path = os.path.join(model_dir, 'rf_clf.joblib') if os.path.exists(rf_model_path): rf_random = load(rf_model_path) else: rf_clf = RandomForestClassifier(random_state=random_state) # Number of trees in random forest n_estimators = [10, 30, 50, 100, 150, 200, 250, 300] # Number of features to consider at every split max_features = ['auto', 'sqrt'] # Maximum number of levels in tree max_depth = [int(x) for x in np.arange(3, 11)] max_depth.append(None) # Minimum number of samples required to 
split a node min_samples_split = [2, 5, 10] # Minimum number of samples required at each leaf node min_samples_leaf = [1, 2, 4] # Create the random grid random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} rf_random = RandomizedSearchCV(estimator = rf_clf, param_distributions = random_grid, scoring=scorer, n_iter = 100, cv = 3, verbose=2, random_state=random_state, n_jobs = 3) rf_random.fit(X_train, y_train) dump(rf_random, rf_model_path) # - # ### Evalaute Random Forest Model Performance evaluate_model_performance(rf_random.best_estimator_, X_train, y_train) # ### Plot Estimated Feature Importance # - Feature importance refers to a numerical value that describes a feature's contribution to building a model that maximizes its evaluation metric # - These results suggest that the top five features for this problem are: # 1. Offer difficulty (how much money a customer must spend to complete an offer) # 2. Offer duration # 3. Offer reward # 4. Customer income # 5. 
Whether a customer created an account on the Starbucks rewards mobile application in 2018 # + relative_importance = rf_random.best_estimator_.feature_importances_ relative_importance = relative_importance / np.sum(relative_importance) feature_importance =\ pd.DataFrame(list(zip(variable_names, relative_importance)), columns=['feature', 'relativeimportance']) feature_importance = feature_importance.sort_values('relativeimportance', ascending=False) feature_importance = feature_importance.reset_index(drop=True) palette = sns.color_palette("Blues_r", feature_importance.shape[0]) plt.figure(figsize=(8, 8)) sns.barplot(x='relativeimportance', y='feature', data=feature_importance, palette=palette) plt.xlabel('Relative Importance') plt.ylabel('Feature') plt.title('Random Forest Estimated Feature Importance') # - # #### Print the top 10 features sorted based on their estimated importance feature_importance.head(n=10) # ## Constructing a Gradient Boosting classifier # - Resuls suggest that a gradient boosting model's accuracy and f1-score is better than the naive predictor # - Accuracy # - Naive predictor: 0.471 # - Gradient boosting: 0.736 # - F1-score # - Naive predictor: 0.640 # - Gradient boosting: 0.725 # + gb_model_path = os.path.join(model_dir, 'gb_clf.joblib') if os.path.exists(gb_model_path): gb_random = load(gb_model_path) else: gb_clf = GradientBoostingClassifier(random_state=random_state) # Create the random grid random_grid = {'loss': ['deviance', 'exponential'], 'learning_rate': [0.1, 0.01, 0.001], 'n_estimators': [10, 30, 50, 100, 150, 200, 250, 300], 'min_samples_leaf': min_samples_leaf, 'min_samples_split': min_samples_split} gb_random = RandomizedSearchCV(estimator = gb_clf, param_distributions = random_grid, scoring=scorer, n_iter = 100, cv = 3, verbose=2, random_state=random_state, n_jobs = 3) gb_random.fit(X_train, y_train) dump(gb_random, gb_model_path) # - # ### Evaluate Gradient Boosting Model Performance 
evaluate_model_performance(gb_random.best_estimator_, X_train, y_train) # # Tune the best model # - Model ranking based on training data accuracy # 1. RandomForestClassifier model accuracy: 0.742 # 2. GradientBoostingClassifier model accuracy: 0.736 # 3. LogisticRegression model accuracy: 0.722 # 4. Naive predictor accuracy: 0.471 # - Model ranking based on training data F1-score # 1. RandomForestClassifier model f1-score: 0.735 # 2. GradientBoostingClassifier model f1-score: 0.725 # 3. LogisticRegression model f1-score: 0.716 # 4. Naive predictor f1-score: 0.640 # - Results suggest that the random forest model has the best training data accuracy and F1-score # # A logistic regression model constructs a linear decision boundary to separate successful and unsuccessful offers. However, based on the exploratory analysis the decision boundary could be non-linear. Therefore, an ensemble method like random forest or gradient boosting should perform better. # Both random forest and gradient boosting models are a combination of multiple decision trees. A random forest classifier randomly samples the training data with replacement to construct a set of decision trees that are combined using majority voting. In contrast, gradient boosting iteratively constructs a set of decision trees with the goal of reducing the number of misclassified training data samples from the previous iteration. A consequence of these model construction strategies is that the depth of decision trees generated during random forest model training are typically greater than gradient boosting weak learner depth to minimize model variance. Typically, gradient boosting performs better than a random forest classifier. However, gradient boosting may overfit the training data and requires additional effort to tune. A random forest classifier is less prone to overfitting because it constructs decision trees from random training data samples. 
Also, a random forest classifier’s hyperparameters are easier to optimize. # # Link - https://blog.citizennet.com/blog/2012/11/10/random-forests-ensembles-and-performance-metrics & https://www.quora.com/How-can-the-performance-of-a-Gradient-Boosting-Machine-be-worse-than-Random-Forests # + model_performance = [] classifier_type = ['naivepredictor', 'logisticregression', 'randomforest', 'gradientboosting'] model_performance.append((naive_predictor_accuracy, naive_predictor_f1score)) model_performance.append(evaluate_model_performance(lr_random.best_estimator_, X_train, y_train)) model_performance.append(evaluate_model_performance(rf_random.best_estimator_, X_train, y_train)) model_performance.append(evaluate_model_performance(gb_random.best_estimator_, X_train, y_train)) model_performance = pd.DataFrame(model_performance, columns=['accuracy', 'f1score']) classifier_type = pd.DataFrame(classifier_type, columns=['classifiertype']) model_performance = pd.concat([classifier_type, model_performance], axis=1) model_performance = model_performance.sort_values('accuracy', ascending=False) model_performance = model_performance.reset_index(drop=True) model_performance # - # ## Print the Best Model's Hyperparameters print(rf_random.best_estimator_) # ## Refine Best Model # - Refine model hyperparameter space # - [Hyperparameter Tuning the Random Forest in Python](https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74) # + grid_fit_path = os.path.join(model_dir, 'grid_fit.joblib') if os.path.exists(grid_fit_path): grid_fit = load(grid_fit_path) else: parameters = {'n_estimators': [300, 350, 400, 450, 500], 'max_depth': [10, 11, 12, 13, 14, 15], 'min_samples_leaf': min_samples_leaf, 'min_samples_split': min_samples_split, 'random_state': [random_state]} grid_obj = GridSearchCV(rf_clf, parameters, scoring=scorer, cv=5, n_jobs=3, verbose=2) grid_fit = grid_obj.fit(X_train, y_train) dump(grid_fit, grid_fit_path) # Get the 
estimator best_clf = grid_fit.best_estimator_ evaluate_model_performance(best_clf, X_train, y_train) # - # ### Print the Refined Random Forest Model's Hyperparameters best_clf # ### Evaluate Test Data Performance evaluate_model_performance(best_clf, X_test, y_test) # # Conclusion # The analysis suggests that a random forest model has the best training data accuracy and F1-score. The refined random forest model hyperparameters using a grid search suggests that the resulting random forest model has a training data accuracy of 0.752 and an F1-score of 0.742. The test data set accuracy of 0.733 and F1-score of 0.727 suggests that the random forest model we constructed did not overfit the data. # The top five features based on their importance are: # Reward, duration days, difficulty, income and whether a customer created an account on the Starbucks rewards mobile application in 2018. # # # # Future improvements # # Since the top three features are associated with a customer offer, it may be possible to improve the performance of a random forest model by creating features that describe an offer’s success rate as a function of offer difficulty, duration, and reward. These additional features should provide a random forest classifier the opportunity to construct a better decision boundary that separates successful and unsuccessful customer offers.
code/Starbucks_Capstone_notebook v2.0as.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/skybristol/experiments/blob/dev/Extracted_PDF_Annotation_via_Zotero.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="jtzwSnLRFKAJ" # I'm experimenting here with a process to turn annotations created within PDF files stored as part of a Zotero library into metadata contents and structured annotations for the bibliographic record. This is essentially for cases where there is no good citation metadata already in existence somewhere on the web (e.g., for certain types of government reports) and we need to extract that content from within PDFs. It's also for cases where built-in structured PDF metadata is no good, which is the case for anything other than professionally built PDFs (e.g., just exporting a PDF from your word processor does not build a good PDF). This technique also holds promise for setting up training data for building various kinds of entity recognition models to auto-extract particular concepts from full texts processed with NLP. # # I used the ZotFile plugins for Zotero, inspired by [this video](https://www.youtube.com/watch?v=_Fjhad-Z61o&t=1251s). In Zotero, the process includes storing the PDF file as an attachment so that Zotero is "managing" it, annotating the file using some type of PDF markup tool (I used Preview on Mac), and then running the ZotFile tool to extract annotations from the PDF which creates a rich text (html) note for the item in Zotero. The notes are synced to the group library with Zotero online where they can be picked up for processing. 
# # For annotation, I used a combination of highlighting particular text and then tagging that text with a keyword corresponding to a target part of the citation metadata I'm trying to identify (e.g., title, authors, etc.). I should then be able to pull these two pieces out of the generated markdown into a data structure that I can feed back into the corresponding record via the Zotero API. # # For the Python processing workflow part of this, I used the pyzotero package to connect to the Zotero group library, read items with notes, and then processed those items to generate structured data from the HTML notes that can be reinjected back into the items. # + colab={"base_uri": "https://localhost:8080/"} id="ump_4YY_PwtX" outputId="e8b72a38-e781-427d-81f0-a141c4bf88b6" # !pip install pyzotero # + id="tah2x2bj9UWH" from bs4 import BeautifulSoup import re from pyzotero import zotero from getpass import getpass # + [markdown] id="4NDeZC__aA8Q" # This function handles the process of working a given Zotero child item's note content to detect if it was generated using the ZotFile process and then extracting the annotations into a usable data structure (list of dictionaries). 
# + id="vULdcZcWA-5V" annotation_property_map = [ { "property_name": "title", "property_type": "single" }, { "property_name": "date", "property_type": "single" }, { "property_name": "institution", "property_type": "single" }, { "property_name": "author", "property_type": "multi" }, { "property_name": "project", "property_type": "tag" }, { "property_name": "place", "property_type": "tag" }, { "property_name": "commodity", "property_type": "tag" }, ] def structured_annotations(item_key, annotation_html, property_map=annotation_property_map): extract_keywords = [i["property_name"] for i in property_map] annotations_soup = BeautifulSoup(annotation_html, 'html.parser') pattern = '\"(.*?)\"' paragraphs = annotations_soup.find_all("p") if "Extracted Annotations" not in paragraphs[0].text: return None annotation_texts = list() for index,p in enumerate(paragraphs): em_in_p = p.find("em") if em_in_p: p_text = em_in_p.text else: p_text = p.text p_text_parts = p_text.split() if len(p_text_parts) > 0 and p_text_parts[0] in extract_keywords: prop = p_text_parts[0] annotation_text = re.search(pattern, paragraphs[index - 1].text) if annotation_text is not None: annotation_texts.append({ "item_key": item_key, "text": annotation_text.group(1), "property": prop }) return annotation_texts def update_zotero_item( item_key, annotations_list, zotero_api, commit_update=True, property_map=annotation_property_map): update_item = zotero_api.item(item_key) if not update_item: return available_updates = [i for i in annotations_list if i["item_key"] == item_key] if available_updates: single_value_props = [i["property_name"] for i in property_map if i["property_type"] == "single"] tag_props = [i["property_name"] for i in property_map if i["property_type"] == "tag"] for prop in single_value_props: update_value = next((i["text"] for i in available_updates if i["property"] == prop), None) if update_value is not None: update_item["data"][prop] = update_value update_item["data"]["place"] = 
",".join([i["text"] for i in available_updates if i["property"] == "place"]) update_item["data"]["creators"] = [{'creatorType': 'author', 'name': i["text"]} for i in available_updates if i["property"] == "author"] update_item["data"]["tags"] = [{'tag': i["text"]} for i in available_updates if i["property"] in tag_props] if commit_update: zotero_api.update_item(update_item) return update_item # + [markdown] id="dLX1_JRaaSCz" # To interface with the Zotero API, you need to provide a library ID and an API key. This should work for anyone with a group library that has followed the same process I outlined above. # + colab={"base_uri": "https://localhost:8080/"} id="NHy-0rgMP2HT" outputId="65e32831-f1d9-4599-aa7e-10d0e583ebc0" zot = zotero.Zotero(input("Library ID "), "group", getpass(prompt="API Key ")) # + [markdown] id="1sK-iahFadzi" # This process would need to be worked out further in production practice, but we essentially walk the items in a given library looking for notes. There might be some more efficient way to zero in on these, but I haven't figured it out yet with pyzotero. Here, we make a pass through every item, look for items with children, get the children, and then get any that have notes. I assume that this might be additive where all note items that can be processed will be yielding annotations for the given parent item. I send the note html along with the associated item key to the function to return available structured annotations for further processing. Since every annotation will essentially contain a key/value pair (property and text content), we can simply build out an array of these with their item keys for further processing. 
# + id="s5SPrL0oX-Fa" item_annotations = list() for item in zot.all_top(): if item["meta"]["numChildren"] > 0: note_children = [i for i in zot.children(item["key"]) if i["data"]["itemType"] == "note"] if note_children: for note_child in note_children: extracted_annotations = structured_annotations(item["key"], note_child["data"]["note"]) if extracted_annotations: item_annotations.extend(extracted_annotations) # + [markdown] id="IJB3IitmbQAd" # In this case, we got the one item where I've annotated and then extracted text snippets corresponding to specific metadata elements I'm identifying and wanting to work with. # + colab={"base_uri": "https://localhost:8080/"} id="SvMrVsZJZ5D4" outputId="7e12e9ca-c715-41e8-e849-29515127c3a7" item_annotations # + [markdown] id="cY4e7qQYLdoC" # I added an additional function to work through the annotations gathered and commit them back to the respective Zotero items cataloging the annotated files. This basically roundtrips the process, letting us propose a workflow concentrated on marking up "messy" PDF files using meta keywords and then leveraging Zotero and ZotFile to build out the catalog records from the annotation markup. # + id="TuhPsN7b3HhM" for item_key in list(set([i["item_key"] for i in item_annotations])): update_zotero_item(item_key, item_annotations, zot, commit_update=True) # + [markdown] id="gJFRZHCoH4oE" # What I did above so far is a reasonable start, but there are a few issues. # # * This is pretty brittle at this point and requires a very specific convention to be followed in annotating a PDF text. This would need to be made a bit more robust in terms of dealing with text strings and different things people might do in free and open annotations. I mitigated this a little bit in the function by first looking for a set of keywords identifying the specific bits of annotation that we want to go after and then getting the highlighted text the annotation is identifying. 
However, some type of conventions would need to be established and followed in terms of highlighting a chunk of text and then marking up its particular significance. If we want to simply pick out the major elements of reasonably complete citation metadata, then something like I tried here should work well enough. # * I still need to work out the best way to feed everything back into building more usable report reference items in Zotero once metadata properties are extracted. That should be pretty straightforward, but I want to fiddle with the simplest workflow possible where someone would mark up a bunch of PDFs quickly, load the files to the Zotero library without making report items from them, and then see if the whole process can work from there. # # My takeaway so far is that it's actually really nice and fast to simply open up a PDF file and start marking it up. Theoretically, this could be done on a whole batch of PDFs totally separate from Zotero, bulk import those to Zotero, run the ZotFile extraction on the annotations, and then generate properly documented items. For the types of files this applies to, Zotero is not going to recognize that they should be "report" type items, so that part of things would need to be handled through the API. As noted, the real point here is to train an AI to do this work, at least within some contextual boundaries. But even if it was a person sitting down doing this work, it should be much faster to open a PDF, mark it up following a particular convention to identify the important bits, and then have a system take over to parse and catalog the files.
Extracted_PDF_Annotation_via_Zotero.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <font face="times"><font size="6pt"><p style = 'text-align: center;'> <NAME> # # <font face="times"><font size="6pt"><p style = 'text-align: center;'><b>Computational Methods in the Physical Sciences</b><br/><br/> # # <p style = 'text-align: center;'><b>Module 3: Iterative Methods</b><br/><br/> # ***Prerequisite modules:*** Module 2 # # ***Estimated completion time:*** 3-6 hours # # ***Learning objectives:*** Become familiar with iteration and a simple example of of an iterative method for solving differential equations. # # <img src=" https://imgs.xkcd.com/comics/loop.png" width="350" height="350"/> # <p style = 'text-align: center;'>Image credit: https://xkcd.com/1411/<br/><br/> # This module considers various versions of a familiar physical example -- projectile motion -- and how it might be studied **numerically**; i.e., by computer. Such motion is described formally by differential equations, for which there are standard and sophisticated numerical solution techniques that will be explored in a later module. As a gentler introduction to numerical methods, in this chapter we present a simple approach to numerically solving **ordinary differential equations**. This method will not give the most accurate results in general, but the basic idea underlies more advanced methods, and it will get you "warmed up" for the techniques you will see in later modules. # ***Prompt:*** Do you think that mental and academic abilities are mostly something we are born with or something that we can develop? Please watch the 11-minute video below and write about what you take away from it. Provide your own definitions of "fixed" and "growth" mindsets. 
# ### <font color="blue">3.1 One-dimensional Motion Without Drag</font> # Let's start by modeling simple one-dimensional (1-D) motion with constant acceleration, $a$, and without resistance. Such motion is described by two variables that are functions of time $t$: position $x(t)$ and velocity $v(t)$. Since velocity is rate of change of position with time, $v = \dfrac{dx}{dt} \simeq \dfrac{\Delta x}{\Delta t}$, we can write # # \begin{equation} # v(t) \simeq \dfrac{x(t+ \Delta t) - x(t)}{\Delta t} . # %\label{eq:Eulerv} # \end{equation} # # Rearranging this equation, we find the approximate expression (which becomes exact in the limit as $\Delta t \rightarrow 0$) # # \begin{equation} # x(t+ \Delta t) \simeq x(t) + v(t) \Delta t . \hspace{50pt} (1) # %\label(eq:Eulerx} # \end{equation} # # This equation, depicted graphically in Fig. 1, represents the idea behind **Euler's method**: an approximate predicted value for $x$ at a later time $t + h$ ($\Delta t = h$ here) can be obtained from the slope of the $x(t)$ function (i.e., $v$) at an earlier time $t$. (The line labeled "Slope at $t + h/2$" represents the key to a more accurate approximation which will be presented in a later module.) # # # We also know that acceleration and velocity are related by $a \simeq \dfrac{\Delta v}{\Delta t} = \dfrac{v(t+ \Delta t) - v(t)}{\Delta t}$, so we can get an approximate prediction for $v$ similar to that for $x$: # # # \begin{equation} # v(t+ \Delta t) \simeq v(t) + a \, \Delta t . \hspace{50pt} (2) # \end{equation} # # <font color="green"><b>Breakpoint 1</b></font>: The same basic approach used to project $x$ and $v$ forward in time could be applied to project *any* (well-behaved) function $f(x)$, where $x$ need not represent position. Write an expression for $f(x + h)$ analogous to Eqs. (1) and (2), where $h$ is an infinitesimal displacement. 
# # # <img src=" http://www-personal.umich.edu/~mejn/cp/figures/fig8-2.png" width="350" height="350"/> # Image credit: Computational Physics, Newman(http://www-personal.umich.edu/~mejn/cp/index.html) # <img src=" https://i.stack.imgur.com/NIYp6.png" width="250" height="250"/> # <img src=" https://lazyprogrammer.me/wp-content/uploads/2018/12/EulerMethodGraph2.png" width="250" height="250"/> # # Image credit: https://math.stackexchange.com/questions/1998912/how-do-i-explain-euler-method # It turns out that the accuracy of the Euler method can be significantly improved by making a small change: instead of updating the position first and the velocity second, *the velocity is updated first, and the position then is updated using the* new *velocity*. That is, the equations used are: # # \begin{align} # v(t + \Delta t) & \simeq v(t) + a \, \Delta t , \hspace{78pt} (3) \\ # x(t + \Delta t) & \simeq x(t) + v(t + \Delta t) \, \Delta t . \hspace{50pt} (4) # \end{align} # # These modified equations embody the **Euler-Cromer method** (which sometimes goes by other names). Note that this approach amounts to using the slope at the later time, rather than the one at the earlier time, to extrapolate the position at the later time. The Euler-Cromer method does a much better job of conserving energy than does the Euler method, which often causes the energy to increase steadily. # # We can use Eqs. (3) and (4) to evolve the position over time, starting from some initial position and velocity. We put in the current position and velocity on the right-hand sides of the equations, compute the new values of those variables on the left-hand sides, then use those new values in the right-hand sides to get the next set of new values on the left, and so on. This approach to solving differential equations exemplifies the process of **iteration** -- the output of one step of a process serves as the input for the next step. 
# It also is an example of a **finite-difference** method of solving differential equations, since it involves small but finite increments of a key parameter ($t$ here).
#
# Below, we display some pseudocode for the Euler-Cromer method. (Recall that lines beginning with a hash, #, are comments in Python.) ***Starting with this module and continuing through the subsequent ones, you are strongly encouraged to write pseudocode for Exercise solutions before actually writing code (even when pseudocode is not requested). This may allow you to avoid logic errors that would be hard to figure out from error messages or the code itself.***
# #Set up constants and initial values
# initial & final times
# time step
# initial position
# initial velocity
# acceleration
#
# while (time is less than end-time):
# update velocity
# update position
#
# store updated velocity
# store updated position
#
# increment time
# store updated time
# The Python code below shows how to do this motion computation (using variable `y` instead of `x`) with a typical set of initial parameter values and the case of zero acceleration. It is explained in more detail below the code cell.
# +
# Euler-Cromer evolution of 1-D motion with constant acceleration.
# Set up constants and initial values (SI units)
t = 0     # initial time
tf = 10   # final time
dt = 0.1  # time step
y = 0     # initial position, at origin
v = 50    # initial velocity, in positive-y direction
a = 0     # constant acceleration

# Create & initialize lists for variable values
time = [t]
ypos = [y]
yvel = [v]

# Loop condition is t <= tf - dt (not t <= tf) because each pass computes
# the variables at the *next* time step, t + dt.
while t <= tf - dt:
    # Update variables using Eqs. (3) & (4): the velocity is updated first,
    # and the position then uses the *new* velocity (Euler-Cromer, not the
    # plain Euler method of Eqs. (1) & (2)).
    v = v + a * dt
    y = y + v * dt

    # Add updated variables to the lists
    yvel.append(v)
    ypos.append(y)

    # Increment time
    t = t + dt
    time.append(t)
# -
# The top group of lines simply defines the values of constants and the initial values of variables. The middle set of lines creates and initializes lists that will be used to store the sequence of times at which the position and velocity are computed, as well as the corresponding values of those variables.
The lists at first contain just the initial values of the variables. (It's possible, and sometimes desirable, to create a list without providing any initial values to it. The syntax for that is `mylist = []`.) Note that the various parameters and variables are given names that represent the corresponding quantities. This isn't absolutely necessary, but it makes the code a lot more readable than it would be if the names were unrelated to the quantities they represent (e.g., `var1`, `gfxxq`, `Dumbledore`, etc.). # # The bottom section of code uses a `while` loop to update variable values. Recall that a `while` statement checks whether the indicated condition is met; if so, the loop is executed; if not, the program jumps to the first non-indented line following the `while` block. (We use a `while` loop rather than a `for` loop here because we don't know ahead of time how many cycles through the loop we need to make, but we do know the stopping condition -- the final time.) Note that the `while` condition here is written as `t <= tf - dt` (`dt` is playing the role of $\Delta t$ in the earlier equations). This condition is *not* checking whether the time `t` is less than or equal to the final time `tf`, but whether it's less than or equal to the *earlier* time, `tf - dt`. That's because the `while` loop is going to calculate the variable values at the *next* time step. If the condition were `t <= tf`, then when `t` became equal to `tf` and the program entered the loop, the loop would then calculate the variable values at `tf + dt`, which would be beyond the desired range. # # In the `while` loop, the new values of `y`, `v`, and `t` are added to the storage lists using the `.append()` **method**. (The term "method" comes from object-oriented programming, which will be discussed in a later module.) The lists can be made as long as necessary (up to the limit of the computer's memory) in this way. # # Note that in this Euler-Cromer method, the code implements Eqs. 
(3) and (4) as exact, not approximate expressions. # # ***IMPORTANT***: A key issue that comes up when running code like this is what the timestep `dt` should be: too long a timestep may lead to highly inaccurate results, while too short a timestep might make the computation take too much time. A reasonable rule of thumb might be to set the timestep to 1% or 0.1% of the total time expected for the motion and see how the results look and how long the computation takes. If the results look poor or are not what was expected, decrease the time step by a factor of 5 or 10; if the results look good but the calculation takes too long, try increasing the timestep by a factor of 3 or 5. # # There are several ways to run a program like this. In a Python IDE like Anaconda or Canopy, you would type or copy-and-paste the code into a `.py` file in the IDE editor, and then run it using the editor's run controls (e.g., the double green arrow button in Anaconda's Spyder IDE). To run it from the Python command line prompt, type the file name. (The directory containing the file must be in your Python "path" variable. If it's not, you will need to type the entire directory path of the file to identify it to Python; e.g. `python C:\Users\Anya\Documents\file.py`.) To run it in an IPython notebook, you would type or copy the code, or sections of it, into code cells in the notebook and then run the cells in order, or else run the entire notebook. There is no single protocol for how many lines of code should be placed in one notebook cell, but all the lines of a loop and all the lines of a user-defined function must be in one cell. A reasonable approach might be for contiguous code lines of similar character (e.g., those initializing parameters, or those defining lists), or all of the code designed to perform some particular task, to be put into a single cell. 
# # ### <font color="blue">3.2 Two-dimensional Projectile Motion Without Drag</font> # Extending the analysis of 1-D motion to two dimensions is not very difficult -- we simply have to add position, velocity, and acceleration variables for the new coordinate, $y$. Again assuming constant acceleration, the expanded set of (approximate) kinematic equations in the Euler-Cromer approach then is # # \begin{align} # v_x(t+ \Delta t) & = v_x(t) + a_x \, \Delta t , \\ # v_y(t+ \Delta t) & = v_y(t) + a_y \, \Delta t , \\ # x(t+ \Delta t) & = x(t) + v_x(t+ \Delta t) \Delta t , \\ # y(t+ \Delta t) & = y(t) + v_y(t+ \Delta t) \Delta t . # \end{align} # # The extension of the earlier 1-D code to the 2-D case is straightforward and is left as an exercise. # # ### <font color="blue">3.3 Two-dimensional Motion with Drag</font> # Although air drag often is considered negligible in kinematics problems, it actually plays an important role in the motion of many objects. In this section you will code in Python to solve for the position, velocity, and acceleration of a 2-D projectile motion with drag. # # At very low speeds for small particles, air drag is approximately proportional to the particle's speed -- this is **linear drag**. For larger (everyday) objects traveling at higher speeds, air drag is approximately proportional to the speed squared -- this case is referred to as **quadratic drag**. # # For the case of a linear drag force $\mathbf{f}_{lin}$, the corresponding acceleration $\mathbf{a}_{lin}$ of an object of mass $m$ would have the form # \begin{equation} # \mathbf{a}_{lin} = \dfrac{\mathbf{f}_{lin}}{m} = -\dfrac{b \mathbf{v}}{m}, # \end{equation} # where $b$ is a constant that depends on the object's size and shape, and on the material through which it travels. Note that the acceleration and velocity are vectors; the "$-$" sign captures the fact that the drag force is in the opposite direction of the velocity. 
# # In terms of components, which would be used (rather than full vectors) in a computer program, we have # \begin{equation} # a_{lin, x} = - \dfrac{b}{m} v_x \hspace{2mm} ; \hspace{5mm} a_{lin, y} = - \dfrac{b}{m} v_y . # \end{equation} # The force $\mathbf{f}_{quad}$ due to quadratic drag, and the corresponding acceleration $\mathbf{a}_{quad}$ of an object of mass $m$, take the forms # \begin{equation} # \mathbf{a}_{quad} = \dfrac{1}{m} \, \mathbf{f}_{quad} = - \dfrac{1}{2m} \, C_D \rho A v \, \mathbf{v}, # \end{equation} # where $C_D$ is the **coefficient of drag**, which depends on the shape of the object; $\rho$ is the density of the fluid through which the object moves (air in this case); $A$ is the cross-sectional area of the object (perpendicular to the direction of travel); and $\mathbf{v}$ is the object's instantaneous velocity ($v$ is its instantaneous speed). Notice that there are *two* "v" factors in the quadratic acceleration expression above: one is the velocity vector $\mathbf{v}$ itself, while the other is its magnitude $v$. # # Without air drag, an object in 2-D projectile motion has a horizontal acceleration of zero and a vertical acceleration of $-g$, so adding in (quadratic) air drag we have: # # \begin{align} # a_x = a_{quad, x} & = - \dfrac{1}{2m} C_D \rho A v \, v_x , \\ # a_y = a_{quad, y} - g & = - \dfrac{1}{2m} C_D \rho A v \, v_y - g . # \end{align} # Again, each of these expressions contain *two* "v" factors: the magnitude $v$ and one of its components ($v_x$ or $v_y$). # # ### <font color="blue">**Recap**</font> # * The Euler-Cromer method --- which "predicts" the value of a function at time $t + h$ from its value at $t$ using a *linear approximation* --- can be used to analyze motion in one or more dimensions. The analysis will be accurate only if the time step $h$ is chosen small enough. (What is "small enough" is determined by the desired accuracy of the computation.) 
# <br> # # * The code to implement the Euler-Cromer method involves iteration, embodied most naturally in a `while` loop with the final time as the limit of iteration. # <br> # # * Pseudocode is useful for planning the construction of complex or extensive code. # # ### <font color="blue">**Reflection Prompts**</font> # # These questions are intended to help you think about what you learned from this module and how it might be useful to you in the future. You are strongly encouraged to answer them before moving on to the next module. # # * Which components of this module did you find you were easily able to work through, and why do you think they were especially easy for you? # # * Which components of this module did you find more difficult to work through, and why do you think they were challenging? # # * When you got stuck, what did you do to get unstuck? Could this or similar actions be helpful if you get stuck in future work? # # * What do you understand more deeply about this material? # # * What questions or uncertainties remain for you regarding this material? # ### <font color="blue">Exercises</font> # # ***Please put your solutions to each exercise directly after the statement of the exercise.*** # <br> # # ***Whenever producing plots in the exercises below and in future modules, make sure that axes are labeled and show units. If a plot displays more than one curve or one set of points, a legend also should be included. (See Module 1B for information on making and formatting plots.)*** # <br> # <u>**Exercise \#1**</u> # **(a)** Transfer the Euler-Cromer code presented above into a new code cell below, encapsulate it in a Python function, and run it to model vertical motion without gravity. (Reminder: the creation of Python functions using the `def` command was discussed in Module 1B. You might want your function to take the initial velocity, the acceleration, and the final time as arguments.) 
For an initial speed of $300$ m/s, *predict* and then plot the $y$ vs $t$ data (with $y = 0$ at the Earth's surface). (Remember that you will have to import the `plot` function from `matplotlib.pyplot`.) **(b)** Change the acceleration so that it's appropriate for vertical motion near the surface of the Earth with gravity, and *predict* what the plot should look like. Run the function and check your prediction, plotting the *full motion above the surface*. **(c)** Have your code print out the peak height of the motion. # # ***<font color = "red">Briefly discuss all of your results in a Markdown cell following your results -- you should do this for every exercise in this and future modules.</font>*** # <br> # <u>**Exercise \#2**</u> # Suppose we want our code to model vertical motion near Earth, but not just close to its surface where $g$ is constant. **(a)** Create a Python function to work in this situation where the gravitational force on a mass $m$ at a distance $r$ from Earth's center is $F = G m M_E / r^2$. What is the percentage difference in the peak height *above the Earth's surface* compared to the case of constant $g$ if the initial velocity is $300$ m/s? (You will want your function to print out the peak height. You also will want to set the final time so that you can see most of the motion, starting from the Earth's surface and back down to it. Don't plot the part of the trajectory below the surface, since that would be physically unrealistic.) What happens if the initial speed is $3300$ m/s? (This is $30\%$ of escape velocity.) What's the percentage difference in the peak height in this case? Is it legitimate that we usually ignore the height-dependence of $g$? # <br> # <u>**Exercise \#3**</u> # The aim of this Exercise is to implement the equations in the section on 2-D motion without drag. **(a)** Write out pseudocode for this extended program. 
**(b)** Now write the code itself, and encapsulate it in a Python function to which you can pass the initial velocity components $v_{xo}$ and $v_{yo}$, as well as the gravitational acceleration $g$, as arguments. You might want your function to return both position and velocity components, as well as the time. # # For the following parts of the Exercise, let $v_{xo} = 20$ m/s and $v_{yo} = 30$ m/s, and use the standard value $g = 9.80$ m/s$^2$. **(c)** Make plots of $x$ vs. $t$ and $y$ vs. $t$ for the time period when the object is at or above "ground level." Do they look as you'd expect? **(d)** Plot the $v_x$ vs. $t$ and $v_y$ vs. $t$ graphs as well. Are they as expected? **(e)** Make a plot of $y$ versus $x$, and eyeball (or compute in your code) both the height of the peak and the *range* of the motion (the horizontal distance from the launch point to the point in the descent at which the projectile returns to the launch height). **(f)** Determine the same values analytically and check if they match the numerical results. # <br> # <u>**Exercise \#4**</u> # Modify your Python function of Exercise \#3 for the case of motion in the presence of quadratic drag. Design your function to take $C_D$, $\rho$, $m$, and $A$ as additional arguments. **(a)** Model the motion of a ball of mass $m = 3.0$ kg and radius $0.05$ m, and use the same initial velocity components and $g$ value that you used in the previous exercise: the aim is to compare the motions with and without drag. (Reasonable values of the constants, in MKS units, are $C_D = 0.5$, and $\rho = 1.3$ for air.) Plot graphs of $x$ and $y$ vs. $t$, and $y$ vs. $x$ for the case with drag. Also make a plot of $y$ vs. $x$ showing both curves, with and without drag. (Make sure to display a legend.) Briefly discuss how the two trajectories compare. Is the one with drag symmetric? What are the times-of-flight (from ground level back to ground level) for the two motions? 
**(b)** Repeat the comparison (with and without drag) with $v_{xo} = 60$ m/s and $v_{yo} = 90$ m/s. What do you conclude from comparing the results of parts (a) and (b)? \[***Hint:*** In both parts above, you can easily treat the case of no drag with your current function.\] # <br> # <font size="3pt">**Breakpoint Answers**</font> # # **Breakpoint 1**: $f(x + h) \simeq f(x) + h \, \dfrac{df(x)}{dx}$. # #
Computational_Module-03-Iterative_Methods.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:gempy]
#     language: python
#     name: conda-env-gempy-py
# ---

# South Australia gravity-station inversion scratchpad.
#
# Workflow: load the SA gravity-station shapefile into `sagrav`, restrict to
# MGA zone 54, scan 5 km x 5 km tiles until one contains more than three
# stations, build a TensorMesh over that tile, and run a SimPEG 3-D gravity
# inversion on the tile's Bouguer anomalies.  The trailing cells download the
# SimPEG example gravity data set for comparison.
#
# NOTE(review): bare trailing expressions such as `sagrav.head()` or `mesh2`
# are notebook display cells; when run as a plain script they are no-ops.

# +
import geopandas as gpd
import pandas as pd
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import tarfile

from discretize import TensorMesh

from SimPEG.utils import plot2Ddata, surface2ind_topo
from SimPEG.potential_fields import gravity
from SimPEG import (
    maps,
    data,
    data_misfit,
    inverse_problem,
    regularization,
    optimization,
    directives,
    inversion,
    utils,
)
# -

# +
# `sagrav` must be loaded from the SA gravity-station shapefile before the
# rest of the notebook can run; the path below is machine-specific, so the
# line is left commented out here.
# sagrav = gpd.read_file(r'C:\users\rscott\Downloads\gravity_stations_shp\gravity_stations.shp')
# -

print(sagrav['MGA_ZONE'].unique())
sagrav.head()

# Station table -> array of (easting, northing, elevation, Bouguer anomaly).
# MGA metric coordinates are used (not lon/lat) because the mesh below is
# defined in metres.
survey_array = sagrav[['MGA_EAST', 'MGA_NORTH', 'AHD_ELEVAT', 'BA_1984_UM']].to_numpy()
dobs = survey_array
survey_array.shape
dobs.shape

dobs_total_bounds = sagrav.total_bounds
print(dobs_total_bounds)

# Restrict to MGA zone 54 so all eastings/northings share a single zone.
sa54 = sagrav.loc[sagrav['MGA_ZONE'] == 54]

# +
minx, miny, maxx, maxy = dobs_total_bounds
# Overwrite the whole-state bounds with the zone-54 extent, in metres.
minx = sa54['MGA_EAST'].min()
maxx = sa54['MGA_EAST'].max()
miny = sa54['MGA_NORTH'].min()
maxy = sa54['MGA_NORTH'].max()

# A trial 5 km tile in the NE corner of the zone-54 extent.
minxtest = maxx - 5000
maxxtest = maxx
minytest = maxy - 5000
maxytest = maxy
print(minxtest, maxxtest, minytest, maxytest)
# -

# Receiver locations (x, y, z) and observed data (Bouguer anomaly) for the
# full data set; `dobs` is narrowed from the 4-column array to the data column.
receiver_locations = dobs[:, 0:3]
dobs = dobs[:, -1]

# +
from tqdm import tqdm   # retained from the original notebook (progress-bar experiments)
from time import sleep

print(minx, miny, maxx, maxy)

# Number of whole 5 km tiles spanned by the zone-54 extent in each direction.
maxrangey = (maxy - miny) // 5000
maxrangex = (maxx - minx) // 5000
print(maxrangex, maxrangey)

# Scan tiles row by row until one contains more than three stations; that
# tile's stations (`sagrav_test`) feed the inversion below.
for i in range(int(maxrangey)):
    print(i)
    for j in range(int(maxrangex)):
        xmin = minx + j * 5000
        ymin = miny + i * 5000
        xmax = minx + (j + 1) * 5000
        ymax = miny + (i + 1) * 5000
        print(xmin, ymin, xmax, ymax)
        sagrav_test = sa54.loc[
            (sa54['MGA_EAST'] >= xmin) & (sa54['MGA_NORTH'] >= ymin)
            & (sa54['MGA_EAST'] <= xmax) & (sa54['MGA_NORTH'] <= ymax)
        ]
        if sagrav_test.shape[0] > 0:
            # First non-empty tile in this row: stop the inner scan.
            break
    if sagrav_test.shape[0] > 3:
        # Tile is populated enough to invert -- stop the search entirely.
        print(sagrav_test)
        break

print(minx, miny, maxx, maxy, sagrav_test.shape)
print(sagrav_test.total_bounds)
print(xmin, xmax, ymin, ymax)

# 10 x 10 x 5 tensor mesh over the selected tile: 500 m cells in x/y,
# 200 m cells in z, with the mesh origin 1000 m below datum.
ncx = 10
ncy = 10
ncz = 5
dx = 500
dy = 500
dz = 200
x0 = xmin
y0 = ymin
z0 = -1000
hx = dx * np.ones(ncx)
hy = dy * np.ones(ncy)
hz = dz * np.ones(ncz)
# Fix: the original passed [hx, hx, hz]; `hy` was defined but never used.
# dx == dy here, so the resulting mesh is numerically unchanged.
mesh2 = TensorMesh([hx, hy, hz], x0=[x0, y0, z0])
mesh2
# -

sagrav_test

# Fix: extract the tile's coordinates in the same MGA metre system that the
# mesh uses.  The original selected LONGITUDE/LATITUDE here, which would put
# the receivers far outside the metre-based mesh built above.
survey_array_test = sagrav_test[['MGA_EAST', 'MGA_NORTH', 'AHD_ELEVAT', 'BA_1984_UM']].to_numpy()
print(survey_array_test.shape)
dobs_test = survey_array_test

receiver_locations_test = dobs_test[:, 0:3]
dobs_test = dobs_test[:, -1]

# +
# Plot the tile's observed gravity anomaly.
mpl.rcParams.update({"font.size": 12})
fig = plt.figure(figsize=(7, 5))
ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85])
plot2Ddata(receiver_locations_test, dobs_test, ax=ax1, contourOpts={"cmap": "bwr"})
ax1.set_title("Gravity Anomaly")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")
ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85])
norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs_test)), vmax=np.max(np.abs(dobs_test)))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e"
)
cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12)
plt.show()
# -

dobs_test.shape
sagrav_test

# +
# Assign a flat 1%-of-peak-anomaly uncertainty to every datum.
maximum_anomaly = np.max(np.abs(dobs_test))
uncertainties = 0.01 * maximum_anomaly * np.ones(np.shape(dobs_test))
# -

print(i)  # display cell: the tile-row index at which the scan stopped

# +
# Define the receivers. The data consist of vertical gravity anomaly
# measurements ("gz"); the set of receivers must be given as a list.
receiver_list = gravity.receivers.Point(receiver_locations_test, components="gz")
receiver_list = [receiver_list]

# Define the source field and the survey.
source_field = gravity.sources.SourceField(receiver_list=receiver_list)
survey = gravity.survey.Survey(source_field)
# -

receiver_list

data_object = data.Data(survey, dobs=dobs_test, standard_deviation=uncertainties)
data_object
mesh2

# +
# Starting density contrast in g/cc.  Must be non-zero, otherwise the
# gradient on the first iteration vanishes and the inversion cannot start.
background_density = 1e-6

# Active cells: those below the "surface".  NOTE(review): the receiver
# locations themselves are used as a pseudo-topography here; `topo_fake`
# (locations shifted up by 399 m) was an experiment and is not used.
topo_fake = receiver_locations_test + 399
print(receiver_locations_test)
print(topo_fake)
ind_active = surface2ind_topo(mesh2, receiver_locations_test)

# Map from the model vector to the active cells (one value per active cell).
nC = int(ind_active.sum())
model_map = maps.IdentityMap(nP=nC)

# Constant starting model over the active cells.
starting_model = background_density * np.ones(nC)
# -

nC
model_map
ind_active
starting_model

simulation = gravity.simulation.Simulation3DIntegral(
    survey=survey, mesh=mesh2, rhoMap=model_map, actInd=ind_active
)

# +
# Data misfit: L2 norm of the residual between observed and predicted data,
# normalized by each datum's standard deviation.
dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation)

# Regularization (model objective function).
reg = regularization.Simple(mesh2, indActive=ind_active, mapping=model_map)

# Optimizer: projected Gauss-Newton with a conjugate-gradient inner solver.
opt = optimization.ProjectedGNCG(
    maxIter=10, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
)

# The inverse problem to be solved.
inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
# -

dmis.nD

# +
# Directives controlling the inversion.
# Starting trade-off parameter (beta) between data misfit and regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
# Fractional decrease in beta and Gauss-Newton solves per beta value.
beta_schedule = directives.BetaSchedule(coolingFactor=5, coolingRate=1)
# Save recovered models / predicted data at each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
# Update the preconditioner when it is model dependent.
update_jacobi = directives.UpdatePreconditioner()
# Stopping criterion.
target_misfit = directives.TargetMisfit(chifact=1)
# Sensitivity weighting.
sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)

directives_list = [
    sensitivity_weights,
    starting_beta,
    beta_schedule,
    save_iteration,
    update_jacobi,
    target_misfit,
]
# -

# +
# Combine the inverse problem with the directives and run the inversion.
inv = inversion.BaseInversion(inv_prob, directives_list)
recovered_model = inv.run(starting_model)
# -

# +
# Plot a vertical slice through the recovered density model.
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh2, ind_active, np.nan)
ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.8])
mesh2.plotSlice(
    plotting_map * recovered_model,
    normal="Y",
    ax=ax1,
    ind=int(mesh2.nCy / 2),
    grid=True,
    clim=(np.min(recovered_model), np.max(recovered_model)),
    pcolorOpts={"cmap": "viridis"},
)
ax1.set_title("Model slice at y = 0 m")
ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
norm = mpl.colors.Normalize(vmin=np.min(recovered_model), vmax=np.max(recovered_model))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis
)
cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)
plt.show()
# -

# +
# Observed data | Predicted data | Normalized data misfit, side by side.
dpred = inv_prob.dpred
data_array = np.c_[dobs_test, dpred, (dobs_test - dpred) / uncertainties]

fig = plt.figure(figsize=(17, 4))
plot_title = ["Observed", "Predicted", "Normalized Misfit"]
plot_units = ["mgal", "mgal", ""]
ax1 = 3 * [None]
ax2 = 3 * [None]
norm = 3 * [None]
cbar = 3 * [None]
cplot = 3 * [None]
# Fix: color limits for the observed/predicted panels now come from the
# plotted subset `dobs_test`, not the full-state `dobs` (consistent with the
# earlier anomaly plot above).
v_lim = [
    np.max(np.abs(dobs_test)),
    np.max(np.abs(dobs_test)),
    np.max(np.abs(data_array[:, 2])),
]

for ii in range(0, 3):
    ax1[ii] = fig.add_axes([0.33 * ii + 0.03, 0.11, 0.23, 0.84])
    cplot[ii] = plot2Ddata(
        receiver_list[0].locations,
        data_array[:, ii],
        ax=ax1[ii],
        ncontour=30,
        clim=(-v_lim[ii], v_lim[ii]),
        contourOpts={"cmap": "bwr"},
    )
    ax1[ii].set_title(plot_title[ii])
    ax1[ii].set_xlabel("x (m)")
    ax1[ii].set_ylabel("y (m)")

    ax2[ii] = fig.add_axes([0.33 * ii + 0.25, 0.11, 0.01, 0.85])
    norm[ii] = mpl.colors.Normalize(vmin=-v_lim[ii], vmax=v_lim[ii])
    cbar[ii] = mpl.colorbar.ColorbarBase(
        ax2[ii], norm=norm[ii], orientation="vertical", cmap=mpl.cm.bwr
    )
    cbar[ii].set_label(plot_units[ii], rotation=270, labelpad=15, size=12)

plt.show()
# -

dpred

# +
# Download the SimPEG example gravity data set for comparison.
data_source = "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz"
downloaded_data = utils.download(data_source, overwrite=True)

# SECURITY NOTE: extractall() on a downloaded archive can write outside the
# working directory if the archive contains crafted paths; acceptable here
# only because the source is the trusted SimPEG asset bucket.
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()

# Path to the directory containing the extracted data and its files.
dir_path = downloaded_data.split(".")[0] + os.path.sep
topo_filename = dir_path + "gravity_topo.txt"
data_filename = dir_path + "gravity_data.obs"
model_filename = dir_path + "true_model.txt"
# -

xyz_topo = np.loadtxt(str(topo_filename))
xyz_topo.shape
xyzdobs = np.loadtxt(str(data_filename))
xyzdobs.shape
xyz_topo[1]
xyzdobs[0]
xyzdobs
sagrav_test
dobs_test
survey_array_test[0]
receiver_locations_test[0]
print(survey)
survey.nD
data
# NOTE(review): `data` here is the SimPEG *module*, so this display cell
# likely raises AttributeError; `data_object.noise_floor` was probably meant.
data.noise_floor
mesh2
xyzdobs
recovered_model
# Redundant re-import (already imported at the top); kept as in the notebook.
from SimPEG.utils import plot2Ddata, surface2ind_topo
notebooks/SA-Gravity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + gradient={"editing": false, "execution_count": 4, "id": "boolean-diving", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} # %%capture # !pip install transformers # !pip install pypinyin # !pip install jieba # !pip install paddlepaddle # + gradient={"editing": false, "execution_count": 1, "id": "metropolitan-times", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} # %%capture import sys sys.path.append("../") import re,time,json from collections import defaultdict from torch.utils.data import DataLoader from pypinyin import pinyin, Style from tqdm import tqdm import pickle import numpy as np import torch import torch.nn.functional as F import torch.nn as nn from copy import deepcopy from transformers import (BertTokenizer,BertConfig,BertModel) from model.Embedding import * from model.fusionDataset import FusionDataset import jieba import jieba.posseg as pseg import paddle config = BertConfig.from_pretrained('../AnchiBERT') tokenizer = BertTokenizer.from_pretrained('../AnchiBERT') Anchibert = BertModel.from_pretrained('../AnchiBERT',config=config) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # + [markdown] gradient={"editing": false, "id": "stable-checkout", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} # ### Load Necessary preproceeded Data # + gradient={"editing": false, "execution_count": 2, "id": "funky-measurement", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} with open('../pretrained_model/char_map.json','r') as f: ix2glyph = defaultdict(lambda : '_') ix2glyph[0] = '[PAD]' glyph2ix = defaultdict(lambda : 1) glyph2ix.update({'[CLS]':0,'[SEP]':0,'[PAD]':0}) for i, k in enumerate(json.load(f).keys(),2): glyph2ix[k] = i ix2glyph[i] = k with open('../pretrained_model/pinyin_map.json','r') as f: 
pinyin2ix = defaultdict(lambda : 1) pinyin2ix.update({'[CLS]':0,'[SEP]':0,'[PAD]':0}) for i,k in enumerate(json.load(f).keys(),2): pinyin2ix[k] = i with open('../pretrained_model/pos_tags.json','r') as f: pos2ix = defaultdict(lambda : 0) pos2ix.update(json.load(f)) # + gradient={"editing": false, "execution_count": 3, "id": "whole-israeli", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} # train 上联 with open("couplet/train/in.txt",encoding='utf8') as f: tr_in = [row.strip().split() for row in f.readlines()] # train 下联 with open("couplet/train/out.txt",encoding='utf8') as f: tr_out = [row.strip().split() for row in f.readlines()] with open('data/train_in_pos.pt','rb') as f: tr_pos_in = pickle.load(f) with open('data/train_out_pos.pt','rb') as f: tr_pos_out = pickle.load(f) # + gradient={"editing": false, "execution_count": 4, "id": "designing-stage", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} display(len(tr_in)) total_len = len(tr_in) half = total_len//7 display(half) train_split = int(0.80 * half) # + gradient={"editing": false, "id": "played-lancaster", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} trainSet = FusionDataset(tr_in[:train_split],tokenizer,\ glyph2ix,pinyin2ix,pos2ix,tr_out[:train_split],\ tr_pos_in[:train_split],tr_pos_out[:train_split],\ device=device) # use device if you want to load it gpu # + gradient={"editing": false, "execution_count": 11, "id": "ordered-significance", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} valSet = FusionDataset(tr_in[train_split:half],tokenizer,\ glyph2ix,pinyin2ix,pos2ix,tr_out[train_split:half],\ tr_pos_in[train_split:half],tr_pos_out[train_split:half],\ device=device) # use device if you want to load it gpu # + gradient={"execution_count": 14, "id": "protective-sixth", "kernelId": "583276d5-8a0a-4f47-9ffe-672fe9cb301f"} from model.fusion_transformer import Fusion_Anchi_Trans_Decoder, Fusion_Anchi_Transformer, Anchi_Decoder,Anchi_Transformer # - from utils.trans_trainer import train # ## 
# ## Fusion_Anchi_Trans_Decoder

# for i in range(1,6):
# Decoder-only model on top of AnchiBERT, with glyph + pinyin + POS-tag fusion embeddings.
config = {
    # for Fusion_Anchi_Trans_Decoder
    'max_position_embeddings':50,
    'hidden_size':768,                      # must match AnchiBERT hidden size
    'font_weight_path':'../pretrained_model/glyph_weight.npy',
    'pinyin_embed_dim':30,                  # trainable
    'pinyin_path':'../pretrained_model/pinyin_map.json',
    'tag_size':30,
    'tag_emb_dim':10,                       # trainable
    'layer_norm_eps':1e-12,
    'hidden_dropout':0.1,
    'nhead':12,
    'num_layers':6 ,                        # 6, trainable
    'output_dim':9110,                      # fixed: use glyph vocabulary size as output
    'device':device,
}
# batch_size = [32,64,128]
# lr =[0.1,0.01,0.001]
# Run-name convention:
# <model_name>_<optim>_<batch_num>_<lr>_<epoch>_<num_layer>_<pinyin_embed_dim>_<tag_emb_dim>_<train_data_size>
name = f'fu_anchi_de_Adam_128_0001_60_6_30_10_110k'
train(Fusion_Anchi_Trans_Decoder(config),trainSet,valSet,batch_size=128,lr=0.0001,
      epoch=60,bert=Anchibert,name= name, with_trans=True,
      optimizer_name='Adam',scheduleFactor=0.5, schedule_Patience=5,min_lr=1e-06,verbose=True
      ,patience=10,store='result/')
torch.cuda.empty_cache()

# ## Anchi_Decoder
# Baseline decoder-only model without the fusion (glyph/pinyin/POS) embeddings.
config = {
    # for Trans_Decoder
    'max_position_embeddings':50,
    'hidden_size':768,
    'layer_norm_eps':1e-12,
    'hidden_dropout':0.1,
    'nhead':12,
    'num_layers':6,                         # trainable
    'output_dim':9110,                      # fixed: use glyph vocabulary size as output
    'device':device
}
# <model_name>_<optim>_<batch_num>_<lr>_<epoch>_<num_layer>_<train_size>
# NOTE(review): this name has one extra "10" token relative to the convention above
# (kept as-is because the string identifies existing result files).
name = 'anchi_de_Adam_128_0001_10_60_6_110k'
train(Anchi_Decoder(config),trainSet,valSet,batch_size=128,lr=0.0001,
      epoch=60,bert=Anchibert,name= name, with_trans=True,
      optimizer_name='Adam',scheduleFactor=0.5, schedule_Patience=5,min_lr=1e-06,verbose=True
      ,patience=10,store='result/')
torch.cuda.empty_cache()

# ## Fusion_Anchi_Transformer
# Full encoder-decoder transformer with fusion embeddings. Note lr=0.01 here vs 0.0001
# for the decoder-only runs above.
config = {
    # Fusion_Anchi_Transformer
    'max_position_embeddings':50,
    'hidden_size':768,
    'font_weight_path':'../pretrained_model/glyph_weight.npy',
    'pinyin_embed_dim':30,                  # trainable
    'pinyin_path':'../pretrained_model/pinyin_map.json',
    'tag_size':30,
    'tag_emb_dim':10,                       # trainable
    'layer_norm_eps':1e-12,
    'hidden_dropout':0.1,
    'nhead':12,
    'num_encoder_layers':5,                 # trainable
    'num_decoder_layers':6,                 # trainable
    'output_dim':9110,                      # fixed: use glyph vocabulary size as output
    'dim_feedforward': 3072,
    'activation':'relu',
    'trans_dropout':0.1,
    'device':device
}
# <model_name>_<optim>_<batch_num>_<lr>_<epoch>_<pinyin_embed_dim>_<tag_emb_dim>_<encoder layer>_<decoder layer>_<train_data_size>
name = 'fu_anchi_tra_Adam_128_01_60_30_10_5_6_110k'
train(Fusion_Anchi_Transformer(config),trainSet,valSet,batch_size=128,lr=0.01,
      epoch=60,bert=Anchibert,name= name, with_trans=True,
      optimizer_name='Adam',scheduleFactor=0.5, schedule_Patience=5,min_lr=1e-06,verbose=True
      ,patience=10,store='result/')
torch.cuda.empty_cache()

# ## Anchi_Transformer
# Baseline encoder-decoder transformer without fusion embeddings.
config = {
    # Anchi_Transformer
    'max_position_embeddings':50,
    'hidden_size':768,
    'layer_norm_eps':1e-12,
    'hidden_dropout':0.1,
    'nhead':12,
    'num_encoder_layers':6,                 # trainable
    'num_decoder_layers':6,                 # trainable
    'output_dim':9110,                      # fixed: use glyph vocabulary size as output
    'dim_feedforward': 3072,
    'activation':'relu',
    'trans_dropout':0.1,
    'device':device
}
# <model_name>_<optim>_<batch_num>_<lr>_<epoch>_<encoder layer>_<decoder layer>_<train_data_size>
name = 'anchi_tra_Adam_128_01_60_6_6_110k'
train(Anchi_Transformer(config),trainSet,valSet,batch_size=128,lr=0.01,
      epoch=60,bert=Anchibert,name= name, with_trans=True,
      optimizer_name='Adam',scheduleFactor=0.5, schedule_Patience=5,min_lr=1e-06,verbose=True
      ,patience=10,store='result/')
torch.cuda.empty_cache()
notebook/Transformer_trainer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Evaluation, analysis, and reporting on your metadata collection
#
# The first step is to extract all nodes that contain text, element or attribute, into a csv
# that flattens the xml while retaining all information, except for order of elements (though
# there is a parameter to extract that information in the XSL if you're interested in extending
# the code to test the content of an element).
#
# Second, we create a version of the data that only contains the xpaths from the FAIR
# recommendation you've created. To do this use the xpaths that correspond with the FAIR
# recommendation concepts you're including, and in some cases, the element name. This
# instantiation of the recommendation does not go all the way into the child elements
# necessary for the recommendation, but is employed in such a way as to scrape all the
# children elements used. This way the result contains all of the metadata that that site
# used to add additional context to the concepts the recommendation contains.
#
# Next these csv are analyzed for occurrence.
#
# Finally, to compare directly between the differences in child elements each site uses, we
# pivot the data to create a table containing the completeness percentage for the highest
# occurring child element and visualize the completeness.
#
# #### Google Colaboratory environment
#
# Run the next two code cells to prepare Colab for creating reports. If you are using your
# own Jupyter, see the README.MD for necessary modules

# +
# !git clone https://github.com/scgordon/MILE4FAIRness.git
# !pip install xlsxwriter pydrive
# !pip install https://11956-14579099-gh.circle-artifacts.com/0/dist/plotly-3.8.0%2B2.g4a2ccc2e.tar.gz
# !wget https://github.com/plotly/orca/releases/download/v1.2.1/orca-1.2.1-x86_64.AppImage -O /usr/local/bin/orca
# !chmod +x /usr/local/bin/orca
# !apt-get install xvfb libgtk2.0-0 libgconf-2-4
# -

# cd 'MILE4FAIRness/notebooks/'

# +
# #!git clone https://github.com/scgordon/SampleMetadataMILE4.git ../collection/
# #!git clone https://github.com/scgordon/NCARlibraryXMLthroughTime.git ../collection/
# -

# #### Collect your own metadata records
# [Notebook: Compile Collections](./compileCollection.ipynb)

# ## Prepare the notebook environment
#
# * import modules
# * define variables
# * define recommendation

# +
import sys
import os
import pandas as pd
import gzip
import shutil
import subprocess
import tarfile
import EARmd as md

# Output directory for the evaluated/analyzed data.
os.makedirs("../data/FAIR", exist_ok=True)

# create a list of each collection's name (one subdirectory of ../collection per collection;
# hidden entries like .git are skipped)
collectionsToProcess = [name for name in os.listdir("../collection")
                        if not name.startswith('.')
                        ]
# -

# ## Create a FAIR recommendation together
# [Google Doc for Collaborating](https://docs.google.com/document/d/1pD76sp16zKm4noSMT1ZGGPw1n3zaJrIaEW-h1Fe_MSg/edit?usp=sharing)
#
# Add the finished Recommendation to the RecDict in the cell below.

# +
# A dictionary containing the recommendation xpaths and the relevant sub element.
RecDict = {"Number of Records": "Number of Records", '/eml:eml/project/funding': 'funding', 'attributeLabel': 'attributeLabel', 'enumeratedDomain': 'enumeratedDomain', 'qualityControl': 'qualityControl', 'precision': 'precision', 'missingValueCode': 'missingValueCode', 'entityDescription': 'entityDescription', '/eml:eml/@xsi:schemaLocation': "xsi:schemaLocation", "/eml:eml/@packageId": "packageId", '/eml:eml/@system': 'system', "/eml:eml/access": "access", '/eml:eml/dataset/alternateIdentifier': "alternateIdentifier", "/eml:eml/dataset/title": "title", "/eml:eml/dataset/creator": "creator", "/eml:eml/dataset/contact": "contact", "/eml:eml/dataset/metadataProvider": "metadataProvider", "/eml:eml/dataset/associatedParty": "associatedParty", "/eml:eml/dataset/publisher": "publisher", "/eml:eml/dataset/pubDate": "pubDate", "/eml:eml/dataset/abstract": "abstract", '/eml:eml/dataset/project/abstract': "abstract", "/eml:eml/dataset/keywordSet": "keywordSet", "/eml:eml/dataset/project/keywordSet": "keywordSet", "/eml:eml/dataset/intellectualRights": "intellectualRights", "/eml:eml/dataset/maintenance": "maintenance", "/eml:eml/dataset/methods": "methods", "/eml:eml/dataset/project": "project", 'physical/distribution': 'distribution', "/eml:eml/dataset/dataTable/attributeList": "attributeList", "/eml:eml/dataset/spatialRaster/attributeList": "attributeList", "/eml:eml/dataset/spatialVector/attributeList": "attributeList", "/eml:eml/dataset/storedProcedure/attributeList": "attributeList", "/eml:eml/dataset/view/attributeList": "attributeList", "/eml:eml/dataset/otherEntity/attributeList": "attributeList", "/eml:eml/dataset/dataTable/constraint": "constraint", "/eml:eml/dataset/spatialRaster/constraint": "constraint", "/eml:eml/dataset/spatialVector/constraint": "constraint", "/eml:eml/dataset/storedProcedure/constraint": "constraint", "/eml:eml/dataset/view/constraint": "constraint", "/eml:eml/dataset/otherEntity/constraint": "constraint", "/eml:eml/dataset/dataTable": 
"[entity]", "/eml:eml/dataset/spatialRaster": "[entity]", "/eml:eml/dataset/spatialVector": "[entity]", "/eml:eml/dataset/storedProcedure": "[entity]", "/eml:eml/dataset/view": "[entity]", "/eml:eml/dataset/otherEntity": "[entity]", "/eml:eml/dataset/project": "project", "/eml:eml/dataset/coverage/geographicCoverage": 'geographicCoverage', "/eml:eml/dataset/coverage/taxonomicCoverage": 'taxonomicCoverage', "/eml:eml/dataset/coverage/temporalCoverage": 'temporalCoverage', "attributeList/attribute/attributeDefinition": 'attributeDefinition', '/eml:eml/additionalMetadata': 'additionalMetadata' } # create a pattern to look for elements used in fulfilling the communities stated information needs used = set() elements = list(RecDict.keys()) elements = [x for x in elements if x not in used and (used.add(x) or True)] # Used to order a dataframe in the order of the recommendation usedSet = set() ElementOrder = list(RecDict.values()) ElementOrder = [x for x in ElementOrder if x not in usedSet and (usedSet.add(x) or True)] # create a list to fill the column with blanks or to utilize concepts ConceptOrder = ['Number of Records']+['']*(len(elements)-1) # create a list to fill the column with blanks that corresponds with the order of the FAIR recommendation levels. 
LevelOrder = ConceptOrder
# -

# Optionally, replace the blank LevelOrder variable with each element's Level, be it
# Findable, Accessible, Interoperable, or Reusable

# optional cell, only used if assigning level labels to recommendation elements/concepts
# define a list of element recommendation levels, maintaining the same order as the
# ElementOrder list (NOTE(review): its length must match len(ElementOrder) -- verify
# whenever RecDict changes)
LevelOrder = ["Number of Records",'Findable','Findable','Findable','Findable','Findable','Findable','Findable','Findable',
              'Findable','Findable','Findable','Findable','Findable','Accessible','Accessible','Interoperable','Interoperable',
              'Interoperable','Interoperable','Interoperable','Interoperable','Interoperable','Interoperable','Interoperable',
              'Interoperable','Interoperable','Interoperable','Reusable','Reusable','Reusable','Reusable','Reusable']

# ## Evaluation using the AllNodes.xsl transform
#
# This XSL is standards agnostic. AllNodes will work with any number of valid XML records,
# regardless of their standards compliance or creativity. The transform flattens the XML in
# each record in a directory into a csv. For each node that has text, the XSL writes a row
# that contains the directory name, file name, text content, and the Xpath for each element
# and attribute in the records in the collection.
#
# use the list of collections to run the evaluation for each collection

for collection in collectionsToProcess:
    # Build the java/Saxon command line that runs the Evaluation XSL. You'll need java
    # installed; its location is the first entry of the cmd list.
    cmd = ["/usr/bin/java",
           '-jar',
           "../scripts/saxon-b-9.0.jar",
           '-xsl:' + "../scripts/AllNodes.xsl",
           '-s:' + "../scripts/dummy.xml",
           '-o:' + "../data/FAIR/" + str(collection) + "_XpathEvaluated.csv",
           'recordSetPath=' + "../collection/" + str(collection) + "/"]
    # FIX: run the argument list directly instead of ' '.join(cmd) with shell=True.
    # The shell form broke on collection names containing spaces or shell metacharacters
    # and was an injection risk; passing the list is both safer and equivalent here.
    subprocess.run(cmd, check=True)

    # gzip the evaluated csv and remove the uncompressed original to save space
    xpath_eval_file = "../data/FAIR/" + str(collection) + "_XpathEvaluated.csv"
    with open(xpath_eval_file, 'rb') as f:
        gzxpath_eval_file = xpath_eval_file + '.gz'
        with gzip.open(gzxpath_eval_file, 'wb') as gzf:
            shutil.copyfileobj(f, gzf)
    os.remove(xpath_eval_file)

# ## Analysis using the EARmd.py module
# The module has already been used for getting the records via the Requests module. Now we
# are going to utilize the flat nature of the evaluated metadataset to use pandas to analyze
# the metadata for elements in the FAIR recommendation we've built. This process will yield
# two versions of the dataset: the absolute return of the evaluation, and the subset the
# recommendation pattern identified. Each version will be organized differently. Both
# versions will have an analysis applied called XpathOccurrence. It returns various
# information about the occurrence of each xpath used in the collection's records. The most
# important of these for our purposes is the percentage of records that contained which
# elements.
# # for collection in collectionsToProcess: # places for all the evaluated and analyzed data XpathEvaluated = os.path.join("../data/FAIR/", collection + "_XpathEvaluated.csv.gz") XpathOccurrence = os.path.join("../data/FAIR/", collection +'_XpathOccurrence.csv') # Read in the evaluated metadata EvaluatedDF = pd.read_csv(XpathEvaluated) # Use above dataframe and apply the xpathOccurrence functions from MDeval md.XpathOccurrence(EvaluatedDF, collection, XpathOccurrence) # Apply the recommendation to the collection md.applyRecommendation(elements, 'FAIR', collection) # ## Create reports with EARmd.py # # #### All Elements Useage # * The first row is the number of records. Use the *RecordCount* column # * Rows are Xpath in any record throughout the collection # * Columns are XpathCount, RecordCount, AverageOccurrencePerRecord, CollectionOccurrence% # # #### FAIR Elements Useage # * same as the Element Usage Analysis, but limited to elements and their children that occurr in the conceptual recommendation. # We will first apply a list of xpaths from a "50 thousand foot view". What is meant by this is that instead of explicitly looking for each child element of /eml:eml/dataset/contact looking for xpaths that contain /eml:eml/dataset/contact. This will allow us to create a version of the evaluation that contains elements important to fulfilling specific recommendation needs. It will also allow for additional insight in how element choices shift over time. # # #### FAIR Concepts Useage # * Take the occurrence percentage from the most used child element for each recommendation level parent element, and assign it to the element to get a high level view on recommendations compliance over time. # # Use the analyzed data to create reports for each collection. All reports are created as Excel spreadsheets. 
# #
# #### Visualize FAIR Fitness
# * Visualize the FAIR completeness percentage for your collection as a way to determine the
#   likelihood the catalog will address the FAIR information needs of your data users and
#   producers.
# <p><NAME> 2019 Is your metadata catalog in shape?. Zenodo. https://doi.org/10.5281/zenodo.2558631</p>
#
# ### Create a FAIRness report on the collection or collections
#
# * Give your report a name
# * Create the report
# * right click the resultant Google Sheets link

# Name used on the generated spreadsheet.
recommendationName = "YourRec"

# +
os.makedirs("../reports/FAIR", exist_ok=True)

#for collection in collectionsToProcess:
# NOTE(review): the per-collection loop above is commented out, so `collection` below is
# whatever value was left over from the last analysis loop -- presumably intentional for a
# single combined report, but verify.

# places for all the combined data and combined Report
DataDestination = os.path.join('../reports/FAIR', "Report.xlsx")
XpathOccurrence = os.path.join("../data/FAIR", 'Report_XpathOccurrence.csv')
FAIROccurrence = os.path.join("..", "data", "FAIR", 'Report_FAIRoccurrence.csv')
FAIRConcept = os.path.join('..','data','FAIR', 'Report_FAIRcompleteness.csv')
FAIRGraph = os.path.join('..','data','FAIR', 'Report_FAIR_.png')

# combine the absolute occurrence analysis for a site through time
XpathOccurrenceToCombine = [os.path.join("../data/FAIR", name)
                            for name in os.listdir("../data/FAIR")
                            if name.endswith('_XpathOccurrence.csv')
                            ]
md.CombineXPathOccurrence(XpathOccurrenceToCombine, XpathOccurrence, to_csv=True)

# Build lists of recommendation-specific occurrence analysis for a site through time.
# NOTE(review): this matches files ending '_FAIROccurrence.csv' while the combined output
# above is named 'Report_FAIRoccurrence.csv' (different case) -- confirm against the file
# names EARmd actually writes.
FAIRoccurrenceToCombine = [os.path.join("../data/FAIR", name)
                           for name in os.listdir("../data/FAIR")
                           if name.endswith('_FAIROccurrence.csv')
                           ]
# utilize function to combine the recommendation specific analyses
md.CombineAppliedRecommendation(collection, elements, 'FAIR', FAIRoccurrenceToCombine)

# create recommendation pivot tables and radar graphs to assess the parent elements usage
# through time
md.Collection_ConceptAnalysis(collection, 'FAIR', RecDict,
                              LevelOrder, ConceptOrder, ElementOrder, collectionsToProcess)

# write full quality image to Google Drive and get a link to insert next to the
# lower-quality picture in the google sheet
MyfolderID = '1UJNvXdlLO-4QwYKESr7B5N4hWXrkKRTY'
FAIRGraphLink = md.WriteToGoogle(
    os.path.join('..','data','FAIR', 'Report_FAIR_.png'),
    folderID=MyfolderID, Convert=None, Link=True)

# create Excel report on all analyses, write additional functions on data to provide some
# collection analytics
md.CombinationSpreadsheet(recommendationName, XpathOccurrence,
                          FAIROccurrence, FAIRConcept,
                          FAIRGraph, FAIRGraphLink,
                          DataDestination
                          )

# write the spreadsheet to Google Drive, convert to Sheet
md.WriteToGoogle(DataDestination, folderID=MyfolderID, Convert=True)
# -
notebooks/determineFAIRness.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Confidence Intervals and Classical Hypothesis Testing: Mean
# *<NAME>*
#
# Now we look at inference regarding the mean of a population. The **mean** is the average
# value, and in this context refers to the mean of quantitative variables.
#
# ## Confidence Interval for the Mean
#
# You are employed by a company that fabricates chips and other electronic components. The
# company wants you to investigate the resistors it uses in producing its components. In
# particular, while the resistors used by the company are labeled with a particular
# resistance, the company wants to ensure the manufacturer of the resistors produces quality
# products. Your task is to verify that the labeled resistance agrees with the observed
# resistance, after subjecting the resistors to testing.
#
# You test the resistance (in $\text{k}\Omega$) of some resistors labeled $1\text{k}\Omega$
# and obtain the following dataset (stored in a NumPy array).

import numpy as np

# Measured resistances (kOhm) of 104 resistors labeled 1 kOhm.
res = np.array([ 0.984, 0.988, 0.984, 0.987, 0.976, 0.997, 0.993, 0.985,
                 1.002, 0.987, 1.005, 0.993, 0.987, 0.992, 0.976, 0.998,
                 1.011, 0.971, 0.981, 1.008, 0.963, 0.992, 0.995, 0.99 ,
                 0.996, 0.99 , 0.985, 0.997, 0.983, 0.981, 0.988, 0.991,
                 0.971, 0.982, 0.979, 1.008, 1.006, 1.006, 1.001, 0.999,
                 0.98 , 0.996, 0.979, 1.009, 0.99 , 0.996, 1.001, 0.981,
                 0.99 , 0.987, 0.97 , 0.992, 0.982, 0.983, 0.974, 0.999,
                 0.987, 1.002, 0.971, 0.982, 0.989, 0.985, 1.014, 0.991,
                 0.984, 0.992, 1.003, 0.985, 0.987, 0.985, 1.   , 0.978,
                 0.99 , 0.99 , 0.985, 0.983, 0.981, 0.993, 0.993, 0.973,
                 1.   , 0.982, 0.987, 0.988, 0.982, 0.978, 0.989, 1.   ,
                 0.983, 1.008, 0.997, 0.974, 0.988, 1.002, 0.988, 0.994,
                 0.991, 1.   , 0.976, 0.987, 0.991, 1.010, 0.999, 1.002])

res.mean()

# You now want to construct a confidence interval for the true resistance of the resistors.
#
# You believe it's safe to assume that the data follows a Normal distribution; in that case,
# the confidence interval for the mean resistance is given by:
#
# $$\bar{x} \pm t_{n - 1, 1 - \frac{\alpha}{2}} \frac{s}{\sqrt{n}}$$
#
# where $\bar{x}$ is the sample mean, $s$ is the sample standard deviation, $\alpha$ is one
# minus the confidence level, and $t_{\nu, p}$ is the $p$th percentile of the
# [$t$ distribution](https://en.wikipedia.org/wiki/Student%27s_t-distribution) with $\nu$
# degrees of freedom.
#
# We can use functions from **statsmodels** to compute this interval.
#
# *(WARNING: The following function/code is NOT stable!)*

from statsmodels.stats.weightstats import _tconfint_generic   # Computes confidence intervals

# BUG FIX: the formula above requires the *sample* standard deviation s, but
# numpy's res.std() defaults to ddof=0 (population sd). Use ddof=1 so the
# standard error and hence the t interval are computed correctly.
_tconfint_generic(mean=res.mean(),                                 # x-bar
                  std_mean=res.std(ddof=1)/np.sqrt(len(res)),      # s / sqrt(n)
                  dof=len(res) - 1,                                # n - 1 degrees of freedom
                  alpha=(1 - 0.95),                                # 95% confidence level
                  alternative="two-sided")

# Notice that 1 is *not* in the confidence interval. This leads you to suspect that the
# resistors the supplier produces are not being properly manufactured.

# ## Hypothesis Testing for the Mean
#
# The confidence interval you computed suggests that the resistors' resistance level does
# not agree with the label. You now want to perform a hypothesis test to confirm your
# suspicion. In particular, you believe that the resistors have less resistance than
# specified.
#
# You will be testing the hypotheses:
#
# $$H_0: \mu = 1\text{k}\Omega$$
# $$H_A: \mu < 1\text{k}\Omega$$
#
# Since you are assuming that the resistance is Normally distributed, you use the test
# statistic:
#
# $$t = \frac{\bar{x} - 1}{\frac{s}{\sqrt{n}}}$$
#
# to determine if you should reject $H_0$ or not.
#
# The function `_tstat_generic()` can perform such a test and yield a $p$-value.
# #
# *(WARNING: The following function/code is NOT stable!)*

from statsmodels.stats.weightstats import _tstat_generic

# BUG FIX: as with the confidence interval above, the test statistic uses the *sample*
# standard deviation s, so pass ddof=1 to std(); the default ddof=0 is the population sd.
_tstat_generic(value1=res.mean(),                                 # x-bar
               value2=0,
               diff=1,                                            # hypothesized mean (mu_0 = 1)
               std_diff=res.std(ddof=1)/np.sqrt(len(res)),        # s / sqrt(n)
               dof=len(res) - 1,
               alternative="smaller")                             # one-sided: mu < 1

# The p-value is miniscule! Clearly the resistance of the resistors the manufacturer makes
# is less than $1\text{k}\Omega$. Your company is being fleeced by this manufacturer!

# ## Two-Sample Test for Common Mean
#
# In light of your study the manager of your division has decided to stop outsourcing
# resistor production. The company wants to start manufacturing its own resistors, and has
# started experimenting with different processes before engaging in full-scale production.
#
# Right now there are two manufacturing processes, and you are tasked with determining
# whether the mean resistance of supposedly-$1\text{k}\Omega$ resistors is the same between
# the two processes. That is, given process A and process B, you wish to test
#
# $$H_0: \mu_A = \mu_B$$
# $$H_A: \mu_A \neq \mu_B$$
#
# While you feel safe assuming that the resistance level of resistors is Normally
# distributed regardless of the manufacturing process employed, you don't assume that the
# standard deviation is common to all processes. In that case, you use the test statistic
#
# $$t = \frac{\bar{x}_A - \bar{x}_B}{\sqrt{\frac{s_A^2}{n_A} + \frac{s_B^2}{n_B}}}$$
#
# After some tests you obtain the following datasets for the resistance of resistors
# produced by the different processes.

# +
res_A = np.array([ 1.002, 1.001, 1.   , 0.999, 0.998, 1.   , 1.001, 0.999,
                   1.002, 0.998, 1.   , 0.998, 1.001, 1.001, 1.002, 0.997,
                   1.001, 1.   , 1.001, 0.999, 0.998, 0.998, 1.002, 1.002,
                   0.996, 0.998, 0.997, 1.001, 1.002, 0.997, 1.   , 1.   ,
                   0.998, 0.997])

res_B = np.array([ 0.995, 1.022, 0.993, 1.014, 0.998, 0.99 , 0.998, 0.998,
                   0.99 , 1.003, 1.016, 0.992, 1.   , 1.002, 1.003, 1.005,
                   0.979, 1.012, 0.978, 1.01 , 1.001, 1.026, 1.011, 1.   ,
                   0.98 , 0.993, 1.016, 0.991, 0.986, 0.987, 1.012, 0.996,
                   1.013, 1.001, 0.984, 1.011, 1.01 , 1.   , 1.001])
# -

# This test is performed by `ttest_ind()` from **statsmodels**.

from statsmodels.stats.weightstats import ttest_ind

# Welch's two-sample t-test (usevar="unequal" means the variances are not pooled).
ttest_ind(res_A, res_B,              # The datasets
          alternative="two-sided",
          usevar="unequal")

# In the above output, the middle number is the p-value. In this case the p-value is
# approximately 0.659, which is large. We should not reject the null hypothesis. The two
# processes appear to produce resistors with the same mean level of resistance.

# ## One-Way ANOVA
#
# Before you were able to report your findings you received word that three more
# manufacturing processes were tested and you now have resistors for five manufacturing
# processes. Your supervisor wants to know if all of the resistors produced by these
# processes have the same mean resistance or if some processes produce resistors with a
# mean resistance different from the rest.
#
# In other words, for resistors produced by processes A, B, C, D, or E, you need to test
#
# $$H_0: \mu_A = \mu_B = \mu_C = \mu_D = \mu_E$$
# $$H_A: H_0 \text{ is false}$$
#
# The test for deciding which of these two hypotheses is true is known as ANOVA. ANOVA has
# assumptions. In addition to the assumption that the data was drawn from Normal
# distributions, you must assume that the data was drawn from distributions with the same
# standard deviation. You would need to check this, but you are in a hurry.
#
# You now have the following datasets in addition to the ones you started with.

# +
res_C = np.array([ 1.005, 1.012, 1.003, 0.993, 0.998, 1.002, 1.002, 0.996,
                   0.999, 1.004, 1.006, 1.007, 0.991, 1.011, 1.   , 1.   ,
                   1.005, 1.   , 0.995, 0.995, 1.002, 1.002, 0.991, 1.003,
                   0.997, 0.994, 0.995, 1.   , 1.001, 1.005, 0.992, 0.999,
                   0.999, 1.002, 1.   , 0.994, 1.001, 1.007, 1.003, 0.993])

res_D = np.array([ 1.006, 0.996, 0.986, 1.004, 1.004, 1.   , 1.   , 0.993,
                   0.991, 0.992, 0.989, 0.996, 1.   , 0.996, 1.001, 0.989,
                   1.   , 1.004, 0.997, 0.99 , 0.998, 0.994, 0.991, 0.995,
                   1.002, 0.997, 0.998, 0.99 , 0.996, 0.994, 0.988, 0.996,
                   0.998])

res_E = np.array([ 1.009, 0.999, 0.995, 1.008, 0.998, 1.001, 1.001, 1.001,
                   0.993, 0.992, 1.007, 1.005, 0.997, 1.   , 1.   , 1.   ,
                   0.996, 1.005, 0.997, 1.013, 1.002, 1.006, 1.004, 1.002,
                   1.001, 0.999, 1.001, 1.004, 0.994, 0.999, 0.997, 1.004,
                   0.996])
# -

# The function `f_oneway()` from **scipy.stats** performs the one-way ANOVA test.

from scipy.stats import f_oneway

f_oneway(res_A, res_B, res_C, res_D, res_E)

# The p-value of approximately 0.0347 appears small, so we can reject the null hypothesis
# that all processes yield resistors with the same level of resistance.
Chapter01/ClassicalInferenceMean.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ####################MODIFIABLE RUN PARAMETERS######################### wellslist=['A2','B2'] tiprack_starting_pos ={ "tiprack_10" : 'A1', "tiprack_300" : 'A1' } # In case the dispensing tip arrives to slide or cslp with a given mistake – this factor, # listed in mm, can be used for fine z-correction. E.g. # sample_z_correction_factor=-4 will lower the dispensing point by 4mm. sample_z_correction_factor=-4 #Antibody incubation time in seconds ABS_INCUBATION_TIME = 60*60*1.5 #debug mode skips all incubations, prints out additional info debug = False ####################FIXED RUN PARAMETERS######################### API_VERSION= '2.7' default_flow_rate = 50 well_flow_rate = 5 sample_flow_rate = 0.2 wash_volume = 150 USE_TROUGH = True class Object: pass ####################LABWARE LAYOUT ON DECK######################### labwarePositions = Object() labwarePositions.buffers_reservoir = 1 labwarePositions.par2 = 2 labwarePositions.antibodies_plate = 3 labwarePositions.tiprack_300 = 6 # + ####################GENERAL SETUP################################ from opentrons import execute, simulate import string import json import time import logging logging.getLogger('opentrons').setLevel(logging.ERROR) stats = Object() stats.volume = 0 # ####################! 
FUNCTIONS - DO NOT MODIFY !######################### def washSamples(pipette, sourceSolutionWell, samples, volume, num_repeats=1): try: iter(samples) #print('samples are iterable') except TypeError: #print('samples arent iterable') samples = [samples] pipette.pick_up_tip() if(len(samples)==0): samples = [samples] print("Replacing solution on samples:" +str(samples) + " len=" + str(len(samples))) for i in range(0, num_repeats): print ("Iteration:"+ str(i)) for s in samples: print ("Washing sample:" + str(s)) pipette.aspirate(volume, sourceSolutionWell, rate=well_flow_rate) pipette.dispense(volume, s.bottom(sample_z_correction_factor), rate=sample_flow_rate).blow_out() stats.volume += volume pipette.drop_tip() def dilute_and_apply_fixative(pipette, sourceSolutionWell, dilutant_buffer_well, samples, volume): try: iter(samples) #print('samples are iterable') except TypeError: #print('samples arent iterable') samples = [samples] pipette.pick_up_tip() if(len(samples)==0): samples = [samples] print("Applying fixative to samples:" +str(samples) + " len=" + str(len(samples))) for s in samples: print ("Diluting fixative: " + str(s)) pipette.aspirate(volume+50, dilutant_buffer_well, rate=well_flow_rate) pipette.dispense(volume+50, sourceSolutionWell, rate=well_flow_rate) for iterator in range(0, 3): print ("Mixing: " + str(iterator+1)) pipette.aspirate(volume, sourceSolutionWell, rate=well_flow_rate) pipette.dispense(volume, sourceSolutionWell, rate=well_flow_rate) print ("Applying fixative to sample: " + str(s)) pipette.aspirate(volume, sourceSolutionWell, rate=well_flow_rate) pipette.dispense(volume, s, rate=sample_flow_rate).blow_out() stats.volume += volume pipette.drop_tip() def mix(pipette, sourceSolutionWell, volume, num_repeats): pipette.pick_up_tip() print("Mixing solution in samples:" +str(sourceSolutionWell)) for i in range(0, num_repeats): print ("Iteration:"+ str(i)) pipette.aspirate(volume, sourceSolutionWell, rate=2) pipette.dispense(volume, sourceSolutionWell, 
rate=2) pipette.drop_tip() def countdown(t): while t: mins, secs = divmod(t, 60) timeformat = '{:02d}:{:02d}'.format(mins, secs) print(timeformat, end='\r') if (not debug): time.sleep(1) t -= 1 print('off we go..') # + ###########################LABWARE SETUP################################# protocol = execute.get_protocol_api(API_VERSION) if debug: print(protocol) tiprack_300 = protocol.load_labware('opentrons_96_tiprack_300ul', labwarePositions.tiprack_300, "tiprack 300ul") if debug: print(tiprack_300) pipette_300 = protocol.load_instrument('p300_single', 'right', tip_racks = [tiprack_300]) pipette_300.flow_rate.dispense = default_flow_rate pipette_300.flow_rate.aspirate = default_flow_rate pipette_300.starting_tip = tiprack_300.well(tiprack_starting_pos['tiprack_300']) if debug: print(pipette_300) par2_slides = protocol.load_labware('par2s_9slides_blue_v2', labwarePositions.par2, 'par2s_9slides_blue_v2') #trough12_def = json.loads(AXYGEN_12well_plate_DEF_JSON) #trough12 = protocol.load_labware_from_definition(trough12_def, labwarePositions.buffers_reservoir, '12-trough buffers reservoir') #custom_96_def = json.loads(CUSTOM_96well_plate_DEF_JSON) custom_96 = protocol.load_labware('parhelia_black_96', labwarePositions.antibodies_plate, 'parhelia_black_96') trough12 = protocol.load_labware('parhelia_12trough', labwarePositions.buffers_reservoir, 'parhelia_12trough') temp_mod = protocol.load_module('temperature module', '8') par2_on_heat_module=temp_mod.load_labware('par2s_9slides_blue_v2') if debug: print(par2) # + buffer_wells = trough12.wells_by_name() buffers = Object() buffers.retreaval = buffer_wells['A1'] buffers.TBS_wash = buffer_wells['A2'] buffers.water = buffer_wells['A3'] buffers.storage = buffer_wells['A4'] preblock_wells_cycle1 = custom_96.rows()[0] antibody_wells_cycle1 = custom_96.rows()[1] opal_polymer_cycle1 = custom_96.rows()[2] opal_fluorophore1 = custom_96.rows()[3] preblock_wells_cycle2 = custom_96.rows()[4] antibody_wells_cycle2 = 
custom_96.rows()[5]
opal_polymer_cycle2 = custom_96.rows()[6]
opal_fluorophore2 = custom_96.rows()[7]

# Map each requested slide position to its chamber on the heated PAR2 holder.
sample_chambers = []
for well in wellslist:
    sample_chambers.append(par2_on_heat_module.wells_by_name()[well])

if debug: print(sample_chambers)
# -

#################PROTOCOL####################
# Two-cycle Opal staining run: wash -> preblock -> antibody -> HRP polymer
# -> fluorophore -> retrieval/strip at 95C, then long-term storage washes.
protocol.home()

# ###-------------------- FIRST ROUND

#WASHING SAMPLES WITH TBS
print("washing in TBS")
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
#countdown(300)

# +
#pipette_300.drop_tip()
# -

print("preblocking")
for i in range (len(wellslist)):
    washSamples(pipette_300, preblock_wells_cycle1[i], sample_chambers[i], wash_volume)

#INCUBATE 15 MIN
print("preblocking incubation: 15 min")
countdown(900)

# +
#countdown(3600)

# +
#APPLYING ANTIBODY COCKTAILS TO SAMPLES
print("applying antibodies")
for i in range (len(wellslist)):
    washSamples(pipette_300, antibody_wells_cycle1[i], sample_chambers[i], wash_volume)

#INCUBATE 120 MIN
print("staining incubation 1.5h")
countdown(5400)

# +
#washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=1)
# -

#WASHING SAMPLES WITH TBS
#three individual repeats below is because they need particular incubation time between them
print("washing with TBS")
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(180)

# +
#APPLYING OPAL polymer HRP
print("applying opal secondary")
for i in range (len(wellslist)):
    washSamples(pipette_300, opal_polymer_cycle1[i], sample_chambers[i], wash_volume)

#INCUBATE 120 MIN
print("opal secondary for 10min")
countdown(600)
# -

#three individual repeats below is because they need particular incubation time between them
print("washing with TBS")
washSamples(pipette_300, buffers.TBS_wash,
sample_chambers, wash_volume, num_repeats=1)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=1)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=1)
countdown(180)

# +
#Opal Signal generation
print("Opal Signal generation")
for i in range (len(wellslist)):
    washSamples(pipette_300, opal_fluorophore1[i], sample_chambers[i], wash_volume)

#INCUBATE 120 MIN
print("opal fluorophore1 10min" )
countdown(600)
# -

#WASHING SAMPLES WITH TBS
print("washing in TBS")
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(120)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(120)

# Antigen retrieval / antibody strip at 95C for 40 min, then cool down.
washSamples(pipette_300, buffers.retreaval, sample_chambers, wash_volume, num_repeats=3)
countdown(180)
temp_mod.set_temperature(95)
print("retreaval")
countdown(2400)
temp_mod.set_temperature(25)

# +
#temp_mod.deactivate()
# -

print("cooling down")
countdown(1200)

# ###--------cycle 2
# Same sequence as round 1, using the cycle-2 reagent rows.

#WASHING SAMPLES WITH TBS
print("washing in TBS")
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
#countdown(300)

# +
#pipette_300.drop_tip()
# -

print("preblocking")
for i in range (len(wellslist)):
    washSamples(pipette_300, preblock_wells_cycle2[i], sample_chambers[i], wash_volume)

#INCUBATE 15 MIN
print("preblocking incubation: 15 min")
countdown(900)

# +
#countdown(3600)

# +
#APPLYING ANTIBODY COCKTAILS TO SAMPLES
print("applying antibodies")
for i in range (len(wellslist)):
    washSamples(pipette_300, antibody_wells_cycle2[i], sample_chambers[i], wash_volume)

#INCUBATE 120 MIN
print("staining incubation 1.5h")
countdown(5400)

# +
#washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=1)
# -

#WASHING SAMPLES WITH TBS
#three individual repeats below is because they need particular incubation time between them
print("washing with TBS")
washSamples(pipette_300,
buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(180)

# +
#APPLYING OPAL polymer HRP
# NOTE(review): message differs from cycle 1's "applying opal secondary" —
# same reagent class (HRP polymer); consider unifying the log text.
print("applying hrpsecondaryab")
for i in range (len(wellslist)):
    washSamples(pipette_300, opal_polymer_cycle2[i], sample_chambers[i], wash_volume)

#INCUBATE 120 MIN
print("opal secondary for 10min")
countdown(600)
# -

#three individual repeats below is because they need particular incubation time between them
print("washing with TBS")
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=1)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=1)
countdown(180)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=1)
countdown(180)

# +
#Opal Signal generation
print("Opal Signal generation")
for i in range (len(wellslist)):
    washSamples(pipette_300, opal_fluorophore2[i], sample_chambers[i], wash_volume)

#INCUBATE 120 MIN
# NOTE(review): log says "fluorophore1" but this is cycle 2 (opal_fluorophore2).
print("opal fluorophore1 10min" )
countdown(600)
# -

#WASHING SAMPLES WITH TBS
print("washing in TBS")
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(120)
washSamples(pipette_300, buffers.TBS_wash, sample_chambers, wash_volume, num_repeats=2)
countdown(120)

# Final retrieval and cool-down; unlike round 1, the module is deactivated here.
washSamples(pipette_300, buffers.retreaval, sample_chambers, wash_volume, num_repeats=3)
countdown(180)
temp_mod.set_temperature(95)
print("retreaval")
countdown(2400)
temp_mod.set_temperature(25)
temp_mod.deactivate()

print("cooling down")
countdown(1200)

#STORAGE, washing samples every hour
for i in range (48):
    washSamples(pipette_300, buffers.storage,sample_chambers, 100)
    countdown(3600)

print("total dispensed volume: ", str(stats.volume))
jupyter_notebooks/SLIDE_PAR2 OPAL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/will1973/MachineLearningInAction/blob/master/NLP_Lab05__wehe9821.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="2JX8CSelGdJf" colab_type="text" # # Lab 05 # + [markdown] id="be_7UFUxGfke" colab_type="text" # # Preprocessing # # Text preprocessing is traditionally an important step for natural language processing (NLP) tasks. It transforms text into a more digestible form so that machine learning algorithms can perform better. # + [markdown] id="ee0J8xZ9Kxwk" colab_type="text" # ## Text Wrangling # + [markdown] id="18BJU1JqYnSD" colab_type="text" # Text wrangling is a converting/gathering/extracting formatted text from the raw data. # + [markdown] id="tWOVJtY-L1Sm" colab_type="text" # For example, **HTML** does not include only text. Even you extract only the text from **HTML**, they are not all meaningful (i.e. advertisements). # # Have a look at the [news article](https://www.smh.com.au/national/nsw/macquarie-uni-suspends-teaching-for-10-days-to-move-learning-online-20200317-p54avs.html). We might be only interested in getting headline and main contents of the article. # # The following code removes some irrelevant tags (i.e. script, style, link, etc.) and displays the remained tags. 
() # # # + id="4_FeckidL1k0" colab_type="code" outputId="ebe52223-1010-46ff-904d-dcb98b1278b0" colab={"base_uri": "https://localhost:8080/", "height": 1000} import urllib from bs4 import BeautifulSoup url = "https://www.smh.com.au/national/nsw/macquarie-uni-suspends-teaching-for-10-days-to-move-learning-online-20200317-p54avs.html" html = urllib.request.urlopen(url).read() soup = BeautifulSoup(html) # remove not relevant tags (script, style, link, etc.) for script in soup(["script", "style", "link", "head", "noscript"]): script.extract() # rip it out # get text text = soup.get_text() print(soup.prettify()) # + [markdown] id="8RlCBsYSPkVp" colab_type="text" # **Try &lt;p&gt; tag** # # Using &lt;p&gt; tag is a common way to extract the main contents of the online news articles. BUT, do not expect this always provides what you want. # + id="uEx20icxPlDx" colab_type="code" outputId="920a13a7-c3ce-4465-9d94-31a49681314f" colab={"base_uri": "https://localhost:8080/", "height": 415} p_tags = soup.findAll('p',text=True) for i, p_tag in enumerate(p_tags): print(str(i) + str(p_tag)) # + [markdown] id="BYnmY30W4kXq" colab_type="text" # ## Punctuations removal # + [markdown] id="WUSe0XB142YM" colab_type="text" # First, let's try to remove by using the list of punctuations! 
# + id="zPPA04m5CTS5" colab_type="code" outputId="83c673ea-7d5a-480d-a16a-2338c054a584" colab={"base_uri": "https://localhost:8080/", "height": 35} puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£', '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def remove_punctuation(x): x = str(x) for punct in puncts: if punct in x: x = x.replace(punct, '') return x text = "It's a nice day[]" print(remove_punctuation(text)) # + [markdown] id="yBYYb37K46gV" colab_type="text" # Then, what about the the regular expression ([re package](https://docs.python.org/3/library/re.html))? # + id="SicWED4146pe" colab_type="code" outputId="57d1700c-6951-487e-cdd8-12c5af32c5f3" colab={"base_uri": "https://localhost:8080/", "height": 35} import re def remove_punctuation_re(x): x = re.sub(r'[^\w\s]','',x) return x text = "It's a nice day[]" print(remove_punctuation_re(text)) # + [markdown] id="1D4zy-EAFC2B" colab_type="text" # Ok. Then what about emoticons? **:)** or **:D** or **:(** # # + id="lhSuQUQn7HHy" colab_type="code" colab={} #you can find the solution from the TweetTokenizer https://www.nltk.org/_modules/nltk/tokenize/casual.html#TweetTokenizer EMOTICONS = r""" (?: [<>]? [:;=8] # eyes [\-o\*\']? # optional nose [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth | [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth [\-o\*\']? # optional nose [:;=8] # eyes [<>]? 
      | <3                       # heart
    )"""

# + [markdown] id="qbr_yUC07HRT" colab_type="text"
# Have a look at some contractions of words!

# + id="GM6nYOK__dYw" colab_type="code" colab={}
# These are just common English contractions. There are many edge cases. i.e. University's working on it.
# Maps contraction -> expanded form; intended for lookup after tokenisation.
contraction_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because",
    "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not",
    "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
    "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did",
    "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would",
    "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am",
    "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will",
    "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not",
    "it'd": "it would", "it'd've": "it would have", "it'll": "it will",
    "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam",
    "mayn't": "may not", "might've": "might have","mightn't": "might not",
    "mightn't've": "might not have", "must've": "must have", "mustn't": "must not",
    "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have",
    "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have",
    "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
    "she'd": "she would", "she'd've": "she would have", "she'll": "she will",
    "she'll've": "she will have", "she's": "she is", "should've": "should have",
    "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have",
    "so's": "so as", "this's": "this is","that'd": "that would",
    "that'd've": "that would have", "that's": "that is", "there'd": "there would",
    "there'd've": "there would have", "there's": "there is", "here's": "here is",
    "they'd": "they would", "they'd've": "they would have", "they'll": "they will",
    "they'll've": "they will have", "they're": "they are", "they've": "they have",
    "to've": "to have", "wasn't": "was not", "we'd": "we would",
    "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have",
    "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will",
    "what'll've": "what will have", "what're": "what are", "what's": "what is",
    "what've": "what have", "when's": "when is", "when've": "when have",
    "where'd": "where did", "where's": "where is", "where've": "where have",
    "who'll": "who will", "who'll've": "who will have", "who's": "who is",
    "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have",
    "won't": "will not", "won't've": "will not have", "would've": "would have",
    "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all",
    "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are",
    "y'all've": "you all have", "you'd": "you would", "you'd've": "you would have",
    "you'll": "you will", "you'll've": "you will have", "you're": "you are",
    "you've": "you have"}

# + [markdown] id="fW0bu-886Tp0" colab_type="text"
# ## Stopwords removal
# Stopwords are the most common words in any natural language. For the purpose of analyzing text data and building NLP models, these stopwords might not add much value to the meaning of the document. Generally, the most common words used in a text are “the”, “is”, “in”, “for”, “where”, “when”, “to”, “at” etc.
#

# + id="vh1rYL3NIavi" colab_type="code" outputId="d19b444a-01e3-4b72-a99d-c223924ab285" colab={"base_uri": "https://localhost:8080/", "height": 107}
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.corpus import stopwords as sw
from nltk.tokenize import word_tokenize

my_sent = "Natural Language Processing is fun but challenging."
# Tokenise the demo sentence and drop any token found in the stopword list.
tokens = word_tokenize(my_sent)
stop_words = sw.words()

filtered_sentence = [w for w in tokens if not w in stop_words]
print(filtered_sentence)

# + [markdown] id="eap6jyY26ovD" colab_type="text"
# ## Case-folding
# A common strategy is to do case-folding by reducing all letters to lower case

# + id="yiLKuaoEIbnq" colab_type="code" outputId="1d50a558-e2a4-4e32-8590-7e4bdee90f9a" colab={"base_uri": "https://localhost:8080/", "height": 53}
text = "Hello there!"
#Returns the result of converting all characters in text to lowercase.
print(text.lower())

#do we need to reduce all letters to lower case?
text2 = "I love University of Sydney :D"
print(text2.lower())

# + [markdown] id="L9e4G_t5gCYG" colab_type="text"
# ## Stemming
# Stemming is a process of removing and replacing word suffixes to arrive at a common root form of the word.
#
# * Try various types of NLTK stemmer in [demo](http://text-processing.com/demo/stem/)
# * A comparative study of stemming algorithm: [Paper Link](https://pdfs.semanticscholar.org/1c0c/0fa35d4ff8a2f925eb955e48d655494bd167.pdf)

# + id="5FhadZUH607p" colab_type="code" outputId="6a077a97-b1e2-4f43-b9b4-88fa739d13fd" colab={"base_uri": "https://localhost:8080/", "height": 35}
#let's try to test with porter algorithm
from nltk.stem.porter import *

stemmer = PorterStemmer()
plurals = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died', 'agreed', 'owned',
           'humbled', 'sized', 'meeting', 'stating', 'siezing', 'itemization',
           'sensational', 'traditional', 'reference', 'colonizer', 'plotted']
singles = [stemmer.stem(plural) for plural in plurals]
print(singles)

# + [markdown] id="t5Tm9Zj4K8nj" colab_type="text"
# ## Lemmatisation
#
# Lemmatisation is the process of grouping together the inflected forms of a word so they can be analysed as a single item, identified by the word's lemma, or dictionary form
#

# + id="q0oIgt5NK8te" colab_type="code" outputId="4d1017e2-2196-4759-f49b-c4d781a5eb48" colab={"base_uri": "https://localhost:8080/", "height": 215}
#by NLTK Wordnet
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize("cats"))
print(lemmatizer.lemmatize("cacti"))
print(lemmatizer.lemmatize("geese"))
print(lemmatizer.lemmatize("rocks"))
print(lemmatizer.lemmatize("python"))
print(lemmatizer.lemmatize("better", pos="a"))
print(lemmatizer.lemmatize("best", pos="a"))
print(lemmatizer.lemmatize("run"))
print(lemmatizer.lemmatize("run",'v'))

# + [markdown] id="gZQBz4bS63_S" colab_type="text"
# ## Tokenisation
#
# Given a character sequence and a defined document unit, tokenization is the task of chopping it up into pieces, called tokens , perhaps at the same time throwing away certain characters, such as punctuation
#
# Try various types of NLTK Tokenizer in [demo](http://text-processing.com/demo/tokenize/).
#
# [NLTK Tokeniser API Doc](https://www.nltk.org/api/nltk.tokenize.html)

# + [markdown] id="Jj6FSwVH9pig" colab_type="text"
# **TweetTokenizer: Twitter-aware tokeniser**

# + id="PLiWA6VAGfyr" colab_type="code" outputId="874ab78c-5caa-42a1-92f3-a5d8c3f0f5c9" colab={"base_uri": "https://localhost:8080/", "height": 53}
from nltk.tokenize import TweetTokenizer

tknzr = TweetTokenizer()
s0 = "I am so happy :) ;)"
print(tknzr.tokenize(s0))
s0 = "I am so sad :("
print(tknzr.tokenize(s0))

# + [markdown] id="Q2aUurLvKn90" colab_type="text"
# **TreebankWordTokenizer**
#
# The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.
# + id="z79eqDUw9po5" colab_type="code" outputId="d8bfe4ed-acd3-4e80-bc32-aa8a74be5868" colab={"base_uri": "https://localhost:8080/", "height": 53}
from nltk.tokenize import TreebankWordTokenizer

tknzr = TreebankWordTokenizer()
s0 = "I am so happy :) ;)"
print(tknzr.tokenize(s0))
s0 = "I am so sad :("
print(tknzr.tokenize(s0))

# + [markdown] id="Ko6A3-sP67AA" colab_type="text"
# # Word Cloud
# * [Word Cloud](https://amueller.github.io/word_cloud/index.html)
# * [Wikipedia Python](https://pypi.org/project/wikipedia/)
#

# + id="8RL-2Nl6M8bS" colab_type="code" outputId="3d0f07fe-6023-44e0-dcaf-ab70023fde5e" colab={"base_uri": "https://localhost:8080/", "height": 287}
# !pip install wikipedia

# + id="ts5qkwsIgWs8" colab_type="code" outputId="920e042b-ff8b-45b3-827b-4830f1126f92" colab={"base_uri": "https://localhost:8080/", "height": 198}
from wordcloud import WordCloud
import wikipedia

# Getting wikipedia contents of "University of Sydney"
# NOTE(review): the comment above is stale — the page actually fetched is "Corona".
text = wikipedia.page("Corona").content

# Generate a word cloud image
wordcloud = WordCloud().generate(text)

# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()

# + [markdown] id="_ynb-BNSMJF3" colab_type="text"
# Try more word cloud examples: [Link](https://amueller.github.io/word_cloud/auto_examples/index.html)

# + [markdown] id="Bgfxjm27_W58" colab_type="text"
# # Saving and Loading Models

# + [markdown] id="z8L2SD6URVmz" colab_type="text"
# **Saving model**

# + id="5CIo7H32xssE" colab_type="code" outputId="2f35cfd2-f531-4c59-999c-53c6bd4a4386" colab={"base_uri": "https://localhost:8080/", "height": 467}
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt

# Minimal 1-D linear regression model used to demo torch.save/torch.load.
class TheModelClass(nn.Module):
    def __init__(self):
        super(TheModelClass, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, input):
        output = self.linear(input)
        return output

no_of_epochs = 500
display_interval = 20
learning_rate=0.01

# training data
x_training = np.asarray([[1],[2],[5],[8],[9],[12],[14],[16],[18],[20]])
y_training = np.asarray([100,200,501,780,901,1201,1399,1598,1800,2000])

x_data_torch = torch.from_numpy(x_training).float()
y_data_torch = torch.from_numpy(y_training).float()

model = TheModelClass()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(no_of_epochs):
    # zero the parameter gradients
    optimizer.zero_grad()

    # forward + backward + optimize
    outputs = model(x_data_torch)
    # Mean squared error (halved) written out by hand.
    loss = torch.sum(torch.pow(outputs-y_data_torch.view(-1,1),2))/(2*x_training.shape[0])
    loss.backward()
    optimizer.step()

    if epoch%display_interval == display_interval-1:
        print('Epoch: %d, loss: %.3f' %(epoch + 1, loss.item() ))

# + id="zxBZETGT3pHZ" colab_type="code" outputId="d68f65b3-d03e-4a93-e6f3-90d6a5d3cafc" colab={"base_uri": "https://localhost:8080/", "height": 53}
# Saves the whole module object (pickle), not just the state_dict.
torch.save(model, 'filename.pt')

# + [markdown] id="ZHlFto63RYdM" colab_type="text"
# **Loading model**

# + id="cxoYkzWp51Kr" colab_type="code" outputId="f053755e-c474-4339-d964-11d090b761ea" colab={"base_uri": "https://localhost:8080/", "height": 71}
the_saved_model = torch.load('filename.pt')
the_saved_model.eval()

# + id="j13TVA787FYG" colab_type="code" outputId="69758511-2b56-41b0-daf5-54ff4435f12b" colab={"base_uri": "https://localhost:8080/", "height": 197}
prediction = the_saved_model(x_data_torch).detach().numpy()
for i in range(len(y_training)):
    print('X: %d, Y_true: %d, Y_predict: %.3f'%(x_training[i][0],y_training[i],prediction[i][0]))

# + [markdown] id="t6iDS4iMSj_L" colab_type="text"
# **How to Save (Upload) the model to your Google Drive**
#
# There are various ways to upload the files on Google Drive.
#
# [This tutorial](https://colab.research.google.com/notebooks/io.ipynb#scrollTo=RWSJpsyKqHjH) will guide you how to save the files on your Google Drive.
#
# 1. Mounting Google Drive locally
# 2.
# Create a new Drive file

# + [markdown] id="rxxHsS-lxK7o" colab_type="text"
# # Bi-LSTM with Hidden State Extraction
#
# The folllowing image represents the Bi-LSTM for N to 1 task. In the N to 1 task, it usually requires to extract the last hidden states of forward lstm and backward lstm and combine (concat) them. (Please check the lecture 5 recording, please!)
#
# ![alt text](https://usydnlpgroup.files.wordpress.com/2020/03/bi-lstm_nto1-e1585416735686.png)

# + [markdown] id="cXmoWBP2yCms" colab_type="text"
# Bi-LSTM: Bidirectional LSTM, which means the signal propagates backward as well as forward in time.
#
# We are going to apply the same toy data we used in the lab 4.

# + id="FhridrFgxSPD" colab_type="code" colab={}
import torch

#You can enable GPU here (cuda); or just CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

import numpy as np

#Assume that we have the following character instances
char_arr = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

# one-hot encoding and decoding
# {'a': 0, 'b': 1, 'c': 2, ..., 'j': 9, 'k', 10, ...}
num_dic = {n: i for i, n in enumerate(char_arr)}
dic_len = len(num_dic)

# a list words for sequence data (input and output)
seq_data = ['word', 'wood', 'deep', 'dive', 'cold', 'cool', 'load', 'love', 'kiss', 'kind']

# Make a batch to have sequence data for input and ouput
# wor -> X, d -> Y
# dee -> X, p -> Y
def make_batch(seq_data):
    # Returns (input_batch, target_batch): one-hot encoded first-3-letters
    # inputs and the index of each word's last letter as the target.
    input_batch = []
    target_batch = []

    for seq in seq_data:
        # input data is:
        # wor woo dee div
        # [22, 14, 17] [22, 14, 14] [3, 4, 4] [3, 8, 21] ...
        input_data = [num_dic[n] for n in seq[:-1]]
        # target is :
        # d, d, p, e, ...
        # 3, 3, 15, 4, ...
        target = num_dic[seq[-1]]
        # convert input to one-hot encoding.
        # if input is [3, 4, 4]:
        # [[ 0, 0, 0, 1, 0, 0, 0, ... 0]
        #  [ 0, 0, 0, 0, 1, 0, 0, ... 0]
        #  [ 0, 0, 0, 0, 1, 0, 0, ... 0]]
        input_batch.append(np.eye(dic_len)[input_data])
        target_batch.append([target])

    return input_batch, target_batch

### Setting hyperparameters
learning_rate = 0.1
n_hidden = 128
total_epoch = 50

# Number of sequences for RNN
n_step = 3
# number of inputs (dimension of input vector) = 26
n_input = dic_len
# number of classes = 26
n_class = dic_len

# + id="ffYFrRaE1Ju5" colab_type="code" outputId="d7e43311-ffa4-47d3-ae80-0601106e7df1" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score

# Bidirectional LSTM classifier for the N-to-1 task: the final forward and
# backward hidden states are concatenated and fed to a linear layer.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.lstm = nn.LSTM(n_input, n_hidden, batch_first =True,bidirectional=True, dropout=0.2)
        self.linear = nn.Linear(n_hidden*2,n_class)

    def forward(self, sentence):
        #h_n of shape (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for t = seq_len.
        lstm_out, (h_n,c_n) = self.lstm(sentence)
        #concat the last hidden state from two direction
        hidden_out =torch.cat((h_n[0,:,:],h_n[1,:,:]),1)
        z = self.linear(hidden_out)
        log_output = F.log_softmax(z, dim=1)
        return log_output,hidden_out

# Move the model to GPU
net = Net().to(device)

# Loss function and optimizer
criterion = nn.NLLLoss()
optimizer = optim.Adam(net.parameters(), lr=learning_rate)

# Preparing input
input_batch, target_batch = make_batch(seq_data)

# Convert input into tensors and move them to GPU by uting tensor.to(device)
input_batch_torch = torch.from_numpy(np.array(input_batch)).float().to(device)
target_batch_torch = torch.from_numpy(np.array(target_batch)).view(-1).to(device)

for epoch in range(total_epoch):
    # Set the flag to training
    net.train()

    # forward + backward + optimize
    outputs,_ = net(input_batch_torch)
    loss = criterion(outputs, target_batch_torch)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

    # Set the flag to evaluation, which will 'turn off' the dropout
    net.eval()
    outputs,_ = net(input_batch_torch)

    # Evaluation loss and accuracy calculation
    loss = criterion(outputs, target_batch_torch)
    _, predicted = torch.max(outputs, 1)
    acc= accuracy_score(predicted.cpu().numpy(),target_batch_torch.cpu().numpy())
    print('Epoch: %d, loss: %.5f, train_acc: %.2f' %(epoch + 1, loss.item(), acc))

print('Finished Training')

## Prediction
# Rebuild each predicted word from its input prefix plus the predicted letter.
predict_words = []
for i in range(len(predicted.cpu().numpy())):
    ind = predicted.cpu().numpy()[i]
    predict_words.append(seq_data[i][:-1]+char_arr[ind])

print('\n=== Prediction Result ===')
print('Input:', [w[:3] + ' ' for w in seq_data])
print('Predicted:', predict_words)
print('Accuracy: %.2f' %acc)

# + [markdown] id="8ufD5mpV6KM8" colab_type="text"
# **How to extract the concatenated hidden state**

# + id="7RkiE1Bh6Jt9" colab_type="code" outputId="e13e3645-89d4-4340-e241-7844d153256a" colab={"base_uri": "https://localhost:8080/", "height": 305}
net.eval()
_,hidden_state = net(input_batch_torch)
print(n_hidden)
print(len(seq_data))
print(hidden_state.size())
print(hidden_state.data)

# + [markdown] id="z9dPCF1BMNpv" colab_type="text"
# # Exercise
# In this exercise, you are to preprocess the train and test data, and apply different pre-trained embeddings.
#
# **Note**: We won't mark your exercise based on the test set performance, we will only check whether the preprocessing part and embedding part are correct.
#
# **Important**: This exercise is very important to your assignment1 since you can use most of the codes here in your assignment1.
#
# + id="jKxN3z5FPRcw" colab_type="code" colab={}
import torch

#You can enable GPU here (cuda); or just CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# + [markdown] colab_type="text" id="DC69XgVlLuPi"
# ## Download Dataset

# + colab_type="code" id="zl7OaslvLuPo" colab={}
# Code to download file into Colaboratory:
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

id = '<KEY>'
downloaded = drive.CreateFile({'id':id})
downloaded.GetContentFile('twitter.csv')

import pandas as pd
df = pd.read_csv("twitter.csv")
# Fixed random_state keeps the 40-row sample reproducible across runs.
df_pick = df.sample(40,random_state=24)
raw_text = df_pick["Text"].tolist()
raw_label = df_pick["Label"].tolist()

from sklearn.model_selection import train_test_split
text_train,text_test,label_train,label_test = train_test_split(raw_text,raw_label,test_size=0.25,random_state=42)

# + [markdown] colab_type="text" id="i92SvLGOLuPv"
# ## Preprocessing [Complete this section]

# + [markdown] colab_type="text" id="Xfp_5N2fLuPw"
# **Case Folding**

# + colab_type="code" id="l9LggrTtLuPx" colab={}
text_train = [s.lower() for s in text_train]
text_test = [s.lower() for s in text_test]

# + [markdown] colab_type="text" id="hb4GVZYCLuP1"
# **Remove punctuations [Please complete this section]**

# + colab_type="code" id="Vl4MgL1XLuP2" colab={}
import re
def remove_punctuation_re(x):
    # Please complete this
    return x

text_train = [remove_punctuation_re(s) for s in text_train]
text_test = [remove_punctuation_re(s) for s in text_test]

# + [markdown] colab_type="text" id="YvIc4FCRLuQC"
# **Tokenization [Please complete this section]**

# + colab_type="code" id="V3I1iHIWLuQD" colab={}
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
#Please complete this
text_train =
text_test =

# + [markdown] colab_type="text" id="ucmu2pWTLuQG"
# **Remove stopwords [Please complete this section]**

# + colab_type="code" id="4blOZaW7LuQH" colab={}
nltk.download('stopwords')
from nltk.corpus import stopwords as sw
stop_words = sw.words()

text_train_ns=[]
for tokens in text_train:
    filtered_sentence = [w for w in tokens if not w in stop_words]
    text_train_ns.append(filtered_sentence)

text_test_ns=[]
for tokens in text_test:
    #Please complete this

# + [markdown] colab_type="text" id="QqFS0duZLuQM"
# **Lemmatisation [Please complete this section]**

# + colab_type="code" id="6BTXGVwjLuQN" colab={}
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()

text_train_le = []
for tokens in text_train_ns:
    lemma_sentence = [lemmatizer.lemmatize(w) for w in tokens ]
    text_train_le.append(lemma_sentence)

text_test_le = []
for tokens in text_test_ns:
    #Please complete this

# + [markdown] colab_type="text" id="MpQvfCJHLuQQ"
# **Label Encoding**

# + colab_type="code" id="FkcRYxaZLuQQ" colab={}
from sklearn.preprocessing import LabelEncoder

labels = np.unique(label_train)
lEnc = LabelEncoder()
lEnc.fit(labels)
label_train_n = lEnc.transform(label_train)
label_test_n = lEnc.transform(label_test)

numClass = len(labels)
print(labels)
print(lEnc.transform(labels))

# + [markdown] colab_type="text" id="brgyXIhALuQT"
# ## Embeddings [Complete this section]

# + [markdown] colab_type="text" id="ltZvof0NLuQX"
# **Padding**

# + colab_type="code" id="O1V_W5PvLuQa" colab={}
# Sequence length is taken from the longest TRAINING sentence (pre-lemmatisation list).
len_list = [len(s) for s in text_train_ns]
seq_length = max(len_list)

def add_padding(corpus, seq_length):
    # Truncate long sentences; right-pad short ones with "<PAD>".
    # NOTE(review): sentence.append mutates the caller's token lists in place
    # (text_train_le / text_test_le are modified) — confirm this is intended.
    output = []
    for sentence in corpus:
        if len(sentence)>seq_length:
            output.append(sentence[:seq_length])
        else:
            for j in range(seq_length-len(sentence)):
                sentence.append("<PAD>")
            output.append(sentence)
    return output

text_train_pad = add_padding(text_train_le,seq_length )
text_test_pad = add_padding(text_test_le,seq_length )

# + [markdown] colab_type="text" id="rUA2H0-HLuQe"
# **Download Embeddings [Please try other embeddings]**
#
# You can find the details from https://github.com/RaRe-Technologies/gensim-data

# + colab_type="code" id="856alejrLuQe" colab={}
import gensim.downloader as api
word_emb_model = api.load("glove-twitter-25") #this is only example

# + [markdown] colab_type="text" id="sCZMaK0zLuQj"
# **Get embeddings**

# + colab_type="code" id="bdSLmjkSLuQk" colab={}
def get_embeddings(corpus,word_emb_model):
    # Look up each token's vector; out-of-vocabulary tokens (and "<PAD>")
    # fall back to an all-zero vector.
    emb_dim = word_emb_model.vector_size
    out = []
    for sentence in corpus:
        out_temp = []
        for word in sentence:
            try:
                out_temp.append(word_emb_model.wv[word])
            # NOTE(review): bare except also swallows non-lookup errors
            # (even KeyboardInterrupt); `except KeyError` would be safer.
            except:
                out_temp.append([0]*emb_dim)
        out.append(out_temp)
    return np.array(out)

train_emb = get_embeddings(text_train_pad,word_emb_model)
test_emb = get_embeddings(text_test_pad,word_emb_model)

# + [markdown] colab_type="text" id="yJ96KxTcLuQo"
# ## Model

# + colab_type="code" id="jCZG8wAXLuQp" colab={}
n_input = train_emb.shape[2]
n_hidden = 50
n_class = len(labels)
total_epoch = 100
learning_rate = 0.01

# + colab_type="code" id="NRrSAS-DLuQs" colab={}
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score

# Two-layer unidirectional LSTM classifier; classifies from the last
# time-step's output (x[:,-1,:]).
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.lstm = nn.LSTM(n_input, n_hidden, num_layers=2, batch_first =True, dropout=0.2)
        self.linear = nn.Linear(n_hidden,n_class)

    def forward(self, x):
        x,_ = self.lstm(x)
        x = self.linear(x[:,-1,:])
        x = F.log_softmax(x, dim=1)
        return x

net = Net().to(device)

criterion = nn.NLLLoss()
optimizer = optim.Adam(net.parameters(), lr=learning_rate)

input_batch_torch = torch.from_numpy(np.array(train_emb)).float().to(device)
target_batch_torch = torch.from_numpy(np.array(label_train_n)).view(-1).to(device)

for epoch in range(total_epoch):
    net.train()
    outputs = net(input_batch_torch)
    loss = criterion(outputs, target_batch_torch)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

    net.eval()
    outputs = net(input_batch_torch)
    # Report training metrics every 10th epoch.
    if epoch%10 == 9:
        loss = criterion(outputs, target_batch_torch)
        _, predicted = torch.max(outputs, 1)
        acc= accuracy_score(predicted.cpu().numpy(),target_batch_torch.cpu().numpy())
        print('Epoch: %d, loss: %.5f, train_acc: %.2f' %(epoch + 1, loss.item(), acc))

print('Finished Training')

# + [markdown] id="nWTw2QWfN0Xc" colab_type="text"
# ## Save and Load the model [Complete this section]

# + [markdown] id="abct4WjhO4Bq" colab_type="text"
# **Save the model [Complete this part]**

# + id="gPRrmNS5ONQB" colab_type="code" colab={}


# + [markdown] id="75fQ4jx5ON-t" colab_type="text"
# **Load the model**

# + id="F1AUVT1uOQm6" colab_type="code" colab={}
model2 = torch.load('lab5.pt')
model2.eval()

# + [markdown] id="WnsgFMGYKrPA" colab_type="text"
# ## Testing

# + id="yntT6vpAKXCt" colab_type="code" outputId="46db9249-58e0-4c55-d268-0667a7de0f18" colab={"base_uri": "https://localhost:8080/", "height": 199}
input_batch_torch = torch.from_numpy(np.array(test_emb)).float().to(device)
outputs = model2(input_batch_torch)
_, predicted = torch.max(outputs, 1)

from sklearn.metrics import classification_report
print(classification_report(label_test_n,predicted.cpu().numpy()))

# + id="tVytyTdS0VQS" colab_type="code" colab={}
NLP_Lab05__wehe9821.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %load_ext autoreload # %autoreload 2 from matplotlib.pyplot import imshow import matplotlib.pyplot as plt # %matplotlib inline # - import cPickle as pickle import os; import sys; sys.path.append('..') import gp import gp.nets as nets with open('../nets/IPMLB_FULL.p', 'rb') as f: cnn = pickle.load(f) cnn.uuid = 'IPMLB' import numpy as np import mahotas as mh sys.path.append('../gp/') import tifffile as tif # + def cr(z_in): images = [] probs = [] golds = [] rhoanas = [] for z in range(z_in,z_in+1): #70,72 image = tif.imread('/home/d/data/ac3/image/ac3_input_'+str(z).zfill(4)+'.tif')[100:800,100:800] #prob = tif.imread('/home/d/data/ac3/prob/ac3_input_'+str(z).zfill(4)+'_syn.tif')[100:800,100:800] prob = mh.imread('/home/d/data/ac3/probnew/000000092_000000092_'+str(z).zfill(9)+'_membrane-membrane.png')[100:800,100:800] gold = tif.imread('/home/d/data/ac3/gold/ac3_labels_'+str(z).zfill(4)+'.tif')[100:800,100:800] rhoana = tif.imread('/home/d/data/ac3/rhoana/z='+str(z).zfill(8)+'.tif')[100:800,100:800] # rhoana = mh.imread('/home/d/data/ac3/rhoananew/000000092_000000092_'+str(z).zfill(9)+'_neuroproof.png')[100:800,100:800] # rhoana_single = np.zeros((rhoana.shape[0], rhoana.shape[1]), dtype=np.uint64) # rhoana_single[:, :] = rhoana[:,:,0]*256*256 + rhoana[:,:,1]*256 + rhoana[:,:,2] # rhoana = rhoana_single # print rhoana.shape, prob.shape # image, prob, mask, gold, rhoana = gp.Util.read_section('/home/d/data/cylinderNEW/', z) # images.append(image) # probs.append(prob) # golds.append(gold.astype(np.uint64)) # rhoanas.append(rhoana.astype(np.uint64)) bigM = gp.Legacy.create_bigM_without_mask(cnn, images, probs, rhoanas, verbose=True, max=1000000) bigM_cylinder_after_95, out_cylinder_volume_after_auto_95, cylinder_auto_fixes_95, 
cylinder_auto_vi_s_95, vi_s_per_step2 = gp.Legacy.splits_global_from_M_automatic( cnn, bigM, images, probs, rhoanas, golds, sureness_threshold=.8) initial_median_vi = gp.Legacy.VI(golds, rhoanas)[0] return cylinder_auto_fixes_95, vi_s_per_step2, initial_median_vi # + imshow(rhoanas[0]) # - groups = [] for z in range(50,55): fixes, vis, initialvi = cr(z) groups.append((fixes, vis, initialvi)) v100 = [0]*len(groups) v99 = [0]*len(groups) v98 = [0]*len(groups) v97 = [0]*len(groups) v96 = [0]*len(groups) v95 = [0]*len(groups) v94 = [0]*len(groups) v93 = [0]*len(groups) v92 = [0]*len(groups) v91 = [0]*len(groups) i100 = [0]*len(groups) i99 = [0]*len(groups) i98 = [0]*len(groups) i97 = [0]*len(groups) i96 = [0]*len(groups) i95 = [0]*len(groups) i94 = [0]*len(groups) i93 = [0]*len(groups) i92 = [0]*len(groups) i91 = [0]*len(groups) for z,g in enumerate(groups): for i,f in enumerate(g[0]): # looping through fixes if f[1] >= 0.99: # add to 100 box curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) print g[2],curvi v100[z] = diffvi i100[z] = i elif f[1] >= 0.98: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v99[z] = diffvi i99[z] = i elif f[1] >= 0.97: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v98[z] = diffvi i98[z] = i elif f[1] >= 0.96: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v97[z] = diffvi i97[z] = i elif f[1] >= 0.95: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v96[z] = diffvi i96[z] = i elif f[1] >= 0.94: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v95[z] = diffvi i95[z] = i elif f[1] >= 0.93: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v94[z] = diffvi i94[z] = i elif f[1] >= 0.92: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v93[z] = diffvi i93[z] = i elif f[1] >= 0.91: curvi = g[1][i][0] # we get the median diffvi = abs(g[2] - curvi) v92[z] = diffvi i92[z] = i elif f[1] >= 0.90: curvi = g[1][i][0] 
# we get the median diffvi = abs(g[2] - curvi) v91[z] = diffvi i91[z] = i a= plt.boxplot([v100,v99,v98,v97,v96,v95,v94,v93,v92,v91]) b=plt.xticks(range(1,11),[i/100. for i in range(99,89,-1)]) # plt.ylim(-.25,1) t_95 = [] for i,f in enumerate(t0_f): if f[1] <= 0.95: t_95.append(i) break for i,f in enumerate(t1_f): if f[1] <= 0.95: t_95.append(i) break for i,f in enumerate(t2_f): if f[1] <= 0.95: t_95.append(i) break for i,f in enumerate(t3_f): if f[1] <= 0.95: t_95.append(i) break for i,f in enumerate(t4_f): if f[1] <= 0.95: t_95.append(i) break t0_vis = [] for v in t0_vi[0:100]: t0_vis.append(v[1]) t1_vis = [] for v in t1_vi[0:100]: t1_vis.append(v[1]) t2_vis = [] for v in t2_vi[0:100]: t2_vis.append(v[1]) t3_vis = [] for v in t3_vi[0:100]: t3_vis.append(v[1]) t4_vis = [] for v in t4_vi[0:100]: t4_vis.append(v[1]) fig = plt.figure() plt.axvline(x=t_95[0], color='red') # plt.axvline(x=t_95[1], color='red') # plt.axvline(x=t_95[2], color='red') plt.axvline(x=t_95[3], color='red') plt.axvline(x=t_95[4], color='red') plt.plot(t0_vis) # plt.plot(t1_vis) # plt.plot(t2_vis) plt.plot(t3_vis) plt.plot(t4_vis) t0_vis # + fig = plt.figure(figsize=(10,7)) font = {'family' : 'sans-serif', 'weight' : 'normal', 'size' : 24} plt.rc('font', **font) plt.axvline(x=t_95[0], ymax=.77, color='blue', linewidth=3, linestyle=':') # plt.axvline(x=t_95[1], color='red') # plt.axvline(x=t_95[2], color='red') plt.axvline(x=t_95[3], ymax=0.66, color='green', linewidth=3, linestyle=':') plt.axvline(x=t_95[4], ymax=0.62, color='red', linewidth=3, linestyle=':') plt.plot(t0_vis, color='blue', linewidth=3) # plt.plot(t1_vis) # plt.plot(t2_vis) plt.plot(t3_vis, color='green', linewidth=3) plt.plot(t4_vis, color='red', linewidth=3) plt.ylabel('Variation of Information') plt.xlabel('Correction') plt.ylim(0.5,0.9) plt.yticks(np.arange(0.5, 0.91, 0.1)) plt.savefig('/home/d/PAPERGFX/ptplot.pdf') # -
ipy_test/AC3TESTFORPT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from logicqubit.logic import * def iqft(logicQuBit, n): # transformada quântica de Fourier inversa for i in range(1,n+1): for j in range(1,i+1): logicQuBit.CU1(j, i, -pi/float(2**(i-j))) logicQuBit.H(i) logicQuBit = LogicQuBit(11) # + n = 4 phase = 11.0*pi/8.0 logicQuBit.H(1) logicQuBit.H(2) logicQuBit.H(3) logicQuBit.H(4) logicQuBit.U1(4, 1*phase) logicQuBit.U1(3, 2*phase) logicQuBit.U1(2, 4*phase) logicQuBit.U1(1, 8*phase) iqft(logicQuBit, n) # - logicQuBit.Measure([1,2,3,4]) logicQuBit.Plot()
phase_estimation_4q.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
# NOTE(fix): `seaborn` and `time` were imported but never used anywhere in
# this notebook, so the imports were dropped.


def diffusion_trial(drift, boundary, beta, t_e_mean, t_m_mean, dc=1.0, dt=.005, max_steps=2e4):
    """Simulates a trial from the diffusion model.

    Parameters
    ----------
    drift : float
        Evidence accumulation rate.
    boundary : float
        Separation of the two decision boundaries (lower boundary is 0).
    beta : float
        Relative starting point of the accumulator (fraction of `boundary`).
    t_e_mean, t_m_mean : float
        Means of the normally distributed encoding and motor-execution times.
    dc, dt, max_steps : float
        Diffusion coefficient, Euler step size, and give-up horizon.

    Returns
    -------
    (choicert, N200) : signed choice response time (negative = lower
    boundary) and a simulated N200 latency tied to the encoding time.
    """
    n_steps = 0.
    evidence = boundary * beta

    # Simulate a single DM path
    while (evidence > 0 and evidence < boundary and n_steps < max_steps):
        # DDM equation
        evidence += drift*dt + np.sqrt(dt) * dc * np.random.normal()
        # Increment step
        n_steps += 1.0

    rt = n_steps * dt

    # Encoding time
    t_e = np.random.normal(t_e_mean, .1)
    # N200 latency
    N200 = np.random.normal(.5*t_e, .05)
    # Motor execution time
    t_m = np.random.normal(t_m_mean, .1)

    if evidence >= boundary:
        choicert = t_e + rt + t_m
    elif evidence <= 0:
        choicert = -t_e - rt - t_m
    else:
        # Choose closest boundary at max_steps
        choicert = np.sign(evidence - boundary*.5)*(t_e + rt + t_m)

    return choicert, N200


def _sim_condition(combos, n_trials):
    """Shared driver for the three simulated participants.

    `combos` holds the four (drift, boundary, beta, t_e_mean, t_m_mean)
    tuples, one per condition quarter.  Trials are interleaved across the
    four conditions inside a single loop, exactly as in the original
    per-participant code, so the seeded random stream — and therefore the
    simulated data — is unchanged.
    """
    itr = int(n_trials/4)
    choicert = np.empty(n_trials)
    N200 = np.empty(n_trials)

    np.random.seed(123)
    for i in range(itr):
        for c, (drift, boundary, beta, t_e, t_m) in enumerate(combos):
            choicert[c*itr + i], N200[c*itr + i] = diffusion_trial(drift, boundary, beta, t_e, t_m)

    # Condition labels: spatial alternates per quarter, coherence per half.
    cond_spat = np.concatenate([np.repeat(1,itr), np.repeat(2,itr), np.repeat(1,itr), np.repeat(2,itr)])
    cond_coher = np.concatenate([np.repeat(1,itr), np.repeat(1,itr), np.repeat(2,itr), np.repeat(2,itr)])
    conds = np.concatenate([np.repeat(1,itr), np.repeat(2,itr), np.repeat(3,itr), np.repeat(4,itr)])

    return choicert, cond_spat, cond_coher, conds, N200


# ###### Simulation participant 1

# +
def sim1_condition(params, n_trials):
    """Simulates a diffusion process over an entire condition.

    Participant 1: encoding time varies across conditions, motor time fixed.
    """
    drift, boundary, beta, t_e_mean, t_m_mean = params
    combos = [(drift[0], boundary, beta, t_e_mean[0], t_m_mean),
              (drift[0], boundary, beta, t_e_mean[1], t_m_mean),
              (drift[1], boundary, beta, t_e_mean[0], t_m_mean),
              (drift[1], boundary, beta, t_e_mean[1], t_m_mean)]
    return _sim_condition(combos, n_trials)

choicert, cond_spat, cond_coher, conds, N200 = sim1_condition(params=[[2.5, 1.5], 1.5, .5, [.3, .5], .4], n_trials=288)

sim1_dic = {'y':choicert, 'cond_spat':cond_spat, 'cond_coher':cond_coher, 'conds':conds, 'n200lat': N200}
sim1_df = pd.DataFrame(sim1_dic)
sim1_df.to_csv('sim1.csv', index=False)
# -

# ###### Simulation participant 2

# +
def sim2_condition(params, n_trials):
    """Simulates a diffusion process over an entire condition.

    Participant 2: motor time varies across conditions, encoding time fixed.
    """
    drift, boundary, beta, t_e_mean, t_m_mean = params
    combos = [(drift[0], boundary, beta, t_e_mean, t_m_mean[0]),
              (drift[0], boundary, beta, t_e_mean, t_m_mean[1]),
              (drift[1], boundary, beta, t_e_mean, t_m_mean[0]),
              (drift[1], boundary, beta, t_e_mean, t_m_mean[1])]
    return _sim_condition(combos, n_trials)

choicert, cond_spat, cond_coher, conds, N200 = sim2_condition(params=[[2.5, 1.5], 1.5, .5, .3, [.4, .6]], n_trials=500)

sim2_dic = {'y':choicert, 'cond_spat':cond_spat, 'cond_coher':cond_coher, 'conds':conds, 'n200lat': N200}
sim2_df = pd.DataFrame(sim2_dic)
sim2_df.to_csv('sim2.csv', index=False)
# -

# ###### Simulation participant 3

# +
def sim3_condition(params, n_trials):
    """Simulates a diffusion process over an entire condition.

    Participant 3: both encoding and motor time vary across conditions.
    """
    drift, boundary, beta, t_e_mean, t_m_mean = params
    combos = [(drift[0], boundary, beta, t_e_mean[0], t_m_mean[0]),
              (drift[0], boundary, beta, t_e_mean[1], t_m_mean[1]),
              (drift[1], boundary, beta, t_e_mean[0], t_m_mean[0]),
              (drift[1], boundary, beta, t_e_mean[1], t_m_mean[1])]
    return _sim_condition(combos, n_trials)

choicert, cond_spat, cond_coher, conds, N200 = sim3_condition(params=[[2.5, 1.5], 1.5, .5, [.3, .5], [.4, .6]], n_trials=500)

sim3_dic = {'y':choicert, 'cond_spat':cond_spat, 'cond_coher':cond_coher, 'conds':conds, 'n200lat': N200}
sim3_df = pd.DataFrame(sim3_dic)
sim3_df.to_csv('sim3.csv', index=False)
# -
Neuro-Cognitive Models/Runs/Simulation_run/Data_simulation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from numpy import linspace,cos,pi,ceil,floor,arange
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='whitegrid')

# +
# Sampling-theorem demonstration on x(t) = cos(2*pi*t) + cos(2*pi*40*t):
# the densely evaluated curve stands in for the continuous signal, which is
# band-limited to 40 Hz, and is compared against a 80 Hz (Nyquist-rate) and
# a 30 Hz (sub-Nyquist) sample train.
plt.figure(figsize=(18,6))

f = 40
tmin = -0.3
tmax = 0.3

def _signal(times):
    # The band-limited test signal evaluated at the given instants.
    return cos(2*pi*times) + cos(2*pi*f*times)

def _sample_instants(period):
    # Discrete sampling instants n*period covering [tmin, tmax].
    return arange(ceil(tmin/period), floor(tmax/period)) * period

# "Continuous" reference: 400 dense evaluation points.
dense_t = linspace(tmin,tmax,400)
plt.plot(dense_t, _signal(dense_t), label='Signal Sampling 40 Hz')

# 80 Hz sampling — exactly the Nyquist rate (2 x 40 Hz), drawn as dots.
t80 = _sample_instants(1/80)
plt.plot(t80, _signal(t80), 'bo', label = 'Sampling with 80Hz')

# 30 Hz sampling — below the Nyquist rate, so the samples alias.
t30 = _sample_instants(1/30)
plt.plot(t30, _signal(t30), '-r', markersize = 8, label = 'Sampling with 30Hz')

plt.legend()
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.show()
# -
Lesson 2/Sampling Theorem/Sampling Theorem/Sampling Theorem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Example notebook: fitting `DupleBalanceClassifier` on a synthetic 5-class
# imbalanced task — basic usage, automatic parameter tuning, and the
# advanced training log.

# +
print(__doc__)

RANDOM_STATE = 42

# +
from duplebalance import DupleBalanceClassifier
from duplebalance.base import sort_dict_by_key
from duplebalance.utils._plot import plot_2Dprojection_and_cardinality

from collections import Counter

import matplotlib.pyplot as plt
from sklearn.decomposition import KernelPCA
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# +
# Make a 5-class imbalanced classification task
# (class proportions are set by `weights`: 5% / 5% / 15% / 25% / 50%).
X, y = make_classification(n_classes=5, class_sep=1,
    weights=[0.05, 0.05, 0.15, 0.25, 0.5],
    n_informative=10, n_redundant=1, flip_y=0,
    n_features=20, n_clusters_per_class=1,
    n_samples=2000, random_state=0)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)

# Per-class counts of the two splits, sorted by class label.
origin_distr = sort_dict_by_key(Counter(y_train))
test_distr = sort_dict_by_key(Counter(y_test))
print('Original training dataset shape %s' % origin_distr)
print('Original test dataset shape %s' % test_distr)

# Visualize the dataset
projection = KernelPCA(n_components=2).fit(X, y)
fig = plot_2Dprojection_and_cardinality(X, y, projection=projection)
plt.show()
# -

# # Train a DupleBalance Classifier
#
# ### Basic usage of DupleBalanceClassifier

# +
# Train a DupleBalanceClassifier
clf = DupleBalanceClassifier(
    n_estimators=5,
    random_state=RANDOM_STATE,
).fit(X_train, y_train)

# Predict & Evaluate
score = clf.score(X_test, y_test)
print ("DupleBalance {} | Balanced AUROC: {:.3f} | #Training Samples: {:d}".format(
    len(clf.estimators_), score, sum(clf.estimators_n_training_samples_)
))
# -

# ### Train DupleBalanceClassifier with automatic parameter tuning

# +
# Train a DupleBalanceClassifier
# `perturb_alpha='auto'` lets the fitter pick the perturbation strength.
clf = DupleBalanceClassifier(
    n_estimators=5,
    random_state=RANDOM_STATE,
).fit(
    X_train, y_train,
    perturb_alpha='auto',
)

# Predict & Evaluate
score = clf.score(X_test, y_test)
print ("DupleBalance {} | Balanced AUROC: {:.3f} | #Training Samples: {:d}".format(
    len(clf.estimators_), score, sum(clf.estimators_n_training_samples_)
))
# -

# ### Train DupleBalanceClassifier with advanced training log

# +
# Train a DupleBalanceClassifier
# `eval_datasets` adds a held-out set to the log; `train_verbose` controls
# what is printed per boosting iteration.
clf = DupleBalanceClassifier(
    n_estimators=5,
    random_state=RANDOM_STATE,
).fit(
    X_train, y_train,
    perturb_alpha='auto',
    eval_datasets={'test': (X_test, y_test)},
    train_verbose={
        'granularity': 1,
        'print_distribution': True,
        'print_metrics': True,
    },
)

# Predict & Evaluate
score = clf.score(X_test, y_test)
print ("DupleBalance {} | Balanced AUROC: {:.3f} | #Training Samples: {:d}".format(
    len(clf.estimators_), score, sum(clf.estimators_n_training_samples_)
))
# -
examples/notebooks/basic_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Digital Image Processing - HW13 - 98722278 - <NAME> # In this notebook, I have solved the assignment's problems which are as follows: # # 1. Answer below questions: # 1. What is **Dilated Convolution**? # 2. What are use cases? # 3. What are Pros and Cons? # 2. Mask R-CNN: # 1. Report a summary of [Mask R-CNN](http://openaccess.thecvf.com/content_ICCV_2017/papers/He_Mask_R-CNN_ICCV_2017_paper.pdf) paper. # 2. Use any implemented model(pretrained) on your custom input # 3. Compute number of parameters *in each layer* for below network: # # ``` python # model = get_unet((256, 256, 3)) # def conv2d_block(input_tensor, n_filters, kernel_size=3): # # # first layer # x=Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), # padding='same')(input_tensor) # x=Activation('relu')(x) # # second layer # x=Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), # padding='same')(input_tensor) # x=Activation('relu')(x) # return x # def get_unet(input_img, n_filters=16): # # Contracting Path # c1=conv2d_block(input_img, n_filters*1, kernel_size=3) # p1=MaxPooling2D((2, 2))(c1) # # c2=conv2d_block(p1, n_filters*2, kernel_size=3) # p2=MaxPooling2D((2, 2))(c2) # # c3=conv2d_block(p2, n_filters*4, kernel_size=3) # p3=MaxPooling2D((2, 2))(c3) # # c4=conv2d_block(p3, n_filters*8, kernel_size=3) # p4=MaxPooling2D((2, 2))(c4) # # c5=conv2d_block(p4, n_filters=n_filters*16, kernel_size=3) # # # Expansive Path # u6=Conv2DTranspose(n_filters*8, (3, 3), strides=(2, 2), padding='same')(c5) # u6=concatenate([u6, c4]) # c6=conv2d_block(u6, n_filters*8, kernel_size=3) # u7=Conv2DTranspose(n_filters*4, (3, 3), strides=(2, 2), padding='same')(c6) # u7=concatenate([u7, c3]) # # c7=conv2d_block(u7, n_filters*4, kernel_size=3) # 
u8=Conv2DTranspose(n_filters*2, (3, 3), strides=(2, 2), padding='same')(c7) # u8=concatenate([u8, c2]) # c8=conv2d_block(u8, n_filters*2, kernel_size=3) # u9=Conv2DTranspose(n_filters*1, (3, 3), strides=(2, 2), padding='same')(c8) # u9=concatenate([u9, c1]) # c9=conv2d_block(u9, n_filters*1, kernel_size=3) # outputs=Conv2D(10, (1, 1), activation='sigmoid')(c9) # model=Model(inputs=[input_img], outputs=[outputs]) # return model # # # ``` # ## 1 Answer below questions: # 1. What is **Dilated Convolution**? # 2. What are use cases? # 3. What are Pros and Cons? # ### 1.A Dilated Convolutation # First of of let depict the whole idea simply by comparing the dilated convolution math formual and standard conv formula. # # ![dilated conv](wiki/1_1.png) # # As we can see hyper parameter has been added which is `l` where corresponds to number of steps per addition in long-run convolution. If we set `l=0`, then we have normal convolution. This parameter `l` will skip some points in enighborhood. # # Here is how it works: # # ![dilated conv vis](wiki/1_2.gif) # # Dilated convolution incorporates larger receptive field regarding same amount of parameters of windows size, etc regarding normal conv. # # ![dilated conv receptive field](wiki/1_3.png) # ### 1.B Use cases # In the paper [Multi-scale contex aggregation by dilated convolutions](http://vladlen.info/papers/dilated-convolutions.pdf) authors have shown that almost any model on **semantic segmentation** task can perform better by adding a `contex` layer which is constructed of multiple different conv layers with different size of `l` to intorduce bigger receptive field over layers. # # The reason is as we know in semantic segmentation of similar tasks, we need to consider different object sizes when labeling pixels, so by having normal conv (`l=1`) and adding dilated convs (`l>=2`), the receptive field has been increased so labeling can be done wiser. 
#
# Here is the definition of the `context` module in the aforementioned paper:
#
# ![context module](wiki/1_4.png)

# ### 1.C Pros and Cons

# On the pros side: in summary, dilation changes neither the number of parameters nor the number of computations, so the operation carries no memory or time disadvantage. Because it provides a larger receptive field for the same amount of computation, it outperforms most of the base models, and its dilation factor can be increased while preserving resolution. All of these aspects lead to better results in semantic segmentation tasks.
#
# On the cons side: using dilation in models such as ResNet, which try to preserve the features learned in previous layers, can produce artifacts such as checkerboard or dot effects in image-to-image translation tasks. This problem has been solved by adding skip-connections at some particular points, though not across all x-step convolutions.

# ## 2 Mask R-CNN:
# 1. Report a summary of [Mask R-CNN](http://openaccess.thecvf.com/content_ICCV_2017/papers/He_Mask_R-CNN_ICCV_2017_paper.pdf) paper.
# 2. Use any implemented model(pretrained) on your custom input

# ### 2.A Mask R-CNN

# First, let's assume we know what object detection, semantic segmentation and instance segmentation are. (See [here](https://github.com/Nikronic/Digital-Image-Processing-IUST/tree/master/HW11#1-compare-semantic-segmentation-object-detection-and-instance-segmentation) if not)
#
# Also, let's assume we know what the *R-CNN*, *Fast R-CNN* and *Faster R-CNN* models are. (See [here](https://github.com/Nikronic/Digital-Image-Processing-IUST/tree/master/HW11#2-compare-rcnn-fast-rcnn-and-faster-rcnn) if not)
#
# Mask R-CNN is a model for the instance segmentation task.
# It generates 3 outputs:
# 1. Object label
# 2. Bounding box
# 3. Object mask
#
# Previous R-CNN-style models provide only the first two outputs; the third output (the pixel-level mask) is the key addition and the major contribution of Mask R-CNN.
# # *Now let's have a brief understanding of model before going into more details.* # # ![mask r-cnn model](wiki/2_3.png) # # Mask R-CNN has been constructed by two stages, first stage's duty is to generate region proposals but using completely different approach from previously proposed papers. Secondly, it generates object labels, refine boudning boxes and masks pixel-wise based on the region proposal from first stage. These both stages obtained from a base CNN model based on [Feature Pyramid Network](https://arxiv.org/abs/1612.03144) style. # # FPN is U-net structure where in contracting phase any model like ResNet101 can be used. There is a expansion phase too which is similar to contracting phase but reverse in size where the output of each step in contracting phase has been concatenated to the output of corresponding layer in expansion phase. Here is image from FPN paper: # # ![fpn module](wiki/2_1.png) # # ![fpn module concat op](wiki/2_2.png) # # Now let's talk about first stage. A small CNN model called RPN (region proposal network) will extract region candidate where objects may exists. This model uses the output of expansion phase of FPN model as input as these inputs are extracted features from input images given to the whole network. (This step is indentical to Faster R-CNN model) # # But for second stage, Mask R-CNN generates a binary mask for each RoI while predicting object lables and their bounding boxes still is identical to faster r-cnn and have been done simultaneously. # # The loss function also has been expanded to consider mask loss using weighted sum (for each RoI): # # ![loss mask rcnn](wiki/2_4.png) # # Where loss bounding box and label are identical to faster rcnn. # # The mask branch of network has `Km^2` output where consists of `K` layer with size of `m*m` where `K` is number of classes. So loss mask defines in the way that first apply per-pixel sigmoid then average binary cross-entropy. 
# # Mask representation are small vectors which have been obtained from fully connected layers. The structure of spatial features of masks can be gained by pixel-to-pixel measurements. So to do this, authors generate `m*m` masks for each RoI using an FCN. But features from different RoIs need to be well aligned to be correctly measured using pixel-to-pixel approach and that's why **RoIAlign** has been introduced. # # **RoIAlgin** is operation to extract a small feature map from a RoI. But how it works: # 1. Use RoIPool to quantize a floating-point RoI into discrete form (`round(x/16)`) # 2. Divine output into different spatial bins where all are quantized too # 3. Aggregate feature values covered by each bin using MaxPool # # This approach introduces misaligned bins for different RoIs so to handle this harsh quantization, **RoIAlgin** layer has been used. To solve this issue here is the new approach: # # 1. Use `x/16` (no rounding) to quantize # 2. Consider four bins for each RoI # 3. Fill missed values using bilinear interpolation # 4. Aggregate feature values using MaxPool or AvgPool # # And about network architecture, three term can represent it: # 1. **Backbone** where can be any FPN network or VGG style used for feature extraction # 2. **Head** network for bounding box prediction and mask generationg where applied *separately* where can be any fully convolutional network like a part of ResNet, etc. 
#
#
# ### 2.B Mask R-CNN Inference

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="IVQNi3LFUavt" outputId="fe9c9b00-ea6e-44f9-f699-181f866d4a01"
# Fetch the reference Matterport implementation and work from its root.
# !git clone https://github.com/matterport/Mask_RCNN.git
# cd Mask_RCNN/

# + colab={} colab_type="code" id="BfKiP9ZVUq_V"
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt

# %matplotlib inline
# %tensorflow_version 1.x

# + colab={} colab_type="code" id="N-oTchw3VVJi"
# Utilities, model, and visualization helpers from the cloned repository.
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from samples.coco import coco

# + colab={} colab_type="code" id="EQ7cb_0-VeEk"
# Download the COCO-pretrained weights once; reuse the local copy afterwards.
MODEL_DIR = 'mask_rcnn_coco.h5'
if not os.path.exists(MODEL_DIR):
    utils.download_trained_weights(MODEL_DIR)

# + colab={"base_uri": "https://localhost:8080/", "height": 921} colab_type="code" id="78-dWt35WVjZ" outputId="b9a3bc9c-0958-473c-9a7a-9be800137afe"
class InferenceConfig(coco.CocoConfig):
    # Run one image at a time on a single (virtual) GPU.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

config = InferenceConfig()
config.display()

# + colab={} colab_type="code" id="7qskBf0IWgkb"
# Build the network in inference mode and load the pretrained weights.
model = modellib.MaskRCNN(mode="inference", model_dir='logs', config=config)
model.load_weights(MODEL_DIR, by_name=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" id="mraecJjjX6gk" outputId="99fbf392-956d-4e40-d7bd-fe733da20cd6"
# Custom input image for the assignment.
file_name = '../conf.jpg'
image = skimage.io.imread(file_name)
plt.imshow(image)

# + colab={"base_uri": "https://localhost:8080/", "height": 735} colab_type="code" id="zViErsWPXHho" outputId="261dfe2b-8ab4-4fd8-bbf5-f439526cbfb7"
# COCO class names; index 0 is the background class.
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog',
               'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
               'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
               'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
               'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
               'scissors', 'teddy bear', 'hair drier', 'toothbrush']

# Run detection
results = model.detect([image], verbose=1)

# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])
# -

# *Note: README: The code has been run on Colab*

# ## 3 Compute number of parameters *in each layer* for below network:
#
# ``` python
# model = get_unet((256, 256, 3))
# def conv2d_block(input_tensor, n_filters, kernel_size=3):
#
#     # first layer
#     x=Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
#              padding='same')(input_tensor)  # Block_{changes with 'c' layers}_1
#     x=Activation('relu')(x)
#     # second layer
#     x=Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
#              padding='same')(input_tensor)  # Block_{changes with 'c' layers}_2
#     x=Activation('relu')(x)
#     return x
# def get_unet(input_img, n_filters=16):
#     # Contracting Path
#     c1=conv2d_block(input_img, n_filters*1, kernel_size=3)  # c1
#     p1=MaxPooling2D((2, 2))(c1)
#
#     c2=conv2d_block(p1, n_filters*2, kernel_size=3)  # c2
#     p2=MaxPooling2D((2, 2))(c2)
#
#     c3=conv2d_block(p2, n_filters*4, kernel_size=3)  # c3
#     p3=MaxPooling2D((2, 2))(c3)
#
#     c4=conv2d_block(p3, n_filters*8, kernel_size=3)  # c4
#     p4=MaxPooling2D((2, 2))(c4)
#
#     c5=conv2d_block(p4, n_filters=n_filters*16, kernel_size=3)  # c5
#
#     # Expansive Path
#     u6=Conv2DTranspose(n_filters*8, (3, 3), strides=(2, 2), padding='same')(c5)  # u6
#     u6=concatenate([u6, c4])
#     c6=conv2d_block(u6, n_filters*8, kernel_size=3)  # c6
#     u7=Conv2DTranspose(n_filters*4, (3, 3), strides=(2, 2), padding='same')(c6)  # u7
#     u7=concatenate([u7, c3])
#
#     c7=conv2d_block(u7, n_filters*4, kernel_size=3)  # c7
#     u8=Conv2DTranspose(n_filters*2, (3, 3), strides=(2, 2), padding='same')(c7)  # u8
#     u8=concatenate([u8, c2])
#     c8=conv2d_block(u8, n_filters*2, kernel_size=3)  # c8
#     u9=Conv2DTranspose(n_filters*1, (3, 3), strides=(2, 2), padding='same')(c8)  # u9
#     u9=concatenate([u9, c1])
#     c9=conv2d_block(u9, n_filters*1, kernel_size=3)  # c9
#     outputs=Conv2D(10, (1, 1), activation='sigmoid')(c9)  # outputs
#     model=Model(inputs=[input_img], outputs=[outputs])
#     return model
# ```
#
# First, please see the comments in the listing above to understand the naming
# convention used in the parameter-count table below.
#
# | Layer Name | # Params |
# | --- | --- |
# | block_c1_1 | 448 |
# | block_c1_2 | 2320 |
# | block_c2_1 | 4640 |
# | block_c2_2 | 9248 |
# | block_c3_1 | 18496 |
# | block_c3_2 | 36928 |
# | block_c4_1 | 73856 |
# | block_c4_2 | 147584 |
# | block_c5_1 | 295168 |
# | block_c5_2 | 590080 |
# | u6 | 295040 |
# | block_c6_1 | 295040 |
# | block_c6_2 | 147584 |
# | u7 | 73792 |
# | block_c7_1 | 73792 |
# | block_c7_2 | 36928 |
# | u8 | 18464 |
# | block_c8_1 | 18464 |
# | block_c8_2 | 9248 |
# | u9 | 4624 |
# | block_c9_1 | 4624 |
# | block_c9_2 | 2320 |
# | outputs | 170 |
HW13/DIP-HW13.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Gastos dos vereadores com correios # # **Autor:** <NAME><br> # **Data de início:** 10 de fevereiro de 2018<br> # **Data de término:** 10 de fevereiro de 2018<br> # **Fonte de dados:** [SisGV Consulta](https://app-sisgvconsulta-prd.azurewebsites.net/)<br> # **Coleta de dados:** Extração em 10 de fevereiro de 2018, com script próprio<br> # **Arquivo de origem (convertido):** `gastos_vereadores.csv`<br> # **Ferramentas e bibliotecas:** Python 3.6, Jupyter, Pandas, Numpy, Matplotlib, Seaborn # + # Importação das ferramentas de análise import pandas as pd import numpy as np # Leitura do arquivo dataset = pd.read_csv('data/gastos_vereadores.csv', sep = ';', dtype = {'despesa': 'category'}) # Descarte de colunas inúteis para a análise dataset = dataset.drop(columns=['chave', 'nome_arquivo', 'centro_custo']) # Segmentação dos dados por ano dataset = dataset[dataset['ano'] == 2017] # Resultado print(f'Estão em análise:\n* {len(dataset)} despesas de 2017\n\ * {dataset["vereador"].nunique()} ocupantes do cargo de vereador e lideranças\n\ * {dataset["cnpj"].nunique()} fornecedores\n\ * R$ {dataset["valor"].sum()} em despesas') # - # Checagem de registro dataset.iloc[0] # + # Renomeação da categoria 'despesa' dataset['despesa'].cat.rename_categories([ 'Aperfeiçoamento profissional', 'Jornais e revistas', 'Combustível', 'Serviços gráficos', 'Contratação de PJ', 'Correios', 'Site', 'Estacionamento', 'Eventos', 'Correios (intermediado)', 'Aluguel de veículos (intermediado)', 'Xerox (intermediado)', 'Limpeza de veículos', 'Livros', 'Aluguel de móveis/equipamentos', 'Aluguel de veículos', 'Lubrificante', 'Material de escritório', 'Internet', 'Xerox', 'Telefone fixo', 'Celular' ], inplace = True) # Agrupamento por tipo de despesa df_tipo = 
dataset.groupby(['despesa']).agg({'valor': sum})\ .sort_values('valor', ascending = False)\ .reset_index() # Cálculo de porcentagem df_tipo['porcentagem'] = (df_tipo['valor'] / df_tipo['valor'].sum()) * 100 # Formatação dos números 'float' para dois dígitos decimais df_tipo['valor'] = df_tipo['valor'].map('{0:.2f}'.format) df_tipo['porcentagem'] = df_tipo['porcentagem'].map('{0:.2f}'.format) # Resultado df_tipo # + # Importação de bibliotecas de gráficos import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # Agrupamento de dados por despesa e vereador grafico = dataset.groupby(['despesa', 'vereador']).agg({'valor': sum})\ .sort_values('valor', ascending = False)\ .reset_index() # Divisão dos valores por 1000 (para facilitar a visualização no gráfico) grafico['valor'] = grafico['valor'] / 1000 # Alteração dos eixos dos dados grafico_pivot = grafico.pivot(index='vereador',\ columns='despesa',\ values='valor') # Criação do gráfico sns.set(font_scale=1.1) plt.figure(figsize=(18, 26)) ax = sns.heatmap(grafico_pivot,\ cmap='YlGnBu',\ annot=True,\ fmt='.2f',\ cbar=False,\ vmax=200,\ annot_kws={"size":13}) ax.set_title('CÂMARA: Gastos dos vereadores em 2017 por categoria (valores em R$ mil)') ax.set_ylabel('') ax.set_xlabel('') sns.despine() # - # Três vereadores se destacam na categoria "Correios (intermediado)", com mais de R$ 100 mil no ano: # # ``` # + <NAME> (PV) - R$ 121 mil # + <NAME> (PTB) - R$ 192,9 mil # + <NAME> (PTB) - R$ 163,6 mil # ``` # + # Segmentação dos dados por vereador e despesa anni = dataset[(dataset['vereador'] == 'ABOU ANNI') & \ (dataset['despesa'] == 'Correios (intermediado)')] # Agrupamento por mês anni_grupo = anni.groupby(['mes']).agg({'valor': sum}).reset_index() # Descrição dos gastos anni_grupo['valor'].describe() # - # Em 2017, <NAME> foi vereador por oito meses. No período, teve gasto mensal médio de R$ 15.136,69. # # O `std`, contudo, leva a inferir um padrão de gasto irregular. 
Isso é comprovado quando vemos: # # ``` # + valor mínimo: R$ 5.993,27 # + valor máximo: R$ 34.530,36 # ``` # Criação de gráfico por mês sns.set(font_scale=1.1) sns.factorplot(x='mes', y='valor', data=anni_grupo, kind='bar', margin_titles=False, size=6, aspect=2) sns.despine() # + # Segmentação dos dados por vereador e despesa amadeu = dataset[(dataset['vereador'] == '<NAME>') & \ (dataset['despesa'] == 'Correios (intermediado)')] # Agrupamento por mês amadeu_grupo = amadeu.groupby(['mes']).agg({'valor': sum}).reset_index() # Descrição dos gastos amadeu_grupo['valor'].describe() # - # <NAME> foi vereador em todos os meses de 2017. Seu gasto mensal médio com correios foi de R$ 14.527,94. # # O `std` também se aproxima de mean, o que indica padrão irregular de despesas no decorrer dos meses: # # ``` # + valor mínimo: R$ 666,37 # + valor máximo: R$ 38.524,79 # ``` # Criação de gráfico por mês sns.factorplot(x='mes', y='valor', data=amadeu_grupo, kind='bar', margin_titles=False, size=6, aspect=2) sns.despine() # + # Segmentação dos dados por vereador e despesa frange = dataset[(dataset['vereador'] == 'PAULO FRANGE') & \ (dataset['despesa'] == 'Correios (intermediado)')] # Agrupamento por mês frange_grupo = frange.groupby(['mes']).agg({'valor': sum}).reset_index() # Descrição dos gastos frange_grupo['valor'].describe() # - # Paulo Frange também ocupou o cargo de vereador durante todos os meses. Sua despesa média mensal com correios foi de R$ 13.633,47. # # O std mais elevado que o mean reflete um pico nos gastos em determinado período -- uma anomalia em comparação ao restante dos meses: # # ``` # + valor mínimo: R$ 5.420,78 # + valor máximo: R$ 90.993,12 # ``` # Criação de gráfico por mês sns.factorplot(x='mes', y='valor', data=frange_grupo, kind='bar', margin_titles=False, size=6, aspect=2) sns.despine() # --- # #### Atualização # # _3 de março de 2018_ # # A análise mostou que três vereadores gastaram, cada, mais de R$ 100 mil com correios em 2017. 
A partir das informações aqui apresentadas, pedi à Câmara, via LAI, os extratos dos vereadores para entender por que havia despesas tão altas. Os registros apontavam o motivo para os valores elevados: malas diretas. # # Malas diretas, segundo os Correios, # # > é uma mensagem publicitária com apelo promocional ou meramente informativa (propaganda), com objetivo de venda, divulgação, informação, prospecção e/ou fidelização, encaminhada a um público dirigido ou com potencial para resposta, emitida em vários exemplares idênticos ou personalizados. # # Em resumo, é, no contexto político, autopromoção. Isso é vedado pela Constituição: # # > Art. 37. A administração pública direta e indireta de qualquer dos Poderes da União, dos Estados, do Distrito Federal e dos Municípios obedecerá aos princípios de legalidade, impessoalidade, moralidade, publicidade e eficiência e, também, ao seguinte:<br><br> # > (...)<br><br> # > XXII - as administrações tributárias da União, dos Estados, do Distrito Federal e dos Municípios, atividades essenciais ao funcionamento do Estado, exercidas por servidores de carreiras específicas, terão recursos prioritários para a realização de suas atividades e atuarão de forma integrada, inclusive com o compartilhamento de cadastros e de informações fiscais, na forma da lei ou convênio.<br><br> # > § 1º A publicidade dos atos, programas, obras, serviços e campanhas dos órgãos públicos deverá ter caráter educativo, informativo ou de orientação social, dela não podendo constar nomes, símbolos ou imagens que caracterizem promoção pessoal de autoridades ou servidores públicos. # # Tínhamos, portanto, uma pauta. E ela ficou assim: [https://globoplay.globo.com/v/6543894/](https://globoplay.globo.com/v/6543894/) # # Cabe ressaltar, porém, que há divergências entre os dados apresentados na análise, que foram extraídos do SisGV Consulta (mantido pela própria Câmara), e os extratos dos Correios. 
Embora ambos sejam dados oficiais, utilizei na reportagem as informações dos Correios, por suspeitar de falha na estruturação da API do SisGV -- falha esta que repassei à assessoria da Câmara para que possa corrigir.
20200913_abraji_analisededadoscompython/src/misc/2018-02-24-vereadores_correios.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D


# + tags=[]
def clouds_single_axis(cloud_no=2, n=1200, stretch=0.5, split_dist=10):
    """Generate one coordinate axis for `n` points split into `cloud_no` clouds.

    Each cloud is a batch of standard-normal samples that is randomly
    sign-flipped and scaled by `stretch`, then shifted by its own uniform
    offset in (-split_dist, split_dist) so the clouds separate on this axis.

    Returns a flat float array of length `n`.
    NOTE(review): assumes `n` is divisible by `cloud_no`; a remainder would
    make the per-cloud slices ragged — confirm callers respect this.
    """
    # generate n random points
    x = np.random.randn(n)
    # divide into equally sized arrays, one per cloud
    chunk = int(n / cloud_no)
    x = [x[i:i + chunk] for i in range(0, n, chunk)]
    # flip (random sign) and scale each cloud so the clouds are distinct
    x = np.matmul(np.diag(((np.random.randint(2, size=cloud_no) * 2) - 1) * stretch), x)
    # shift each cloud by its own uniform offset in (-split_dist, split_dist)
    x = np.array([x[i] + ((np.repeat(np.random.rand(1), chunk) * 2) - 1) * split_dist
                  for i in range(0, cloud_no)])
    return np.squeeze(x.reshape((1, n)))


def clouds_multi_axis(cloud_no=2, naxs=3, n=1200, stretch_par=(1, 1, 1), dist_par=(5, 5, 5)):
    """Generate an (naxs, n) array of `cloud_no` point clouds in `naxs` dimensions.

    `stretch_par` / `dist_par` give the per-axis stretch and split distance
    (now tuples instead of mutable list defaults; indexed access is unchanged,
    so existing callers are unaffected).
    """
    # one shared permutation keeps each point's cloud membership aligned
    # across all axes
    arr = np.random.permutation(range(0, n))
    # generate each axis independently and apply the shared permutation
    output = []
    for i in range(0, naxs):
        gen = clouds_single_axis(cloud_no=cloud_no, n=n,
                                 stretch=stretch_par[i], split_dist=dist_par[i])
        output.append(gen[arr])
    return np.array(output)
# -

# +
t = clouds_multi_axis(cloud_no=3, n=2001)

fig = plt.figure()
# NOTE(review): Axes3D(fig) no longer auto-adds the axes on matplotlib >= 3.4;
# fig.add_subplot(projection='3d') is the modern form — confirm the pinned
# matplotlib version before changing it.
ax = Axes3D(fig)
ax.scatter(t[0][:1000], t[1][:1000], t[2][:1000], alpha=0.4, s=10)
# BUG FIX: the original passed np.mean([2]) (the constant 2.0) as the
# centroid marker's z coordinate; the intended value is the mean of the
# z-axis data, np.mean(t[2]).
ax.scatter(np.mean(t[0]), np.mean(t[1]), np.mean(t[2]),
           color='red', s=100, alpha=0.9)
# -

plt.savefig('cloud_default_test.png')

X = np.array(t).T
# DataFrame.to_csv opens and closes the file itself; the original wrapped it
# in a redundant open("w+")/close() pair that merely truncated the same path
# before pandas rewrote it and held an extra handle open.
X = pd.DataFrame(X, columns=('x', 'y', 'z'))
X.to_csv("cloud_default_test.csv", index=False)
CloudDataGenerator/Cloud_Data_Generator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 38 # language: python # name: python38 # --- # # HELLO WORLD # The cell below contains the classic "first program" of many languages. # # The script input is quite simple, we instruct the computer to print the literal # string "hello world" to standard input/output device which is the console. Here our jupyter interface intercepts to output stream and embeds it into this document. print("hello world") # ## Change Contents and Run Again # Now we try changing the contents of the literal string (everything between the quotes is the literal. I'll try with something elaborate like: # # Were gonna build a dog park, the best one ever - and the cats are gonna pay for it! print("Were gonna build a dog park, the best one ever - and the cats are gonna pay for it!") print("Were gonna build a dog park, the best one ever - and the cats are gonna pay for it!")
9-MyJupyterNotebooks/1-HelloWorld/HelloWorld.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/bs3537/DS-Unit-1-Sprint-3-Statistical-Tests-and-Experiments/blob/master/Bhav_copy_LS_DS_131_Statistics_Probability_Assignment_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="PdTVPC9wB2zb" colab_type="text" # Data Science Unit 1 Sprint 3 Assignment 1 # Apply the t-test to real data # Your assignment is to determine which issues have "statistically significant" differences between political parties in this 1980s congressional voting data. The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). Be aware - there are missing values! # # Your goals: # # Load and clean the data (or determine the best method to drop observations when running tests) # Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01 # Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01 # Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference) # Note that this data will involve 2 sample t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis. 
# # Stretch goals: # # Refactor your code into functions so it's easy to rerun with arbitrary variables # Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested) # + id="GIV5ET5hCKnu" colab_type="code" colab={} #Load and clean the data (or determine the best method to drop observations when running tests import pandas as pd import numpy as np # + id="RyiDrROjDH3Q" colab_type="code" colab={} df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data') # + id="70pS2yBsDxH-" colab_type="code" outputId="6420ca10-6fba-4e7c-8ff4-05886a38808e" colab={"base_uri": "https://localhost:8080/", "height": 195} df.head() # + id="Tmc7pZCQD9Bs" colab_type="code" outputId="7ae9cd49-b7e0-49b2-bcf3-fe7f9d28529b" colab={"base_uri": "https://localhost:8080/", "height": 34} df.shape # + id="CUkKj77RD_yC" colab_type="code" outputId="a06afcc5-bca8-4d1c-c96c-79bbabb204ca" colab={"base_uri": "https://localhost:8080/", "height": 319} df.isnull().sum() #pandas is not recognizing '?' values using this code # + id="tpHul-96EDde" colab_type="code" outputId="1c48bcfe-ace8-4f27-b432-0e17cde4e05b" colab={"base_uri": "https://localhost:8080/", "height": 34} df.isnull().values.any() # + id="kcV2P2G6MD_L" colab_type="code" colab={} # this function will replace ? values by null values df.replace('?', np.nan, inplace=True) # + id="wtQYswePMIVO" colab_type="code" outputId="d9798a57-6b09-4f2c-94c8-83024a66d9a0" colab={"base_uri": "https://localhost:8080/", "height": 319} df.isnull().sum() # + id="BnnTEhPBMN9w" colab_type="code" outputId="ee3f6754-7a00-4bda-a384-8b68ea00abd3" colab={"base_uri": "https://localhost:8080/", "height": 1000} df.dropna() # the output below is now clean with all rows with ? value have been dropped. 
# + id="bK2sUINbNDq3" colab_type="code" outputId="ff9e2094-7f65-431e-fbb8-1669a1cafa39" colab={"base_uri": "https://localhost:8080/", "height": 319} df.dtypes # + id="_em_5O8LNGI4" colab_type="code" colab={} #next step is converting categorical yes and no values to binary numeric values for statistical testing df2 = df.rename(columns = {'republican': 'Party'}) # + id="ok2-3-HFOdYr" colab_type="code" outputId="bf7d7b73-0407-427f-be6a-5f97e6b236ef" colab={"base_uri": "https://localhost:8080/", "height": 195} df2.head() # + id="Uf21oVvNOiWa" colab_type="code" colab={} df3 = df2.dropna() # + id="TxH85VsrOorE" colab_type="code" outputId="4b1d52c4-00b1-4d20-c18b-8828e8b4152d" colab={"base_uri": "https://localhost:8080/", "height": 195} df3.head() # + id="l6x9iUpCO9-V" colab_type="code" outputId="dec20e96-23f1-40c0-e2c2-4d2c23690459" colab={"base_uri": "https://localhost:8080/", "height": 319} df3.dtypes # + id="pIldS8rfRWdY" colab_type="code" outputId="ad65b3aa-1efa-41e7-8e4b-6a2c471ecfd5" colab={"base_uri": "https://localhost:8080/", "height": 550} df3.head().T # + id="pyAX8GbzU282" colab_type="code" outputId="7b801760-797d-498d-b0b2-b23ec455c406" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["n"].value_counts() # + id="1rqB3g4pVjRp" colab_type="code" colab={} cleanup_df3 = {"n": {"y": 1, "n": 0}} # + id="rohG8yFZVvYH" colab_type="code" outputId="6dba5507-9ba4-452f-ddb2-7255e3a43848" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup_df3, inplace=True) df3.head() # + id="7S-d91GJWRV7" colab_type="code" outputId="ddee7e31-bfe1-42ea-89e8-17a384b20456" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y"].value_counts() # + id="QRVD2sWHWT5y" colab_type="code" colab={} cleanup2_df3 = {"y": {"y": 1, "n": 0}} # + id="pgZ2nxRzWaId" colab_type="code" outputId="e645f17f-beb7-4b21-9761-9598a98e091d" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup2_df3, inplace=True) df3.head() # + 
id="f1-ToSW3Wq3v" colab_type="code" outputId="4764e47f-3f67-4e43-aa2e-e611597d25b1" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["n.1"].value_counts() # + id="Eo9Ux4NPWt9F" colab_type="code" colab={} cleanup3_df3 = {"n.1": {"y": 1, "n": 0}} # + id="dK2KJHYpW0oF" colab_type="code" outputId="2ad787cc-e8ca-4d69-e77e-de2440e40728" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup3_df3, inplace=True) df3.head() # + id="pxlofklcW_d9" colab_type="code" outputId="106ac2e7-90b0-4370-bf90-f2d960952054" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.1"].value_counts() # + id="9mOerIoYXCte" colab_type="code" colab={} cleanup4_df3 = {"y.1": {"y": 1, "n": 0}} # + id="OjbjkEaQXM4z" colab_type="code" outputId="026541a4-63f0-4936-f86d-a63efa891ef3" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup4_df3, inplace=True) df3.head() # + id="TawZ-YsuXVgP" colab_type="code" outputId="d2b25f8e-2a7d-49e7-8194-1afc88919d62" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.2"].value_counts() # + id="aaxDuxesXaQ1" colab_type="code" colab={} cleanup5_df3 = {"y.2": {"y": 1, "n": 0}} # + id="UxA_3qLQXfNF" colab_type="code" outputId="fde66153-7556-4976-889e-23fddcb9beea" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup5_df3, inplace=True) df3.head() # + id="9JqV0zZ1Xqln" colab_type="code" outputId="1cb57d2c-06bf-4cd6-9ed0-359234f5717a" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.3"].value_counts() # + id="XmBxvUhRXxHa" colab_type="code" colab={} cleanup6_df3 = {"y.3": {"y": 1, "n": 0}} # + id="VKW00d7SX7j1" colab_type="code" outputId="fe0745ac-c86a-4a7b-b4b5-7d3dfc050b0b" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup6_df3, inplace=True) df3.head() # + id="TpivqWv4X_X_" colab_type="code" outputId="382fdd8e-a51d-4759-d7e9-22976f1e0cb7" colab={"base_uri": "https://localhost:8080/", "height": 67} 
df3["n.2"].value_counts() # + id="4gAPJb3-YFPu" colab_type="code" colab={} cleanup7_df3 = {"n.2": {"y": 1, "n": 0}} # + id="Yxb7pmJoYOPu" colab_type="code" outputId="f003c15e-02b0-4489-c885-02f1e932f8f7" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup7_df3, inplace=True) df3.head() # + id="mpoM_pRVYVCH" colab_type="code" outputId="80a5dbd3-2e4f-4521-c69a-ada2fdbbf1c2" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["n.3"].value_counts() # + id="RlKh_51PYePH" colab_type="code" colab={} cleanup8_df3 = {"n.3": {"y": 1, "n": 0}} # + id="xjD68_T6YhNR" colab_type="code" outputId="c9bd3b0c-776d-4d90-a9b2-edeb2ca46071" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup8_df3, inplace=True) df3.head() # + id="teEvpkqqYr4O" colab_type="code" outputId="38def24a-1339-4698-a142-28eb04cc3231" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["n.4"].value_counts() # + id="Qxx6w0ZzYyWB" colab_type="code" colab={} cleanup9_df3 = {"n.4": {"y": 1, "n": 0}} # + id="_FVFyQ3pY1g-" colab_type="code" outputId="6ad65469-6a6a-471b-de68-f6e0e635806f" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup9_df3, inplace=True) df3.head() # + id="_y7bE3d4Y8Op" colab_type="code" outputId="dea8dbe7-90b6-456a-9086-5f6a8f662bbe" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.4"].value_counts() # + id="D-GCcpqWZCpZ" colab_type="code" colab={} cleanup10_df3 = {"y.4": {"y": 1, "n": 0}} # + id="KJF5VkybZLNu" colab_type="code" outputId="ac43bf9f-181b-4090-fb73-5a3067fbfc27" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup10_df3, inplace=True) df3.head() # + id="JkNGncVCZVrW" colab_type="code" outputId="3971ca7b-e772-4e1d-a3aa-7c69e525af6c" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["?"].value_counts() # + id="x4XOLkhlZb2v" colab_type="code" colab={} cleanup11_df3 = {"?": {"y": 1, "n": 0}} # + id="R5Y98In2ZiEv" 
colab_type="code" outputId="0bcfb642-62b6-48c5-d767-f7de9d807dd1" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup11_df3, inplace=True) df3.head() # + id="1Xw8xvBDZojY" colab_type="code" outputId="37820837-d236-42c7-e70a-44cee086719b" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.5"].value_counts() # + id="PwHf5M1cZuuM" colab_type="code" colab={} cleanup12_df3 = {"y.5": {"y": 1, "n": 0}} # + id="5UR1Z3pHZ1Mz" colab_type="code" outputId="5c776821-c7f1-456d-9e42-bcfda7f2a3e6" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup12_df3, inplace=True) df3.head() # + id="TWy0QxUpZ9qK" colab_type="code" outputId="4f5f6267-23e2-4909-e064-0af80c5fb6fc" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.6"].value_counts() # + id="W0Na4QuWaChm" colab_type="code" colab={} cleanup13_df3 = {"y.6": {"y": 1, "n": 0}} # + id="P3IJMDeVaJVj" colab_type="code" outputId="d01d1c29-00f5-4146-c860-d52053a638f6" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup13_df3, inplace=True) df3.head() # + id="UaNhNF55aPc8" colab_type="code" outputId="697a4889-ddd3-487a-f02b-c9438328a905" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.7"].value_counts() # + id="iUCEO1yaaUQM" colab_type="code" colab={} cleanup14_df3 = {"y.7": {"y": 1, "n": 0}} # + id="1Is-rs8Uaajp" colab_type="code" outputId="8188138f-0779-4519-e0da-c1ae06cda7a7" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup14_df3, inplace=True) df3.head() # + id="Lv0n3PDeafsM" colab_type="code" outputId="a050caf2-a581-4a15-b040-56ad0619ee7f" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["n.5"].value_counts() # + id="sO8TMP4Yar0B" colab_type="code" colab={} cleanup15_df3 = {"n.5": {"y": 1, "n": 0}} # + id="riCDpmiRavfd" colab_type="code" outputId="9fd22257-4488-4855-db7c-d847846ca592" colab={"base_uri": "https://localhost:8080/", "height": 296} 
df3.replace(cleanup15_df3, inplace=True) df3.head() # + id="FiCqX7LRa05w" colab_type="code" outputId="a19c9bba-b8b7-4881-9a66-5971dee84367" colab={"base_uri": "https://localhost:8080/", "height": 67} df3["y.8"].value_counts() # + id="0MSOia6xa-jU" colab_type="code" colab={} cleanup16_df3 = {"y.8": {"y": 1, "n": 0}} # + id="7-tBI-i0bF0G" colab_type="code" outputId="32f8b00b-d732-41d8-8306-92626506b8f7" colab={"base_uri": "https://localhost:8080/", "height": 296} df3.replace(cleanup16_df3, inplace=True) df3.head() # + id="2fZqh9f8bI1C" colab_type="code" outputId="3b385512-b47d-4b15-edf6-17182ac495a0" colab={"base_uri": "https://localhost:8080/", "height": 319} df3.dtypes # + [markdown] id="NuPAlEbabNcT" colab_type="text" # ### Thus, all the categorical variables have been coded as binary variables and can now be used for statistical analysis. # + id="KtP11lPc7yBn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="dbeb7b17-eb0e-4781-fccd-978eb0a09246" # let us look at issue of handicapped infants first # n = handicapped_infants issue #1= yes vote, n = no vote df4 = df3[['Party', 'n']] df4.head() # + id="AwFCC9Y-CVl2" colab_type="code" colab={} df4_rep = df4[df4['Party']=='republican'] df4_dem = df4[df4['Party']=='democrat'] # + id="c7cZvnblDBbi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="f6df7f8d-0de3-4acc-9b3a-724f86abc173" df5 = df4_rep[df4_rep['n']== 1] df5.head() # + id="z7nP_4IrEQzM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6bab31b2-61f5-4a33-af53-df19a789a2e5" df5.shape # + id="xxcHS1BGDbtZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="cf59eaf4-3e95-4611-f232-6b7cdf1e702b" df6 = df4_dem[df4_dem['n']== 1] df6.head() # + id="n8KCjhHTETfn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="40c170eb-a9fd-43c6-f307-3e859603983b" df6.shape # + 
id="rB6DeoI3EWup" colab_type="code" colab={} # democrats have 73 yes votes on this issue of handicapped infants while republicans have 23 yes votes on this issue which seems significant. # we have to next do an inferential test to find p value. # + id="kSPfUbznIXw9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="f6f3dc12-53a8-4d74-dace-99e5da70fde8" #creating a crosstab table for frequency distribution for issue of handicapped infants n_crosstab = pd.crosstab(df4.Party, df4.n, normalize='columns') n_crosstab # + id="aMz00m1LG7yF" colab_type="code" colab={} # since we are comparing proportions, we have to apply z test # + id="KF2j-aB1KWb2" colab_type="code" colab={} import numpy as np import scipy.stats as st import scipy.special as sp # n = number of obervations of yes and no n = 100 # h = number of yes votes for democrats on this issue h = 76 # q = null-hypothesis of no difference in yes and no votes q = .5 # + id="reO9ElsoLPrF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8954b111-72f2-431d-fe29-3e913d84b3a4" # z score calculation xbar = float(h) / n z = (xbar - q) * np.sqrt(n / (q * (1 - q))) # We don't want to display more than 4 decimals. z # + id="sh55aHZQL1XR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="34c26b8b-871f-40e7-dc59-50c99c7d4e0d" pval = 2 * (1 - st.norm.cdf(z)) pval # + id="mwUUQ1OBL8cZ" colab_type="code" colab={} #The p value here is very low and lower than 0.01, so our z score calculation was successful in calculating the p value in this case # + [markdown] id="3Vzg6lx_MLAN" colab_type="text" # ####Democrats support the issue of handicapped infants more than republicans with a p value < 0.01. 
# + id="Q_vXRbYoNTm2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="d6909379-ea83-44b9-8393-18313595befe" df3_group = df3.groupby('Party').mean() df3_group # + id="hQvqhjNyNy2d" colab_type="code" colab={} # For issue y.1 = physician-fee-freeze, republicans seem to be voting more yes, let us evaluate this using same z score method as above # + id="cGMHkinDOMuH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="8acb5619-b711-4dfc-be99-5ef6e39d6528" df7 = df3[['Party', 'y.1']] df7.head() # + id="KdnOej1dO-iJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="5b37d45d-e1f4-4c79-c736-2712f477f65c" df8 = df7.rename(columns = {'y.1' : 'phy'}) df8.head() # + id="hZiWky7rO3fi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="dd7d93c1-830f-4a47-e33b-59dd2c88a345" y1_crosstab = pd.crosstab(df8.Party, df8.phy, normalize='columns') y1_crosstab # + id="IhVyBV1NPoD5" colab_type="code" colab={} # from above output, the number of yes votes for this issue of physician fee freeze is 94% for republicans and 5.3% for democrats. # Let us now do a z test to find p value. # + id="_RVm2UhOP6BQ" colab_type="code" colab={} # n = number of obervations of yes and no n2 = 100 # h = number of yes votes for republicans on this issue h2 = 94.6 # q = null-hypothesis of no difference in yes and no votes q2 = .5 # + id="Wa8ZBsrtQMsV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ff42e784-88c5-41ff-b8e1-c736b19bf468" # z score calculation xbar = float(h2) / n2 z2 = (xbar - q2) * np.sqrt(n2 / (q2 * (1 - q2))) # We don't want to display more than 4 decimals. 
z2 # + id="Gt_OL-2YQZEj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0b71c5bd-9597-438d-8375-169cf01373d1" pval2 = 2 * (1 - st.norm.cdf(z2)) pval2 # + [markdown] id="wZbDM-ACQkzc" colab_type="text" # ### The p value is 0.0 which is <0.01, so republicans support the issue of physicians fee freeze more than democrats with p value <0.01. # + id="mYkfO81mREIb" colab_type="code" colab={} # For issue y=water-project-cost-sharing, there doesn't seem much difference in yes votes in two parties. # + id="TnTNBBRIRNFM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="e5d16c92-dd3e-4cdc-c166-8ee72dfbe6e1" df9 = df3[['Party', 'y']] df9.head() # + id="ED3m9Vq4Rrp8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="ef5cf668-4b6c-4de8-85e4-34c0c09c0a86" y2_crosstab = pd.crosstab(df9.Party, df9.y, normalize='columns') y2_crosstab # + id="hWq6FFo0R0XF" colab_type="code" colab={} # from the above cross tab, yes votes for democrats on this issue are 52.3% and for republicans are 47.6%. # Let us do a z test to find p value. # + id="zt-RrkI3SCRn" colab_type="code" colab={} # n = number of obervations of yes and no n3 = 100 # h = number of yes votes for democrats on this issue h3 = 52.3 # q = null-hypothesis of no difference in yes and no votes q3 = .5 # + id="MRgC92bXSRdn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="01cbb509-57af-4847-f231-180b5178994d" # z score calculation xbar = float(h3) / n3 z3 = (xbar - q3) * np.sqrt(n3 / (q3 * (1 - q3))) # We don't want to display more than 4 decimals. z3 # + id="H0FOVsleSfwm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="18ce2e10-518d-4207-df4d-2837f88136a0" pval3 = 2 * (1 - st.norm.cdf(z3)) pval3 # + [markdown] colab_type="text" id="kDXG_GAWTnCT" # ### The p value is 0.64 which is >0.01, so not significant. 
There is no significant difference in yes votes for republicans and democrats on the issue of water-project-cost-sharing. The p value is also >0.1.
Bhav_copy_LS_DS_131_Statistics_Probability_Assignment_.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# De-duplicating pass over a word-frequency dump: drops repeated
# '@'-prefixed UUID rows and rows that end in a trailing space, and
# renumbers the frequency-1 entries ("... i:1") with a running counter.
# NOTE(review): the loop nesting was reconstructed from an unindented dump —
# confirm against the original notebook that '@' rows are meant to fall
# through to the filters below (they do here, matching the flat source).

counter = 1          # running index used when rewriting "i:1" rows
seen_uuids = set()   # '@'-prefixed rows already recorded; set gives O(1)
                     # membership tests (the original list was O(n) per row)

# `with` guarantees both handles are closed even on error; the original
# opened the files and never closed them.
with open("text_freq", 'r', encoding='utf-8') as org_file, \
        open("text_freq_out", 'w', encoding='utf-8') as output_file:
    for row in org_file:
        # skip rows we have already seen (only '@' rows are ever recorded)
        if row in seen_uuids:
            continue
        if row.startswith('@'):
            seen_uuids.add(row)
        # drop rows ending with a space before the newline
        if row.endswith(' \n'):
            continue
        if row.endswith('i:1\n'):
            # renumber singleton-frequency rows sequentially
            output_file.write('1 i:%d\n' % counter)
            counter += 1
        else:
            output_file.write(row)
wams/us_financial_text/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Importing Libraries # + code_folding=[0] ### Importing Libraries ### import pandas as pd # data science essentials import numpy as np import matplotlib.pyplot as plt # data visualization import seaborn as sns # enhanced data visualization import statsmodels.formula.api as smf import statsmodels.api as sm # linear regression (statsmodels) from sklearn.model_selection import train_test_split # train/test split import sklearn.linear_model # different linear models import random as rand # random number generation from sklearn.neighbors import KNeighborsRegressor # KNN for Regression from sklearn.neighbors import KNeighborsClassifier # KNN for classification from sklearn.preprocessing import StandardScaler # standard scaler from sklearn.model_selection import RandomizedSearchCV # hyperparameter tuning from sklearn.linear_model import LogisticRegression # logistic regression from sklearn.metrics import roc_auc_score # auc score from sklearn.metrics import confusion_matrix # confusion matrix from sklearn.metrics import make_scorer # customizable scorer from sklearn.tree import DecisionTreeClassifier # classification trees from sklearn.tree import export_graphviz # exports graphics from sklearn.ensemble import RandomForestClassifier # random forest from sklearn.ensemble import GradientBoostingClassifier# gbm import time # + code_folding=[] run_control={"marked": false} ### Set up pandas settings and data loading ### start_time = time.time() # setting pandas print options pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 100) # Setting Data Types for the files # Train and Test dtypes_01 = {'id' : str, 'region_code' : str, 'district_code' : str} # Expected Output dtypes_02 = {'id' : str} # 
# Import CSV Files
raw_train_data_path = 'https://raw.githubusercontent.com/RLanderosR/Pump_it_UP_Data_Mining_the_Water_Table_DrivenData/main/Train_Data.csv'
raw_train_outp_path = 'https://raw.githubusercontent.com/RLanderosR/Pump_it_UP_Data_Mining_the_Water_Table_DrivenData/main/Train_Outcome.csv'
raw_test_data_path = 'https://raw.githubusercontent.com/RLanderosR/Pump_it_UP_Data_Mining_the_Water_Table_DrivenData/main/Test_Data.csv'

# dtypes_01 / dtypes_02 are declared in an earlier cell of this notebook
t_data = pd.read_csv(raw_train_data_path, dtype=dtypes_01)
out_data = pd.read_csv(raw_train_outp_path, dtype=dtypes_02)
test = pd.read_csv(raw_test_data_path, dtype=dtypes_01)

# Merging the training data with the expected outcome (labels)
train = pd.merge(t_data, out_data, on='id')

# Setting the column 'date_recorded' as datetime
train['date_recorded'] = pd.to_datetime(train['date_recorded'])
test['date_recorded'] = pd.to_datetime(test['date_recorded'])

# Save id for test dataset (needed later for the submission file)
test_id = test['id']

# +
# Preparing explanatory variable data: drop identifier and
# high-cardinality text columns that are not used for modelling.
train_base_data = train.drop(['id', 'date_recorded', 'funder',
                              'gps_height', 'installer', 'longitude',
                              'latitude', 'wpt_name', 'subvillage',
                              'ward'],
                             axis=1)

# Explanatory variables only (response column removed)
train_data = train_base_data.drop(['status_group'], axis=1)
# -

# # Functional vs Non Functional

# +
# Binary target: 1 when the pump is 'non functional', else 0.
# Vectorized -- the original per-row iterrows() loop did the same thing
# with O(n) Python-level work.
train['predict'] = (train['status_group'] == 'non functional').astype(int)

# preparing response variable
train_target = train.loc[:, 'predict']

# preparing training and testing sets (all letters are lowercase)
x_train, x_test, y_train, y_test = train_test_split(
    train_data,
    train_target,
    test_size=0.33,
    random_state=219)
# -

# quick listing of the candidate explanatory columns
for col in train_data:
    print(col, '+')

# +
# merging x_train and y_train so that they can be used in statsmodels
train_t = pd.concat([x_train, y_train], axis=1)

# Step 1: build a model.
# Exploratory OLS on the binary target, used only to inspect which
# columns carry signal -- not as the final classifier.
lm_best = smf.ols(formula="""predict ~ amount_tsh +
                                       num_private +
                                       basin +
                                       region +
                                       region_code +
                                       district_code +
                                       lga +
                                       population +
                                       public_meeting +
                                       recorded_by +
                                       scheme_management +
                                       scheme_name +
                                       permit +
                                       construction_year +
                                       extraction_type +
                                       extraction_type_group +
                                       extraction_type_class +
                                       management +
                                       management_group +
                                       payment +
                                       payment_type +
                                       water_quality +
                                       quality_group +
                                       quantity +
                                       quantity_group +
                                       source +
                                       source_type +
                                       source_class +
                                       waterpoint_type +
                                       waterpoint_type_group""",
                  data=train_t)

# Step 2: fit the model based on the data
results = lm_best.fit()

# Step 3: analyze the summary output
print(results.summary())

# + cell_style="split"
# one-hot encode to compare train/test dummy column counts
train_cols = pd.get_dummies(train_data, prefix='d_')
train_cols.shape

# + cell_style="split"
test_cols = test.drop(['id'], axis=1)
test_cols = pd.get_dummies(test_cols, prefix='d_')
test_cols.shape

# + cell_style="split"
# cardinality of each remaining column (train vs test)
train_data.nunique()

# + cell_style="split"
test.nunique()

# + run_control={"marked": false}
# ------------------------------------------------------------------
# Shared helpers: the three binary models (Non Functional, Functional
# Needs Repair, Functional) were trained with identical copy-pasted
# code; the common logic now lives here once.
# ------------------------------------------------------------------


def _tune_gbm(features, target, seed=219):
    """Split features/target 75/25, run a randomized hyperparameter
    search for a GradientBoostingClassifier on the training split, and
    return the fitted artifacts.

    Returns (search, model, pred, x_tr, x_te, y_tr, y_te) where *pred*
    holds the tuned model's predictions on the held-out 25% split.
    """
    x_tr, x_te, y_tr, y_te = train_test_split(features,
                                              target,
                                              test_size=0.25,
                                              random_state=seed)

    # hyperparameter space (identical for all three binary models)
    param_grid = {'learning_rate': np.arange(0.1, 1.3, 0.1),
                  'max_depth': np.arange(5, 9, 1),
                  'n_estimators': np.arange(20, 100, 10),
                  'warm_start': [True, False]}

    search = RandomizedSearchCV(
        estimator=GradientBoostingClassifier(random_state=seed),
        param_distributions=param_grid,
        cv=3,
        n_iter=10,
        random_state=seed,
        scoring=make_scorer(roc_auc_score, needs_threshold=False))

    # BUG FIX(review): fit the search on the TRAINING split only.  The
    # original fitted on the full dataset, so the held-out rows leaked
    # into model selection and the "test" metrics below were optimistic.
    search.fit(x_tr, y_tr)

    # printing the optimal parameters and best score
    print("Tuned Parameters :", search.best_params_)
    print("Tuned Training AUC:", search.best_score_.round(4))

    model = search.best_estimator_
    pred = model.predict(x_te)
    return search, model, pred, x_tr, x_te, y_tr, y_te


def _scores(model, x_tr, y_tr, x_te, y_te, pred):
    """Return (train accuracy, test accuracy, train-test gap, AUC)."""
    train_acc = model.score(x_tr, y_tr).round(4)
    test_acc = model.score(x_te, y_te).round(4)
    gap = abs(train_acc - test_acc).round(4)
    auc = roc_auc_score(y_true=y_te, y_score=pred).round(4)
    return train_acc, test_acc, gap, auc


def _print_report(train_acc, test_acc, gap, auc, y_te, pred):
    """Print the accuracy/AUC summary and the confusion matrix, and
    return the unpacked confusion matrix as (tn, fp, fn, tp)."""
    print('Training ACCURACY:', train_acc)
    print('Testing ACCURACY:', test_acc)
    print('Train-Test Gap :', gap)
    print('AUC Score :', auc)
    tn, fp, fn, tp = confusion_matrix(y_true=y_te, y_pred=pred).ravel()
    print(f"""
True Negatives : {tn}
False Positives: {fp}
False Negatives: {fn}
True Positives : {tp}
""")
    return tn, fp, fn, tp


# ---------------- Non Functional vs rest ----------------
# One-hot encode; the 'd__other - mkulima/shinyanga' dummy is dropped
# (it has no counterpart in the test data) to keep design matrices aligned.
NF_data = pd.get_dummies(train_data, prefix='d_')
NF_data = NF_data.drop(columns='d__other - mkulima/shinyanga', axis=1)

# response: 1 when 'non functional' (prepared earlier on `train`)
NF_target = train.loc[:, 'predict']

(NF_full_gbm_cv, NF_gbm_tuned, NF_gbm_tuned_pred,
 x_train, x_test, y_train, y_test) = _tune_gbm(NF_data, NF_target)
NF_full_gbm_cv.best_estimator_

(NF_gbm_tuned_train_acc, NF_gbm_tuned_test_acc,
 NF_gbm_tuned_test_gap, NF_gbm_tuned_auc) = _scores(
    NF_gbm_tuned, x_train, y_train, x_test, y_test, NF_gbm_tuned_pred)

(NF_gbm_tuned_tn, NF_gbm_tuned_fp,
 NF_gbm_tuned_fn, NF_gbm_tuned_tp) = _print_report(
    NF_gbm_tuned_train_acc, NF_gbm_tuned_test_acc,
    NF_gbm_tuned_test_gap, NF_gbm_tuned_auc,
    y_test, NF_gbm_tuned_pred)
# -

# # Functional vs Needs Repair

# +
# ---------------- Functional Needs Repair vs rest ----------------
# Binary target (vectorized; replaces the original iterrows() loop).
train_base_data['predict'] = (
    train_base_data['status_group'] == 'functional needs repair').astype(int)

FNR_data = train_base_data.drop(['status_group', 'predict'], axis=1)
FNR_data = pd.get_dummies(FNR_data, prefix='d_')
FNR_data = FNR_data.drop(columns='d__other - mkulima/shinyanga', axis=1)
FNR_target = train_base_data.loc[:, 'predict']

(FNR_full_gbm_cv, FNR_gbm_tuned, FNR_gbm_tuned_pred,
 x_train, x_test, y_train, y_test) = _tune_gbm(FNR_data, FNR_target)
FNR_full_gbm_cv.best_estimator_

(FNR_gbm_tuned_train_acc, FNR_gbm_tuned_test_acc,
 FNR_gbm_tuned_test_gap, FNR_gbm_tuned_auc) = _scores(
    FNR_gbm_tuned, x_train, y_train, x_test, y_test, FNR_gbm_tuned_pred)

(FNR_gbm_tuned_tn, FNR_gbm_tuned_fp,
 FNR_gbm_tuned_fn, FNR_gbm_tuned_tp) = _print_report(
    FNR_gbm_tuned_train_acc, FNR_gbm_tuned_test_acc,
    FNR_gbm_tuned_test_gap, FNR_gbm_tuned_auc,
    y_test, FNR_gbm_tuned_pred)

# +
# ---------------- Functional vs rest ----------------
train_base_data['predict'] = (
    train_base_data['status_group'] == 'functional').astype(int)

F_data = train_base_data.drop(['status_group', 'predict'], axis=1)
F_data = pd.get_dummies(F_data, prefix='d_')
F_data = F_data.drop(columns='d__other - mkulima/shinyanga', axis=1)
F_target = train_base_data.loc[:, 'predict']

(F_full_gbm_cv, F_gbm_tuned, F_gbm_tuned_pred,
 x_train, x_test, y_train, y_test) = _tune_gbm(F_data, F_target)
F_full_gbm_cv.best_estimator_

# Metrics are computed here; this model's report is printed in the
# next cell, matching the original cell layout.
(F_gbm_tuned_train_acc, F_gbm_tuned_test_acc,
 F_gbm_tuned_test_gap, F_gbm_tuned_auc) = _scores(
    F_gbm_tuned, x_train, y_train, x_test, y_test, F_gbm_tuned_pred)

# SCORING the results
# remaining evaluation output for the Functional model
print('Training ACCURACY:', F_gbm_tuned_train_acc)
print('Testing ACCURACY:', F_gbm_tuned_test_acc)
print('Train-Test Gap :', F_gbm_tuned_test_gap)
print('AUC Score :', F_gbm_tuned_auc)

# unpacking the confusion matrix
F_gbm_tuned_tn, \
F_gbm_tuned_fp, \
F_gbm_tuned_fn, \
F_gbm_tuned_tp = confusion_matrix(y_true=y_test,
                                  y_pred=F_gbm_tuned_pred).ravel()

# printing each result one-by-one
print(f"""
True Negatives : {F_gbm_tuned_tn}
False Positives: {F_gbm_tuned_fp}
False Negatives: {F_gbm_tuned_fn}
True Positives : {F_gbm_tuned_tp}
""")
# -

# # Results

# +
# Side-by-side view of the three binary predictions.
# NOTE(review): each prediction vector is indexed on its own test split,
# so aligning them positionally against train['id'] is only indicative
# -- confirm before drawing conclusions from this table.
NF_predictions = pd.DataFrame({'Non Funtional': NF_gbm_tuned_pred})
FNR_predictions = pd.DataFrame({'Funtional Needs Repair': FNR_gbm_tuned_pred})
F_predictions = pd.DataFrame({'Funtional': F_gbm_tuned_pred})

all_predictions = pd.concat([train['id'], train['status_group'],
                             NF_predictions, FNR_predictions,
                             F_predictions],
                            axis=1)
all_predictions
# -

# # Actual

# +
# Preparing the data to be forecasted.
# BUG FIX(review): the original dropped a *different* column list here
# than was used to build the training features, so the test design
# matrix did not match the columns the models were trained on.  Use the
# same drops as training, then align the dummy columns explicitly.
df_test = test.drop(['id', 'date_recorded', 'funder', 'gps_height',
                     'installer', 'longitude', 'latitude', 'wpt_name',
                     'subvillage', 'ward'],
                    axis=1)
df_test = pd.get_dummies(df_test, prefix='d_')
# add training-only dummies as all-zero columns, drop test-only dummies
df_test = df_test.reindex(columns=NF_data.columns, fill_value=0)

NF_pred = NF_gbm_tuned.predict(df_test)
FNR_pred = FNR_gbm_tuned.predict(df_test)
F_pred = F_gbm_tuned.predict(df_test)

df_response = pd.DataFrame({'id': test_id,
                            'NF_pred': NF_pred,
                            'FNR_pred': FNR_pred,
                            'F_pred': F_pred,
                            })
# -

df_response

# +
# Creating flags to enable data exploration.
# Vectorized -- the original per-row loops tested `a == 1 & b == 1`,
# which parses as `a == (1 & b) == 1` because `&` binds tighter than
# `==`; it only produced the right answer by accident for 0/1 values.
is_nf = df_response['NF_pred'] == 1
is_fnr = df_response['FNR_pred'] == 1
is_f = df_response['F_pred'] == 1

df_response['all_true'] = (is_nf & is_fnr & is_f).astype(int)
df_response['NF_FNR'] = (is_nf & is_fnr).astype(int)
df_response['NF_F'] = (is_nf & is_f).astype(int)
df_response['FNR_F'] = (is_fnr & is_f).astype(int)
# -

# rows where all three binary models fired (.loc avoids chained indexing)
df_response.loc[df_response['all_true'] == 1, 'all_true']

# +
## 0.7902 Score Submission

# Creating a copy to explore the data
explore = df_response.copy()

# Base label: 'non functional' takes precedence over 'functional needs
# repair', default is 'functional' (same order as the original if/elif).
explore['status_group'] = 'functional'
explore.loc[explore['FNR_pred'] == 1, 'status_group'] = 'functional needs repair'
explore.loc[explore['NF_pred'] == 1, 'status_group'] = 'non functional'

# Managing the values that overlap between the binary GBMs; np.select
# keeps the same precedence as the original if/elif chain:
#   all three fired            -> functional
#   NF + FNR fired             -> functional needs repair
#   NF + F fired               -> non functional
#   FNR + F fired              -> functional
overlap_conditions = [explore['all_true'] == 1,
                      explore['NF_FNR'] == 1,
                      explore['NF_F'] == 1,
                      explore['FNR_F'] == 1]
overlap_labels = ['functional',
                  'functional needs repair',
                  'non functional',
                  'functional']
explore['status_group'] = np.select(overlap_conditions, overlap_labels,
                                    default=explore['status_group'])

sumbission_test = explore.copy()
sumbission_test = sumbission_test.drop(columns=['NF_pred', 'FNR_pred',
                                                'F_pred', 'all_true',
                                                'NF_FNR', 'NF_F',
                                                'FNR_F'],
                                       axis=1)
sumbission_test.to_csv('pump_it_RLR_logic.csv',
                       index=False)
# -

# (An earlier 0.7861-score submission variant that ignored the overlap
# flags was kept here as commented-out code; removed as dead code.)

# NOTE(review): the source was truncated at this point -- the statement
# 'Overall_Score =' had no right-hand side, so it is left disabled to
# keep the script importable.
# Overall_Score =
Machine_Learning_Code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&ensp; # [Home Page](Start_Here.ipynb) # # # [Previous Notebook](Multi-stream_pipeline.ipynb) # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&ensp; # [1](Introduction_to_Deepstream_and_Gstreamer.ipynb) # [2](Getting_started_with_Deepstream_Pipeline.ipynb) # [3](Introduction_to_Multi-DNN_pipeline.ipynb) # [4](Multi-stream_pipeline.ipynb) # [5] # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # # # # Exercise : Multi-stream - Multi-DNN pipeline # # In this notebook, you will build an Multi-stream Multi-DNN pipeline using the concepts learned from the previous notebooks. # # ## Building the pipeline # # We will the using batched on the Multi-DNN network from [Notebook 3](Introduction_to_Multi-DNN_pipeline.ipynb) and combine it with the knowledge learnt in [Notebook 4](Multi-stream_pipeline.ipynb). # # # Here are the illustrations of the Pipeline # ![test2](images/test2.png) # ![test3](images/test3.png) # # Let us get started with the Notebook , You will have to fill in the `TODO` parts of the code present in the Notebook to complete the pipeline. Feel free to refer to the previous notebooks for the commands. 
# + # Import required libraries import sys sys.path.append('../source_code') import gi import configparser gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst from gi.repository import GLib from ctypes import * import time import sys import math import platform from common.bus_call import bus_call from common.FPS import GETFPS import pyds # Define variables to be used later fps_streams={} PGIE_CLASS_ID_VEHICLE = 0 PGIE_CLASS_ID_BICYCLE = 1 PGIE_CLASS_ID_PERSON = 2 PGIE_CLASS_ID_ROADSIGN = 3 MUXER_OUTPUT_WIDTH=1920 MUXER_OUTPUT_HEIGHT=1080 TILED_OUTPUT_WIDTH=1920 TILED_OUTPUT_HEIGHT=1080 OSD_PROCESS_MODE= 0 OSD_DISPLAY_TEXT= 0 pgie_classes_str= ["Vehicle", "TwoWheeler", "Person","RoadSign"] ################ Three Stream Pipeline ########### # Define Input and output Stream information num_sources = 3 INPUT_VIDEO_1 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264' INPUT_VIDEO_2 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264' INPUT_VIDEO_3 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264' OUTPUT_VIDEO_NAME = "../source_code/N4/ds_out.mp4" # - # We define a function `make_elm_or_print_err()` to create our elements and report any errors if the creation fails. # # Elements are created using the `Gst.ElementFactory.make()` function as part of Gstreamer library. 
## Make Element or Print Error and any other detail def make_elm_or_print_err(factoryname, name, printedname, detail=""): print("Creating", printedname) elm = Gst.ElementFactory.make(factoryname, name) if not elm: sys.stderr.write("Unable to create " + printedname + " \n") if detail: sys.stderr.write(detail) return elm # #### Initialise GStreamer and Create an Empty Pipeline # + for i in range(0,num_sources): fps_streams["stream{0}".format(i)]=GETFPS(i) # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create gstreamer elements */ # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline \n") # - # #### Create Elements that are required for our pipeline # # Compared to the first notebook , we use a lot of queues in this notebook to buffer data when it moves from one plugin to another. # + ########### Create Elements required for the Pipeline ########### #################### ~~~~~~~ TODO ~~~~~~~~ ################ # Source element for reading from the file # Since the data format in the input file is elementary h264 stream,we need a h264parser # Use nvdec_h264 for hardware accelerated decode on GPU ######### Defining Stream 1 source1 = h264parser1 = decoder1 = ########## ########## Defining Stream 2 source2 = h264parser2 = decoder3 = ########### ########## Defining Stream 3 source3 = h264parser3 = decoder3 = ########### #################### ~~~~~~~ END ~~~~~~~~ ################ # Create nvstreammux instance to form batches from one or more sources. 
streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer","Stream-muxer") #################### ~~~~~~~ TODO ~~~~~~~~ ################ # Use nvinfer to run inferencing on decoder's output, behaviour of inferencing is set through config file pgie = # Use nvtracker to give objects unique-ids tracker = # Seconday inference for Finding Car Color sgie1 = # Seconday inference for Finding Car Make sgie2 = # Seconday inference for Finding Car Type sgie3 = #################### ~~~~~~~ END ~~~~~~~~ ################ # Creating Tiler to present more than one streams tiler=make_elm_or_print_err("nvmultistreamtiler", "nvtiler","nvtiler") # Use convertor to convert from NV12 to RGBA as required by nvosd nvvidconv = make_elm_or_print_err("nvvideoconvert", "convertor","nvvidconv") # Create OSD to draw on the converted RGBA buffer nvosd = make_elm_or_print_err("nvdsosd", "onscreendisplay","nvosd") # Creating queue's to buffer incoming data from pgie queue1=make_elm_or_print_err("queue","queue1","queue1") # Creating queue's to buffer incoming data from tiler queue2=make_elm_or_print_err("queue","queue2","queue2") # Creating queue's to buffer incoming data from nvvidconv queue3=make_elm_or_print_err("queue","queue3","queue3") # Creating queue's to buffer incoming data from nvosd queue4=make_elm_or_print_err("queue","queue4","queue4") # Creating queue's to buffer incoming data from nvvidconv2 queue5=make_elm_or_print_err("queue","queue5","queue5") # Creating queue's to buffer incoming data from nvtracker queue6=make_elm_or_print_err("queue","queue6","queue6") # Creating queue's to buffer incoming data from sgie1 queue7=make_elm_or_print_err("queue","queue7","queue7") # Creating queue's to buffer incoming data from sgie2 queue8=make_elm_or_print_err("queue","queue8","queue8") # Creating queue's to buffer incoming data from sgie3 queue9=make_elm_or_print_err("queue","queue9","queue9") # Use convertor to convert from NV12 to RGBA as required by nvosd nvvidconv2 = 
make_elm_or_print_err("nvvideoconvert", "convertor2","nvvidconv2") # Place an encoder instead of OSD to save as video file encoder = make_elm_or_print_err("avenc_mpeg4", "encoder", "Encoder") # Parse output from Encoder codeparser = make_elm_or_print_err("mpeg4videoparse", "mpeg4-parser", 'Code Parser') # Create a container container = make_elm_or_print_err("qtmux", "qtmux", "Container") # Create Sink for storing the output sink = make_elm_or_print_err("filesink", "filesink", "Sink") # - # Now that we have created the elements ,we can now set various properties for out pipeline at this point. # # Configuration file : [pgie](N3/dstest3_pgie_config.txt) # + ############ Set properties for the Elements ############ # Set Input Video files source1.set_property('location', INPUT_VIDEO_1) source2.set_property('location', INPUT_VIDEO_2) source3.set_property('location', INPUT_VIDEO_3) # Set Input Width , Height and Batch Size streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', num_sources) # Timeout in microseconds to wait after the first buffer is available # to push the batch even if a complete batch is not formed. 
streammux.set_property('batched-push-timeout', 4000000)

# Set configuration file for nvinfer: primary detector plus the three
# secondary classifiers (car colour / make / type)
pgie.set_property('config-file-path', "../source_code/N4/dstest4_pgie_config.txt")
sgie1.set_property('config-file-path', "../source_code/N4/dstest4_sgie1_config.txt")
sgie2.set_property('config-file-path', "../source_code/N4/dstest4_sgie2_config.txt")
sgie3.set_property('config-file-path', "../source_code/N4/dstest4_sgie3_config.txt")

# Set properties of tracker from the [tracker] section of its config file
config = configparser.ConfigParser()
config.read('../source_code/N4/dstest4_tracker_config.txt')
config.sections()

for key in config['tracker']:
    if key == 'tracker-width':
        tracker.set_property('tracker-width', config.getint('tracker', key))
    if key == 'tracker-height':
        tracker.set_property('tracker-height', config.getint('tracker', key))
    if key == 'gpu-id':
        tracker.set_property('gpu_id', config.getint('tracker', key))
    if key == 'll-lib-file':
        tracker.set_property('ll-lib-file', config.get('tracker', key))
    if key == 'll-config-file':
        tracker.set_property('ll-config-file', config.get('tracker', key))
    if key == 'enable-batch-process':
        tracker.set_property('enable_batch_process',
                             config.getint('tracker', key))


def _sync_batch_size(element, label):
    """Force *element*'s batch-size to num_sources, warning when the
    value coming from its config file is overridden.  (This sequence
    was copy-pasted four times in the original.)"""
    batch_size = element.get_property("batch-size")
    print(label + " batch size :", end='')
    print(batch_size)
    if batch_size != num_sources:
        print("WARNING: Overriding infer-config batch-size", batch_size,
              " with number of sources ", num_sources, " \n")
        element.set_property("batch-size", num_sources)


_sync_batch_size(pgie, "PGIE")
_sync_batch_size(sgie1, "SGIE1")
_sync_batch_size(sgie2, "SGIE2")
_sync_batch_size(sgie3, "SGIE3")

# Set display configurations for nvmultistreamtiler: a 2x2 grid at the
# tiled output resolution
tiler_rows = int(2)
tiler_columns = int(2)
tiler.set_property("rows", tiler_rows)
tiler.set_property("columns", tiler_columns)
tiler.set_property("width", TILED_OUTPUT_WIDTH)
tiler.set_property("height", TILED_OUTPUT_HEIGHT)

# Set encoding properties and Sink configs
encoder.set_property("bitrate", 2000000)
sink.set_property("location", OUTPUT_VIDEO_NAME)
sink.set_property("sync", 0)
sink.set_property("async", 0)
# -

# We now link all the elements in the order we prefer and create a GStreamer bus to feed all messages through it.
# +
########## Add and Link Elements in the Pipeline ##########

print("Adding elements to Pipeline \n")
# NOTE: decoder2 must be created in the TODO section above.
for element in (source1, h264parser1, decoder1,
                source2, h264parser2, decoder2,
                source3, h264parser3, decoder3,
                streammux, pgie, tracker, sgie1, sgie2, sgie3,
                tiler, nvvidconv, nvosd,
                queue1, queue2, queue3, queue4, queue5,
                queue6, queue7, queue8, queue9,
                nvvidconv2, encoder, codeparser, container, sink):
    pipeline.add(element)

print("Linking elements in the Pipeline \n")


def _wire_source(source, parser, decoder, pad_name):
    """Link one file-source branch (source -> parser -> decoder) and
    connect the decoder's static src pad to the requested streammux
    sink pad.

    BUG FIX(review): the original copy-pasted this three times and the
    stream-3 branch tested `sinkpad2` instead of `sinkpad3` before
    linking; the helper removes that copy-paste hazard entirely.
    """
    source.link(parser)
    parser.link(decoder)
    sinkpad = streammux.get_request_pad(pad_name)
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)


_wire_source(source1, h264parser1, decoder1, "sink_0")
_wire_source(source2, h264parser2, decoder2, "sink_1")
_wire_source(source3, h264parser3, decoder3, "sink_2")

streammux.link(queue1)

#################### ~~~~~~~ TODO ~~~~~~~~ ################
# Link the Elements using from the pipeline illustration
# Remember to add queue after every element

#################### ~~~~~~~ END ~~~~~~~~ ################
queue8.link(nvosd)
nvosd.link(queue9)
queue9.link(nvvidconv2)
nvvidconv2.link(encoder)
encoder.link(codeparser)
codeparser.link(container)
container.link(sink)
# -

# create an event loop and feed gstreamer bus messages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", bus_call, loop)


# This callback function is the same as used in the previous notebook.
# It extracts the metadata received on the tiler's pad and updates
# params for drawing rectangles, object information etc.
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Per-buffer pad probe: counts detected objects per class, recolors
    the bounding boxes, overlays a per-frame summary line and updates
    the per-stream FPS counter.

    Returns Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    # Initializing the per-class object counter with 0
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    # Set frame_number & rectangles to draw as 0
    frame_number = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        # Get frame number, number of rectangles to draw and object metadata
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Increment the per-class count and set the box border color.
            # RGBA = (0, 0, 1, 0) is blue with alpha 0 (the original
            # comment said "Red").  NOTE(review): alpha 0 makes the
            # border fully transparent -- confirm if a visible box is
            # intended.
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        ################## Setting Metadata Display configuration ###############
        # Acquiring a display meta object
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string to print in notebook
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        ############################################################################

        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


# +
#################### ~~~~~~~ TODO ~~~~~~~~ ################
# Set Tiler source pad , Checkout Notebook 4 to understand where to attach this pad to.
tiler_src_pad= #################### ~~~~~~~ END ~~~~~~~~ ################ if not tiler_src_pad: sys.stderr.write(" Unable to get src pad \n") else: tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0) # - # Now with everything defined , we can start the playback and listen the events. # List the sources print("Now playing...") start_time = time.time() print("Starting pipeline \n") # start play back and listed to events pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # cleanup print("Exiting app\n") pipeline.set_state(Gst.State.NULL) print("--- %s seconds ---" % (time.time() - start_time)) # Convert video profile to be compatible with Jupyter notebook # !ffmpeg -loglevel panic -y -an -i ../source_code/N4/ds_out.mp4 -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 ../source_code/N4/output.mp4 # Display the Output from IPython.display import HTML HTML(""" <video width="960" height="540" controls> <source src="../source_code/N4/output.mp4" </video> """.format()) # ### Other Bootcamps # The contents of this Bootcamp originates from [OpenACC GPU Bootcamp Github](https://github.com/gpuhackathons-org/gpubootcamp). Here are some additional Bootcamp which might be of interest: # # - [DeepStream Pipeline Optimization using Profiling](https://github.com/gpuhackathons-org/gpubootcamp/tree/master/ai/DeepStream_Perf_Lab) # # ## Licensing # # This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0). 
# # [Previous Notebook](Multi-stream_pipeline.ipynb) # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&ensp; # [1](Introduction_to_Deepstream_and_Gstreamer.ipynb) # [2](Getting_started_with_Deepstream_Pipeline.ipynb) # [3](Introduction_to_Multi-DNN_pipeline.ipynb) # [4](Multi-stream_pipeline.ipynb) # [5] # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&emsp;&emsp;&emsp; # &emsp;&emsp;&ensp; # [Home Page](Start_Here.ipynb) #
ai/DeepStream/English/python/jupyter_notebook/Multi-stream_Multi_DNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import datetime import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sb from pprint import pprint from scipy import stats from sklearn import preprocessing import seaborn as sns import statsmodels.api as sm import statsmodels.tsa.api as smt from statsmodels.tsa.stattools import adfuller from pandas_profiling import ProfileReport import math # - # %matplotlib inline # + TRAIN_PATH = "/workspace/data/predict_future_sales/sales_train.csv" train_df = pd.read_csv(TRAIN_PATH) train_df['item_sum'] = train_df['item_price'] * train_df['item_cnt_day'] train_df['date'] = train_df['date'].apply(lambda x: datetime.datetime.strptime(x, "%d.%m.%Y")) train_df['month'] = train_df['date'].apply(lambda x: x.strftime("%Y%m")) # - train_df.head() # # モデル使用データ # + #元データ ts=train_df.groupby(["date_block_num"])["item_cnt_day"].sum() ts.astype('float') # トレンド除去 ts_diff = ts.diff().dropna() # 季節性除去 ts_diff_12 = ts.diff(12).dropna() # + [markdown] tags=[] # # SARIMAXモデルを構築 # # モデル = 短期の自己相関(ARMA) + 季節性分(S)+ トレンド(差分で消す)<br> # とする ※外因性は考えない # - # ## memo # # 元データのARIMA(1,0,0)= I(1)のARIMA(1,0,0)<br> # 元データのARIMA(0,1,1) = I(1)のARIMA(0,0,1)<br> # 元データのARIMA(0,1,2) = I(1)のARIMA(0,0,2)<br> # 元データのSARIMA(0,0,0)(1,1,0,12)= I(12)のSARIMA(0,0,0)(1,0,0,12)<br> # # 元データのARIMA(1,12,0) ≠ I(12)のARIMA(1,0,0)<br> # 元データのARIMA(1,2,0) ≠ I(2)のARIMA(1,0,0)<br> # 元データのARIMA(1,0,1) ≠ I(1)のARIMA(1,0,1)<br> # # 上記より、 # - ARモデル/MAモデルのみを使い、かつ1次和分過程だった場合は、元データのARIMAと一致 # - 2回以上の差分やARMAモデルにすると不一致 # 12月のみフラグ付け dec_df = train_df.copy() dec_df = dec_df.sort_values("date_block_num") dec_df = dec_df.drop_duplicates("date_block_num")['Dec_flg'] dec_df = dec_df.reset_index(drop=True) # + [markdown] tags=[] # ## 最適な次数を見つける # + tags=[] # 
総当たりで、AICが最小となるSARIMAの次数を探す max_p = 3 max_q = 3 max_sp = 1 max_sq = 1 pattern = max_p*(max_q + 1)*(max_sp + 1)*(max_sq + 1) modelSelection = pd.DataFrame(index=range(pattern), columns=["model", "aic"]) # # 自動SARIMA選択 # num = 0 # for p in range(1, max_p + 1): # for q in range(0, max_q + 1): # for sp in range(0, max_sp + 1): # for sq in range(0, max_sq + 1): # sarima = sm.tsa.SARIMAX( # ts, order=(p,1,q), # seasonal_order=(sp,1,sq,12), # enforce_stationarity = False, # enforce_invertibility = False, # ).fit() # modelSelection.iloc[num]["model"] = f"order=({p},1,{q}), season=({sp},1,{sq})" # modelSelection.iloc[num]["aic"] = sarima.aic # num = num + 1 # 自動SARIMA選択 num = 0 for p in range(1, max_p + 1): for q in range(0, max_q + 1): sarima = sm.tsa.SARIMAX( # ts_diff_12.reset_index(drop=True), ts, order=(p,12,q), enforce_stationarity = False, enforce_invertibility = False, ).fit(disp=False) modelSelection.iloc[num]["model"] = f"order=({p},12,{q}))" modelSelection.iloc[num]["aic"] = sarima.aic num = num + 1 # - modelSelection[modelSelection.aic == min(modelSelection.aic)] # ## モデル構築 # + tags=[] # sarima = sm.tsa.SARIMAX(ts, # order=(1,1,3), seasonal_order=(0,1,0,12), # enforce_stationarity = False, enforce_invertibility = False).fit() sarima = sm.tsa.SARIMAX(ts, order=(0,0,0),seasonal_order=(1,1,0,2), enforce_stationarity = False, enforce_invertibility = False).fit(disp=False) sarima.aic # + tags=[] sarima.summary() # + tags=[] # 残差のチェック residSARIMA = sarima.resid fig = plt.figure(figsize=(20,4)) ax1 = fig.add_subplot(121) fig = sm.graphics.tsa.plot_acf(residSARIMA, lags=10, ax=ax1) ax2 = fig.add_subplot(122) fig = sm.graphics.tsa.plot_pacf(residSARIMA, lags=10, ax=ax2) ax1.set_title("rawdata SARIMA acf") ax2.set_title("rawdata SARIMA pacf") # - # ## 残差がうまく取り除けていないので、元データと見比べ # + fig = plt.figure(figsize=(20,8)) ax = fig.subplots(2,1) ax[0].plot(ts.index, ts.values) ax[1].plot(residSARIMA.index, residSARIMA.values) ax[0].set_title("raw data") ax[1].set_title("SARIMA 
resid data") # - # 予測 bestPred = sarima.predict() # 実データと予測結果の図示 plt.figure(figsize=(20,10)) plt.plot(ts) plt.plot(bestPred, "r", linestyle='--') # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### 調査:月によって販売店舗数に差があるか # # 残差はほとんど定常過程っぽいが、1時点だけ異常値をだしている(13地点)<br> # 原因として、店舗数が急に拡大したか確認<br> # # <span>結論:関係なし</span> # - test = train_df.groupby("date_block_num")['shop_id'].nunique() plt.figure(figsize=(20,3)) plt.plot(test.index, test.values) plt.title("shop by month") plt.xticks(rotation=90) plt.show() # + [markdown] tags=[] # ## ARIMAのパラメータ推定 # # すでに1次和分過程で単位根であることはわかっているので、ARMAのパラメータを見つける<br> # ARMA(1,1)より、<span style="color:red;">ARIMA(1,1,1)</span>とパラメータが決まった。 # - # 差分系列への自動ARMA推定関数の実行 resDiff = sm.tsa.arma_order_select_ic(ts_diff, ic='aic', trend='n') resDiff # ### SARIMAのパラメータ推定 # + jupyter={"outputs_hidden": true} tags=[] # 総当たりで、AICが最小となるSARIMAの次数を探す max_sp = 1 max_sd = 1 pattern = (max_sp + 1)*(max_sd + 1) modelSelection = pd.DataFrame(index=range(pattern), columns=["model", "aic"]) # 自動SARIMA選択 num = 0 for sp in range(0, max_sp+1): for sq in range(0, max_sd+1): sarima = sm.tsa.SARIMAX( ts_diff, order=(0,1,1), seasonal_order=(sp,1,sq,12), enforce_stationarity = False, enforce_invertibility = False ).fit() modelSelection.iloc[num]["model"] = f"order=(1,1,1), season=({sp},1,{sq})" modelSelection.iloc[num]["aic"] = sarima.aic num = num + 1 # - modelSelection[modelSelection.aic == min(modelSelection.aic)] # ### モデル構築 # + tags=[] sarima = sm.tsa.SARIMAX(ts_diff, order=(1,1,1), seasonal_order=(0,1,1,12), enforce_stationarity = False, enforce_invertibility = False).fit() # + tags=[] sarima.summary() # + tags=[] # 残差のチェック residSARIMA = sarima.resid fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(residSARIMA, lags=12, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(residSARIMA, lags=12, ax=ax2) # - # 予測 bestPred = sarima.predict(32, 40) # 実データと予測結果の図示 plt.figure(figsize=(20,10)) plt.plot(ts_diff) 
plt.plot(bestPred, "r", linestyle='--') # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## logデータへのSARIMA適応 (SARIMAパラメータの総探し) # + jupyter={"outputs_hidden": true} tags=[] # 総当たりで、AICが最小となるSARIMAの次数を探す max_p = 3 max_q = 3 max_d = 1 max_sp = 1 max_sq = 1 max_sd = 1 pattern = max_p*(max_q + 1)*(max_d + 1)*(max_sp + 1)*(max_sq + 1)*(max_sd + 1) modelSelection = pd.DataFrame(index=range(pattern), columns=["model", "aic"]) # 自動SARIMA選択 num = 0 for p in range(1, max_p + 1): for d in range(0, max_d + 1): for q in range(0, max_q + 1): for sp in range(0, max_sp + 1): for sd in range(0, max_sd + 1): for sq in range(0, max_sq + 1): sarima = sm.tsa.SARIMAX( ts_log, order=(p,d,q), seasonal_order=(sp,sd,sq,12), enforce_stationarity = False, enforce_invertibility = False ).fit() modelSelection.iloc[num]["model"] = f"order=({p},{d},{q}), season=({sp},{sd},{sq}))" modelSelection.iloc[num]["aic"] = sarima.aic num = num + 1 # - modelSelection[modelSelection.aic == min(modelSelection.aic)] sarima = sm.tsa.SARIMAX(ts, order=(1,0,0), seasonal_order=(1,0,0,12), enforce_stationarity = False, enforce_invertibility = False).fit() # 残差のチェック residSARIMA = sarima.resid fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(residSARIMA, lags=12, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(residSARIMA, lags=12, ax=ax2) # 予測 bestPred = sarima.predict(30, 40) # 実データと予測結果の図示 plt.figure(figsize=(20,10)) plt.plot(ts) plt.plot(bestPred, "r", linestyle='--') # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## logデータへのSARIMA適応 (ARIMA→SARIMAパラメータ推定) # - ts_log=pd.DataFrame(train_df.groupby(["date_block_num"])["item_cnt_day"].sum()) ts_log['item_cnt_day'] = ts_log['item_cnt_day'].apply(lambda x: np.log(x)) # + jupyter={"outputs_hidden": true} tags=[] # 差分系列への自動ARMA推定関数の実行 resDiff = sm.tsa.arma_order_select_ic(ts_log_diff, ic='aic') resDiff # + # 総当たりで、AICが最小となるSARIMAの次数を探す max_sp = 1 max_sq = 1 max_sd = 1 pattern = (max_sp + 
1)*(max_sq + 1)*(max_sd + 1) modelSelection = pd.DataFrame(index=range(pattern), columns=["model", "aic"]) pattern # + jupyter={"outputs_hidden": true} tags=[] # 自動SARIMA選択 num = 0 for sp in range(0, max_sp+1): for sd in range(0, max_sq+1): for sq in range(0, max_sd+1): sarima = sm.tsa.SARIMAX( ts, order=(1,1,1), seasonal_order=(sp,sd,sq,12), enforce_stationarity = False, enforce_invertibility = False ).fit() modelSelection.iloc[num]["model"] = f"order=(1,1,1), season=({sp},{sd},{sq}))" modelSelection.iloc[num]["aic"] = sarima.aic num = num + 1 # - modelSelection[modelSelection.aic == min(modelSelection.aic)] # + jupyter={"outputs_hidden": true} tags=[] sarima = sm.tsa.SARIMAX(ts, order=(1,1,1), seasonal_order=(0,1,1,12), enforce_stationarity = False, enforce_invertibility = False).fit() sarima.summary() # - # 残差のチェック residSARIMA = sarima.resid fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(residSARIMA, lags=12, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(residSARIMA, lags=12, ax=ax2) # 予測 bestPred = sarima.predict(30, 40) # 実データと予測結果の図示 plt.figure(figsize=(20,10)) plt.plot(ts) plt.plot(bestPred, "r", linestyle='--') # モデルを構築しても残差が残っている・・・。<br> # # つまり、、、<br> # まだモデリングできていない成分が残っているということ!!<br> # # 残っているのは季節性。だが、季節性は主に12月が強く出るだけで他の月は出ないっぽい<br>
workspace/notebooks/ARIMA/arima_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy import stats import datetime from datetime import datetime,timedelta from itertools import chain # # Reflect Tables into SQLAlchemy ORM # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # We can view all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine) # # Exploratory Climate Analysis # Retrive the last date point last_date = (engine.execute('select date from measurement order by date desc').first())[0] last_date # Calculate the date 1 year ago from the last data point in the database year, month, day = map(int, last_date.split("-")) year_ago = datetime(year, month, day) - timedelta(days=365) print(year_ago.strftime("%Y-%m-%d")) # Design a query to retrieve the last 12 months of precipitation data and plot the results last_year_prcp = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all() last_year_prcp # + # Perform a query to retrieve the data and precipitation scores date = [row[0] for row in last_year_prcp] precipitation = [row[1] for row in last_year_prcp] # Save the query results as a Pandas DataFrame and set the 
index to the date column climate_df = pd.DataFrame({'date' : date, 'precipitation' : precipitation}).set_index('date') # Sort the dataframe by date climate_df = climate_df.sort_values('date') climate_df.head() # - # Use Pandas Plotting with Matplotlib to plot the data climate_df.plot(figsize = (10,6)) plt.xlabel("date") plt.tick_params( axis='x', which='both', # both major and minor ticks are affected labelbottom=False) # remove x ticks label plt.legend(loc = 'upper center') plt.show() # Use Pandas to calcualte the summary statistics for the precipitation data climate_df.describe() # Design a query to show how many stations are available in this dataset? session.query(func.count(Station.name)).all() # What are the most active stations? (i.e. what stations have the most rows)? # List the stations and the counts in descending order. engine.execute('select station, count(station) as count from measurement group by station order by count desc').fetchall() # Using the station id from the previous query, calculate the lowest temperature recorded, # highest temperature recorded, and average temperature of the most active station? engine.execute('select min(tobs), max(tobs), avg(tobs) from measurement where station = "USC00519281"').fetchall() # Choose the station with the highest number of temperature observations. 
engine.execute('select station, tobs from measurement where station = "USC00519281"').fetchall() # Query the last 12 months of temperature observation data for this station and plot the results as a histogram data = engine.execute('select tobs from Measurement where date >= "2016-08-23" and station = "USC00519281"').fetchall() data = [row[0] for row in data] hist_data = pd.DataFrame({'tobs': data}) # Query the last 12 months of temperature observation data for this station and plot the results as a histogram hist = hist_data.hist(bins = 12, figsize = (10,5)) plt.ylabel("Frequency") plt.title("") plt.legend(["tobs"]) # + # This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' # and return the minimum, average, and maximum temperatures for that range of dates def calc_temps(start_date, end_date): """TMIN, TAVG, and TMAX for a list of dates. Args: start_date (string): A date string in the format %Y-%m-%d end_date (string): A date string in the format %Y-%m-%d Returns: TMIN, TAVE, and TMAX """ return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\ filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() # function usage example print(calc_temps('2012-02-28', '2012-03-05')) # + # Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax # for your trip using the previous year's data for those same dates. # trip date is 2018-01-16, 2018-01-22 my_trip = (calc_temps('2017-01-16', '2017-01-22')) print(my_trip) # - # Create a dataframe using the result my_trip_df = pd.DataFrame(my_trip, columns = ['min', 'avg', 'max']) my_trip_df # Plot the results from your previous query as a bar chart. 
# Use "Trip Avg Temp" as your Title, average temperature for the y value # Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr) error = [my_trip_df['max']-my_trip_df['min']] my_trip_df.plot(kind = 'bar', y='avg', yerr=error, title='Trip Avg Temp', color='coral', alpha=0.5, figsize=(4,6), legend = '') plt.ylabel("Temp (F)") plt.tick_params( axis='x', which='both', labelbottom=False) # ### Daily Rainfall Average # Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates. # Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation engine.execute('select measurement.station, name, latitude, longitude, elevation, sum(prcp) as total_rainfall \ from measurement\ join station on measurement.station = station.station \ where date between "2017-01-16" and "2017-01-22" \ group by measurement.station order by total_rainfall desc').fetchall() # + # Create a query that will calculate the daily normals # (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day) def daily_normals(date): """Daily Normals. 
Args: date (str): A date string in the format '%m-%d' Returns: A list of tuples containing the daily normals, tmin, tavg, and tmax """ sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)] return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all() daily_normals("01-01") # + # Set the start and end date of the trip start_date = '2018-01-16' end_date = '2018-01-22' # Use the start and end date to create a range of dates my_dates = pd.date_range(start_date, end_date).strftime('%Y-%m-%d') my_dates # - # Stip off the year and save a list of %m-%d strings months_dates = pd.date_range(start_date, end_date).strftime('%m-%d') months_dates normals = [] # Loop through the list of %m-%d strings and calculate the normals for each date for date in months_dates: normal = daily_normals(date) # push each tuple of calculations into a list called `normals` normals.append(normal) normals # + # Load the previous query results into a Pandas DataFrams new_list = [x for x in chain.from_iterable(normals)] my_trip_df = pd.DataFrame(new_list, columns = ['tmin','tavg','tmax']) # Add the `trip_dates` range as the `date` index my_trip_df['date'] = my_dates my_trip_df = my_trip_df.set_index('date') my_trip_df # - # Plot the daily normals as an area plot with `stacked=False` my_trip_df.plot(kind = 'area', stacked = False, alpha = 0.25) plt.xticks(rotation = 45)
climate_starter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:my_root] # language: python # name: conda-env-my_root-py # --- # In this notebook, the goal is to perform parallelized model selection and then report final test set accuracy. # # Structure of notebook: # # * Load data # * Perform train-test split # * Define dictionary of hyperparameters # * Do cross-validation to select best hyperparameters # * Train final model # * Evaluate final model on test set, using bootstrapping to obtain confidence intervals # Load base packages # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import sys from scipy.stats import zscore, zmap from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split, KFold from itertools import product from joblib import Parallel, delayed from multiprocessing import cpu_count # - # This command lets you edit `.py` files and have the changed versions be accessed by Jupyter. # %load_ext autoreload # %autoreload 2 # Load cadre modeling package sys.path.insert(0, '../cadreModels/') from classificationBinary import binaryCadreModel sns.set_style('darkgrid') # Load `breastcancer` and then extract observations, labels, and features. Note that we're turning the labels into a rank-2 array. breastcancer = load_breast_cancer() X, Y, features = breastcancer['data'], np.expand_dims(breastcancer['target'], 1), breastcancer['feature_names'] # Map `Y` values to -1 and +1 for hinge loss Y = 2 * Y - 1 pd.DataFrame(Y)[0].value_counts() # Perform a randomized train-test split Xtr, Xte, Ytr, Yte = train_test_split(X, Y, test_size=0.2, random_state=1515) # Dictionary of hyperparameters used for model selection. We're holding the sparsity parameters `alpha_d` and `alpha_W` fixed at their default values of 0.9. 
# Hyperparameter grid explored during cross-validation. The sparsity
# parameters alpha_d and alpha_W stay at their 0.9 defaults.
scm_params = {'M': np.array([2,3]),
              'lambda_d': np.array([0.01, 0.1]),
              'lambda_W': np.array([0.01, 0.1])}

# 3-fold cross-validation index generator.
# Fixed: `random_state` has no effect (and raises a ValueError in modern
# scikit-learn) unless shuffle=True is set; shuffling was clearly the intent,
# since a seed was supplied.
kf = KFold(n_splits=3, shuffle=True, random_state=1414)

# Arguments to the SCM initialization function:
#
# * `M` -- number of cadres
# * `lambda_d` -- regularization strength hyperparameter for cadre-assignment weight `d`
# * `lambda_W` -- regularization strength hyperparameter for classification-weights `W`
# * `alpha_d` -- sparsity parameter for `d`
# * `alpha_W` -- sparsity parameter for `W`
# * `Tmax` -- number of total iterations
# * `record` -- how often during training to evaluate loss and accuracy
# * `gamma` -- cadre-assignment sharpness hyperparameter
#
# In this analysis, we're using a small `Tmax` value, but larger ones may be needed for more complex datasets.

# Required arguments to SCM fit method:
#
# * `Xtr` -- training feature values, in `np.array` format
# * `Ytr` -- training labels, in `np.array` format
#
# Optional arguments to SCM fit method:
#
# * `Xva` -- validation feature values, in `np.array` format
# * `Yva` -- validation labels, in `np.array` format
# * `names` -- list or `pd.Index` of feature names
# * `seed` -- RNG seed for parameter initialization SGD
# * `store` -- whether or not to store copy of training data in SCM object, False by default
# * `progress` -- whether or not to print diagnostics during training, False by default

# If $N$ is the number of observations, and $P$ is the number of features, `Xtr` and `Xva` should be $N \times P$ arrays, and `Ytr` and `Yva` should be $N\times1$ arrays. If the labels are supplied as rank-1 arrays instead of rank-2 arrays, TensorFlow will automatically do some broadcasting that won't reflect what you want it to be doing.
# # The fit method doesn't automatically standardize data, so, if applicable, that should be performed prior to fitting # # If `progress=True`, the printed diagnostics will be: # # Iteration Number, Loss Value, Training Accuracy, Validation Accuracy (if applicable), Time # # You can supply `Xva` and `Yva` to monitor for overfitting. # `alpha_d` and `alpha_W` should be between 0 and 1; if they are close to 1, then the parameters `d` and `W` will be more likely to be sparse. # # The SCM optimization problem sometimes suffers from ill-conditioning. When this happens, it's best to change `gamma` or `lambda_d`. I've found that `gamma=10` works fairly well for datasets with tens of non-sparse features; as dimensionality increases, it may need to be decreased. Increasing `lambda_d` will also make estimated values of `d` smaller, which helps with conditioning. scores = {'M': [], 'lambda_d': [], 'lambda_W': [], 'accuracy': [], 'loss': []} for M, l_d, l_W in product(scm_params['M'], scm_params['lambda_d'], scm_params['lambda_W']): print(M, l_d, l_W) for (tr, va) in kf.split(Xtr): ## split training data into training and validation sets x_tr, x_va, y_tr, y_va = Xtr[tr,:], Xtr[va,:], Ytr[tr,:], Ytr[va,:] ## standardize validation data with respect to training data and then standardize training data x_va = zmap(x_va, x_tr) x_tr = zscore(x_tr) ## initalize and fit SCM model object with current hyperparameters scm_mod = binaryCadreModel(M=M, lambda_d=l_d, lambda_W=l_W, Tmax=201, record=10, gamma=5.) 
scm_mod.fit(Xtr=x_tr, Ytr=y_tr, Xva=x_va, Yva=y_va, names=features, progress=False) ## update records scores['M'].append(M) scores['lambda_d'].append(l_d) scores['lambda_W'].append(l_W) scores['accuracy'].append(scm_mod.score(x_va, y_va)) scores['loss'].append(scm_mod.loss[-1]) ## transform scores in DataFrame for easy analysis scores = pd.DataFrame(scores) # Identify best hyperparameter configuration best_hyperparameters = scores.groupby(['M','lambda_W','lambda_d']).mean().sort_values('accuracy', ascending=False) best_hyperparameters # Estimate a model using all of the training data and the best hyperparameters best_M = best_hyperparameters['M'].values[0] best_l_d = best_hyperparameters['lambda_d'].values[0] best_l_W = best_hyperparameters['lambda_W'].values[0] scm_best = binaryCadreModel(M=best_M, lambda_d=best_l_d, lambda_W=best_l_W, Tmax=201, record=10, gamma=5.) x_te = zmap(Xte, Xtr) x_tr = zscore(Xtr) scm_best.fit(Xtr=x_tr, Ytr=Ytr, Xva=x_te, Yva=Yte, names=features) # We can evaluate convergence by plotting loss and accuracy pd.DataFrame({'loss': scm_best.loss, 'TrainingAccuracy': scm_best.accs, 'TestingAccuracy': scm_best.accsVa}).plot() # We can look at the values of the classification weight parameter `W`. `W` is a set of $M$ length-$P$ column vectors. The value of the $p$th component in the $m$th column quantifies the association between the predicted label and that feature. As the value becomes more positive, the feature becomes more positively associated with the `+1` label. # When we look at a plot like this, it's often informative to see what features are used similarly between cadres and which are used differently. In the plot below, for example, `texture error` is associated with class `+1` in `w1` (orange) and associated with class `-1` in `w0` (blue). Also, `worst radius` has a much stronger association with class `-1` in `w1` than it does with `w2`. 
W_df = pd.DataFrame(scm_best.W, columns=['w0','w1'], index=scm_best.columns).reset_index().assign(baseline=0) fig, ax = plt.subplots() fig.set_size_inches(20, 5) p = sns.lineplot(x='index', y='weight', hue='cadre', data=W_df.melt('index', var_name='cadre', value_name='weight')) for item in p.get_xticklabels(): item.set_rotation(45) # We can also look at the distributions of features by cadre. First we predict each training point's label and cadre. __, l_tr, __, m_tr = scm_best.predictFull(x_tr) augmented_data = pd.DataFrame(x_tr, columns=scm_best.columns).assign(cadre=m_tr) # We print counts of every (cadre, true label, predicted label) combination. Cadre 0 primarily contains `+1` points, and cadre 1 primarily contains `-1` points. pd.DataFrame({'true_y': Ytr[:,0], 'pred_y': l_tr[:,0], 'cadre': m_tr}).groupby(['cadre', 'true_y', 'pred_y']).size() # We bind the features and cadre into a single `DataFrame` and find feature means, which we plot by cadre. They are very distinct. feature_means = augmented_data.groupby('cadre').mean().reset_index().melt(id_vars='cadre', var_name='feature', value_name='mean_value') sns.lineplot(x='feature', y='mean_value', hue='cadre', data=feature_means) # The breastcancer dataset is fairly small, and training is quick. But for larger datasets, training will take longer, and it is advantageous to perform model selection by training in parallel. The main package you need for this is `joblib`, which implements parallelized `for` loops. (The common term is "embarassingly parallel".) We've also loading `multiprocessing`, but we only use it to detect how many cores we have access to. # First we see how many cores we have access to. cpu_count() # We can use some or all of these to speed up the process. # # Notes: # # * It's not always the best to use every core at once. Having to wait for each core's job to finish before moving on can produce delays. Also, TensorFlow will automatically parallelize some large matrix computations, I believe. 
So forcing each core to train a separate model can result in slower training times. # * It looks like Jupyter has access to 16 cores. Node-03 on the server has 48, although you have to run that through the command line. # Redefine hyperparameters and cross-validation setting. In practice, you'd want to use 10 or 20 folds. scm_params = {'M': np.array([2,3]), 'lambda_d': np.array([0.01, 0.1]), 'lambda_W': np.array([0.01, 0.1])} kf = KFold(n_splits=3, random_state=1414) # First we define a function that trains a single model and returns its validation set accuracy. def scmCrossval(Xtr, Ytr, Xva, Yva, Tmax, M, a_W, l_W, a_d, l_d, gamma, features, fold): ## standardize validation data with respect to training data and then standardize training data x_va = zmap(Xva, Xtr) x_tr = zscore(Xtr) ## initalize and fit SCM model object with current hyperparameters scm_mod = binaryCadreModel(M=M, alpha_d=a_d, alpha_W=a_W, lambda_d=l_d, lambda_W=l_W, Tmax=Tmax, record=10, gamma=gamma) scm_mod.fit(Xtr=x_tr, Ytr=Ytr, names=features) ## extract final loss value loss = scm_mod.loss[-1] ## calculate training set accuracy tra_acc = scm_mod.score(x_tr, Ytr) ## calculate validation set accuracy val_acc = scm_mod.score(x_va, Yva) ## return everything as a list return fold, M, a_W, l_W, a_d, l_d, gamma, loss, tra_acc, val_acc # Now we invoke `joblib` to do the parallelized training. `joblib`'s `Parallel` function is the workhorse here. It's syntax is kind of verbose and confusing, unfortunately. First we describe the type of job we do, then we specify the function that is to be parallelized (wrapping it in `delayed`), and then we specify the parallelized functions arguments. # # The parallelization backend we use is `"threading"`, as opposed to the default of `"multiprocessing"`. My experience is that `"threading"` works better when each parallelized function call (i.e., `scmCrossVal` call) is fairly memory-intensive. 
Setting `verbose=11` ensures that you are notified each time a job completes. # + n_jobs = 8 a_d = 0.9 a_W = 0.9 gamma = 5. Tmax = 201 scores = (Parallel(n_jobs=8, backend='threading', verbose=11)(delayed(scmCrossval) (Xtr[tr,:], Ytr[tr,:], Xtr[va,:], Ytr[va,:], Tmax, M, a_W, l_W, a_d, l_d, gamma, features, fold) for (M, l_d, l_W, (fold, (tr, va))) in product(scm_params['M'], scm_params['lambda_d'], scm_params['lambda_W'], enumerate(kf.split(Xtr))))) # - # `Parallel` returns out cross-validation results as a list of tuples. So we need to reshape everything a `pd.DataFrame` for easier comparisons. results = {'fold': [], 'M': [], 'a_W': [], 'l_W': [], 'a_d': [], 'l_d': [], 'gamma': [], 'loss': [], 'training_acc': [], 'validation_acc': []} for fold, M, a_W, l_W, a_d, l_d, gamma, loss, tra_acc, val_acc in scores: results['fold'].append(fold) results['M'].append(M) results['a_W'].append(a_W) results['l_W'].append(l_W) results['a_d'].append(a_d) results['l_d'].append(l_d) results['gamma'].append(gamma) results['loss'].append(loss) results['training_acc'].append(tra_acc) results['validation_acc'].append(val_acc) results = pd.DataFrame(results) results.drop('fold', axis=1).groupby(['M','a_W','l_W','a_d','l_d','gamma']).mean().sort_values('validation_acc', ascending=False) # Now we can choose optimal hyperparameters as before and train a final model.
examples/old/ClassificationExampleParallel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:PythonCPU] # language: python # name: conda-env-PythonCPU-py # --- from __future__ import print_function import time import numpy as np import pandas as pd from sklearn.datasets import fetch_mldata from sklearn.decomposition import PCA from sklearn.manifold import TSNE # %matplotlib inline import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import seaborn as sns mnist = fetch_mldata("MNIST original") X = mnist.data / 255.0 y = mnist.target print(X.shape, y.shape) feat_cols = [ 'pixel'+str(i) for i in range(X.shape[1]) ] df = pd.DataFrame(X,columns=feat_cols) df['y'] = y df['label'] = df['y'].apply(lambda i: str(i)) X, y = None, None print('Size of the dataframe: {}'.format(df.shape)) # For reproducability of the results np.random.seed(42) rndperm = np.random.permutation(df.shape[0]) plt.gray() fig = plt.figure( figsize=(16,7) ) for i in range(0,15): ax = fig.add_subplot(3,5,i+1, title="Digit: {}".format(str(df.loc[rndperm[i],'label'])) ) ax.matshow(df.loc[rndperm[i],feat_cols].values.reshape((28,28)).astype(float)) plt.show()
Notebook/AdditionalNotebooks/embedding1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Week 1. Course Outline # --- # # 1. Deployment # 2. Modeling # 3. Data # 4. MLOps (Machine Learning Operations) is an emerging discipline, and comprises a set of tools and principles to support progress through the ML project lifecycle. # # # # ##### ML project lifecycle is an iterative process, where during a later stage we might go back to an earlier stage. (That’s why we call it a cycle--it’s a circular process.) # <img src = "https://i.gyazo.com/ddcd0e0435ecdbcd133fa03e804e29f4.png"> # <img src = "https://i.gyazo.com/99ec218022d7f868aecb3c5bc8525934.png">
Course 1. Introduction to Machine Learning in Production/1.1 The ML Project Lifecycle.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import tensorflow as tf

# List every variable stored in the checkpoint so we can see what needs renaming.
variables_in_checkpoint = tf.train.list_variables('transducer-rnn-base/model.ckpt')
variables_in_checkpoint
# -

reader = tf.train.NewCheckpointReader('transducer-rnn-base/model.ckpt')

# +
# Rebuild every checkpoint variable under a name that carries the
# 'transducer/' scope; names that already contain it are kept unchanged.
new_checkpoint_vars = {}
for var_name in reader.get_variable_to_shape_map():
    print(var_name)
    renamed = var_name if 'transducer/' in var_name else 'transducer/' + var_name
    new_checkpoint_vars[renamed] = tf.Variable(reader.get_tensor(var_name))
# -

new_checkpoint_vars

# +
# Initialize the freshly created variables and write them out
# under the new names as a separate checkpoint.
init = tf.global_variables_initializer()
saver = tf.train.Saver(new_checkpoint_vars)

with tf.Session() as sess:
    sess.run(init)
    saver.save(sess, 'transducer-rnn-base/model-rename.ckpt')
pretrained-model/stt/transducer-rnn-lm/rename-checkpoint-rnn-transducer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import datajoint as dj import pandas as pd from datetime import date import numpy as np from matplotlib import pyplot as plt from scipy import signal from datetime import date,datetime,timedelta # Establish connection dj.config['database.host'] = '172.16.31.10' dj.config['database.user'] = 'yonib' dj.config['database.password'] = '<PASSWORD>' dj.conn() # configure a schema for testing stuff schema = dj.schema('yonib_observatory_test',locals()) # + @schema class Genotype(dj.Lookup): definition = """ genotype:varchar(255) """ contents = zip(['Pvalb-IRES-Cre/wt;Ai32(RCL-ChR2(H134R)_EYFP)/wt', 'Sst-IRES-Cre/wt;Ai32(RCL-ChR2(H134R)_EYFP)/wt', 'Vip-IRES-Cre/wt;Ai32(RCL-ChR2(H134R)_EYFP)/wt', 'wt/wt']) @schema class SessionType(dj.Lookup): definition = """ session_type:varchar(255) """ contents = zip(['brain_observatory_1.1', 'functional_connectivity']) @schema class Mouse(dj.Manual): definition = """ # mouse information specimen_id: bigint # unique mouse ID --- sex:enum('M','F','U') # Sex: Male, Female, Unkown -> Genotype dob:date """ # In my present formulation, things like channel and probe counts # and area ID can be found with queries but aren't included in the # Sessions table. 
@schema
class Session(dj.Manual):
    # One recording session per mouse; `has_nwb` flags downloadable NWB data.
    definition = """
    session_id:bigint
    ---
    ->Mouse
    session_datetime:datetime
    ->SessionType
    publication_datetime:datetime
    has_nwb:bool
    isi_experiment_id:bigint
    """


@schema
class ProbePhase(dj.Lookup):
    # Neuropixels hardware generation.
    definition = """
    probe_phase:varchar(255)
    """
    contents = zip(['3a', 'PXI'])


@schema
class Probe(dj.Manual):
    # One Neuropixels probe insertion within a session.
    definition = """
    probe_id:bigint
    ---
    ->Session
    ->ProbePhase
    probe_name:varchar(10)
    air_channel_index:int
    surface_channel_index:int
    sampling_rate:float
    lfp_sampling_rate:float
    """


@schema
class BrainStructure(dj.Lookup):
    # CCF structure acronyms; 'nan' is a literal placeholder for unassigned
    # channels (see ChannelIngest, which stringifies missing acronyms).
    definition = """
    brain_structure:varchar(10)
    """
    contents = zip(['APN', 'BMAa', 'CA1', 'CA2', 'CA3', 'COAa', 'COApm', 'CP',
                    'DG', 'Eth', 'HPF', 'IGL', 'IntG', 'LD', 'LGd', 'LGv',
                    'LP', 'LT', 'MB', 'MGd', 'MGm', 'MGv', 'MRN', 'NOT',
                    'OLF', 'OP', 'PF', 'PIL', 'PO', 'POL', 'POST', 'PP',
                    'PPT', 'PRE', 'PoT', 'ProS', 'RPF', 'RT', 'SCig', 'SCiw',
                    'SCop', 'SCsg', 'SCzo', 'SGN', 'SUB', 'TH', 'VIS',
                    'VISal', 'VISam', 'VISl', 'VISli', 'VISmma', 'VISmmp',
                    'VISp', 'VISpm', 'VISrl', 'VL', 'VPL', 'VPM', 'ZI',
                    'grey', 'nan'])


@schema
class Channel(dj.Manual):
    # One electrode site on a probe, with its CCF localization (nullable).
    definition = """
    channel_id:bigint
    ---
    ->Probe
    ->BrainStructure
    structure_id = null:float
    local_index:int
    probe_horizontal_position:int
    probe_vertical_position:int
    anterior_posterior_ccf_coordinate = null:float
    dorsal_ventral_ccf_coordinate = null:float
    left_right_ccf_coordinate=null:float
    """


@schema
class Unit(dj.Manual):
    # Sorted unit with its quality metrics.
    # NOTE(review): 'pt_ration', 'l_ration' and 'presence_ration' are
    # misspellings of '*_ratio'; they are kept verbatim because renaming
    # attributes would change the stored schema and break existing data.
    definition = """
    unit_id:bigint
    ---
    ->Channel
    pt_ration = null:float
    amplitude = null:float
    amplitude_cutoff = null:float
    cumulative_drift = null:float
    d_prime = null:float
    duration = null:float
    firing_rate = null:float
    halfwidth = null:float
    isi_violations = null:float
    isolation_distance = null:float
    l_ration = null:float
    max_drift = null:float
    nn_hit_rate = null:float
    nn_miss_rate = null:float
    presence_ration = null:float
    quality = null:varchar(10)
    recovery_slope = null:float
    repolarization_slope = null:float
    silhouette_score = null:float
    snr = null:float
    spread = null:float
    velocity_above = null:float
    velocity_below = null:float
    """


# I would prefer to have spiketrain data be part of the unit,
# But this is going to make more sense if we don't load all NWB files
@schema
class SpikeTrain(dj.Manual):
    # Spike timestamps for a unit, stored as a blob.
    definition = """
    ->Unit
    ---
    spike_ts:longblob
    """


@schema
class LFP(dj.Manual):
    # LFP trace for a channel.
    definition = """
    ->Channel
    ---
    lfp_sampling_rate:float
    lfp:longblob
    """


dj.ERD(schema)

# +
# This notation is borrowed from the mesoscale folks.
# I am assuming that it is best practices?
@schema
class SessionCSV(dj.Manual):
    definition = """
    session_csv:varchar(255)
    """


@schema
class SessionIngest(dj.Imported):
    definition = """
    ->SessionCSV
    """

    def make(self, key):
        """Load the sessions CSV and populate Mouse and Session."""
        # For now, there is only one session file.
        self.insert1({'session_csv': key['session_csv']}, skip_duplicates=True)
        df = pd.read_csv(key['session_csv'], index_col='id')
        for session_id, row in df.iterrows():
            session_datetime = datetime.strptime(row['date_of_acquisition'],
                                                 "%Y-%m-%dT%H:%M:%S%z")
            publication_datetime = datetime.strptime(row['published_at'],
                                                     "%Y-%m-%dT%H:%M:%S%z")
            specimen_id = row['specimen_id']
            # Add the mouse data; dob is back-computed from age at acquisition.
            mouse_data = {'specimen_id': row['specimen_id'],
                          'sex': row['sex'],
                          'genotype': row['genotype'],
                          'dob': session_datetime.date() - timedelta(row['age_in_days'])}
            Mouse().insert1(mouse_data, skip_duplicates=True)
            # Add the Session data
            session_data = {'session_id': session_id,
                            'specimen_id': row['specimen_id'],
                            'session_datetime': session_datetime,
                            'publication_datetime': publication_datetime,
                            'session_type': row['session_type'],
                            'has_nwb': row['has_nwb'],
                            'isi_experiment_id': row['isi_experiment_id'],
                            }
            Session().insert1(session_data, skip_duplicates=True)


@schema
class ProbeCSV(dj.Manual):
    definition = """
    probe_csv:varchar(255)
    """


@schema
class ProbeIngest(dj.Imported):
    definition = """
    ->ProbeCSV
    """

    def make(self, key):
        """Load the probes CSV and populate Probe."""
        self.insert1({'probe_csv': key['probe_csv']}, skip_duplicates=True)
        df = pd.read_csv(key['probe_csv'], index_col='id')
        for probe_id, row in df.iterrows():
            # Add the probe
            probe_data = {'probe_id': probe_id,
                          'session_id': row['ecephys_session_id'],
                          'probe_phase': row['phase'],
                          'probe_name': row['name'],
                          'air_channel_index': row['air_channel_index'],
                          'surface_channel_index': row['surface_channel_index'],
                          'sampling_rate': row['sampling_rate'],
                          'lfp_sampling_rate': row['lfp_sampling_rate']}
            Probe().insert1(probe_data, skip_duplicates=True)


@schema
class ChannelCSV(dj.Manual):
    definition = """
    channel_csv:varchar(255)
    """


# Note the difference in the insert commands between this Channel code and the
# code above. Before, tables were small enough for repeat insert calls. Here,
# we needed to break things down to a single bulk call. This switches it from
# taking "so long yoni stopped waiting" to ~20 seconds to run.
@schema
class ChannelIngest(dj.Imported):
    definition = """
    ->ChannelCSV
    """

    def make(self, key):
        """Load the channels CSV and bulk-insert into Channel."""
        self.insert1({'channel_csv': key['channel_csv']}, skip_duplicates=True)
        df = pd.read_csv(key['channel_csv'], index_col='id')
        channel_data_array = []
        for channel_id, row in df.iterrows():
            channel_data = {'channel_id': channel_id,
                            'probe_id': row['ecephys_probe_id'],
                            # str() maps missing acronyms (NaN) to 'nan',
                            # which exists in the BrainStructure lookup.
                            'brain_structure': str(row['ecephys_structure_acronym']),
                            'local_index': row['local_index'],
                            'probe_horizontal_position': row['probe_horizontal_position'],
                            'probe_vertical_position': row['probe_vertical_position'],
                            'anterior_posterior_ccf_coordinate': row['anterior_posterior_ccf_coordinate'],
                            'dorsal_ventral_ccf_coordinate': row['dorsal_ventral_ccf_coordinate'],
                            'left_right_ccf_coordinate': row['left_right_ccf_coordinate'],
                            'structure_id': row['ecephys_structure_id']}
            channel_data_array.append(channel_data)
        Channel().insert(channel_data_array)


@schema
class UnitCSV(dj.Manual):
    definition = """
    unit_csv:varchar(255)
    """


# Inserting all units at once kept dropping the connection, so the insert is
# checkpointed in batches.
UNIT_INSERT_BATCH = 1000  # rows per insert; tune if the connection still drops


@schema
class UnitIngest(dj.Imported):
    definition = """
    ->UnitCSV
    """

    def make(self, key):
        """Load the units CSV and insert into Unit in batches."""
        self.insert1({'unit_csv': key['unit_csv']}, skip_duplicates=True)
        df = pd.read_csv(key['unit_csv'], index_col='id')
        unit_data_array = []
        for unit_id, row in df.iterrows():
            # Keys on the left use the (misspelled) schema attribute names;
            # values on the right use the CSV's correctly spelled columns.
            unit_data = {'unit_id': unit_id,
                         'channel_id': row['ecephys_channel_id'],
                         'pt_ration': row['PT_ratio'],
                         'amplitude': row['amplitude'],
                         'amplitude_cutoff': row['amplitude_cutoff'],
                         'cumulative_drift': row['cumulative_drift'],
                         'd_prime': row['d_prime'],
                         'duration': row['duration'],
                         'firing_rate': row['firing_rate'],
                         'halfwidth': row['halfwidth'],
                         'isi_violations': row['isi_violations'],
                         'isolation_distance': row['isolation_distance'],
                         'l_ration': row['l_ratio'],
                         'max_drift': row['max_drift'],
                         'nn_hit_rate': row['nn_hit_rate'],
                         'nn_miss_rate': row['nn_miss_rate'],
                         'presence_ration': row['presence_ratio'],
                         'quality': row['quality'],
                         'recovery_slope': row['recovery_slope'],
                         'repolarization_slope': row['repolarization_slope'],
                         'silhouette_score': row['silhouette_score'],
                         'snr': row['snr'],
                         'spread': row['spread'],
                         'velocity_above': row['velocity_above'],
                         'velocity_below': row['velocity_below'],
                         }
            unit_data_array.append(unit_data)
            # Flush a full batch to keep each transaction small.
            if len(unit_data_array) >= UNIT_INSERT_BATCH:
                Unit().insert(unit_data_array)
                unit_data_array = []
        # Insert anything that wasn't checkpointed (skip the call when the
        # row count was an exact multiple of the batch size).
        if unit_data_array:
            Unit().insert(unit_data_array)


# There is a super annoying bug whereby if you don't draw the table,
# then it won't work
# dj.ERD(schema) is effectively the same as a "commit" call
dj.ERD(schema)
# -

# %%timeit -n 1 -r 1
SessionCSV.insert1({'session_csv':
                    'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\sessions.csv'},
                   skip_duplicates=True)
SessionIngest.populate()

# %%timeit -n 1 -r 1
ProbeCSV.insert1({'probe_csv':
                  'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\probes.csv'},
                 skip_duplicates=True)
ProbeIngest.populate()

Probe()

# %%timeit -n 1 -r 1
ChannelCSV.insert1({'channel_csv':
                    'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\channels.csv'},
                   skip_duplicates=True)
ChannelIngest.populate()

Channel()

# %%timeit -n 1 -r 1
UnitCSV.insert1({'unit_csv':
                 'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\units.csv'},
                skip_duplicates=True)
UnitIngest.populate()
datajoint/Allen_Data_DataJoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exploratory Data Analysis - GitHub repo's issues # + import json import numpy as np import pandas as pd import datetime import warnings warnings.filterwarnings('ignore') # # Suppress pandas's warning # warnings.simplefilter(action='ignore', category=Warning) ## https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas # pd.options.mode.chained_assignment = None # default='warn' # - # ## Data requirements # Below is a list of interested data that we will use in the initial analysis # * How long the issues open # * First comment response time # * Number of comments # * Number of participants # * Labels associated with each issue # * Milestone # Later will be done in the later part - Find data for each year where # * Top words in question # * Top words in answers # * Top participants + Type and company # ## Cleaning data - High-level data # # * Extract only interested columns which are # [ # 'issue_id', # 'title', # 'contents', # 'authorLogin', # 'authorAssociation', # 'createdAt', # 'closedAt', # 'closed', # 'comments_count', # 'comments_data', # 'participants_count', # 'labels', # 'milestone' # ] # # * Cast datetime columns to ['createdAt', 'closedAt'] datetime # * Create a new column 'closedDuration_days' to store a timediff between the columns ['closedAt', 'createdAt'] in days # * Additional columns for comments data # * Create a new column 'firstCommentCreatedAt' to store a datetime object the first comment is created # * Create a new column 'firstCommentDuration_days' to store a timediff between the columns 'createdAt' and the creation time of the first comment in days # * New columns for the first comment author info ['firstCommentAuthor', 'firstCommentAuthorAssociation'] # * New columns for total number 
# of unique comment authors based on authorAssociation ['num_unique_comment_author_MEMBER', 'num_unique_comment_author_CONTRIBUTOR', 'num_unique_comment_author_COLLABORATOR', 'num_unique_comment_author_NONE']
# * Extract 'milestone' title and replace the 'milestone' column with this value
# * Perform one-hot encoding to the label column

# Author associations we tally, in output-column order.
_AUTHOR_ASSOCIATIONS = ('MEMBER', 'CONTRIBUTOR', 'COLLABORATOR', 'NONE')


def save_df_to_xlsx(df, xlsx_filename):
    '''
    Save the input dataframe to the specified file path.

    Note that the xlsx is needed to create visualizations in Tableau because
    it could not properly read the csv data when the contents contain ','
    '''
    writer = pd.ExcelWriter(xlsx_filename, engine='xlsxwriter',
                            options={'remove_timezone': True})
    df.to_excel(writer, index=False, sheet_name='Sheet1')
    # close() flushes and writes the file; ExcelWriter.save() is deprecated
    # and removed in modern pandas.
    writer.close()


def get_comment_info_columns(row):
    '''
    Extract first-comment info and unique-author counts from an issue row.

    Input argument:
        row: a mapping with at least 'comments_count' and, when that count is
             positive, 'comments_data' (list of dicts with 'createdAt',
             'authorLogin', 'authorAssociation').

    Output argument: a list containing the following information
        [
            'firstCommentCreatedAt',
            'firstCommentAuthor',
            'firstCommentAuthorAssociation',
            'num_unique_comment_author_MEMBER',
            'num_unique_comment_author_CONTRIBUTOR',
            'num_unique_comment_author_COLLABORATOR',
            'num_unique_comment_author_NONE'
        ]
    '''
    if row['comments_count'] > 0:
        first = row['comments_data'][0]
        out = [first['createdAt'], first['authorLogin'], first['authorAssociation']]
        # Unique authors are identified by the (login, association) pair.
        unique_authors = {(c['authorLogin'], c['authorAssociation'])
                          for c in row['comments_data']}
        counts = {assoc: 0 for assoc in _AUTHOR_ASSOCIATIONS}
        for _login, assoc in unique_authors:
            counts[assoc] += 1
        out += [counts[assoc] for assoc in _AUTHOR_ASSOCIATIONS]
    else:
        # No comments: no first-comment info, zero authors of every type.
        out = [None, None, None, 0, 0, 0, 0]
    return out


# +
def create_highlevel_df(json_path):
    '''
    Extract github issues data from the specified path and output an
    extracted summary dataframe (durations, first-comment info, milestone
    title, and one-hot encoded labels).
    '''
    # Load json data and create dataframe
    with open(json_path) as json_file:
        data_raw = json.load(json_file)
    df_raw = pd.DataFrame.from_dict(data_raw)

    # Let's get only columns that we need for the analysis now
    new_cols = [
        'issue_id', 'title', 'contents', 'authorLogin', 'authorAssociation',
        'createdAt', 'closedAt', 'closed', 'comments_count',
        'participants_count', 'comments_data', 'labels', 'milestone'
    ]
    df_out = df_raw[new_cols]

    # --- Cast columns to datetime
    df_out[['createdAt', 'closedAt']] = df_out[['createdAt', 'closedAt']].apply(pd.to_datetime)

    # --- Find closedDuration_days (timedelta -> fractional days, 2 decimals)
    get_diff_days = lambda s: (s.dt.total_seconds() / (24 * 60 * 60)).round(2)
    df_out['closedDuration_days'] = get_diff_days(df_out['closedAt'] - df_out['createdAt'])

    # --- Find comment info
    cols_first_comments = [
        'firstCommentCreatedAt', 'firstCommentAuthor',
        'firstCommentAuthorAssociation', 'num_unique_comment_author_MEMBER',
        'num_unique_comment_author_CONTRIBUTOR',
        'num_unique_comment_author_COLLABORATOR',
        'num_unique_comment_author_NONE'
    ]
    df_out[cols_first_comments] = pd.DataFrame(
        df_out.apply(get_comment_info_columns, axis=1).values.tolist(),
        index=df_out.index)
    df_out['firstCommentCreatedAt'] = pd.to_datetime(df_out['firstCommentCreatedAt'])
    df_out['firstCommentDuration_days'] = get_diff_days(
        df_out['firstCommentCreatedAt'] - df_out['createdAt'])

    # --- Extract 'milestone' title and replace the 'milestone' column with it
    df_out['milestone'] = df_out['milestone'].apply(lambda x: x['title'] if x else '')

    # --- Perform one hot encoding for the labels column.
    # sorted() makes the label-column order deterministic (a plain set gave an
    # arbitrary order on every run).
    list_labels = sorted({lab for row_labels in df_out['labels'] for lab in row_labels})
    dict_labels = {lab: [lab in row_labels for row_labels in df_out['labels']]
                   for lab in list_labels}
    # index=df_out.index keeps the one-hot frame aligned even when df_out does
    # not have a default RangeIndex.
    df_out = pd.concat(
        [df_out, pd.DataFrame.from_dict(dict_labels).set_index(df_out.index)],
        axis=1)

    # Prepend the newly added label columns with 'label_'
    df_out = df_out.rename(columns={lab: 'label_' + lab for lab in list_labels})

    # --- Reorder columns
    final_cols = [
        'issue_id', 'title', 'contents', 'authorLogin', 'authorAssociation',
        'createdAt', 'closed', 'closedAt', 'closedDuration_days', 'milestone',
        'participants_count', 'comments_count', 'firstCommentCreatedAt',
        'firstCommentDuration_days', 'firstCommentAuthor',
        'firstCommentAuthorAssociation', 'num_unique_comment_author_MEMBER',
        'num_unique_comment_author_CONTRIBUTOR',
        'num_unique_comment_author_COLLABORATOR',
        'num_unique_comment_author_NONE', 'labels'
    ]
    # Then, append the one hot encoding's label columns
    final_cols = final_cols + ['label_' + lab for lab in list_labels]
    return df_out[final_cols]
# -


# +
def main():
    """Summarize every repo's issues and save per-repo and combined files."""
    list_libs = [
        'qunit', 'mocha', 'jest', 'jasmine', 'funcunit', 'puppeteer', 'cypress'
    ]

    all_df = {}
    for cur_lib in list_libs:
        print('***** [{}] START repo#{}" *****'.format(str(datetime.datetime.now()), cur_lib))
        json_path = '../data/github_repo_issues_{}.json'.format(cur_lib)
        df_cur = create_highlevel_df(json_path)
        all_df[cur_lib] = df_cur
        print('|-- Total records: ', df_cur.shape[0])

        # --- Save data to csv
        csv_filename = 'temp/repo_issue_summary_{}.csv'.format(cur_lib)
        print('|-- Save data to "{}"...'.format(csv_filename))
        df_cur.to_csv(csv_filename, index=False)

        # --- Save data to xlsx since the csv is not read properly in Tableau
        # due to ',' in the contents. Reuse the helper instead of duplicating
        # the ExcelWriter boilerplate inline.
        xlsx_filename = "temp/repo_issue_summary_{}.xlsx".format(cur_lib)
        print('|-- Save data to "{}"...'.format(xlsx_filename))
        save_df_to_xlsx(df_cur, xlsx_filename)

    # Combine all repos into one frame (without the per-repo one-hot columns,
    # which differ between repos).
    cols_final_no_encoding = [
        'repo', 'issue_id', 'title', 'contents', 'authorLogin',
        'authorAssociation', 'createdAt', 'closed', 'closedAt',
        'closedDuration_days', 'milestone', 'participants_count',
        'comments_count', 'firstCommentCreatedAt', 'firstCommentDuration_days',
        'firstCommentAuthor', 'firstCommentAuthorAssociation',
        'num_unique_comment_author_MEMBER',
        'num_unique_comment_author_CONTRIBUTOR',
        'num_unique_comment_author_COLLABORATOR',
        'num_unique_comment_author_NONE', 'labels'
    ]
    df_combined = pd.DataFrame(columns=cols_final_no_encoding)
    for cur_lib in list_libs:
        df_temp = all_df[cur_lib]
        df_temp['repo'] = cur_lib
        df_combined = pd.concat([df_combined, df_temp[cols_final_no_encoding]],
                                ignore_index=True)

    # Save the combined dataframe to xlsx
    save_df_to_xlsx(df_combined, 'temp/repo_issue_summary_combined.xlsx')


if __name__ == '__main__':
    main()
# -
data_wrangling/create_repo_summary_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # The Canonical Ensemble # ====================== # # Most physical systems are not isolated, but exchange energy with the # environment. Since the system is very small compared to the environment, # we consider that the environment acts effectively as a heat reservoir or # heat bath at a fixed temperature $T$. If a small system is put in # thermal contact with the heat bath, it will reach thermal equilibrium # exchanging energy until the system attains the temperature of the bath. # # Imagine an infinitely large number of mental copies of the system and # the heat bath. The probability $P_s$ that the system is found in a # microstate $s$ with energy $s$ is given by: # $$P_s=\frac{1}{Z}e^{-E_s/k_BT}, # $$ where $Z$ is the normalization constant. This # corresponds to the canonical ensemble. Since $\sum P_s = 1$, we have # $$Z=\sum_s{e^{-E_s/k_BT}}, # $$ where the sum is over all the possible microstates of the # system. This equation defines the “partition function” of the system. # # We can use (\[c\_boltz\]) to obtain the ensemble average of physical # quantities of interest. For instance, the mean energy is given by: # $$\langle E \rangle = \sum_s E_s\, P_s=\frac{1}{Z}\sum_s{E_s\,e^{-\beta # E_s}},$$ with $\beta=1/k_BT$. # # The Metropolis algorithm # ------------------------ # # We want to obtain an estimate for the mean value of an observable $A$: # $$\langle A \rangle = \sum_s A_s e^{-\beta E_s}/\sum_s e^{-\beta E_s},$$ # where $E_s$ and $A_s$ are the values of the energy and the quantity $A$ # in the configuration $s$. 
The idea of using Monte Carlo consists in # sampling a subset of configuration and approximating the average by the # mean over the sample: # $$\langle A \rangle \simeq \sum_s^{m} A_s e^{-\beta E_s}/\sum_s^{m} # e^{-\beta E_s},$$ where the sampling is over $m$ configurations. # # A crude Monte Carlo procedure is to generate a configuration at random, # calculate $E_s$ and $A_s$, and the contributions of this configuration # to the sums. This is equivalent to the “hit and miss” Monte Carlo method # for evaluating integrals. We have seen that this approach is very # inefficient, because the configurations generated would likely be very # improbable and contribute very little to the sum. Instead, we want to # generate a sample of configurations that are <span>*important*</span>, # <span>*i. e.*</span> have large contributions to the sums. This is # precisely the equivalent to “importance sampling”. Hence, we need to # generate the configurations according to a probability distribution. In # this case, the most convenient one is not other than the Boltzmann # probability itself $P_s$ (\[c\_boltz\]). Since we will average over the # $m$ configurations generated with this probability, we must use the # expression: # $$\langle A \rangle \simeq \sum_s^{m} \frac{A_s}{P_s} e^{-\beta # E_s}/\sum_s^{m} \frac{1}{P_s}e^{-\beta E_s} # = \frac{1}{m}\sum_s^{m}A_s$$ # # The idea of the Monte Carlo algorithm consists in performing a random # walk over the space of configurations. The walker “hops” from a # configuration $i$ to another $j$ using the “transition probability” # $$W=\min{\left(1,\frac{P_j}{P_j}\right)}.$$ Replacing by the # corresponding expression, we obtain: # $$W=\min{\left(1,e^{-\beta(E_j-E_i)}\right)}.$$ # # Since we are only interested in the ratio $P_j/P_j$, it is not necessary # to know the normalization constant $Z$. Although we have picked this # expression for the transition probability $W$, is not the only choice. 
# It can be shown that the only requirement is that $W$ satisfies the # “detailed balance” condition: # $$W(i \rightarrow j)e^{-\beta E_i} = W(j \rightarrow i)e^{-\beta E_j}.$$ # # Another comon choice in the literature is given by: # $$W(i\rightarrow j)=\frac{1}{e^{-\beta (E_j-E_i)}+1}.$$ Note that if # $\Delta E=0$, then $W=1/2$ and the trial configuration has an equal # probability of being accepted. # # The pseudocode for a Monte Carlo simulation can be outlined as follows: # # 1. Establish an initial configuration. # # 2. Make a random trial change in the configuration. For example, choose # a spin at random and try to flip it. Or choose a particle at random # and attempt to displace it a random distance. # # 3. Compute the change in the energy of the system $\Delta E$ due to the # trial change. # # 4. If $\Delta E \leq 0$, accept the new configuration and go to step 8. # # 5. If $\Delta E$ is positive, compute the “transition probability” # $W=e^{-\beta \Delta E}$. # # 6. Generate a random number $r$ in the interval $[0,1]$. # # 7. If $r \leq W$, accept the new configuration; otherwise retain the # previous configuration. # # 8. Repeat steps (2) to (8) to obtain a sufficient number of # configurations or “trials”. # # 9. Compute averages over configurations which are statistically # independent of each other. # # ### Important conditions for validity # # A Monte Carlo algorithm must satisfy detailed balance, but also **Ergodicity**. This means that the possible moves should guarantee that the system will explore the entire phase space. If there are regions of phase space that are not accessible via local moves, for instance, one should implement global moves or more complex update strategies. # # ### Exercise 11.1: Classical gas in 1D # # In this case, we assume that the particles do not interact and # the particle velocities are continuous and unbounded. The # energy is the sum of the kinetic energies of the individual # particles. 
Hence, for an ideal gas, the only coordinates of interest are
# Compute the probability $P(E)dE$ for the system of $N$ particles to
# have a total energy between $E$ and $E+dE$. Do you expect $P(E)$ to
# be proportional to $e^{-\beta E}$? Plot $P(E)$ as a function of $E$
# and describe the qualitative behavior of $P(E)$. Does the plot of
# $\ln{(P(E))}$ yield a straight line?
#
# 6. Compute the mean energy for $T=10$, $20$, $30$, $90$, $100$ and
# $110$ and estimate the heat capacity.
#
# 7. Compute the mean square energy fluctuations $\langle \Delta E^2
# \rangle = \langle E^2 \rangle - \langle E \rangle ^2$ for $T=10$ and
# $T=40$. Compare the magnitude of the ratio $\langle \Delta E^2
# \rangle/T^2$ with the heat capacity determined in the previous item.

# +
# %matplotlib inline
import numpy as np
from matplotlib import pyplot

# Metropolis sampling of an ideal 1D gas: each "walker" is one particle
# velocity, and the configuration energy is sum(v**2) (units with k_B = 1).
nwalkers = 20
v = np.zeros(nwalkers)
v.fill(10)   # all particles start at v0 = 10
T = 40       # bath temperature
delta = 10   # maximum trial velocity change


def metropolis(vold):
    """One Metropolis update of a single particle velocity.

    Proposes v' = v + (2r - 1) * delta and accepts with probability
    min(1, exp(-(v'^2 - v^2) / T)); returns the (possibly unchanged) velocity.
    """
    global delta, T
    vtrial = np.random.random()
    vtrial = vold + (2 * vtrial - 1) * delta
    weight = np.exp(-(vtrial ** 2 - vold ** 2) / T)
    vnew = vold
    if weight >= 1:
        # Energy decreased (or stayed put): always accept.
        vnew = vtrial
    elif weight != 0:
        r = np.random.random()
        if r <= weight:
            # Uphill move accepted with probability `weight`.
            vnew = vtrial
    return vnew


# Warmup loop: let the system equilibrate before taking measurements.
Nwarmup = 1000
Ewarmup = np.zeros(Nwarmup)
Naccept = 0.
for j in range(Nwarmup):
    for i in range(nwalkers):
        vold = v[i]
        v[i] = metropolis(v[i])
        if v[i] != vold:
            Naccept += 1
    Ewarmup[j] = np.sum(v ** 2) / nwalkers

x = np.arange(0, Nwarmup, 1)
pyplot.plot(x, Ewarmup, ls='-', c='blue');
# Fixed: the original used Python 2 `print` statements, which are a syntax
# error under the Python 3 interpreters this notebook now runs on.
print("Acceptance ratio= ", Naccept / float(Nwarmup * nwalkers))

# Measurement loop: accumulate per-particle <E> and <E^2>
# (E2 uses v**4 because the per-particle energy is v**2).
Nmeasure = 100000
Naccept = 0.
E = 0.
E2 = 0.
for j in range(Nmeasure):
    for i in range(nwalkers):
        vold = v[i]
        v[i] = metropolis(v[i])
        if v[i] != vold:
            Naccept += 1
    E += np.sum(v ** 2)
    E2 += np.sum(v ** 4)

E = E / Nmeasure / nwalkers
E2 = E2 / Nmeasure / nwalkers
print("<Energy>=", E)
print("<Energy^2>=", E2)
print("Error=", np.sqrt((E2 - E ** 2) / Nmeasure / nwalkers))
print("Acceptance ratio= ", Naccept / float(Nmeasure * nwalkers))
# -

# #### Challenge 11.1
#
# Exercise 11.1, items 4-7
11_01_canonical.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''mlbootcamp'': conda)'
#     name: python3
# ---

import os
import sys
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import math
import numpy as np
import pandas as pd
import datetime
import pytz
import joblib
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot

# +
# Auto-reload to directly apply changes
# %load_ext autoreload
# %autoreload 2

# Display all columns
pd.set_option("display.max_columns", None)
# -

# # Import data
#

# +
# Load data and index it by timestamp.
df = pd.read_csv("C://user/awesome_datascientist/myproject3/mvp/data/input_data.csv")
df = df.set_index("data_index_")

# Set timezone
df.index = pd.to_datetime(df.index).tz_convert("Europe/Amsterdam")
# -

# # Data exploration


# Visualise results
def plot_figure_load(df):
    """Return a plotly figure with the actual energy demand over time."""
    axis_style = dict(showgrid=True, gridcolor='lightgrey',
                      zeroline=True, zerolinecolor='lightgrey')
    fig = go.Figure(make_subplots(shared_xaxes=True, vertical_spacing=0.02))
    actuals = go.Scatter(
        x=df.index,
        y=df["load_actuals_mw"],
        name="Actual",
        legendgroup="Actual",
        showlegend=True,
        line_color="green",
        opacity=0.5,
    )
    fig.add_trace(actuals)
    fig.update_layout(
        title={'text': "Energy demand", 'y': 0.95, 'x': 0.5,
               'xanchor': 'center', 'yanchor': 'top'},
        autosize=False,
        width=800,
        height=800,
        paper_bgcolor='white',
        plot_bgcolor='white',
    )
    fig.update_yaxes(title_text="Demand [MW]", title_standoff=30,
                     title_font=dict(size=12), **axis_style)
    fig.update_xaxes(title_text="Date", **axis_style)
    return fig


fig = plot_figure_load(df)
fig.show()

# # 1. Batch model

# ## 1A. Baseline model
#
# Because demand has a strong daily and weekly pattern, the baseline model is
# a seasonal naive forecast. The predicted demand is the demand at the same
# time of the day 1 week ago.
# +
# Baseline model: Shifted by 1 week
df_load_baseline = df.copy(deep=True).reset_index()[["load_actuals_mw"]]
df_load_baseline["data_index_"] = pd.to_datetime(
    df.reset_index()["data_index_"]
) + datetime.timedelta(weeks=1)
df_load_baseline = df_load_baseline.set_index("data_index_").rename(
    columns={"load_actuals_mw": "baseline"}
)
# -

# +
# Plot baseline prediction next to the actuals.
fig = plot_figure_load(df)
fig.add_trace(
    go.Scatter(
        x=df_load_baseline.index,
        y=df_load_baseline["baseline"],
        name="Baseline model",
        legendgroup="Baseline model",
        showlegend=True,
        line_color="orange",
        opacity=0.5,
    ),
    col=1,
    row=1,
)
# -

# ## 1B. ML model


# +
def add_fourier_features(df, column_name, period, n, period_name="f"):
    """Append n Fourier terms of df[column_name] with the given period.

    Columns '{period_name}_{i}' alternate sin/cos at increasing harmonic
    order (1,1,2,2,3,3,...), letting a linear model fit a smooth periodic
    profile. Returns df (mutated in place).
    """
    t = df[column_name]
    for i in range(n):
        j = math.ceil((i + 1) / 2)  # harmonic order
        if i % 2:
            df[f'{period_name}_{i}'] = np.cos(j * 2 * np.pi * t / period)
        else:
            df[f'{period_name}_{i}'] = np.sin(j * 2 * np.pi * t / period)
    return df


def create_workday_weekend_features(df, fourier_order):
    """Split the weather and Fourier features into workday/weekend variants.

    A sample counts as a workday when it is not a holiday and not a
    Saturday (5) or Sunday (6). Returns (workday_data, weekend_data) dicts
    of feature-name -> masked series.
    """
    df['is_workday'] = (~(df.is_holiday.astype(bool)
                          | (df.day_of_week == 5)
                          | (df.day_of_week == 6)))
    base_features = (['temperature', 'solar_ghi']
                     + [f'f_quarter_{f}' for f in range(fourier_order)])
    workday_data = {
        f'workday_{k}': df[k] * df.is_workday.astype(int) for k in base_features
    }
    weekend_data = {
        f'weekend_{k}': df[k] * (~df.is_workday).astype(int) for k in base_features
    }
    return workday_data, weekend_data


# add Fourier features to capture daily pattern in model
fourier_order = 6
df = add_fourier_features(df, "quarter_of_day", 4 * 24, fourier_order, "f_quarter")

# split workdays and weekend/holidays
workday_data, weekend_data = create_workday_weekend_features(df, fourier_order)
df_linregr = pd.DataFrame(
    {**workday_data, **weekend_data, "load": df["load_actuals_mw"]}
)

# List the input feature columns
feat_columns = list(workday_data.keys()) + list(weekend_data.keys())
# -

# + tags=[]
# Walk-forward evaluation: fit on the trailing training window, predict the
# next test block, then slide the window forward.
number_of_training_days = 30
number_of_test_days = 30
test_start_date_run_i = df_linregr.index.min() + datetime.timedelta(
    days=number_of_training_days
)
test_end_date = df_linregr.index.max()
run_results = []

while test_start_date_run_i <= test_end_date:
    print(f"Start of prediction of this run: {test_start_date_run_i}")
    # split train/test set
    df_train = df_linregr[
        test_start_date_run_i
        - datetime.timedelta(days=number_of_training_days) : test_start_date_run_i
    ]
    df_test = df_linregr[
        test_start_date_run_i : test_start_date_run_i
        + datetime.timedelta(days=number_of_test_days)
    ]

    lr = LinearRegression()
    lr.fit(df_train[feat_columns], df_train["load"])
    y_pred = lr.predict(df_test[feat_columns])

    # Collect this run's results; concatenated once after the loop because
    # DataFrame.append was deprecated and removed in pandas 2.0.
    run_results.append(pd.DataFrame({"load": df_test["load"], "pred": y_pred}))

    # Adjust start date of test set for next run
    test_start_date_run_i = test_start_date_run_i + datetime.timedelta(
        days=number_of_test_days
    )

df_result = pd.concat(run_results) if run_results else pd.DataFrame()
# -

# +
# Plot the linear-regression prediction next to the actuals.
fig = plot_figure_load(df)
fig.add_trace(
    go.Scatter(
        x=df_result.index,
        y=df_result["pred"],
        name="Linear regression model",
        legendgroup="Linear regression model",
        showlegend=True,
        line_color="magenta",
        opacity=0.5,
    ),
    col=1,
    row=1,
)
# -

# # 2. Real time model
#
# Deploying your model real-time will beat even advanced ML batch models in
# performance. Therefore the model is kept simple.
# Real-time baseline: persistence forecast — each 15-minute interval is
# predicted to equal the previous one.
shifted_timestamps = pd.to_datetime(
    df.reset_index()["data_index_"]
) + datetime.timedelta(minutes=15)

df_load_rt = df.copy(deep=True).reset_index()[["load_actuals_mw"]]
df_load_rt["data_index_"] = shifted_timestamps
df_load_rt = (
    df_load_rt
    .set_index("data_index_")
    .rename(columns={"load_actuals_mw": "baseline"})
)

# +
# Overlay the baseline on the actual-load figure
fig = plot_figure_load(df)

# Add the shifted series as an extra trace
baseline_trace = go.Scatter(
    x=df_load_rt.index,
    y=df_load_rt["baseline"],
    name="Baseline model",
    legendgroup="Baseline model",
    showlegend=True,
    line_color="orange",
    opacity=0.5,
)
fig.add_trace(baseline_trace, col=1, row=1)
bootcamp/capstone/notebooks/Untitled12.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 0. Setup # Please make sure your environment is set up according to the instructions here: https://github.com/NASA-NAVO/aas_workshop_2020_winter/blob/master/00_SETUP.md # # Ensure you have the latest version of the workshop material by updating your environment: # TBD # # 1. Overview # NASA services can be queried from Python in multiple ways. # * Generic Virtual Observatory (VO) queries. # * Call sequence is consistent, including for non-NASA resources. # * Use the `pyvo` package: https://pyvo.readthedocs.io/en/latest/ # * Known issues/caveats: https://github.com/NASA-NAVO/aas_workshop_2020_winter/blob/master/KNOWN_ISSUES.md # * Astroquery interfaces # * Call sequences not quite as consistent, but follow similar patterns. # * See https://astroquery.readthedocs.io/en/latest/ # * Informal Q&A session Tuesday, 5:30pm-6:30pm, NumFocus booth # * Ad hoc archive-specific interfaces # # # 2. 
VO Services # This workshop will introduce 4 types of VO queries: # * **VO Registry** - Discover what services are available worldwide # * **Simple Cone Search** - Search for catalog object within a specified cone region # * **Simple Image Access** - Search for image products within a spatial region # * **Simple Spectral Access** - Search for spectral products within a spatial region # * **Table Access** - SQL-like queries to databases # # ## 2.1 Import Necessary Packages # + # Generic VO access routines import pyvo as vo # For specifying coordinates and angles from astropy.coordinates import SkyCoord from astropy.coordinates import Angle from astropy import units as u # For downloading files from astropy.utils.data import download_file # Ignore unimportant warnings import warnings warnings.filterwarnings('ignore', '.*Unknown element mirrorURL.*', vo.utils.xml.elements.UnknownElementWarning) # - # ## 2.1 Look Up Services in VO Registry # Simple example: Find Simple Cone Search (conesearch) services related to SWIFT. services = vo.regsearch(servicetype='conesearch', keywords=['swift']) services # ### 2.1.1 Use different arguments/values to modify the simple example # | Argument | Description | Examples | # | :-----: | :----------- | :-------- | # | **servicetype** | Type of service | `conesearch` or `scs` for **Simple Cone Search**<br> `image` or `sia` for **Simple Image Access**<br> `spectrum` or `ssa` for **Simple Spectral Access**<br> `table` or `tap` for **Table Access Protocol**| # | **keyword** | List of one or more keyword(s) to match service's metadata. 
Both ORs and ANDs may be specified.<br><ul><li>(OR) A list of keywords match a service if **any** of the keywords match the service.</li><li>(AND) If a keyword contains multiple space-delimited words, **all** the words must match the metadata.</li></ul>| `['galex', 'swift']` matches 'galex' or 'swift'<br>`['hst survey']` matches services mentioning both 'hst' and 'survey' | # | **waveband** | Resulting services have data in the specified waveband(s) | ‘radio’, ‘millimeter’, ‘infrared’, ‘optical’, ‘uv’, ‘euv’, ‘x-ray’ ‘gamma-ray’ | # # ### 2.1.2 Inspect the results. # #### Using pyvo # Although not lists, `pyvo` results can be iterated over to see each individual result. The results are specialized based on the type of query, providing access to the important properties of the results. Some useful accessors with registry results are: # * `short_name` - A short name # * `res_title` - A more descriptive title # * `res_description` - A more verbose description # * `reference_url` - A link for more information # * `ivoid` - A unique identifier for the service. Gives some indication of what organization is serving the data. # Print the number of results and the 1st 4 short names and titles. print(f'Number of results: {len(services)}\n') for s in list(services)[:4]: # (Treat services as list to get the subset of rows) print(f'{s.short_name} - {s.res_title}') # #### Filtering results # Of the services we found, which one(s) have 'stsci.edu' in their unique identifier? stsci_services = [s for s in services if 'stsci.edu' in s.ivoid] for s in stsci_services: print (f'(STScI): {s.short_name} - {s.res_title}') # #### Using astropy # With the `to_table()` method, `pyvo` results can also be converted to Astropy `Table` objects which offer a variety of addional features. See http://docs.astropy.org/en/stable/table/ for more on working with Astropy Tables. 
# + # Convert to an Astropy Table services_table = services.to_table() # Print the column names and display 1st 3 rows with a subset of columns print(f'\nColumn Names:\n{services_table.colnames}\n') services_table['short_name', 'res_title', 'res_description'][:3] # - # ## 2.2 Cone search # Example: Find a cone search service for the USNO-B catalog and search it around M51 with a .1 degree radius. (More inspection could be done on the service list instead of blindly choosing the first service.) # # The position (`pos`) is best specified with `SkyCoord` objects (see http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html). # # The size of the region is specified with the `radius` keyword and may be decimal degrees or an Astropy `Angle` (http://docs.astropy.org/en/stable/api/astropy.coordinates.Angle.html#astropy.coordinates.Angle). m51_pos = SkyCoord.from_name("m51") services = vo.regsearch(servicetype='conesearch', keywords='usno-b') results = services[0].search(pos=m51_pos, radius=0.1) # Astropy Table is useful for displaying cone search results. results.to_table() # ## 2.3 Image search # Example: Find an image search service for GALEX, and search it around coordinates 13:37:00.950,-29:51:55.51 (M83) with a radius of .2 degrees. Download the first file in the results. # #### Find an image service services = vo.regsearch(servicetype='image', keywords=['galex']) services.to_table()['ivoid', 'short_name', 'res_title'] # #### Search one of the services # The first service looks good. Search it! # # For more details on using `SkyCoord` see http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html#astropy.coordinates.SkyCoord # # **NOTE**: For image searches, the size of the region is defined by the `size` keyword which is more like a diameter than a radius. # + m83_pos = SkyCoord('13h37m00.950s -29d51m55.51s') results = services[1].search(pos=m83_pos, size=.2) # We can look at the results. 
results.to_table() # - # #### Download an image # For the first result, print the file format and download the file. If repeatedly executing this code, add `cache=True` to `download_file()` to prevent repeated downloads. # # See `download_file()` documentation here: https://docs.astropy.org/en/stable/api/astropy.utils.data.download_file.html#astropy.utils.data.download_file print(results[0].format) file_name = download_file(results[0].getdataurl()) file_name # ## 2.4 Spectral search # Example: Find a spectral service for x-ray data. Query it around Delta Ori with a search **diameter** of 10 arc minutes, and download the first data product. Note that the results table can be inspected for potentially useful columns. # # Spectral search is very similar to image search. In this example, note: # * **`diameter`** defines the size of the search region # * `waveband` used in `regsearch()` # * Astropy `Angle` used to specify radius units other than degrees. # + # Search for a spectrum search service that has x-ray data. services = vo.regsearch(servicetype='spectrum', waveband='x-ray') # Assuming there are services and the first one is OK... results = services[0].search(pos=SkyCoord.from_name("Delta Ori"), diameter=Angle(10 * u.arcmin)) # Assuming there are results, download the first file. print(f'Title: {results[0].title}, Format: {results[0].format}') file_name = download_file(results[0].getdataurl()) file_name # - # ## 2.5 Table search # Example: Find the HEASARC Table Access Protocol (TAP) service, get some information about the available tables. services = vo.regsearch(servicetype='tap', keywords=['heasarc']) print(f'{len(services)} service(s) found.') # We found only one service. Print some info about the service and its tables. 
print(f'{services[0].describe()}') tables = services[0].service.tables # Queries for details of the service's tables print(f'{len(tables)} tables:') for t in tables: print(f'{t.name:30s} - {t.description}') # A more succinct option than t.describe() # #### Column Information # For any table, we can list the column names and descriptions. for c in tables['zcat'].columns: print(f'{c.name:30s} - {c.description}') # #### Perform a Query # Example: Perform a cone search on the ZCAT catalog at M83 with a 1.0 degree radius. # + coord = SkyCoord.from_name("m83") query = f''' SELECT ra, dec, Radial_Velocity, radial_velocity_error, bmag, morph_type FROM public.zcat as cat where contains(point('ICRS',cat.ra,cat.dec),circle('ICRS',{coord.ra.deg},{coord.dec.deg},1.0))=1 ''' results = services[0].service.run_async(query) results.to_table() # - # # 3. Astroquery # Many archives have Astroquery modules for data access, including: # # * [HEASARC Queries (astroquery.heasarc)](https://astroquery.readthedocs.io/en/latest/heasarc/heasarc.html) # * [HITRAN Queries (astroquery.hitran)](https://astroquery.readthedocs.io/en/latest/hitran/hitran.html) # * [IRSA Image Server program interface (IBE) Queries (astroquery.ibe)](https://astroquery.readthedocs.io/en/latest/ibe/ibe.html) # * [IRSA Queries (astroquery.irsa)](https://astroquery.readthedocs.io/en/latest/irsa/irsa.html) # * [IRSA Dust Extinction Service Queries (astroquery.irsa_dust)](https://astroquery.readthedocs.io/en/latest/irsa/irsa_dust.html) # * [JPL Spectroscopy Queries (astroquery.jplspec)](https://astroquery.readthedocs.io/en/latest/jplspec/jplspec.html) # * [MAST Queries (astroquery.mast)](https://astroquery.readthedocs.io/en/latest/mast/mast.html) # * [NASA ADS Queries (astroquery.nasa_ads)](https://astroquery.readthedocs.io/en/latest/nasa_ads/nasa_ads.html) # * [NED Queries (astroquery.ned)](https://astroquery.readthedocs.io/en/latest/ned/ned.html) # # For more, see https://astroquery.readthedocs.io/en/latest/ # # ## 3.1 
NED # Example: Get an Astropy Table containing the objects from paper 2018ApJ...858...62K. For more on the API, see https://astroquery.readthedocs.io/en/latest/ned/ned.html from astroquery.ned import Ned objects_in_paper = Ned.query_refcode('2018ApJ...858...62K') objects_in_paper
QuickReference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # [View in Colaboratory](https://colab.research.google.com/github/scarimp/Pucktest/blob/master/my_second_colabtfl.ipynb) # + id="bSXpVM1hCz38" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="fa2bb78d-3c4f-4ed0-ee11-d5b3d5965600" # !pip install --upgrade tensorflow # + id="HWLHCG9QDLNu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="7c48dae9-98af-43ce-b6c8-e4a6847492c0" # !pip install tflearn # + id="ouc4sa6PFn5s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 94} outputId="437484c2-ce72-4275-e5f5-996a61852920" from __future__ import absolute_import, division, print_function import tflearn import tflearn.datasets.mnist as mnist # + id="Cpy78nzUGmS8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="79de95c8-c900-4a46-d7c9-24cbbe8a8ab9" X, Y, testX, testY = mnist.load_data(one_hot=True) # + id="ple3chN7G2_i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 508} outputId="e91869b5-dda8-4bcf-8dbe-0ea545b3e793" print(X[:10], Y[:20]) # + [markdown] id="Z6Z770QjKFn6" colab_type="text" # # + [markdown] id="UhIvA2RxJpQU" colab_type="text" # The **softmax function at ** :https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/ # # + id="TQulJyLyFiBq" colab_type="code" colab={} from __future__ import absolute_import, division, print_function import tflearn import tflearn.datasets.mnist as mnist # MNIST Data X, Y, testX, testY = mnist.load_data(one_hot=True) # Model input_layer = tflearn.input_data(shape=[None, 784], name='input') dense1 = tflearn.fully_connected(input_layer, 128, name='dense1') dense2 = tflearn.fully_connected(dense1, 
256, name='dense2') # softmax = tflearn.fully_connected(dense2, 10, activation='softmax') regression = tflearn.regression(softmax, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy') # Define classifier, with model checkpoint (autosave) model = tflearn.DNN(regression, checkpoint_path='model.tfl.ckpt') # Train model, with model checkpoint every epoch and every 200 training steps. model.fit(X, Y, n_epoch=1, validation_set=(testX, testY), show_metric=True, snapshot_epoch=True, # Snapshot (save & evaluate) model every epoch. snapshot_step=500, # Snapshot (save & evalaute) model every 500 steps. run_id='model_and_weights') # --------------------- # Save and load a model # --------------------- # Manually save model model.save("model.tfl") # Load a model model.load("model.tfl") # Or Load a model from auto-generated checkpoint # >> model.load("model.tfl.ckpt-500") # Resume training model.fit(X, Y, n_epoch=1, validation_set=(testX, testY), show_metric=True, snapshot_epoch=True, run_id='model_and_weights') # ------------------ # Retrieving weights # ------------------ # Retrieve a layer weights, by layer name: dense1_vars = tflearn.variables.get_layer_variables_by_name('dense1') # Get a variable's value, using model `get_weights` method: print("Dense1 layer weights:") print(model.get_weights(dense1_vars[0])) # Or using generic tflearn function: print("Dense1 layer biases:") with model.session.as_default(): print(tflearn.variables.get_value(dense1_vars[1])) # It is also possible to retrieve a layer weights through its attributes `W` # and `b` (if available). # Get variable's value, using model `get_weights` method: print("Dense2 layer weights:") print(model.get_weights(dense2.W)) # Or using generic tflearn function: print("Dense2 layer biases:") with model.session.as_default(): print(tflearn.variables.get_value(dense2.b))
my_second_colabtfl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory look at the Iris data set: shape, head, summary statistics
# and class balance.
import sys
import scipy
import numpy
import matplotlib
import pandas  # (a duplicate `import pandas` was removed here)
import sklearn
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

# Column names assigned to the CSV on load.
# NOTE(review): the Kaggle Iris.csv usually ships with a header row and an
# extra 'Id' column; reading it with `names=` and no `header=0` would pull
# the header in as a data row — verify against the actual file.
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv("C:/Users/HP/Downloads/archive (41)/Iris.csv", names=names)

# Basic inspection: dimensions, first rows, summary stats, class counts.
print(dataset.shape)
print(dataset.head(30))
print(dataset.describe())
print(dataset.groupby('class').size())
iris dataset-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": false} import os, sys in_colab = 'google.colab' in sys.modules #if your're in colab if in_colab: #pull files from github repo os.chdir('/content') # !git init . # !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git # !git pull origin master #install required python packages # !pip install -r requirements.txt #change into directory for module os.chdir('module3') # + [markdown] pycharm={"name": "#%% md\n"} # #import stuff # import numpy as np # import pandas as pd # # #read nyc apartment rental listing data # df = pd.read_csv('../renthop-nyc.csv') # assert df.shape == (49352,34) # train, and test df cleaned# # + pycharm={"name": "#%%\n", "is_executing": false} # <editor-fold desc="wrangling dDF"> import numpy as np import pandas as pd import io import requests #read nyc apartment rental listing data url = 'https://raw.githubusercontent.com/DAVIDnHANG/DS-Unit-2-Kaggle-Challenge/master/data/apartments/renthop-nyc.csv' s=requests.get(url).content df = pd.read_csv(io.StringIO(s.decode('utf-8'))) assert df.shape == (49352,34) #remove less than .5 and more than .5 df = df[ (df['price'] >= np.percentile(df['price'], 0.5)) & (df['price'] <= np.percentile(df['price'], 99.5)) & (df['latitude'] >= np.percentile(df['latitude'], 0.5)) & (df['latitude'] < np.percentile(df['latitude'], 99.95)) & (df['longitude'] >= np.percentile(df['longitude'], 0.05)) & (df['longitude'] <= np.percentile(df['longitude'], 99.95)) ] #Do train/test split #use data from april and may 2016 to train #use data from june 2016 to test. 
df['created'] = pd.to_datetime(df['created'], infer_datetime_format=True) cutoff = pd.to_datetime('2016-06-01') train = df[df.created < cutoff] test = df[df.created >= cutoff] # Wrangle train and test sets in the same way def engineer_features(df): #avoid copy warning df = df.copy() #Does the aprtments have a description df['description'] = df['description'].str.strip().fillna('') df['has_description'] = df['description'] != '' #Whot long is the description df['description_length'] = df['description'].str.len() #how many total perks does each apartment have perk_cols = ['elevator', 'cats_allowed', 'hardwood_floors', 'dogs_allowed', 'doorman', 'dishwasher', 'no_fee', 'laundry_in_building', 'fitness_center', 'pre-war', 'laundry_in_unit', 'roof_deck', 'outdoor_space', 'dining_room', 'high_speed_internet', 'balcony', 'swimming_pool', 'new_construction', 'exclusive', 'terrace', 'loft', 'garden_patio', 'common_outdoor_space', 'wheelchair_access'] df['perk_count'] = df[perk_cols].sum(axis=1) #r cats | dog allowed? df['cats_or_dogs'] = (df['cats_allowed'] ==1) | (df['dogs_allowed']==1) #r cats & dogs allowed? df['cats_or_dogs'] = (df['cats_allowed']==1) & (df['dogs_allowed']==1) #Total number of rooms (beds+baths) df['rooms'] = df['bedrooms'] + df['bathrooms'] #Extract number of days elasped in year df['days'] = (df['created'] - pd.to_datetime('2016-01-01')).dt.days df = df.drop(columns='created') return df train = engineer_features(train) test = engineer_features(test) # </editor-fold> # + pycharm={"name": "#%%\n", "is_executing": false} train.head(3) # - # Cross_validate_score # How do we get started? According to the Scikit-Learn User Guide # SCikit learn cross validation score, higher is better # whereas regression error metrics lower is better. so regression score gives a negative, so by -(-), the # will represents intituive human language. 
# + pycharm={"name": "#%%\n", "is_executing": false} import category_encoders as ce # <editor-fold desc="from"> from from sklearn.feature_selection import f_regression, SelectKBest from sklearn.impute import SimpleImputer from sklearn.linear_model import Ridge from sklearn.model_selection import cross_val_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler # </editor-fold> # <editor-fold desc="oneHot, selectKBest ridge"> target = 'price' high_cardinality = ['display_address', 'street_address', 'description'] features = train.columns.drop([target]+high_cardinality) X_train = train[features] y_train = train[target] Pipeline_OneHot_SelectK_Ridge = make_pipeline( ce.OneHotEncoder(use_cat_names=True), SimpleImputer(strategy='mean'), #replacing missing values with mean StandardScaler(), #standardize features by removing the mean and scaling to unit ariance #select top 20 features SelectKBest(f_regression, k=20), Ridge(alpha=1.0) ) k = 3 #pass in pipeline, #training data #cv=k scores = cross_val_score( Pipeline_OneHot_SelectK_Ridge, X_train, y_train, cv=k, scoring = 'neg_mean_absolute_error' ) print(f'MAE for {k} folds:', -scores) print('The first time') # </editor-fold> # + pycharm={"name": "#%%\n", "is_executing": false} # <editor-fold desc="target Encoder, simple imputer, randomforest regressor"> import category_encoders as ce from sklearn.pipeline import make_pipeline from sklearn.ensemble import RandomForestRegressor from sklearn.impute import SimpleImputer from sklearn.model_selection import cross_val_score target_random_forest = 'price' high_cardinality_RF = ['display_address', 'street_address', 'description'] features_random_forest = train.columns.drop([target_random_forest]+high_cardinality_RF) X_train_RF = train[features_random_forest] y_train_RF = train[target_random_forest] pipeline_TargetEncoder_Random = make_pipeline( ce.TargetEncoder(min_samples_leaf=1, smoothing=1), SimpleImputer(strategy='median'), 
RandomForestRegressor(n_estimators=100, n_jobs=-1, random_state=42) ) k_Targetencoder_Random=4 scores_RF = cross_val_score(pipeline_TargetEncoder_Random, X_train_RF, y_train_RF, cv=k_Targetencoder_Random ) print(-scores_RF) # </editor-fold> # + pycharm={"name": "#%%\n", "is_executing": false} # <editor-fold desc="ordinaryEncoder, decision tree regressor"> import matplotlib.pyplot as plt from sklearn.model_selection import validation_curve from sklearn.tree import DecisionTreeRegressor target_S = 'price' high_cardinality = ['display_address', 'street_address', 'description'] features_S = train.columns.drop([target_S]+high_cardinality) X_train_S = train[features_S] y_train_S = train[target_S] pipeline_SimpleImputer_DecisionTree = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(), DecisionTreeRegressor() ) depth = range(1,30,3) train_scores, val_scores = validation_curve( pipeline_SimpleImputer_DecisionTree, X_train_S, y_train_S, param_name='decisiontreeregressor__max_depth', param_range=depth, scoring = 'neg_mean_absolute_error', cv=3, #n_jobs=-1 ) plt.figure(dpi=150) plt.plot(depth, np.mean(-train_scores, axis=1), color='blue', label ='training error') plt.plot(depth, np.mean(-val_scores,axis=1), color='red',label='validation error') plt.title('validation curve') plt.xlabel('model complexity:randomForestRegressor maxDepth') plt.ylabel('model score: Mean Abs Error') plt.legend(); # </editor-fold> # + pycharm={"name": "#%%\n", "is_executing": false} from sklearn.model_selection import GridSearchCV, RandomizedSearchCV import category_encoders as ce from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import SelectKBest, f_regression from sklearn.linear_model import Ridge from sklearn.pipeline import make_pipeline target_grid = 'price' features_grid = train.columns.drop([target_grid]+high_cardinality) X_train_Grid = train[features_grid] y_train_grid = train[target_grid] pipeline_Gridsearch_Randomized = make_pipeline( 
ce.OneHotEncoder(use_cat_names=True), SimpleImputer(), StandardScaler(), SelectKBest(f_regression), Ridge() ) param_distributions = { 'simpleimputer__strategy' : ['mean', 'median'], 'selectkbest__k' : range(1, len(X_train_Grid.columns)+1), 'ridge__alpha': [0.1,1,10], } search = RandomizedSearchCV( pipeline_Gridsearch_Randomized, param_distributions=param_distributions, n_iter=20, cv=5, scoring='neg_mean_absolute_error', verbose=10, return_train_score=True, n_jobs=-1 ) search.fit(X_train_Grid, y_train_grid); # + pycharm={"name": "#%%\n", "is_executing": false} search.fit(X_train_Grid, y_train_grid); X_train_Grid.dtypes # + pycharm={"name": "#%%\n", "is_executing": false} # <editor-fold desc="making df"> import pandas as pd import numpy as np #import pandas as pd import os from sklearn.model_selection import train_test_split #uploaded = files.upload() #then change the filepath buffer once the file is in google.colab. also remove the file.upload() function #name the column, adding informative names to features #data has no header, and no footheader #train= pd.merge(pd.read_csv('/content/test_features.csv'), pd.read_csv('/content/train_labels.csv')) train_label_df = pd.merge(pd.read_csv('../m2w2 kaggle/train_features.csv'), pd.read_csv('../m2w2 kaggle/train_labels.csv')) #label =pd.read_csv('/content/train_labels.csv') #train= pd.read_csv('/content/train_features.csv') test_df = pd.read_csv('../m2w2 kaggle/test_features.csv') submission_df = pd.read_csv('../m2w2 kaggle/sample_submission.csv') # </editor-fold> # + pycharm={"name": "#%%\n", "is_executing": false} # <editor-fold desc="train, test, split"> from sklearn.model_selection import train_test_split #Let Train, Validate, test then set features,target on .75 .25 train, validate = train_test_split(train_label_df, train_size = 0.75, test_size =0.25, random_state=42) train.shape, validate.shape, test_df.shape #I am using whole test_df to test. 
# so we rename test_df to simply test
test = test_df
train.shape, validate.shape, test.shape
# </editor-fold>

# + pycharm={"name": "#%%\n", "is_executing": false}
# <editor-fold desc="train, test, split">
def wrangletwo(X):
    """Wrangle train, validate, and test sets in the same way"""
    # Prevent SettingWithCopyWarning
    X = X.copy()

    # About 3% of the time, latitude has small values near zero,
    # outside Tanzania, so we'll treat these values like zero.
    X['latitude'] = X['latitude'].replace(-2e-08, 0)

    # When columns have zeros and shouldn't, they are like null values.
    # So we will replace the zeros with nulls, and impute missing values later.
    cols_with_zeros = ['longitude', 'latitude']
    for col in cols_with_zeros:
        X[col] = X[col].replace(0, np.nan)

    # quantity & quantity_group are duplicates, so drop one
    X = X.drop(columns='quantity_group')

    # return the wrangled dataframe
    return X

train = wrangletwo(train)
val = wrangletwo(validate)
test = wrangletwo(test)

# The status_group column is the target
target_two = 'status_group'

# Get a dataframe with all train columns except the target & id.
# BUG FIX: this previously dropped `target` ('price', left over from the
# NYC-rentals section), which does not exist in this dataset and raised a
# KeyError; the correct target here is `target_two` ('status_group').
train_features = train.drop(columns=[target_two, 'id'])

# Get a list of the numeric features
numeric_features_two = train_features.select_dtypes(include='number').columns.tolist()

# Get a series with the cardinality of the nonnumeric features
cardinality_two = train_features.select_dtypes(exclude='number').nunique()

# Get a list of all categorical features with cardinality <= 50
categorical_features_two = cardinality_two[cardinality_two <= 50].index.tolist()

# Combine the lists
features_two = numeric_features_two + categorical_features_two

X_train_two = train[features_two]
y_train_two = train[target_two]
X_val_two = val[features_two]
y_val_two = val[target_two]
X_test_two = test[features_two]
# </editor-fold>

# + pycharm={"name": "#%%\n", "is_executing": false}
# NOTE(review): `search` wraps a Ridge *regressor* tuned on the rentals data;
# fitting it on the categorical 'status_group' target looks wrong — confirm.
search.fit(X_train_two, y_train_two);

# + pycharm={"name": "#%%\n", "is_executing": false}
y_train_grid

# + pycharm={"name": "#%%\n"}
module3/m2w2 kaggle_module3_lesson.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A Guided Tour of Ray Core: Multiprocessing Pool

# [*Distributed multiprocessing.Pool*](https://docs.ray.io/en/latest/multiprocessing.html) make it easy to scale existing Python applications that use [`multiprocessing.Pool`](https://docs.python.org/3/library/multiprocessing.html) by leveraging *actors*.
#
# ---
# First, let's start Ray…

# +
import logging

import ray

# ignore_reinit_error makes this cell safe to re-run; ERROR-level logging
# keeps the notebook output quiet.
ray.init(
    ignore_reinit_error=True,
    logging_level=logging.ERROR,
)
# -

# ## Multiprocessing Pool example

# The following is a simple Python function with a slight delay added (to make it behave like a more complex calculation)...

# +
import time


def my_function (x):
    """Square x after a one-second pause (a stand-in for real work)."""
    time.sleep(1)
    return x ** 2


# +
# %%time
my_function(11)
# -

# First, let's try using this with Python's built-in [multiprocessing.Pool](https://docs.python.org/3/library/multiprocessing.html):

# +
# %%time
from multiprocessing import Pool

pool = Pool()

# pool.map returns a list of results; print each one.
results = pool.map(my_function, range(50))
for result in results:
    print(result)
# -

# Now we'll create a *Pool* using and distribute its tasks across a cluster (or across the available cores on a laptop):

# +
# %%time
from ray.util.multiprocessing import Pool

pool = Pool()

# Same interface as the stdlib Pool, but backed by Ray actors.
results = pool.map(my_function, range(50))
for result in results:
    print(result)
# -

# The distributed version has the trade-off of increased overhead, although now it can scale-out horizontally across a cluster. The benefits would be more pronounced with a more computationally expensive calculation.

# Finally, shutdown Ray
ray.shutdown()
ex_04_mult_pool.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # For machine learning from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report # - df = pd.read_csv('../dataset/words.csv') # + # df.head() # - len(df) # + # df['Word'].value_counts() # - df_X = df.drop(["Word"], axis=1) df_y = df["Word"] # Creating training and testing data X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.051) print("Training data size : {} \nTesting data size : {}".format(len(X_train), len(X_test))) print("Train data shape : {} \nTest Data shape : {} ".format(X_train.shape, X_test.shape)) # + # X_train.head() # - print("Total unique elements in training data are : {}".format(len(y_train.value_counts()))) # + # print("Training data for each alphabets are :\n{}".format(y_train.value_counts())) # - print("Total unique elements in test data are : {}".format(len(y_test.value_counts()))) # + # print("Training data for each alphabets are :\n{}".format(y_test.value_counts())) # - rf = RandomForestClassifier(n_estimators=50, random_state=30) rf.fit(X_train, y_train) rf.score(X_train, y_train) rf.score(X_test, y_test) # #### Achieved training score of 100% and test score of 99.63% y_test_predicted = rf.predict(X_test) # + # print("Classification Report \n ", classification_report(y_test, y_test_predicted)) # - # ### This model is good enough for all the cases of words # # Using pickle to save model for later use import pickle5 as pickle # Saving the model pickle.dump(rf, open('../models/words_model', 'wb'))
notebooks/Words model training and testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Fig. 4: Frequency Dependence of Eigenmodes on External Field Strength

# This notebook reproduces Fig. 4 in the paper, which shows the frequency dependence of first five eigenmodes on the strength of the external field.

# +
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.formula.api as sm
from numpy import pi

from style_helpers import style_cycle

# %matplotlib inline
plt.style.use('style_sheets/fig4.mplstyle')
# -

df = pd.read_csv('../data/eigenmode_info_data_frame.csv')
df = df.query('has_particle == False')

# The external field values used in the simulations are in units of A/m. We therefore add an additional column containing the values in mT.

# +
# Define vacuum permeability (in V.s/A.m)
mu0 = 4 * pi * 1e-7

# Convert field values to milli-Tesla
df['Hz_mT'] = df['Hz'] * mu0 * 1e3
# -


def plot_fitted_line(N, df, ax):
    """
    Plot onto `ax` a straight line fitted by ordinary least squares to the
    frequency-vs-field data in `df` (columns 'freq' and 'Hz_mT').

    FIX: the previous docstring wrongly claimed this returns a
    `pandas.Series` of intercept/slope; it draws the line and returns None.
    `N` (the eigenmode index) is unused; it is kept so the signature matches
    the call in `plot_freq_vs_H_ext_for_mode`.
    """
    linefit = sm.ols(formula="freq ~ Hz_mT", data=df).fit()
    a = linefit.params.Intercept
    b = linefit.params.Hz_mT
    max_Hz = df['Hz_mT'].max()
    # Draw the fitted line in black from H = 0 to the largest field value.
    ax.plot([0, max_Hz], [a, a + max_Hz * b], '-k')


def plot_freq_vs_H_ext_for_mode(N, df, ax, **mpl_kwargs):
    """
    Plot eigenfrequency vs. external field for mode `N` onto `ax`,
    together with an OLS-fitted line through the data points.
    Extra keyword arguments are forwarded to `ax.plot`.
    """
    df_filtered = df.query('N == {N}'.format(N=N)).sort_values('Hz_mT')
    Hz_mT = df_filtered['Hz_mT']
    freqs = df_filtered['freq']
    plot_fitted_line(N, df_filtered, ax)
    ax.plot(Hz_mT, freqs, label='N={N}'.format(N=N), **mpl_kwargs)


# +
fig, ax = plt.subplots(figsize=(6, 6))

# Plot lines in reverse order to avoid higher modes
# covering the lower ones.
for N, style_kwargs in reversed(list(zip([1, 2, 3, 4, 5], style_cycle))): plot_freq_vs_H_ext_for_mode(N, df, ax, linestyle='none', **style_kwargs) ax.set_xlabel('H_ext (mT)') ax.set_ylabel('Frequency (GHz)') ax.set_xlim(-12.5, 312.5) ax.legend(numpoints=1, loc='best', prop={'size': 16}) ax.grid()
notebooks/fig_4_frequency_dependence_on_external_field.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # 画图(为什么和书上画出来的图不一样?看不出有什么问题) import matplotlib.pyplot as plt def lg_plot(clf): x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.figure() plt.contourf(xx, yy, Z, alpha=0.4) plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8) plt.show() # + # 线性SVM分类 import numpy as np from sklearn import datasets from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC # 卫星数据 from sklearn.datasets import make_moons X, y = make_moons(n_samples=500, noise=0.15, random_state=1) svm_clf = Pipeline(( ("scaler", StandardScaler()), ("linear_svc", LinearSVC(C=10, loss="hinge", max_iter=10000)), )) svm_clf.fit(X, y) lg_plot(svm_clf) # + # 添加多项式特性,然后线性分类 from sklearn.datasets import make_moons from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures polynomial_svm_clf = Pipeline(( ("poly_features", PolynomialFeatures(degree=3)), ("scaler", StandardScaler()), ("svm_clf", LinearSVC(C=10, loss="hinge", max_iter=10000)), )) polynomial_svm_clf.fit(X, y) lg_plot(polynomial_svm_clf) # + #使用多项式核 from sklearn.svm import SVC poly_kernel_svm_clf = Pipeline(( ("scaler", StandardScaler()), ("svm_clf", SVC(kernel="poly", degree=3, coef0=1, C=5)) # SVC没有损失函数? )) poly_kernel_svm_clf.fit(X, y) lg_plot(poly_kernel_svm_clf) # + #高斯RBF核函数 rbf_kernel_svm_clf = Pipeline(( ("scaler", StandardScaler()), ("linear_svc", SVC(kernel="rbf", gamma=5, C=0.001)), )) rbf_kernel_svm_clf.fit(X, y) lg_plot(rbf_kernel_svm_clf) # -
ch5 SVM using moons.ipynb
# Spam Classifier: bag-of-words features + multinomial naive Bayes on the
# SMS Spam Collection dataset.

import pandas as pd
import numpy as np

# Fix: raw string so the backslash in the Windows-style path is not parsed as
# an (invalid) escape sequence '\S' — the runtime value is unchanged.
messages = pd.read_csv(r'smsspamcollection\SMSSpamCollection', sep='\t', names=['label', 'message'])

messages

# ### Data Cleaning and Preprocessing

import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer

ps = PorterStemmer()

# Fix: build the stop-word set once, outside the loop. The original called
# stopwords.words('english') (a fresh list) for every word of every message,
# turning an O(words) pass into an O(words * stopwords) one.
stop_words = set(stopwords.words('english'))

corpus = []
for i in range(0, len(messages)):
    # Keep letters only, lowercase, tokenise, drop stop words, stem, re-join.
    review = re.sub('[^a-zA-Z]', ' ', messages['message'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    review = ' '.join(review)
    corpus.append(review)

corpus

# ### Creating the Bag Of Word models or Document Matrix

from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer(max_features=5000)
x = cv.fit_transform(corpus).toarray()

x.shape

# Binary target: get_dummies yields columns ['ham', 'spam']; column 1 ('spam')
# gives 1 for spam and 0 for ham.
y = pd.get_dummies(messages['label'])
y = y.iloc[:, 1].values

# ### train and test split

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

# ### Trainng model using Naive bayes classifier

from sklearn.naive_bayes import MultinomialNB

spam_detect_model = MultinomialNB().fit(X_train, y_train)

y_pred = spam_detect_model.predict(X_test)

from sklearn.metrics import confusion_matrix
confusion_m = confusion_matrix(y_test, y_pred)
confusion_m

from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test, y_pred)
accuracy
Spam Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#[Tutorial-Title]¶" data-toc-modified-id="[Tutorial-Title]¶-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>[Tutorial Title]¶</a></span><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Introduction</a></span></li><li><span><a href="#Prerequisites" data-toc-modified-id="Prerequisites-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Prerequisites</a></span></li><li><span><a href="#Initialization" data-toc-modified-id="Initialization-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Initialization</a></span><ul class="toc-item"><li><span><a href="#Ensure-database-is-running" data-toc-modified-id="Ensure-database-is-running-1.3.1"><span class="toc-item-num">1.3.1&nbsp;&nbsp;</span>Ensure database is running</a></span></li><li><span><a href="#Download-and-install-additional-components." data-toc-modified-id="Download-and-install-additional-components.-1.3.2"><span class="toc-item-num">1.3.2&nbsp;&nbsp;</span>Download and install additional components.</a></span></li><li><span><a href="#Connect-to-database." data-toc-modified-id="Connect-to-database.-1.3.3"><span class="toc-item-num">1.3.3&nbsp;&nbsp;</span>Connect to database.</a></span></li><li><span><a href="#Populate-database-with-test-data." 
data-toc-modified-id="Populate-database-with-test-data.-1.3.4"><span class="toc-item-num">1.3.4&nbsp;&nbsp;</span>Populate database with test data.</a></span></li><li><span><a href="#Create-secondary-indexes-if-necessary" data-toc-modified-id="Create-secondary-indexes-if-necessary-1.3.5"><span class="toc-item-num">1.3.5&nbsp;&nbsp;</span>Create secondary indexes if necessary</a></span></li></ul></li></ul></li><li><span><a href="#[Tutorial-Section]" data-toc-modified-id="[Tutorial-Section]-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>[Tutorial Section]</a></span><ul class="toc-item"><li><span><a href="#[Subsection-1]" data-toc-modified-id="[Subsection-1]-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>[Subsection 1]</a></span></li><li><span><a href="#[Subsection-2]" data-toc-modified-id="[Subsection-2]-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>[Subsection 2]</a></span></li></ul></li><li><span><a href="#[Next-Section]" data-toc-modified-id="[Next-Section]-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>[Next Section]</a></span><ul class="toc-item"><li><span><a href="#[Subsection-1]" data-toc-modified-id="[Subsection-1]-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>[Subsection 1]</a></span></li><li><span><a href="#[Subsection-2]" data-toc-modified-id="[Subsection-2]-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>[Subsection 2]</a></span></li></ul></li><li><span><a href="#Takeaways-and-Conclusion" data-toc-modified-id="Takeaways-and-Conclusion-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Takeaways and Conclusion</a></span></li><li><span><a href="#Clean-up" data-toc-modified-id="Clean-up-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Clean up</a></span></li><li><span><a href="#Further-Exploration-and-Resources" data-toc-modified-id="Further-Exploration-and-Resources-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Further Exploration and Resources</a></span><ul class="toc-item"><li><span><a href="#Next-steps" 
data-toc-modified-id="Next-steps-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>Next steps</a></span></li></ul></li></ul></div> # - # **How To Create Aerospike Tutorial in Jupyter Notebook:** # - Use the template below and customize the square-bracketed text in the template for your needs. Omit them if appropriate. # - Code cells in this notebook are not meant to run; they are provided for illustrative purpose only. When both Python and Java code cells are presented, use the appropriate one and delete the other. # - Leverage Jupyter Notebook style to build the tutorial through specific incremental goals. Avoid large markdown and code cells; instead break them down into multiple bite-sized cells. # - Create a TOC level map of goals and points. Decribe the code examples without code. Get the flow and examples reviewed before finalizing the details of content and code. Check for spelling, typos, and other errors. Get the final version reviewed before publishing the notebook. # - Provide local notebook links when possible as local links run on click, but remote notebooks must be downloaded first. # - Use a unique set name in "test" namespace to populate each tutorial's data. Only modify data within this set without affecting the rest of the database. It is a good practice to clean up this set at the end. # - Explore the extensions TOC (accessible from the header icon), collapsible headers, and scratchpad (arrow icon at right bottom). Cofigure and save as you would like the user to experience when they open the notebook first. For example, TOC at top, all headers collapsed, etc. Consider configuring TOC extension to have a TOC cell at the top. # - Name the notebook appropriately and check it into appropriate repo, such as under [this notebooks directory](https://github.com/aerospike/aerospike-dev-notebooks.docker/tree/main/notebooks). 
Execute all cells before checking into the repo so that the notebook may be read completely as a blog post with its output visible in the non-interactive github viewer. # # # [Tutorial Title] # [A brief description of the topic and goals of the tutorial.] # # This notebook requires Aerospike database running on localhost and that python and the Aerospike python client have been installed (`pip install aerospike`). Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) for additional details and the docker container. # ## Introduction # [Describe what the developer will learn and how.] # # [Why is this topic relevant and important.] # # [The structure and flow. Tips for skipping basic sections or for returning users.] # ## Prerequisites # This tutorial assumes familiarity with the following topics: # # [Provide topics and links. For example:] # - [Hello World](hello_world.ipynb) # - [Aerospike Basic Operations](basic_operations.ipynb) # - [Another area] # ## Initialization # ### Ensure database is running # This notebook requires that Aerospike database is running. # [Include the right code cell for Java or Python from the two cells below.] import io.github.spencerpark.ijava.IJava; import io.github.spencerpark.jupyter.kernel.magic.common.Shell; IJava.getKernelInstance().getMagics().registerMagics(Shell.class); # %sh asd # !asd >& /dev/null # !pgrep -x asd >/dev/null && echo "Aerospike database is running!" || echo "**Aerospike database is not running!**" # ### Download and install additional components. # [For Java notebooks, the latest Java client should be loaded here by including the following code cell. Specify the correct version. This section can be omitted for Python notebooks as a Python client is already installed.]
# %%loadFromPOM <dependencies> <dependency> <groupId>com.aerospike</groupId> <artifactId>aerospike-client</artifactId> <version>[specify the Java client version here, e.g., 5.0.0]</version> </dependency> </dependencies> # ### Connect to database. # [Most developer tutorials need a client to be connected to the database. Use the right cell and change the parameters as needed.] import com.aerospike.client.AerospikeClient; AerospikeClient client = new AerospikeClient("localhost", 3000); System.out.println("Initialized the client and connected to the cluster."); # + # import the module from __future__ import print_function import aerospike # Configure the client config = { 'hosts': [ ('127.0.0.1', 3000) ], 'policy' : {'key': aerospike.POLICY_KEY_SEND} } # Create a client and connect it to the cluster try: client = aerospike.client(config).connect() except: import sys print("failed to connect to the cluster with", config['hosts']) sys.exit(1) print('Client successfully connected to the database.') # - # ### Populate database with test data. # [Remove this section if the tutorial does not need the database to be populated with test data.] # # [This can be done with code or using the cvsloader utility.] # # [Describe the test data and intent. The cells below are only examples - replace with your own data.] # + import com.aerospike.client.policy.ClientPolicy; String Namespace = "test"; String Set = "my-unique-set"; String BinInt = "binint"; String BinStr = "binstr"; int NumRecords = 10; String Names[] = {"1. Clark", "<NAME>", "<NAME>", "<NAME>", "5. Clark", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "10. 
Smith"}; ClientPolicy policy = new ClientPolicy(); for (int i = 1; i <= NumRecords; i++) { Key key = new Key(Namespace, Set, i); Bin bin1 = new Bin(BinInt, i); Bin bin2 = new Bin(BinStr, Names[i-1]); client.put(policy.writePolicyDefault, key, bin1, bin2); } System.out.format("Written %d records in ns=%s set=%s with userkeys 1-%d.", NumRecords, Namespace, Set, NumRecords); # + namespace = 'test' tutorial_set = 'my-unique-set' # Records are addressable via a tuple of (namespace, set, key) people = [ {'id':1, 'name':'<NAME>', 'age': 53}, {'id':2, 'name':'<NAME>', 'age': 21}, {'id':3, 'name':'<NAME>', 'age': 34}, {'id':4, 'name':'<NAME>', 'age': 48}, {'id':5, 'name':'<NAME>', 'age': 29}, {'id':6, 'name':'<NAME>', 'age': 56}, {'id':7, 'name':'<NAME>', 'age': 30}, {'id':8, 'name':'<NAME>', 'age': 32}, {'id':9, 'name':'<NAME>', 'age': 44}, {'id':10, 'name':'<NAME>', 'age': 22} ] try: for i in range(10): # Write the records client.put((namespace, tutorial_set, 'id'+str(people[i]['id'])), people[i]) except Exception as e: import sys print("error: {0}".format(e), file=sys.stderr) print('Test data populated.') # - # ### Create secondary indexes if necessary # [To use the query API, a secondary index must exist on the query field.] # # [Replace or remove the code cell as appropriate.] # + import com.aerospike.client.policy.Policy; import com.aerospike.client.query.IndexType; import com.aerospike.client.task.IndexTask; import com.aerospike.client.AerospikeException; import com.aerospike.client.ResultCode; String IndexName = "idx_numeric_test_demo_binint"; Policy policy = new Policy(); policy.socketTimeout = 0; // Do not timeout on index create. 
try { IndexTask task = client.createIndex(policy, Namespace, Set, IndexName, BinInt, IndexType.NUMERIC); task.waitTillComplete(); } catch (AerospikeException ae) { if (ae.getResultCode() != ResultCode.INDEX_ALREADY_EXISTS) { throw ae; } } System.out.format("Created index %s on ns=%s set=%s bin=%s.", IndexName, Namespace, Set, BinInt); # + # Must create an index to query on a bin index_name = "test_demo_number_idx" from aerospike import exception as ex try: client.index_integer_create(Namespace, Set, "age", index_name) except ex.IndexFoundError: pass print('Secondary index created.') # - # # [Tutorial Section] # # [Section of the core of the tutorial.] # ## [Subsection 1] # [Subsection 1] # ## [Subsection 2] # [Subsection 2] # # [Next Section] # # [Next section.] # ## [Subsection 1] # [Subsection 1] # ## [Subsection 2] # [Subsection 2] # # Takeaways and Conclusion # [Summarize the takeaways and conclusion of the tutorial.] # # Clean up # [Perform any cleanup - remove data, close connection, etc.] client.dropIndex(policy, Namespace, Set, IndexName); client.truncate(null, Namespace, Set); client.close(); System.out.println("Removed tutorial data and server connection closed."); client.index_remove(namespace, index_name) client.truncate(namespace, tutorial_set, 0) # Close the connection to the Aerospike cluster client.close() print('Removed tutorial data. Connection closed.')
To run a different notebook, download the notebook from the repo to your local machine, and then click on File->Open, and select Upload.
notebooks/nb-template.ipynb
# Detectron2 dataset-loading checkpoint (work in progress).
#
# NOTE(review): this checkpoint contains several unresolved names and calls
# (flagged inline) and does not run as-is.

import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import os, json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog


def load_segmentation_dataset(f_p, label_names=None):
    """Load COCO-style annotations from the JSON file at `f_p`.

    Returns a dict mapping image file name -> list of
    ``{"class_id": <category name>, "annotation": [[x, y], ...]}`` entries,
    keeping only categories whose name is in `label_names`.

    NOTE(review): `load_json` is not defined or imported in this file.
    NOTE(review): if `label_names` is left as None, `cat["name"] in
    label_names` raises TypeError — the default is unusable.
    """
    data = load_json(f_p)
    cat_map = {}
    for cat in data["categories"]:
        if cat["name"] in label_names:
            cat_map[cat['id']] = cat["name"]
    image_map = {}
    for cat in data["images"]:
        image_map[cat['id']] = cat["file_name"]
    annos = {}
    for d in data["annotations"]:
        # Flatten [x0, y0, x1, y1, ...] polygon into [[x0, y0], [x1, y1], ...].
        tmp = []
        seg = d["segmentation"][0]
        for i in range(0, len(seg) - 1, 2):
            tmp.append([seg[i], seg[i + 1]])
        if image_map[d["image_id"]] not in annos:
            annos[image_map[d["image_id"]]] = [{"class_id": cat_map[d["category_id"]], "annotation": tmp}]
        else:
            annos[image_map[d["image_id"]]].append({"class_id": cat_map[d["category_id"]], "annotation": tmp})
    return annos


data_dir = "/home/asad/projs/SMN/data/cucumber"
# NOTE(review): called without the required `f_p` argument — raises TypeError.
# Presumably intended: load_segmentation_dataset(<path under data_dir>).
ann = load_segmentation_dataset()

# if your dataset is in COCO format, this cell can be replaced by the following three lines:
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")

from detectron2.structures import BoxMode


def get_veg_dicts(img_dir):
    """Build a detectron2 dataset-dict list from per-image JSON annotations.

    NOTE(review): multiple unresolved names — `tqdm` is not imported,
    `self.extensions` is referenced outside any class, and `imgs_anns` is
    undefined (the freshly computed `loaded` is never used; presumably
    `annos = loaded[...]` was intended).
    NOTE(review): `raise (f"...")` raises a str, which is itself a
    TypeError — should raise an Exception subclass.
    """
    json_files = [
        json_file for json_file in os.listdir(img_dir) if json_file.endswith(".json")
    ]
    dataset_dicts = []
    for idx, json_file in tqdm(enumerate(json_files), total=len(json_files)):
        # Try each known image extension until one exists next to the JSON.
        for ext in self.extensions:
            filename = json_file.split(".")[0] + ext
            c_fname = os.path.join(img_dir, filename)
            img = cv2.imread(c_fname)
            if img is not None:
                break
        if img is None:
            print(f"Image Not Found for {json_file}")
            raise (f"Image Not Found for {json_file}")
        loaded = load_segmentation_dataset(os.path.join(img_dir, json_file))
        record = {}
        height, width = img.shape[:2]
        record["file_name"] = c_fname
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width
        annos = imgs_anns["shapes"]
        objs = []
        for anno in annos:
            # Split (x, y) pairs, nudge by 0.5 to pixel centres, then flatten.
            px = [x for x, y in anno]
            py = [y for x, y in anno]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]
            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts


# NOTE(review): `get_balloon_dicts` is undefined here — presumably copied from
# the detectron2 balloon tutorial and meant to be `get_veg_dicts`.
for d in ["train", "val"]:
    DatasetCatalog.register("balloon_" + d, lambda d=d: get_balloon_dicts("balloon/" + d))
    MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
balloon_metadata = MetadataCatalog.get("balloon_train")
.ipynb_checkpoints/detectron2_comparison-checkpoint.ipynb
# SMS spam classification workshop notebook (Google Colab): TF-IDF baseline,
# pretrained word2vec features, and domain-trained word2vec features.

# ### Import
import pandas as pd
import numpy as np

import pickle
import sys
import nltk
from nltk.stem.porter import *
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
import seaborn
# %matplotlib inline

from google.colab import drive
drive.mount('/content/drive')

data = pd.read_csv('/content/drive/My Drive/Shaastra Workshop Material/Text Analysis/spam.csv', encoding="ISO-8859-1")

# Drop the unused spill-over columns and give the two real ones clear names.
data.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
data.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
data.head()

# ### Basic Preprocessing
train = data.copy()
train['text'] = train['text'].astype(str)

## 1. Lowercasing
train['text'] = train['text'].apply(lambda x: " ".join(x.lower() for x in x.split()))
## 2. Punctuation Removal
# NOTE(review): the pattern contains '\w' in a non-raw string — should be a
# raw string to avoid an invalid-escape warning on newer Pythons.
train['text'] = train['text'].str.replace('[^\w\s]', '')
## 3. Stopwords Removal
nltk.download('stopwords')  ## for removing the stopwords
from nltk.corpus import stopwords
stop = stopwords.words('english')
train['text'] = train['text'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
train['text'].head()
""" fill your code for all these operations """

## lemmatization
from textblob import Word
nltk.download('wordnet')
train['text'] = train['text'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
train['text'].head()

# ### Target creation
train['target'].unique()

# Encode labels: ham -> 0, spam -> 1.
train['target'].replace(['ham', 'spam'], [0, 1], inplace=True)
train['target'].value_counts()

train

# ### Basic Model on whole dataset
from sklearn.model_selection import train_test_split
x_train, x_valid, y_t, y_v = train_test_split(train['text'], train['target'], test_size=0.2, random_state=4353)

from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(max_features=100, lowercase=True, analyzer='word', stop_words='english', ngram_range=(1, 1))
tfidf.fit(train['text'])
x_t = tfidf.transform(x_train)
x_v = tfidf.transform(x_valid)

x_v.toarray().shape


def model_training(clf, x_t, y_t, x_v=None, y_v=None, task='binary:logistic'):
    """Fit `clf` and print train/validation metrics; returns the fitted clf.

    NOTE(review): this function is redefined (almost identically) later in
    the notebook; only the second definition survives after that cell runs.
    """
    clf.fit(x_t, y_t)
    print('training accuracy', clf.score(x_t, y_t))
    if task == 'binary:logistic':
        print('validation accuracy', clf.score(x_v, y_v))
        print('validation f1_score', f1_score(clf.predict(x_v), y_v))
        print('validation roc_auc score', roc_auc_score(y_v, clf.predict_proba(x_v)[::, -1]))
        print('confusion matrix \n', confusion_matrix(y_v, clf.predict(x_v)))
    if task == 'reg:linear':
        if x_v != None:
            print('validation r2_score', clf.score(x_v, y_v))
            print('validation MSE', mean_squared_error(clf.predict(x_v), y_v))
    return clf


# Baseline class balance (fraction of spam).
train['target'].mean()

from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import f1_score, classification_report, roc_auc_score, confusion_matrix, accuracy_score
from xgboost import XGBClassifier

lgr = LogisticRegression(n_jobs=1)
xgb = XGBClassifier(n_estimators=500, max_depth=5, learning_rate=0.1, scale_pos_weight=1.4266790777602751)
model_training(lgr, x_t, y_t, x_v, y_v)

# ### Training on pretrained word2vec model
import gensim
import logging
from gensim.models import Word2Vec

# Pretrained GoogleNews embeddings (binary word2vec format).
wv = gensim.models.KeyedVectors.load_word2vec_format("/content/drive/My Drive/Shaastra Workshop Material/Text Analysis/GoogleNews-vectors-negative300.bin.gz", binary=True)
wv.init_sims(replace=True)


def word_averaging(wv, words):
    """Return the L2-normalised mean embedding of `words` (float32).

    Words missing from the vocabulary are skipped; if nothing remains, a
    zero vector is returned and a warning is logged.
    """
    all_words, mean = set(), []
    for word in words:
        if isinstance(word, np.ndarray):
            mean.append(word)
        elif word in wv.vocab:
            mean.append(wv.syn0norm[wv.vocab[word].index])
            all_words.add(wv.vocab[word].index)
    if not mean:
        logging.warning("cannot compute similarity with no input %s", words)
        # FIXME: remove these examples in pre-processing
        return np.zeros(wv.vector_size,)
    mean = gensim.matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)
    return mean


def word_averaging_list(wv, text_list):
    """Stack one averaged embedding per tokenised document."""
    return np.vstack([word_averaging(wv, post) for post in text_list])


import nltk
nltk.download('punkt')

from sklearn.model_selection import train_test_split
import nltk
nltk.download('punkt')


def w2v_tokenize_text(text):
    """Tokenise `text` into words, dropping tokens shorter than 2 chars."""
    tokens = []
    for sent in nltk.sent_tokenize(text, language='english'):
        for word in nltk.word_tokenize(sent, language='english'):
            if len(word) < 2:
                continue
            tokens.append(word)
    return tokens


train_w2v, test_w2v = train_test_split(train, test_size=0.2, random_state=234)
test_tokenized = test_w2v.apply(lambda r: w2v_tokenize_text(r['text']), axis=1).values
train_tokenized = train_w2v.apply(lambda r: w2v_tokenize_text(r['text']), axis=1).values
X_train_word_average = word_averaging_list(wv, train_tokenized)
X_test_word_average = word_averaging_list(wv, test_tokenized)

# Sanity checks on the pretrained embedding space.
wv.similarity(w1='queen', w2='royal')
wv.similarity(w1='lion', w2='cub')

from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import f1_score, classification_report, roc_auc_score, confusion_matrix, accuracy_score


def model_training(clf, x_t, y_t, x_v=None, y_v=None, task='binary:logistic'):
    """Fit `clf` and print train/validation metrics; returns the fitted clf.

    (Second, slightly different definition — the reg:linear branch here does
    not guard on x_v being provided.)
    """
    clf.fit(x_t, y_t)
    print('training accuracy', clf.score(x_t, y_t))
    if task == 'binary:logistic':
        print('validation accuracy', clf.score(x_v, y_v))
        print('validation f1_score', f1_score(clf.predict(x_v), y_v))
        print('validation roc_auc score', roc_auc_score(y_v, clf.predict_proba(x_v)[::, -1]))
        print('confusion matrix \n', confusion_matrix(y_v, clf.predict(x_v)))
    if task == 'reg:linear':
        print('validation r2_score', clf.score(x_v, y_v))
        print('validation MSE', mean_squared_error(clf.predict(x_v), y_v))
    return clf


# %%time
xgb_w2v = XGBClassifier(n_estimators=500, max_depth=5, learning_rate=0.1, scale_pos_weight=1.4266790777602751)
lgr_w2v = LogisticRegression(n_jobs=1)
lgbm_w2v = LGBMClassifier(n_estimators=500)
gbdt_w2v = GradientBoostingClassifier(n_estimators=500)
model_training(xgb_w2v, X_train_word_average, train_w2v['target'], X_test_word_average, test_w2v['target'])

# Threshold tuning: predict spam when P(spam) > 0.3 instead of 0.5.
tuned_pred = (xgb_w2v.predict_proba(X_test_word_average)[::, -1] > 0.3).astype(int)
confusion_matrix(test_w2v['target'], tuned_pred)

f1_score(test_w2v['target'], tuned_pred)

# ### Word2Vec DOMAIN Training
import gzip
import gensim
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)


def read_input(data):
    """Yield simple-preprocessed token lists for each message in data['text'].

    NOTE(review): the docstring claimed "reads the input file which is in
    gzip format", but it actually iterates a DataFrame column.
    """
    i = 0
    for line in data['text']:
        i += 1
        if (i % 10000 == 0):
            logging.info("read {0} tweets".format(i))
        # Tokenise each message into a list of words.
        yield gensim.utils.simple_preprocess(line)


documents = list(read_input(train))
logging.info("Done reading data file")

# NOTE(review): `documents` is immediately rebuilt below with nltk
# tokenisation, so the read_input pass above is dead work.
documents = []
i = 0
for line in train['text']:
    i += 1
    if (i % 1000 == 0):
        logging.info("read {0} messages".format(i))
    documents.append(nltk.word_tokenize(line))

# Train a word2vec model on the SMS corpus itself.
# %%time
model = gensim.models.Word2Vec(documents, size=50, min_count=2, workers=5)
model.train(documents, total_examples=len(documents), epochs=10)

w1 = "discount"
model.wv.most_similar(positive=w1)

model.wv.save_word2vec_format('tmp.txt', binary=False)  ## save the model

# Reload the domain-trained vectors for feature extraction.
wv = gensim.models.KeyedVectors.load_word2vec_format('tmp.txt')
wv.init_sims(replace=True)
# + id="Y-dxYs9MnvGJ" colab_type="code" outputId="2fee6eb9-a421-44a6-9a07-eee4651f4c30" colab={"base_uri": "https://localhost:8080/", "height": 70} word2vec_model.intersect_word2vec_format('/content/drive/My Drive/Shaastra Workshop Material/Text Analysis/GoogleNews-vectors-negative300.bin.gz', lockf=1.0, binary=True) # + id="ShcAp0oDoQNK" colab_type="code" outputId="600f7601-406f-4b69-a8e1-88ec545b15ab" colab={"base_uri": "https://localhost:8080/", "height": 50} # continue training with you own data word2vec_model.train(sentences, total_examples=len(sentences), epochs = 5) # + id="035N-J4WoYb3" colab_type="code" outputId="f0242417-fc9f-4d84-a67f-089b7e4eae0b" colab={"base_uri": "https://localhost:8080/", "height": 238} w1 = ["sale"] word2vec_model.wv.most_similar (positive=w1) # + id="TYMVuRHrpVj1" colab_type="code" outputId="991143bf-b5f6-4f5d-f640-2304bb2d001a" colab={"base_uri": "https://localhost:8080/", "height": 70} word2vec_model.wv.save_word2vec_format('model_transfer_learning.txt', binary=False) # + id="oGiOeMVjptEG" colab_type="code" colab={} #### Training on this # + id="9jDyZFLlp0-K" colab_type="code" outputId="63cf0304-adde-4342-f960-8e5b12c3e4c1" colab={"base_uri": "https://localhost:8080/", "height": 272} wv = gensim.models.KeyedVectors.load_word2vec_format('model_transfer_learning.txt') wv.init_sims(replace=True) train_w2v, test_w2v = train_test_split(train, test_size=0.2, random_state = 234) # x_t,x_v,y_t,y_v = train_test_split(train['tweet'],train['class'],test_size=0.2,random_state=234) test_tokenized = test_w2v.apply(lambda r: w2v_tokenize_text(r['text']), axis=1).values train_tokenized = train_w2v.apply(lambda r: w2v_tokenize_text(r['text']), axis=1).values X_train_word_average = word_averaging_list(wv,train_tokenized) X_test_word_average = word_averaging_list(wv,test_tokenized) # + id="r7-9Fxadp-LD" colab_type="code" outputId="b8b4968f-e67e-4b59-daab-8d03d18a90a7" colab={"base_uri": "https://localhost:8080/", "height": 168} # %%time xgb_w2v = 
XGBClassifier(n_estimators=500, max_depth=5,learning_rate=0.1,scale_pos_weight=1.4266790777602751) lgr_w2v = LogisticRegression(n_jobs=1) lgbm_w2v = LGBMClassifier(n_estimators=500) gbdt_w2v = GradientBoostingClassifier(n_estimators=500) model_training(xgb_w2v,X_train_word_average,train_w2v['target'],X_test_word_average,test_w2v['target']) # model_training(lgr_w2v,X_train_word_average,train_w2v['class'],X_test_word_average,test_w2v['class']) # model_training(lgbm_w2v,X_train_word_average,train_w2v['class'],X_test_word_average,test_w2v['class']) # model_training(gbdt_w2v,X_train_word_average,train_w2v['class'],X_test_word_average,test_w2v['class']) # + id="JirTZaICqDVh" colab_type="code" colab={} ### indeed an increase in F1-Score # + id="P9stWWKlqP0w" colab_type="code" colab={}
Spam_Classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:Anaconda3]
#     language: python
#     name: conda-env-Anaconda3-py
# ---

# # ICA Cancer

# NOTE(review): this notebook reduces the breast-cancer dataset with ICA, picks
# the component count by decision-tree CV accuracy, then clusters the reduced
# data with EM and K-Means (helpers imported from cluster_func).

import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
import csv
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture
from sklearn import metrics
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import scipy
from sklearn import random_projection
from cluster_func import em
from cluster_func import kmeans

# +
from sklearn.datasets import load_breast_cancer

data = load_breast_cancer()
X = data.data
# clean out '?' values
X = np.nan_to_num(X)
y = data.target

# Full copies used later for computing cluster-initialisation means.
X_f = data.data
y_f = data.target

#Splitting data into training and testing and keeping testing data aside
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2)

# +
# Alternative (disabled) covtype dataset loader kept for reference.
# data_full = pd.read_csv('data/covtype.data.csv', header=None)

# #Randomly sample the data to reduce the size of dataset due to computation difficulty
# RandInd = np.random.choice(len(data_full),5000)
# data = data_full.iloc[RandInd,:].reset_index().drop(['index'], axis = 1)

# X = data.iloc[:,:-1].as_matrix()
# y = data.iloc[:,-1].as_matrix() - 1

# RandIndFull = np.random.choice(len(data_full), len(data_full))
# data_f = data_full.iloc[RandIndFull,:].reset_index().drop(['index'], axis = 1)
# X_f = data_f.iloc[:,:-1].as_matrix()
# y_f = data_f.iloc[:,-1].as_matrix() - 1

# #Splitting data into training and testing and keeping testing data aside
# X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2)

# +
########################################################################################################################
########################################################################################################################
#Dimensionality reduction ICA

#kurtosis calculation
print("Starting ICA")
print("Dimensionality reduction")

def _calculate(X, ica_, n_components):
    # Kurtosis of each independent component, computed by temporarily
    # truncating the fitted unmixing matrix to its first n_components rows.
    # Note: ica_.components_ is mutated and then restored.
    components = ica_.components_
    ica_.components_ = components[:n_components]
    transformed = ica_.transform(X)
    ica_.components_ = components
    kurtosis = scipy.stats.kurtosis(transformed)
    return sorted(kurtosis, reverse = True)

decisiontree = DecisionTreeClassifier(criterion = 'gini', max_depth = 15, min_samples_split = 5)
ica = FastICA()
pipe = Pipeline(steps=[('ica', ica), ('decisionTree', decisiontree)])

# Plot the ICA spectrum
ica.fit(X)

fig, ax = plt.subplots()
#ax.bar(list(range(1,X.shape[1])), _calculate(X,ica, X.shape[1]) , linewidth=2, color = 'blue')
ax.bar(np.arange(X.shape[1]), _calculate(X,ica, X.shape[1]) , linewidth=2, color = 'blue')
plt.axis('tight')
plt.xlabel('n_components')
ax.set_ylabel('kurtosis')

#Checking the accuracy for taking all combination of components
n_components = range(1, X.shape[1])
# Parameters of pipelines can be set using '__' separated parameter names:
gridSearch = GridSearchCV(pipe, dict(ica__n_components=n_components), cv = 3)
gridSearch.fit(X, y)
results = gridSearch.cv_results_

ax1 = ax.twinx()

#Plotting the accuracies and best component
ax1.plot(results['mean_test_score'], linewidth = 2, color = 'red')
ax1.set_ylabel('Mean Cross Validation Accuracy')
ax1.axvline(gridSearch.best_estimator_.named_steps['ica'].n_components, linestyle=':', label='n_components chosen', linewidth = 2)
plt.legend(prop=dict(size=12))
plt.title('Accuracy/kurtosis for ICA (best n_components= %d)'%gridSearch.best_estimator_.named_steps['ica'].n_components )
plt.show()

#Reducing the dimensions with optimal number of components
ica_new = FastICA(n_components = gridSearch.best_estimator_.named_steps['ica'].n_components)
ica_new.fit(X_train)
X_train_transformed = ica_new.transform(X_train)
X_test_transformed = ica_new.transform(X_test)

# +
###############################################################################################################################
#Reconstruction Error
print("Calculating Reconstruction Error")

reconstruction_error = []

# Mean squared reconstruction error for every candidate component count;
# also captures the error at the grid-search winner for the plot title.
for comp in n_components:
    ica = FastICA(n_components = comp)
    X_transformed = ica.fit_transform(X_train)
    X_projected = ica.inverse_transform(X_transformed)
    reconstruction_error.append(((X_train - X_projected) ** 2).mean())
    if(comp == gridSearch.best_estimator_.named_steps['ica'].n_components):
        chosen_error = ((X_train - X_projected) ** 2).mean()

fig2,ax2 = plt.subplots()
ax2.plot(n_components, reconstruction_error, linewidth= 2)
ax2.axvline(gridSearch.best_estimator_.named_steps['ica'].n_components, linestyle=':', label='n_components chosen', linewidth = 2)
plt.axis('tight')
plt.xlabel('Number of components')
plt.ylabel('Reconstruction Error')
plt.title('Reconstruction error for n_components chosen %f '%chosen_error)
plt.show()

# +
################################################################################################################################
#Dimensionally reduce the full dataset

#Reducing the dimensions with optimal number of components
ica_new = FastICA(n_components = gridSearch.best_estimator_.named_steps['ica'].n_components)
ica_new.fit(X_f)
X_transformed_f = ica_new.transform(X_f)

#Clustering after dimensionality reduction
print("Clustering ICA")
# Per-class means of the reduced full dataset, used to initialise clustering.
means_init = np.array([X_transformed_f[y_f == i].mean(axis=0) for i in range(2)])

#clustering experiments
print("Expected Maximization")
component_list, array_aic, array_bic, array_homo_1, array_comp_1, array_sil_1, array_avg_log = em(X_train_transformed, X_test_transformed, y_train, y_test, init_means = means_init, component_list = [3,4,5,6,7,8,9,10,11], num_class = 2, toshow =0)

print("KMeans")
component_list, array_homo_2, array_comp_2, array_sil_2, array_var = kmeans(X_train_transformed, X_test_transformed, y_train, y_test, init_means = means_init, component_list = [3,4,5,6,7,8,9,10,11], num_class = 2, toshow = 0)

# +
#Writing data to file
component_list = np.array(component_list).reshape(-1,1)
array_aic = np.array(array_aic).reshape(-1,1)
array_bic = np.array(array_bic).reshape(-1,1)
array_homo_1 = np.array(array_homo_1).reshape(-1,1)
array_comp_1 = np.array(array_comp_1).reshape(-1,1)
array_sil_1 = np.array(array_sil_1).reshape(-1,1)
array_avg_log = np.array(array_avg_log).reshape(-1,1)
array_homo_2 = np.array(array_homo_2).reshape(-1,1)
array_comp_2 = np.array(array_comp_2).reshape(-1,1)
array_sil_2 = np.array(array_sil_2).reshape(-1,1)
array_var = np.array(array_var).reshape(-1,1)
reconstruction_error = np.array(reconstruction_error).reshape(-1,1)

data_em_ica_cancer = np.concatenate((component_list, array_aic, array_bic, array_homo_1, array_comp_1, array_sil_1, array_avg_log), axis =1)
# NOTE(review): array_comp_2 is reshaped above but never written out below --
# confirm whether the KMeans completeness score was meant to be included.
data_km_ica_cancer = np.concatenate((component_list, array_homo_2, array_sil_2, array_var), axis =1)
reconstruction_error_ica_cancer = np.concatenate((np.arange(1,X.shape[1]).reshape(-1,1), reconstruction_error), axis = 1)

file = './data/data_em_ica_cancer.csv'
with open(file, 'w', newline = '') as output:
    writer = csv.writer(output, delimiter=',')
    writer.writerows(data_em_ica_cancer)

file = './data/data_km_ica_cancer.csv'
with open(file, 'w', newline = '') as output:
    writer = csv.writer(output, delimiter=',')
    writer.writerows(data_km_ica_cancer)

file = './data/reconstruction_error_ica_cancer.csv'
with open(file, 'w', newline = '') as output:
    writer = csv.writer(output, delimiter=',')
    writer.writerows(reconstruction_error_ica_cancer)
# -
dim_reduce_cluster_ica_cancer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Experiment Code # + import torch import torchvision import torchvision.transforms as transforms import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision.models as models from torch.autograd import Variable from ada_hessian import AdaHessian import torch.optim.lr_scheduler as lr_scheduler import time import pandas as pd import numpy as np import math # - # ## Customized Resnet Implementation in Pytorch # # Previously we used the default implementation in Pytorch but with less success (~57\% validation accuracy). We found [other people also complain this issue](https://discuss.pytorch.org/t/resnet50-torchvision-implementation-gives-low-accuracy-on-cifar-10/82046/3) and recommend [an alternative implementation](https://github.com/kuangliu/pytorch-cifar) for a workaround. We use the new implementation and it worked. 
# +
class BasicBlock(nn.Module):
    """CIFAR-style ResNet basic block: two 3x3 conv/BN pairs plus a
    residual shortcut (1x1 projection when the shape changes)."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the stride or channel count changes,
        # in which case a 1x1 projection matches the shapes.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)


class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck block with 4x channel expansion."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Same shortcut rule as BasicBlock.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return F.relu(y + residual)


class ResNet(nn.Module):
    """CIFAR ResNet: 3x3 stem (no max-pool), four stages at 64/128/256/512
    planes, 4x4 average pooling, then a linear classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            y = stage(y)
        y = F.avg_pool2d(y, 4)
        return self.linear(y.view(y.size(0), -1))


def ResNet18():
    """ResNet-18: BasicBlock x [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    """ResNet-34: BasicBlock x [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    """ResNet-50: Bottleneck x [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    """ResNet-101: Bottleneck x [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    """ResNet-152: Bottleneck x [3, 8, 36, 3]."""
    return ResNet(Bottleneck, [3, 8, 36, 3])
# -

# ## The CV experimentation code.
#
# For each optimizer, it measures:
#
# - Train loss each epoch
# - Train Accuracy each epoch
# - Test loss each epoch
# - Test Accuracy each epoch
# - Average optimizer.step() time in each step
#
# And then write the experiment data into specified directory.
#
# During training, the learning rate schedule is the multi-step LR schedule
# which is also used in the Adahessian paper.
# +
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
batch_size = 256


def getNet(device):
    """Return a fresh CIFAR ResNet-18 placed on `device`."""
    net = ResNet18()
    net.to(device)
    return net


def generateExperiment(net, optimizer, trainloader, testloader, isHessian,
                       csv_name, device, criterion=nn.CrossEntropyLoss(),
                       total_epochs=160):
    """Train `net` with `optimizer` and log per-epoch metrics to `csv_name`.

    Records train/test loss and accuracy per epoch plus the mean
    optimizer.step() wall time per batch.  `isHessian` must be True for
    AdaHessian, whose Hutchinson estimator needs the backward graph kept.
    The default `criterion` is evaluated once at import time, which is fine
    for the stateless CrossEntropyLoss.
    """
    # Milestones are expressed in *epochs* (80/120 of 160), following the
    # schedule used in the AdaHessian paper.
    scheduler = lr_scheduler.MultiStepLR(
        optimizer, [80, 120], gamma=0.1, last_epoch=-1)
    train_losses = []
    train_acc = []
    train_times = []
    val_loss = []
    val_acc = []
    epochs = []
    for epoch in range(total_epochs):
        train_loss = 0.0
        train_step = 0
        train_total = 0
        train_correct = 0
        opt_time = 0
        net.train()
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            train_step = train_step + 1
            inputs, labels = data[0].to(device), data[1].to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            _, predicted = torch.max(outputs.data, 1)
            train_total += labels.size(0)
            train_correct += (predicted == labels).sum().item()
            loss = criterion(outputs, labels)
            if isHessian:
                loss.backward(create_graph=True)
            else:
                loss.backward()

            # Record optimizer time consumption
            t = time.process_time()
            optimizer.step()
            opt_time += time.process_time() - t

            # accumulate statistics
            train_loss += loss.item()

        # BUG FIX: scheduler.step() was previously called once per *batch*,
        # so the [80, 120] milestones (meant as epochs) fired after only
        # 80/120 iterations.  Step the epoch-based schedule once per epoch.
        scheduler.step()

        test_loss = 0.0
        test_step = 0
        test_total = 0
        test_correct = 0
        # BUG FIX: evaluate in eval mode and without autograd so BatchNorm
        # running statistics are not updated by the test set and no graphs
        # are built during evaluation.
        net.eval()
        with torch.no_grad():
            for i, data in enumerate(testloader, 0):
                test_step = test_step + 1
                inputs, labels = data[0].to(device), data[1].to(device)
                outputs = net(inputs)
                _, predicted = torch.max(outputs.data, 1)
                test_total += labels.size(0)
                test_correct += (predicted == labels).sum().item()
                loss = criterion(outputs, labels)
                test_loss += loss.item()

        train_losses.append(train_loss / train_step)
        train_acc.append(train_correct / train_total)
        train_times.append(opt_time / train_step)
        val_loss.append(test_loss / test_step)
        val_acc.append(test_correct / test_total)
        epochs.append(epoch)
        print("Epoch: " + str(epoch) + " finished")

    extract_dat = pd.DataFrame({
        "epoch": epochs,
        "loss": train_losses,
        "accuracy": train_acc,
        "val_loss": val_loss,
        "val_acc": val_acc,
        "opt_time": train_times,
    })
    extract_dat.to_csv(csv_name, index=False)
# -

# ## Experiment 1: Computer Vision
#
# Preprocessing follows the paper where they did normalization and simple
# data augmentation.

# +
nj=32
transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True, num_workers=nj)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False, num_workers=nj)

# +
net = getNet(device)
optimizer_ada = AdaHessian(net.parameters(), lr=0.15, average_conv_kernel=True,
                           hessian_power=1, n_samples=1, weight_decay=5e-4)
generateExperiment(net, optimizer_ada, trainloader, testloader, True, "AdaHess_torch_new.csv", device)

# +
net = getNet(device)
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
generateExperiment(net, optimizer, trainloader, testloader, False, "SGD_Moment_torch_new.csv", device)

# +
net = getNet(device)
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0, weight_decay=5e-4)
generateExperiment(net, optimizer, trainloader, testloader, False, "SGD_torch_new.csv", device)

# +
net = getNet(device)
optimizer_adam = optim.Adam (net.parameters(), lr=0.001, weight_decay=5e-4)
generateExperiment(net, optimizer_adam, trainloader, testloader, False, "Adam_torch_new.csv", device)

# +
net = getNet(device)
optimizer_adamw = optim.AdamW (net.parameters(), lr=0.01, weight_decay=5e-4)
generateExperiment(net, optimizer_adamw, trainloader, testloader, False, "AdamW_torch_new.csv", device)
# -

# ## Experiment 2: DNN
#
# Here the choice of the data is eggholder, as we are interested in how first
# moment GD and second moment GD perform under a hard function.

# +
n = 100000
batch_size=2000

def gen_egg_pts(n):
    """Sample `n` random points of an eggholder-style surface with
    Gaussian noise of variance 0.3 added to the targets.

    Returns (X, y) with X of shape (n, 2) and y of shape (n,).
    NOTE(review): the canonical eggholder has a sqrt inside the second sin
    term as well; this variant omits it -- confirm that is intentional.
    """
    x1 = np.random.uniform(-512, 512, n)
    x2 = np.random.uniform(-512, 512, n)
    f_x = -(x2 + 47) * np.sin(np.sqrt(np.abs(x1 / 2 + (x2 + 47)))) \
        - x1 * np.sin(np.abs(x1 - (x2 + 47)))
    noise = np.random.normal(0, math.sqrt(0.3), n)
    X = np.transpose(np.array([x1, x2]))
    return X, f_x + noise

x, y = gen_egg_pts(n)

# +
train_size = int(n * 0.8)
test_size = n - train_size
criterion = nn.MSELoss()

tensor_x = torch.Tensor(x)
tensor_y = torch.Tensor(y)

my_dataset = torch.utils.data.TensorDataset(tensor_x,tensor_y)
train_reg, test_reg = torch.utils.data.random_split(my_dataset, (train_size, test_size))
train_reg_loader = torch.utils.data.DataLoader(train_reg, batch_size = batch_size)
test_reg_loader = torch.utils.data.DataLoader(test_reg, batch_size = batch_size)

# +
class Reg_Net(nn.Module):
    """Five-layer MLP (2 -> 120 x 4 -> 1) for the eggholder regression task."""

    def __init__(self):
        super(Reg_Net, self).__init__()
        self.fc1 = nn.Linear(2, 120)
        self.fc2 = nn.Linear(120, 120)
        self.fc3 = nn.Linear(120, 120)
        self.fc4 = nn.Linear(120, 120)
        self.fc5 = nn.Linear(120, 1)

    def forward(self, x):
        # BUG FIX: this line previously read `x = F.(self.fc1(x))`, which is
        # a SyntaxError; the intended activation (as on the other hidden
        # layers) is relu.
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = self.fc5(x)
        return x


def getRegNet(device):
    """Return a fresh Reg_Net placed on `device`."""
    net = Reg_Net()
    net.to(device)
    return net
# -

# ## The DNN experimentation code.
#
# For each optimizer, it measures:
#
# - Train loss each epoch
# - Test loss each epoch
# - Average optimizer.step() time in each step
#
# And then write the experiment data into specified directory.
#
# During training, the learning rate schedule is the same as CV. However, the
# learning rate is based on various trial-and-error experimentation where this
# lr provides best performance for the corresponding optimizer.

def generateExperimentReg(net, optimizer, trainloader, testloader, isHessian,
                          csv_name, device, criterion=nn.MSELoss(),
                          total_epochs=2000):
    """Regression twin of generateExperiment (no accuracy tracking).

    Logs per-epoch train/test MSE and mean optimizer.step() time to
    `csv_name`.
    """
    # Milestones in epochs (800/1200 of 2000), mirroring the CV schedule.
    scheduler = lr_scheduler.MultiStepLR(
        optimizer, [800, 1200], gamma=0.1, last_epoch=-1)
    train_losses = []
    train_times = []
    val_loss = []
    epochs = []
    for epoch in range(total_epochs):
        train_loss = 0.0
        train_step = 0
        train_total = 0
        opt_time = 0
        net.train()
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, targets]
            train_step = train_step + 1
            inputs, y = data[0].to(device), data[1].to(device)
            # targets come out of the loader as (batch,), MSELoss wants (batch, 1)
            y = torch.unsqueeze(y, 1)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            train_total += y.size(0)
            loss = criterion(outputs, y)
            if isHessian:
                loss.backward(create_graph=True)
            else:
                loss.backward()

            # record optimizer time consumption
            t = time.process_time()
            optimizer.step()
            opt_time += time.process_time() - t

            train_loss += loss.item()

        # BUG FIX: step the epoch-based schedule once per epoch, not per batch.
        scheduler.step()

        test_loss = 0.0
        test_step = 0
        test_total = 0
        # BUG FIX: evaluate without autograd (and in eval mode for symmetry
        # with the CV driver); avoids building graphs during evaluation.
        net.eval()
        with torch.no_grad():
            for i, data in enumerate(testloader, 0):
                test_step = test_step + 1
                inputs, y = data[0].to(device), data[1].to(device)
                y = torch.unsqueeze(y, 1)
                outputs = net(inputs)
                test_total += y.size(0)
                loss = criterion(outputs, y)
                test_loss += loss.item()

        train_losses.append(train_loss / train_step)
        train_times.append(opt_time / train_step)
        val_loss.append(test_loss / test_step)
        epochs.append(epoch)
        print("Epoch: " + str(epoch) + " finished with training loss "
              + str(train_loss / train_step))

    extract_dat = pd.DataFrame({
        "epoch": epochs,
        "loss": train_losses,
        "val_loss": val_loss,
        "opt_time": train_times,
    })
    extract_dat.to_csv(csv_name, index=False)


# +
net = getRegNet(device)
optimizer = optim.SGD(net.parameters(), lr=10e-6, momentum=10e-3, weight_decay=5e-4)
generateExperimentReg(net, optimizer, train_reg_loader, test_reg_loader, False, "SGD_Moment_Reg_torch.csv", device)

# +
net = getRegNet(device)
optimizer = optim.SGD(net.parameters(), lr=10e-6, momentum=0, weight_decay=5e-4)
generateExperimentReg(net, optimizer, train_reg_loader, test_reg_loader, False, "SGD_Reg_torch.csv", device)

# +
net = getRegNet(device)
optimizer = optim.Adam(net.parameters(), weight_decay=5e-4)
generateExperimentReg(net, optimizer, train_reg_loader, test_reg_loader, False, "Adam_Reg_torch.csv", device)

# +
net = getRegNet(device)
optimizer = optim.AdamW(net.parameters(), weight_decay=5e-4)
generateExperimentReg(net, optimizer, train_reg_loader, test_reg_loader, False, "AdamW_Reg_torch.csv", device)

# +
net = getRegNet(device)
optimizer = AdaHessian(net.parameters(), lr=0.1, weight_decay=5e-4)
generateExperimentReg(net, optimizer, train_reg_loader, test_reg_loader, True, "AdamHess_Reg_torch.csv", device)
experiment-torch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# this code takes the NFIRS Dataset and the Station_locations dataset and outputs
# the distance from each incident to its closest fire station.
# requires NFIRS_2009_2016.csv and Station_location.csv to be in the main path
### TO-DO UPDATE PATHs to reflect new code structure

# +
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# -

# Change to your filepath to data and uncomment if NFIRS_2009_2016.csv not in the main path
# FIX: raw string -- '\D' and '\R' are invalid escape sequences in a plain
# string literal (SyntaxWarning on modern Python); the value is unchanged.
os.chdir(r'..\Data\Raw')

# +
NFIR = pd.read_csv('NFIRS_2009_2016_geocoded_with_tract.csv', encoding='latin_1',
                   index_col=0, low_memory=False)
STATION = pd.read_csv('Fire_stations.csv', index_col=0, low_memory=False)
# -

def haversine(lat1, long1, lat2, long2):
    """Great-circle distance in miles between (lat1, long1) and (lat2, long2).

    Inputs are in degrees and may be scalars or broadcastable numpy arrays;
    the result takes the broadcast shape.
    """
    earth_radius_miles = 3959
    lat1, long1, lat2, long2 = map(np.deg2rad, [lat1, long1, lat2, long2])
    dlat = lat2 - lat1
    dlong = long2 - long1
    # haversine formula
    hav_theta = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlong / 2) ** 2
    theta = 2 * np.arcsin(np.sqrt(hav_theta))
    # Arc Distance = Angle * Radius
    total_miles = earth_radius_miles * theta
    return total_miles


# +
# Data Cleaning
NFIR = NFIR[['STATE','X','Y']]
NFIR = NFIR[~NFIR['X'].isnull()]
NFIR = NFIR[NFIR.STATE.str.match('[A-Z]{2}')]

# from NFIR_geocode_cleaner- swaps x,y data if it is found to be outside the
# Lat/Long of the US, under the assumption that with said data the x,y values
# were reversed.
NFIR[['X','Y']] = NFIR[['Y','X']].where(
    (NFIR['X'] > 0) & (NFIR['Y'] < 0),
    NFIR[['X','Y']].values
)

STATION = STATION[['ID','STATE','X','Y']]
# -

STATION['STATE'].value_counts()


def closest_firestation(fires, stations):
    """For every incident in `fires`, find the nearest station in `stations`.

    Both frames must carry X/Y coordinate columns; `stations` must have its
    ID column in position 0.  Returns a DataFrame indexed like `fires` with
    CLOSEST_STATION_ID and CLOSEST_STATION_MILES columns.
    """
    print("Running {}".format( fires['STATE'].unique() ) )
    # stations along rows, fires along columns -> (n_stations, n_fires) matrix
    distance = haversine(
        fires['X'].values.reshape(1,-1),
        fires['Y'].values.reshape(1,-1),
        stations['X'].values.reshape(-1,1),
        stations['Y'].values.reshape(-1,1))
    # find minimum distance and their row indicies
    min_dist = distance.min(axis = 0)
    row_idx = distance.argmin(axis = 0 )
    # use row indicies to find STATION IDs
    closest = list(stations.iloc[row_idx,0])
    # create the closest station (CS) dataframe and return it
    CS = pd.DataFrame(
        data = {'CLOSEST_STATION_ID':closest,'CLOSEST_STATION_MILES':min_dist },
        index = fires.index )
    return CS


# Timing the largest state to see how long this will take.
# #%%timeit
#state = 'CA'
#CS = closest_firestation(NFIR[ NFIR['STATE'] == state ],
#                         STATION[ STATION['STATE'] == state] )

# Per-state matching keeps the pairwise distance matrices small enough to fit
# in memory.
CS = pd.concat( [closest_firestation(NFIR[NFIR['STATE'] == state],
                                     STATION[STATION['STATE']== state])
                 for state in NFIR['STATE'].unique() ] )

CS.head()

plt.hist(CS ['CLOSEST_STATION_MILES'],bins= 10000 )
plt.show()

plt.hist(CS ['CLOSEST_STATION_MILES'],bins= 10000 )
plt.xlim([0, 5])
plt.show()

# Output
# FIX: raw string for the Windows path (invalid escape sequences otherwise).
# NOTE(review): no file extension -- presumably '.csv' was intended; left
# unchanged because downstream code may expect this exact name.
CS.to_csv(r'..\Transformed\Closest_Firestation')

## Verification

# +
# manual validation that the STATION_ID that was selected for each incident
# is the correct distance away
# NOTE(review): merging on 'ID' relies on the NFIR index (index_col=0) being
# named 'ID' -- confirm against the raw CSV header.
NFIR = NFIR.merge(CS, how= 'left', left_on='ID',right_on='ID')
test = NFIR.merge(STATION[['ID','X','Y']],how= 'left',left_on= 'CLOSEST_STATION_ID',right_on='ID' )
Calculated = haversine(test['X_x'],test['Y_x'],test['X_y'],test['Y_y'])
sum(Calculated - NFIR['CLOSEST_STATION_MILES'].values) < 1e-10

# +
# TO-DO
#1. descriptive statistics by state/county/tract. Mean dist, avg dist, max dist
#2. replicate this methodology to Find tracts that are far away from fire stations
#3.
Code/FeatureEngineering/Distance_to_Nearest_Fire_Station.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="nS8DgoDpXQbb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="0d58b99b-2585-416f-ff83-cc8345c65ebc" #Install deepchem + rdkit (chemistry libraries for computational drug discovery) # !wget -c https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh # !chmod +x Miniconda3-latest-Linux-x86_64.sh # !time bash ./Miniconda3-latest-Linux-x86_64.sh -b -f -p /usr/local # !conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.1.0 python=3.6 # + id="31KFiVqHXgwM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 217} outputId="667728a8-4042-42bb-b5f1-3fed59b6b181" #must run this after you run that^ import sys if sys.version_info[0] >= 3: sys.path.append('/usr/local/lib/python3.6/site-packages/') sys.path # + id="ZVZSbSdMkNzs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="c2115e7b-0237-4506-8f33-af15ad7daa3c" # !conda info --envs # !conda env export -n base > environment_droplet.yml # + [markdown] id="9Sme2DmMX8Gd" colab_type="text" # ### **Utils** # + id="zZkl1jDWYQYf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 389} outputId="3c1dcc98-3ad1-4740-a202-3b296c71e6a1" import csv import time import math import numpy as np import warnings from rdkit import Chem from rdkit import DataStructs from sklearn.model_selection import KFold, StratifiedKFold # + id="otird5-PYR47" colab_type="code" colab={} def get_fp(smiles): fp = [] processed_indices = [] invalid_indices = [] for i in range(len(smiles)): mol = smiles[i] tmp = np.array(mol2image(mol, n=2048)) if np.isnan(tmp[0]): invalid_indices.append(i) else: fp.append(tmp) processed_indices.append(i) return np.array(fp), processed_indices, invalid_indices # + 
# + id="T6X5omaFYSCz" colab_type="code" colab={}
def get_desc(smiles, calc):
    """Compute molecular descriptors for a list of SMILES strings.

    Parameters
    ----------
    smiles: list of SMILES strings
    calc: callable mapping an RDKit Mol to a descriptor vector

    Returns
    -------
    (descriptor array, indices of valid SMILES, indices of invalid SMILES)
    """
    desc = []
    processed_indices = []
    invalid_indices = []
    for i in range(len(smiles)):
        sm = smiles[i]
        try:
            mol = Chem.MolFromSmiles(sm)
            tmp = np.array(calc(mol))
            desc.append(tmp)
            processed_indices.append(i)
        except Exception:
            # FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            invalid_indices.append(i)

    desc_array = np.array(desc)
    return desc_array, processed_indices, invalid_indices


# + id="Beu4w0skYSNk" colab_type="code" colab={}
def normalize_desc(desc_array, desc_mean=None):
    """Replace non-finite descriptor entries with the column means.

    Parameters
    ----------
    desc_array: 2D array-like of descriptors (rows = molecules)
    desc_mean: optional per-column means; computed from desc_array if None
        (pass the training means when normalizing a test set)

    Returns
    -------
    (normalized array, per-column means used)
    """
    desc_array = np.array(desc_array).reshape(len(desc_array), -1)
    # ind marks finite entries; non-numeric entries raise and stay 0.
    ind = np.zeros(desc_array.shape)
    for i in range(desc_array.shape[0]):
        for j in range(desc_array.shape[1]):
            try:
                if np.isfinite(desc_array[i, j]):
                    ind[i, j] = 1
            except Exception:
                pass
    # Zero out bad entries first so they don't pollute the column means.
    for i in range(desc_array.shape[0]):
        for j in range(desc_array.shape[1]):
            if ind[i, j] == 0:
                desc_array[i, j] = 0
    if desc_mean is None:
        desc_mean = np.mean(desc_array, axis=0)
    for i in range(desc_array.shape[0]):
        for j in range(desc_array.shape[1]):
            if ind[i, j] == 0:
                desc_array[i, j] = desc_mean[j]
    return desc_array, desc_mean


# + id="DJIGl9gpYcpt" colab_type="code" colab={}
def mol2image(x, n=2048):
    """Convert a SMILES string to an n-bit RDKit path fingerprint.

    Returns ``[np.nan]`` when the SMILES cannot be parsed, which callers
    (e.g. get_fp) use to detect invalid molecules.
    """
    try:
        m = Chem.MolFromSmiles(x)
        fp = Chem.RDKFingerprint(m, maxPath=4, fpSize=n)
        res = np.zeros(len(fp))
        DataStructs.ConvertToNumpyArray(fp, res)
        return res
    except Exception:
        return [np.nan]


# + id="aPXdevqTYh01" colab_type="code" colab={}
def sanitize_smiles(smiles, canonical=True, throw_warning=False):
    """
    Takes list of SMILES strings and returns list of their sanitized versions.
    For definition of sanitized SMILES check
    http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol

    Parameters
    ----------
    smiles: list
        list of SMILES strings

    canonical: bool (default True)
        parameter specifying whether SMILES will be converted to canonical
        format

    throw_warning: bool (default False)
        parameter specifying whether warnings will be thrown if a SMILES is
        invalid

    Returns
    -------
    new_smiles: list
        list of SMILES and NaNs if SMILES string is invalid or unsanitized.
        If canonical is True, returns list of canonical SMILES.

    When canonical is True this function is analogous to:
        canonical_smiles(smiles, sanitize=True).
    """
    new_smiles = []
    for sm in smiles:
        try:
            if canonical:
                new_smiles.append(Chem.MolToSmiles(Chem.MolFromSmiles(sm, sanitize=True)))
            else:
                new_smiles.append(sm)
        except Exception:
            if throw_warning:
                warnings.warn('Unsanitized SMILES string: ' + sm, UserWarning)
            # Invalid SMILES are kept as '' so indices line up with the input.
            new_smiles.append('')
    return new_smiles


# + id="yF3Ke4aTYkrs" colab_type="code" colab={}
def canonical_smiles(smiles, sanitize=True, throw_warning=False):
    """
    Takes list of SMILES strings and returns list of their canonical SMILES.

    Parameters
    ----------
    smiles: list
        list of SMILES strings to convert into canonical format

    sanitize: bool (default True)
        parameter specifying whether to sanitize SMILES or not.
        For definition of sanitized SMILES check
        http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol

    throw_warning: bool (default False)
        parameter specifying whether warnings will be thrown if a SMILES is
        invalid

    Returns
    -------
    new_smiles: list
        list of canonical SMILES and NaNs if SMILES string is invalid or
        unsanitized (when sanitize is True)

    When sanitize is True the function is analogous to:
        sanitize_smiles(smiles, canonical=True).
    """
    new_smiles = []
    for sm in smiles:
        try:
            mol = Chem.MolFromSmiles(sm, sanitize=sanitize)
            new_smiles.append(Chem.MolToSmiles(mol))
        except Exception:
            if throw_warning:
                warnings.warn(sm + ' can not be canonized: invalid '
                                   'SMILES string!', UserWarning)
            new_smiles.append('')
    return new_smiles


# + id="BKEhwZvkYnqo" colab_type="code" colab={}
def save_smi_to_file(filename, smiles, unique=True):
    """
    Takes path to file and list of SMILES strings and writes SMILES to the
    specified file.

        Args:
            filename (str): path to the file
            smiles (list): list of SMILES strings
            unique (bool): parameter specifying whether to write only unique
            copies or not.

        Output:
            success (bool): defines whether operation was successfully
            completed or not.
    """
    if unique:
        # NOTE: set() does not preserve the original ordering.
        smiles = list(set(smiles))
    else:
        smiles = list(smiles)
    f = open(filename, 'w')
    for mol in smiles:
        f.writelines([mol, '\n'])
    f.close()
    return f.closed


# + id="uNiHiVbrYtX1" colab_type="code" colab={}
def read_smi_file(filename, unique=True, add_start_end_tokens=False):
    """
    Reads SMILES from file. File must contain one SMILES string per line
    with \n token in the end of the line.

    Args:
        filename (str): path to the file
        unique (bool): return only unique SMILES

    Returns:
        smiles (list): list of SMILES strings from specified file.
        success (bool): defines whether operation was successfully completed
        or not.

    If 'unique=True' this list contains only unique copies.
    """
    f = open(filename, 'r')
    molecules = []
    for line in f:
        # line[:-1] strips the trailing newline; '<'/'>' are the generator's
        # start/end tokens.
        if add_start_end_tokens:
            molecules.append('<' + line[:-1] + '>')
        else:
            molecules.append(line[:-1])
    if unique:
        molecules = list(set(molecules))
    else:
        molecules = list(molecules)
    f.close()
    return molecules, f.closed


# + id="bYySqz7lYvKu" colab_type="code" colab={}
def tokenize(smiles, tokens=None):
    """
    Returns list of unique tokens, token-2-index dictionary and number of
    unique tokens from the list of SMILES

    Parameters
    ----------
        smiles: list
            list of SMILES strings to tokenize.

        tokens: list, str (default None)
            list of unique tokens

    Returns
    -------
        tokens: list
            list of unique tokens/SMILES alphabet.

        token2idx: dict
            dictionary mapping token to its index.

        num_tokens: int
            number of unique tokens.
    """
    if tokens is None:
        # Alphabet = sorted set of all characters seen across the dataset.
        tokens = list(set(''.join(smiles)))
        tokens = list(np.sort(tokens))
        tokens = ''.join(tokens)
    token2idx = dict((token, i) for i, token in enumerate(tokens))
    num_tokens = len(tokens)
    return tokens, token2idx, num_tokens


# + id="aHuSouDGYyie" colab_type="code" colab={}
def time_since(since):
    """Return the elapsed wall-clock time since ``since`` as 'Xm Ys'."""
    s = time.time() - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


# + id="3nBYgIEJYw_n" colab_type="code" colab={}
def cross_validation_split(x, y, n_folds=5, split='random', folds=None):
    """Split (x, y) into n_folds cross-validation folds.

    split: 'random' (KFold), 'stratified' (StratifiedKFold) or 'fixed'
    (caller supplies ``folds``, either sklearn-style index pairs or a
    per-sample fold-label array).

    Returns (list of per-fold data arrays, list of per-fold label arrays).
    """
    assert(len(x) == len(y))
    x = np.array(x)
    y = np.array(y)
    if split not in ['random', 'stratified', 'fixed']:
        raise ValueError('Invalid value for argument \'split\': '
                         'must be either \'random\', \'stratified\' '
                         'or \'fixed\'')
    if split == 'random':
        cv_split = KFold(n_splits=n_folds, shuffle=True)
        folds = list(cv_split.split(x, y))
    elif split == 'stratified':
        cv_split = StratifiedKFold(n_splits=n_folds, shuffle=True)
        folds = list(cv_split.split(x, y))
    elif split == 'fixed' and folds is None:
        raise TypeError(
            'Invalid type for argument \'folds\': found None, but must be list')
    cross_val_data = []
    cross_val_labels = []
    if len(folds) == n_folds:
        # sklearn-style folds: list of (train_idx, test_idx) pairs; keep the
        # test part of each fold.
        for fold in folds:
            cross_val_data.append(x[fold[1]])
            cross_val_labels.append(y[fold[1]])
    elif len(folds) == len(x) and np.max(folds) == n_folds:
        # Per-sample fold labels. NOTE(review): this assumes samples of each
        # fold are contiguous and labels look 1-based given the max check —
        # confirm against the callers that use split='fixed'.
        for f in range(n_folds):
            left = np.where(folds == f)[0].min()
            right = np.where(folds == f)[0].max()
            cross_val_data.append(x[left:right + 1])
            cross_val_labels.append(y[left:right + 1])

    return cross_val_data, cross_val_labels


# + id="_FBT7ziQY1qx" colab_type="code" colab={}
def read_object_property_file(path, delimiter=',', cols_to_read=[0, 1],
                              keep_header=False):
    """Read selected columns from a delimited text file.

    Returns a list with one array per requested column (or a single array
    if only one column is requested). The first row is skipped unless
    ``keep_header`` is True.
    """
    f = open(path, 'r')
    reader = csv.reader(f, delimiter=delimiter)
    data_full = np.array(list(reader))
    if keep_header:
        start_position = 0
    else:
        start_position = 1
    assert len(data_full) > start_position
    data = [[] for _ in range(len(cols_to_read))]
    for i in range(len(cols_to_read)):
        col = cols_to_read[i]
        data[i] = data_full[start_position:, col]
    f.close()
    if len(cols_to_read) == 1:
        data = data[0]
    return data


# + [markdown] id="Ytn5MmqbY7lz" colab_type="text"
# ### **Data Process**

# + id="AIz58LRnZJ9T" colab_type="code" colab={}
import torch
import random
import numpy as np


# + id="erBnpdwiZOqc" colab_type="code" colab={}
class GeneratorData(object):
    """Training-data provider for the character-level SMILES generator.

    Loads SMILES from a delimited file, wraps each string in start/end
    tokens and serves random (input, target) tensor pairs for next-character
    prediction.
    """

    def __init__(self, training_data_path, tokens=None, start_token='<',
                 end_token='>', max_len=120, use_cuda=None, **kwargs):
        super(GeneratorData, self).__init__()

        if 'cols_to_read' not in kwargs:
            kwargs['cols_to_read'] = []

        data = read_object_property_file(training_data_path, **kwargs)
        self.start_token = start_token
        self.end_token = end_token
        self.file = []
        for i in range(len(data)):
            # Drop sequences longer than max_len; wrap the rest in tokens.
            if len(data[i]) <= max_len:
                self.file.append(self.start_token + data[i] + self.end_token)
        self.file_len = len(self.file)
        self.all_characters, self.char2idx, \
            self.n_characters = tokenize(self.file, tokens)
        self.use_cuda = use_cuda
        if self.use_cuda is None:
            self.use_cuda = torch.cuda.is_available()

    def load_dictionary(self, tokens, char2idx):
        self.all_characters = tokens
        self.char2idx = char2idx
        self.n_characters = len(tokens)

    def random_chunk(self):
        """Return one training string sampled uniformly at random."""
        index = random.randint(0, self.file_len-1)
        return self.file[index]

    def char_tensor(self, string):
        """Encode a string as a 1D LongTensor of alphabet indices."""
        tensor = torch.zeros(len(string)).long()
        for c in range(len(string)):
            tensor[c] = self.all_characters.index(string[c])
        # FIX: was ``torch.tensor(tensor)`` which copies an existing tensor
        # and triggers a UserWarning; clone().detach() is the documented form.
        if self.use_cuda:
            return tensor.clone().detach().cuda()
        else:
            return tensor.clone().detach()

    def random_training_set(self, smiles_augmentation):
        chunk = self.random_chunk()
        if smiles_augmentation is not None:
            # Re-randomize the SMILES (without its start/end tokens).
            chunk = '<' + smiles_augmentation.randomize_smiles(chunk[1:-1]) + '>'
        inp = self.char_tensor(chunk[:-1])
        target = self.char_tensor(chunk[1:])
        return inp, target

    def read_sdf_file(self, path, fields_to_read):
        raise NotImplementedError

    def update_data(self, path):
        self.file, success = read_smi_file(path, unique=True)
        self.file_len = len(self.file)
        assert success


# + id="dyiZIGwfZO0g"
# + colab_type="code" colab={}
class PredictorData(object):
    """Dataset container for property-prediction models.

    Reads objects (e.g. SMILES) and labels from a delimited file via
    read_object_property_file and optionally featurizes them.
    """

    def __init__(self, path, delimiter=',', cols=[0, 1],
                 get_features=None, has_label=True,
                 labels_start=1, **kwargs):
        super(PredictorData, self).__init__()
        data = read_object_property_file(path, delimiter,
                                         cols_to_read=cols)
        if has_label:
            self.objects = np.array(data[:labels_start]).reshape(-1)
            self.y = np.array(data[labels_start:], dtype='float32')
            self.y = self.y.reshape(-1, len(cols) - labels_start)
            if self.y.shape[1] == 1:
                self.y = self.y.reshape(-1)
        else:
            self.objects = np.array(data[:labels_start]).reshape(-1)
            # FIX: was ``len(self.object)`` — no such attribute, this path
            # raised AttributeError whenever has_label=False.
            self.y = [None] * len(self.objects)
        assert len(self.objects) == len(self.y)
        if get_features is not None:
            self.x, processed_indices, invalid_indices = \
                get_features(self.objects, **kwargs)
            self.invalid_objects = self.objects[invalid_indices]
            self.objects = self.objects[processed_indices]
            # NOTE(review): when has_label=False, self.y is a plain list and
            # list-of-indices indexing below would fail — confirm this
            # combination is never used by callers.
            self.invalid_y = self.y[invalid_indices]
            self.y = self.y[processed_indices]
        else:
            self.x = self.objects
            self.invalid_objects = None
            self.invalid_y = None
        self.binary_y = None

    def binarize(self, threshold):
        """Derive binary labels: 1 where y >= threshold, else 0."""
        self.binary_y = np.array(self.y >= threshold, dtype='int32')


# + [markdown] id="u3rslGElbhNk" colab_type="text"
# ### **Smiles Enumerator**

# + id="WJnOHyVCbsTM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 353} outputId="23494c6e-de78-439a-d051-f8b87bfdbd3b"
from rdkit import Chem
import numpy as np
import threading


# + id="3uzmFW7nbsjD" colab_type="code" colab={}
class Iterator(object):
    """Abstract base class for data iterators.

    # Arguments
        n: Integer, total number of samples in the dataset to loop over.
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seeding for data shuffling.
    """

    def __init__(self, n, batch_size, shuffle, seed):
        self.n = n
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        # Lock so that index generation can be shared across threads.
        self.lock = threading.Lock()
        self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
        if n < batch_size:
            raise ValueError('Input data length is shorter than batch_size\nAdjust batch_size')

    def reset(self):
        self.batch_index = 0

    def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
        """Infinite generator yielding (index_array, current_index, current_batch_size)."""
        # Ensure self.batch_index is 0.
        self.reset()
        while 1:
            if seed is not None:
                # Reseed each batch so shuffling is reproducible yet varies.
                np.random.seed(seed + self.total_batches_seen)
            if self.batch_index == 0:
                index_array = np.arange(n)
                if shuffle:
                    index_array = np.random.permutation(n)

            current_index = (self.batch_index * batch_size) % n
            if n > current_index + batch_size:
                current_batch_size = batch_size
                self.batch_index += 1
            else:
                # Last (possibly short) batch of the epoch.
                current_batch_size = n - current_index
                self.batch_index = 0
            self.total_batches_seen += 1
            yield (index_array[current_index: current_index + current_batch_size],
                   current_index, current_batch_size)

    def __iter__(self):
        # Needed if we want to do something like:
        # for x, y in data_gen.flow(...):
        return self

    def __next__(self, *args, **kwargs):
        return self.next(*args, **kwargs)


# + id="HlMnsIfabsnN" colab_type="code" colab={}
class SmilesIterator(Iterator):
    """Iterator yielding data from a SMILES array.

    # Arguments
        x: Numpy array of SMILES input data.
        y: Numpy array of targets data.
        smiles_data_generator: Instance of `SmilesEnumerator`
            to use for random SMILES generation.
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
        dtype: dtype to use for returned batch. Set to keras.backend.floatx
            if using Keras
    """

    def __init__(self, x, y, smiles_data_generator,
                 batch_size=32, shuffle=False, seed=None,
                 dtype=np.float32
                 ):
        if y is not None and len(x) != len(y):
            raise ValueError('X (images tensor) and y (labels) '
                             'should have the same length. '
                             'Found: X.shape = %s, y.shape = %s' %
                             (np.asarray(x).shape, np.asarray(y).shape))

        self.x = np.asarray(x)
        if y is not None:
            self.y = np.asarray(y)
        else:
            self.y = None
        self.smiles_data_generator = smiles_data_generator
        self.dtype = dtype
        super(SmilesIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)

    def next(self):
        """For python 2.x.

        # Returns
            The next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array, current_index, current_batch_size = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        batch_x = np.zeros(
            tuple([current_batch_size] +
                  [self.smiles_data_generator.pad, self.smiles_data_generator._charlen]),
            dtype=self.dtype)
        for i, j in enumerate(index_array):
            smiles = self.x[j:j + 1]
            x = self.smiles_data_generator.transform(smiles)
            batch_x[i] = x

        if self.y is None:
            return batch_x
        batch_y = self.y[index_array]
        return batch_x, batch_y


# + id="LTXrGNyDbsgm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 395} outputId="b8a21d82-94e1-41d7-db46-b7f65a769e4e"
class SmilesEnumerator(object):
    """SMILES Enumerator, vectorizer and devectorizer

    #Arguments
        charset: string containing the characters for the vectorization
          can also be generated via the .fit() method
        pad: Length of the vectorization
        leftpad: Add spaces to the left of the SMILES
        isomericSmiles: Generate SMILES containing information about
          stereogenic centers
        enum: Enumerate the SMILES during transform
        canonical: use canonical SMILES during transform
          (overrides enum)
    """

    def __init__(self, charset='@C)(=cOn1S2/H[N]\\', pad=120, leftpad=True,
                 isomericSmiles=True, enum=True, canonical=False):
        self._charset = None
        self.charset = charset
        self.pad = pad
        self.leftpad = leftpad
        self.isomericSmiles = isomericSmiles
        self.enumerate = enum
        self.canonical = canonical

    @property
    def charset(self):
        return self._charset

    @charset.setter
    def charset(self, charset):
        # Keep the char<->index lookup tables in sync with the charset.
        self._charset = charset
        self._charlen = len(charset)
        self._char_to_int = dict((c, i) for i, c in enumerate(charset))
        self._int_to_char = dict((i, c) for i, c in enumerate(charset))

    def fit(self, smiles, extra_chars=[], extra_pad=5):
        """Performs extraction of the charset and length of a SMILES datasets
           and sets self.pad and self.charset

        #Arguments
            smiles: Numpy array or Pandas series containing smiles as strings
            extra_chars: List of extra chars to add to the charset
              (e.g. "\\\\" when "/" is present)
            extra_pad: Extra padding to add before or after the SMILES
              vectorization
        """
        charset = set("".join(list(smiles)))
        self.charset = "".join(charset.union(set(extra_chars)))
        self.pad = max([len(smile) for smile in smiles]) + extra_pad

    def randomize_smiles(self, smiles):
        """Perform a randomization of a SMILES string
           must be RDKit sanitizable"""
        m = Chem.MolFromSmiles(smiles)
        ans = list(range(m.GetNumAtoms()))
        np.random.shuffle(ans)
        nm = Chem.RenumberAtoms(m, ans)
        return Chem.MolToSmiles(nm, canonical=self.canonical,
                                isomericSmiles=self.isomericSmiles)

    def transform(self, smiles):
        """Perform an enumeration (randomization) and vectorization of a
           Numpy array of smiles strings

        #Arguments
            smiles: Numpy array or Pandas series containing smiles as strings
        """
        one_hot = np.zeros((smiles.shape[0], self.pad, self._charlen), dtype=np.int8)
        for i, ss in enumerate(smiles):
            if self.enumerate:
                ss = self.randomize_smiles(ss)
            for j, c in enumerate(ss):
                one_hot[i, j, self._char_to_int[c]] = 1
        return one_hot

    def reverse_transform(self, vect):
        """ Performs a conversion of a vectorized SMILES to a smiles strings
            charset must be the same as used for vectorization.

        #Arguments
            vect: Numpy array of vectorized SMILES.
        """
        smiles = []
        for v in vect:
            # mask v: keep only positions that actually encode a character
            # (padding rows sum to 0 and are dropped).
            v = v[v.sum(axis=1) == 1]
            # Find one hot encoded index with argmax, translate to char
            # and join to string
            smile = "".join(self._int_to_char[i] for i in v.argmax(axis=1))
            smiles.append(smile)
        return np.array(smiles)


# Lightweight self-tests for the enumerator (run only when executed directly).
if __name__ == "__main__":
    smiles = np.array(["CCC(=O)O[C@@]1(CC[NH+](C[C@H]1CC=C)C)c2ccccc2",
                       "CCC[S@@](=O)c1ccc2c(c1)[nH]/c(=N/C(=O)OC)/[nH]2"] * 10
                      )
    # Test canonical SMILES vectorization
    sm_en = SmilesEnumerator(canonical=True, enum=False)
    sm_en.fit(smiles, extra_chars=["\\"])
    v = sm_en.transform(smiles)
    transformed = sm_en.reverse_transform(v)
    if len(set(transformed)) > 2:
        print("Too many different canonical SMILES generated")

    # Test enumeration
    sm_en.canonical = False
    sm_en.enumerate = True
    v2 = sm_en.transform(smiles)
    transformed = sm_en.reverse_transform(v2)
    if len(set(transformed)) < 3:
        print("Too few enumerated SMILES generated")

    # Reconstruction
    reconstructed = sm_en.reverse_transform(v[0:5])
    for i, smile in enumerate(reconstructed):
        if smile != smiles[i]:
            print("Error in reconstruction %s %s" % (smile, smiles[i]))
            break

    # test Pandas
    import pandas as pd
    df = pd.DataFrame(smiles)
    v = sm_en.transform(df[0])
    if v.shape != (20, 52, 18):
        print("Possible error in pandas use")

    # BUG, when batchsize > x.shape[0], then it only returns x.shape[0]!
# Test batch generation sm_it = SmilesIterator(smiles, np.array([1, 2] * 10), sm_en, batch_size=10, shuffle=True) X, y = sm_it.next() if sum(y == 1) - sum(y == 2) > 1: print("Unbalanced generation of batches") if len(X) != 10: print("Error in batchsize generation") # + [markdown] id="ENGjmE2cbLLW" colab_type="text" # ### **StackRNN Code** # + id="WxroIzrCbKT_" colab_type="code" colab={} import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import time from tqdm import trange # + id="s6Z8nXXTcAmX" colab_type="code" colab={} class StackAugmentedRNN(nn.Module): def __init__(self, input_size, hidden_size, output_size, layer_type='GRU', n_layers=1, is_bidirectional=False, has_stack=False, stack_width=None, stack_depth=None, use_cuda=None, optimizer_instance=torch.optim.Adadelta, lr=0.01): """ Constructor for the StackAugmentedRNN object. Parameters ---------- input_size: int number of characters in the alphabet hidden_size: int size of the RNN layer(s) output_size: int again number of characters in the alphabet layer_type: str (default 'GRU') type of the RNN layer to be used. Could be either 'LSTM' or 'GRU'. n_layers: int (default 1) number of RNN layers is_bidirectional: bool (default False) parameter specifying if RNN is bidirectional has_stack: bool (default False) parameter specifying if augmented memory stack is used stack_width: int (default None) if has_stack is True then this parameter defines width of the augmented stack memory stack_depth: int (default None) if has_stack is True then this parameter define depth of the augmented stack memory. Hint: no need fo stack depth to be larger than the length of the longest sequence you plan to generate use_cuda: bool (default None) parameter specifying if GPU is used for computations. 
If left unspecified, GPU will be used if available optimizer_instance: torch.optim object (default torch.optim.Adadelta) optimizer to be used for training lr: float (default 0.01) learning rate for the optimizer """ super(StackAugmentedRNN, self).__init__() if layer_type not in ['GRU', 'LSTM']: raise InvalidArgumentError('Layer type must be GRU or LSTM') self.layer_type = layer_type self.is_bidirectional = is_bidirectional if self.is_bidirectional: self.num_dir = 2 else: self.num_dir = 1 if layer_type == 'LSTM': self.has_cell = True else: self.has_cell = False self.has_stack = has_stack self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size if self.has_stack: self.stack_width = stack_width self.stack_depth = stack_depth self.use_cuda = use_cuda if self.use_cuda is None: self.use_cuda = torch.cuda.is_available() self.n_layers = n_layers if self.has_stack: self.stack_controls_layer = nn.Linear(in_features=self.hidden_size * self.num_dir, out_features=3) self.stack_input_layer = nn.Linear(in_features=self.hidden_size * self.num_dir, out_features=self.stack_width) self.encoder = nn.Embedding(input_size, hidden_size) if self.has_stack: rnn_input_size = hidden_size + stack_width else: rnn_input_size = hidden_size if self.layer_type == 'LSTM': self.rnn = nn.LSTM(rnn_input_size, hidden_size, n_layers, bidirectional=self.is_bidirectional) self.decoder = nn.Linear(hidden_size * self.num_dir, output_size) elif self.layer_type == 'GRU': self.rnn = nn.GRU(rnn_input_size, hidden_size, n_layers, bidirectional=self.is_bidirectional) self.decoder = nn.Linear(hidden_size * self.num_dir, output_size) self.log_softmax = torch.nn.LogSoftmax(dim=1) if self.use_cuda: self = self.cuda() self.criterion = nn.CrossEntropyLoss() self.lr = lr self.optimizer_instance = optimizer_instance self.optimizer = self.optimizer_instance(self.parameters(), lr=lr, weight_decay=0.00001) def load_model(self, path): """ Loads pretrained parameters from the checkpoint into 
the model. Parameters ---------- path: str path to the checkpoint file model will be loaded from. """ weights = torch.load(path) self.load_state_dict(weights) def save_model(self, path): """ Saves model parameters into the checkpoint file. Parameters ---------- path: str path to the checkpoint file model will be saved to. """ torch.save(self.state_dict(), path) def change_lr(self, new_lr): """ Updates learning rate of the optimizer. Parameters ---------- new_lr: float new learning rate value """ self.optimizer = self.optimizer_instance(self.parameters(), lr=new_lr) self.lr = new_lr def forward(self, inp, hidden, stack): """ Forward step of the model. Generates probability of the next character given the prefix. Parameters ---------- inp: torch.tensor input tensor that contains prefix string indices hidden: torch.tensor or tuple(torch.tensor, torch.tensor) previous hidden state of the model. If layer_type is 'LSTM', then hidden is a tuple of hidden state and cell state, otherwise hidden is torch.tensor stack: torch.tensor previous state of the augmented memory stack Returns ------- output: torch.tensor tensor with non-normalized probabilities of the next character next_hidden: torch.tensor or tuple(torch.tensor, torch.tensor) next hidden state of the model. 
If layer_type is 'LSTM', then next_hidden is a tuple of hidden state and cell state, otherwise next_hidden is torch.tensor next_stack: torch.tensor next state of the augmented memory stack """ inp = self.encoder(inp.view(1, -1)) if self.has_stack: if self.has_cell: hidden_ = hidden[0] else: hidden_ = hidden if self.is_bidirectional: hidden_2_stack = torch.cat((hidden_[0], hidden_[1]), dim=1) else: hidden_2_stack = hidden_.squeeze(0) stack_controls = self.stack_controls_layer(hidden_2_stack) stack_controls = F.softmax(stack_controls, dim=1) stack_input = self.stack_input_layer(hidden_2_stack.unsqueeze(0)) stack_input = torch.tanh(stack_input) stack = self.stack_augmentation(stack_input.permute(1, 0, 2), stack, stack_controls) stack_top = stack[:, 0, :].unsqueeze(0) inp = torch.cat((inp, stack_top), dim=2) output, next_hidden = self.rnn(inp.view(1, 1, -1), hidden) output = self.decoder(output.view(1, -1)) return output, next_hidden, stack def stack_augmentation(self, input_val, prev_stack, controls): """ Augmentation of the tensor into the stack. For more details see https://arxiv.org/abs/1503.01007 Parameters ---------- input_val: torch.tensor tensor to be added to stack prev_stack: torch.tensor previous stack state controls: torch.tensor predicted probabilities for each operation in the stack, i.e PUSH, POP and NO_OP. 
Again, see https://arxiv.org/abs/1503.01007 Returns ------- new_stack: torch.tensor new stack state """ batch_size = prev_stack.size(0) controls = controls.view(-1, 3, 1, 1) zeros_at_the_bottom = torch.zeros(batch_size, 1, self.stack_width) if self.use_cuda: zeros_at_the_bottom = Variable(zeros_at_the_bottom.cuda()) else: zeros_at_the_bottom = Variable(zeros_at_the_bottom) a_push, a_pop, a_no_op = controls[:, 0], controls[:, 1], controls[:, 2] stack_down = torch.cat((prev_stack[:, 1:], zeros_at_the_bottom), dim=1) stack_up = torch.cat((input_val, prev_stack[:, :-1]), dim=1) new_stack = a_no_op * prev_stack + a_push * stack_up + a_pop * stack_down return new_stack def init_hidden(self): """ Initialization of the hidden state of RNN. Returns ------- hidden: torch.tensor tensor filled with zeros of an appropriate size (taking into account number of RNN layers and directions) """ if self.use_cuda: return Variable(torch.zeros(self.n_layers * self.num_dir, 1, self.hidden_size).cuda()) else: return Variable(torch.zeros(self.n_layers * self.num_dir, 1, self.hidden_size)) def init_cell(self): """ Initialization of the cell state of LSTM. Only used when layers_type is 'LSTM' Returns ------- cell: torch.tensor tensor filled with zeros of an appropriate size (taking into account number of RNN layers and directions) """ if self.use_cuda: return Variable(torch.zeros(self.n_layers * self.num_dir, 1, self.hidden_size).cuda()) else: return Variable(torch.zeros(self.n_layers * self.num_dir, 1, self.hidden_size)) def init_stack(self): """ Initialization of the stack state. Only used when has_stack is True Returns ------- stack: torch.tensor tensor filled with zeros """ result = torch.zeros(1, self.stack_depth, self.stack_width) if self.use_cuda: return Variable(result.cuda()) else: return Variable(result) def train_step(self, inp, target): """ One train step, i.e. forward-backward and parameters update, for a single training example. 
Parameters ---------- inp: torch.tensor tokenized training string from position 0 to position (seq_len - 1) target: tokenized training string from position 1 to position seq_len Returns ------- loss: float mean value of the loss function (averaged through the sequence length) """ hidden = self.init_hidden() if self.has_cell: cell = self.init_cell() hidden = (hidden, cell) if self.has_stack: stack = self.init_stack() else: stack = None self.optimizer.zero_grad() loss = 0 for c in range(len(inp)): output, hidden, stack = self(inp[c], hidden, stack) loss += self.criterion(output, target[c].unsqueeze(0)) loss.backward() self.optimizer.step() return loss.item() / len(inp) def evaluate(self, data, prime_str='<', end_token='>', predict_len=100): """ Generates new string from the model distribution. Parameters ---------- data: object of type GeneratorData stores information about the generator data format such alphabet, etc prime_str: str (default '<') prime string that will be used as prefix. Deafult value is just the START_TOKEN end_token: str (default '>') when end_token is sampled from the model distribution, the generation of a new example is finished predict_len: int (default 100) maximum length of the string to be generated. If the end_token is not sampled, the generation will be aborted when the length of the generated sequence is equal to predict_len Returns ------- new_sample: str Newly generated sample from the model distribution. 
""" hidden = self.init_hidden() if self.has_cell: cell = self.init_cell() hidden = (hidden, cell) if self.has_stack: stack = self.init_stack() else: stack = None prime_input = data.char_tensor(prime_str) new_sample = prime_str # Use priming string to "build up" hidden state for p in range(len(prime_str)-1): _, hidden, stack = self.forward(prime_input[p], hidden, stack) inp = prime_input[-1] for p in range(predict_len): output, hidden, stack = self.forward(inp, hidden, stack) # Sample from the network as a multinomial distribution probs = torch.softmax(output, dim=1) top_i = torch.multinomial(probs.view(-1), 1)[0].cpu().numpy() # Add predicted character to string and use as next input predicted_char = data.all_characters[top_i] new_sample += predicted_char inp = data.char_tensor(predicted_char) if predicted_char == end_token: break return new_sample def fit(self, data, n_iterations, all_losses=[], print_every=100, plot_every=10, augment=False): """ This methods fits the parameters of the model. Training is performed to minimize the cross-entropy loss when predicting the next character given the prefix. Parameters ---------- data: object of type GeneratorData stores information about the generator data format such alphabet, etc n_iterations: int how many iterations of training will be performed all_losses: list (default []) list to store the values of the loss function print_every: int (default 100) feedback will be printed to std_out once every print_every iterations of training plot_every: int (default 10) value of the loss function will be appended to all_losses once every plot_every iterations of training augment: bool (default False) parameter specifying if SMILES enumeration will be used. 
For mode details on SMILES enumeration see https://arxiv.org/abs/1703.07076 Returns ------- all_losses: list list that stores the values of the loss function (learning curve) """ start = time.time() loss_avg = 0 if augment: smiles_augmentation = SmilesEnumerator() else: smiles_augmentation = None for epoch in trange(1, n_iterations + 1, desc='Training in progress...'): inp, target = data.random_training_set(smiles_augmentation) loss = self.train_step(inp, target) loss_avg += loss if epoch % print_every == 0: print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / n_iterations * 100, loss) ) print(self.evaluate(data=data, prime_str = '<', predict_len=100), '\n') if epoch % plot_every == 0: all_losses.append(loss_avg / plot_every) loss_avg = 0 return all_losses # + [markdown] id="uBwWzIjNhYZ8" colab_type="text" # ### **Predictor** # + id="fcV4gmndhbZe" colab_type="code" colab={} from __future__ import print_function from __future__ import division import numpy as np from sklearn.externals import joblib from sklearn import metrics # + id="zpV2jo59hcmo" colab_type="code" colab={} class VanillaQSAR(object): def __init__(self, model_instance=None, model_params=None, model_type='classifier', ensemble_size=5, normalization=False): super(VanillaQSAR, self).__init__() self.model_instance = model_instance self.model_params = model_params self.ensemble_size = ensemble_size self.model = [] self.normalization = normalization if model_type not in ['classifier', 'regressor']: raise InvalidArgumentError("model type must be either" "classifier or regressor") self.model_type = model_type if isinstance(self.model_instance, list): assert(len(self.model_instance) == self.ensemble_size) assert(isinstance(self.model_params, list)) assert(len(self.model_params) == self.ensemble_size) for i in range(self.ensemble_size): self.model.append(self.model_instance[i](**model_params[i])) else: for _ in range(self.ensemble_size): self.model.append(self.model_instance(**model_params)) if 
self.normalization: self.desc_mean = [0]*self.ensemble_size self.metrics_type = None def fit_model(self, data, cv_split='stratified'): eval_metrics = [] x = data.x if self.model_type == 'classifier' and data.binary_y is not None: y = data.binary_y else: y = data.y cross_val_data, cross_val_labels = cross_validation_split(x=x, y=y, split=cv_split, n_folds=self.ensemble_size) for i in range(self.ensemble_size): train_x = np.concatenate(cross_val_data[:i] + cross_val_data[(i + 1):]) test_x = cross_val_data[i] train_y = np.concatenate(cross_val_labels[:i] + cross_val_labels[(i + 1):]) test_y = cross_val_labels[i] if self.normalization: train_x, desc_mean = normalize_desc(train_x) self.desc_mean[i] = desc_mean test_x, _ = normalize_desc(test_x, desc_mean) self.model[i].fit(train_x, train_y.ravel()) predicted = self.model[i].predict(test_x) if self.model_type == 'classifier': eval_metrics.append(metrics.f1_score(test_y, predicted)) self.metrics_type = 'F1 score' elif self.model_type == 'regressor': r2 = metrics.r2_score(test_y, predicted) eval_metrics.append(r2) self.metrics_type = 'R^2 score' else: raise RuntimeError() return eval_metrics, self.metrics_type def load_model(self, path): # TODO: add iterable path object instead of static path self.model = [] for i in range(self.ensemble_size): m = joblib.load(path + str(i) + '.pkl') self.model.append(m) if self.normalization: arr = np.load(path + 'desc_mean.npy') self.desc_mean = arr def save_model(self, path): assert self.ensemble_size == len(self.model) for i in range(self.ensemble_size): joblib.dump(self.model[i], path + str(i) + '.pkl') if self.normalization: np.save(path + 'desc_mean.npy', self.desc_mean) def predict(self, objects=None, average=True, get_features=None, **kwargs): objects = objects invalid_objects = [] processed_objects = [] if get_features is not None: x, processed_indices, invalid_indices = get_features(objects, **kwargs) processed_objects = objects[processed_indices] invalid_objects = 
objects[invalid_indices] else: x = objects if len(x) == 0: processed_objects = [] prediction = [] invalid_objects = objects else: prediction = [] for i in range(self.ensemble_size): m = self.model[i] if self.normalization: x, _ = normalize_desc(x, self.desc_mean[i]) prediction.append(m.predict(x)) if average: prediction = prediction.mean(axis=0) return prediction '''ORIGINAL CODE def predict(self, objects=None, average=True, get_features=None, **kwargs): objects = np.array(objects) invalid_objects = [] processed_objects = [] if get_features is not None: x, processed_indices, invalid_indices = get_features(objects, **kwargs) processed_objects = objects[processed_indices] invalid_objects = objects[invalid_indices] else: x = objects if len(x) == 0: processed_objects = [] prediction = [] invalid_objects = objects else: prediction = [] for i in range(self.ensemble_size): m = self.model[i] if self.normalization: x, _ = normalize_desc(x, self.desc_mean[i]) prediction.append(m.predict(x)) prediction = np.array(prediction) if average: prediction = prediction.mean(axis=0) return processed_objects, prediction, invalid_objects ''' # + [markdown] id="SZxClZHoh84R" colab_type="text" # ### **Reinforcement.py** # + id="OT28fraYiCe9" colab_type="code" colab={} import torch import torch.nn.functional as F import numpy as np from rdkit import Chem # + id="qlyCXt7piCq7" colab_type="code" colab={} class Reinforcement(object): def __init__(self, generator, predictor, get_reward): """ Constructor for the Reinforcement object. 
Parameters ---------- generator: object of type StackAugmentedRNN generative model that produces string of characters (trajectories) predictor: object of any predictive model type predictor accepts a trajectory and returns a numerical prediction of desired property for the given trajectory get_reward: function custom reward function that accepts a trajectory, predictor and any number of positional arguments and returns a single value of the reward for the given trajectory Example: reward = get_reward(trajectory=my_traj, predictor=my_predictor, custom_parameter=0.97) Returns ------- object of type Reinforcement used for biasing the properties estimated by the predictor of trajectories produced by the generator to maximize the custom reward function get_reward. """ super(Reinforcement, self).__init__() self.generator = generator self.predictor = predictor self.get_reward = get_reward def policy_gradient(self, data, n_batch=10, gamma=0.97, std_smiles=False, grad_clipping=None, **kwargs): """ Implementation of the policy gradient algorithm. Parameters: ----------- data: object of type GeneratorData stores information about the generator data format such alphabet, etc n_batch: int (default 10) number of trajectories to sample per batch. When training on GPU setting this parameter to to some relatively big numbers can result in out of memory error. If you encountered such an error, reduce n_batch. gamma: float (default 0.97) factor by which rewards will be discounted within one trajectory. Usually this number will be somewhat close to 1.0. std_smiles: bool (default False) boolean parameter defining whether the generated trajectories will be converted to standardized SMILES before running policy gradient. Leave this parameter to the default value if your trajectories are not SMILES. grad_clipping: float (default None) value of the maximum norm of the gradients. If not specified, the gradients will not be clipped. 
kwargs: any number of other positional arguments required by the get_reward function. Returns ------- total_reward: float value of the reward averaged through n_batch sampled trajectories rl_loss: float value for the policy_gradient loss averaged through n_batch sampled trajectories """ rl_loss = 0 self.generator.optimizer.zero_grad() total_reward = 0 for _ in range(n_batch): # Sampling new trajectory reward = 0 trajectory = '<>' while reward == 0: trajectory = self.generator.evaluate(data) if std_smiles: try: mol = Chem.MolFromSmiles(trajectory[1:-1]) trajectory = '<' + Chem.MolToSmiles(mol) + '>' reward = self.get_reward(trajectory[1:-1], self.predictor, **kwargs) except: reward = 0 else: reward = self.get_reward(trajectory[1:-1], self.predictor, **kwargs) # Converting string of characters into tensor trajectory_input = data.char_tensor(trajectory) discounted_reward = reward total_reward += reward # Initializing the generator's hidden state hidden = self.generator.init_hidden() if self.generator.has_cell: cell = self.generator.init_cell() hidden = (hidden, cell) if self.generator.has_stack: stack = self.generator.init_stack() else: stack = None # "Following" the trajectory and accumulating the loss for p in range(len(trajectory)-1): output, hidden, stack = self.generator(trajectory_input[p], hidden, stack) log_probs = F.log_softmax(output, dim=1) top_i = trajectory_input[p+1] rl_loss -= (log_probs[0, top_i]*discounted_reward) discounted_reward = discounted_reward * gamma # Doing backward pass and parameters update rl_loss = rl_loss / n_batch total_reward = total_reward / n_batch rl_loss.backward() if grad_clipping is not None: torch.nn.utils.clip_grad_norm_(self.generator.parameters(), grad_clipping) self.generator.optimizer.step() return total_reward, rl_loss.item() # + [markdown] id="_pz3z9t8cKo5" colab_type="text" # ### **Maximize JAK2** # + id="vq_8ZqmocRtZ" colab_type="code" colab={} # %load_ext autoreload # %autoreload 2 # + id="_3oSREfCcUJg" 
colab_type="code" colab={} import sys # + id="_CDwPiLfcUTY" colab_type="code" colab={} sys.path.append('./release/') # + id="42hm_7s4cUeE" colab_type="code" colab={} import torch import torch.nn as nn from torch.optim.lr_scheduler import ExponentialLR, StepLR import torch.nn.functional as F # + id="sydzne8CcUij" colab_type="code" colab={} use_cuda = torch.cuda.is_available() # + id="r1DAO4N0cUQ4" colab_type="code" colab={} import numpy as np from tqdm import tqdm, trange import pickle from rdkit import Chem, DataStructs # + id="b6OuTXf3cUOt" colab_type="code" colab={} import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # + id="x_ZJsc3Icj1P" colab_type="code" colab={} from google.colab import drive drive.mount('/content/drive', force_remount=True) # + id="qfr-BlyceQFl" colab_type="code" colab={} #data for generative model to train stack RNN (1st version of the model) gen_data_path = '/content/drive/My Drive/Synbiolic Files/Datasets for JAK2 Synbiolic/chembl_1mil_dataset.csv' # + id="dJx8QKQveIdF" colab_type="code" colab={} tokens = ['<', '>', '#', '%', ')', '(', '+', '-', '/', '.', '1', '0', '3', '2', '5', '4', '7', '6', '9', '8', '=', 'A', '@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'P', 'S', '[', ']', '\\', 'c', 'e', 'i', 'l', 'o', 'n', 'p', 's', 'r', '\n'] # + id="c60msUebeNol" colab_type="code" colab={} #use data generator - this is the data to train import csv gen_data = GeneratorData(training_data_path=gen_data_path, delimiter='\t', cols_to_read=[0], keep_header=True, tokens=tokens) # + id="4xNe9waRPEpO" colab_type="code" colab={} prime_str='<' end_token='>' a = gen_data.char_tensor(prime_str) # + id="1RUSmhjLPulm" colab_type="code" colab={} print(a) # + id="mmC-vmDBPy0v" colab_type="code" colab={} b = tensor([0], device='cuda:0') # + id="ez-w8paF519A" colab_type="code" colab={} print(gen_data.n_characters) # + id="bhKEIgiBebSS" colab_type="code" colab={} #---------------------------distribution of molecules 
pre-training---------------------------- #-------------------for the stack memory RNN model, we don't have this for VAE :/----------------------- def plot_hist(prediction, n_to_generate): print("Mean value of predictions:", prediction.mean()) print("Proportion of valid SMILES:", len(prediction)/n_to_generate) ax = sns.kdeplot(prediction, shade=True) ax.set(xlabel='Predicted pIC50', title='Distribution of predicted pIC50 for generated molecules') plt.show() # + id="jgkBN6utepVW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="98c6726a-b6ca-426d-ca3c-79d7c2188873" #estimate update function is used for training - loops through stuff, if reward is greater than expected it back progs a certain way basically ''' estimate_and_update function: 1) generates n_to_generate number of SMILES strings 2) filters invalid SMILES 3) predicts pIC50 for valid SMILES 4) plots histogram of predicted pIC50 5) Returns valid SMILES and their predicted pIC50s ''' # revision #1 without canonical smiles def estimate_and_update(generator, predictor, n_to_generate, **kwargs): generated = [] pbar = tqdm(range(n_to_generate)) for i in pbar: pbar.set_description("Generating molecules...") generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1]) unique_smiles = list(np.unique(generated))[1:] smiles, prediction, nan_smiles = predictor.predict(unique_smiles, get_features=get_fp) plot_hist(prediction, n_to_generate) return smiles, prediction ''' --- # original v1 function w/ canonical smiles --- def estimate_and_update(generator, predictor, n_to_generate, **kwargs): generated = [] pbar = tqdm(range(n_to_generate)) for i in pbar: pbar.set_description("Generating molecules...") generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1]) #canoical smiles - SMILES string has to be in this format sanitized = canonical_smiles(generated, sanitize=False, throw_warning=False)[:-1] unique_smiles = list(np.unique(sanitized))[1:] smiles, prediction, 
nan_smiles = predictor.predict(unique_smiles, get_features=get_fp) plot_hist(prediction, n_to_generate) return smiles, prediction ''' ''' --- # revision #2 estimate & update function without predictor RFR model --- def estimate_and_update(generator, n_to_generate, **kwargs): generated = [] pbar = tqdm(range(n_to_generate)) for i in pbar: pbar.set_description("Generating molecules...") generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1]) #canoical smiles - SMILES string has to be in this format sanitized = canonical_smiles(generated, sanitize=False, throw_warning=False)[:-1] unique_smiles = list(np.unique(sanitized))[1:] return unique_smiles ''' ''' --- # revision #3 estimate & update function without rdkit dependencies --- def estimate_and_update(generator, n_to_generate, **kwargs): generated = [] pbar = tqdm(range(n_to_generate)) for i in pbar: pbar.set_description("Generating molecules...") generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1]) unique_smiles = list(np.unique(generated))[1:] return unique_smiles ''' # + id="WpN-TE8nerY0" colab_type="code" colab={} hidden_size = 1500 stack_width = 1500 stack_depth = 200 layer_type = 'GRU' n_characters = 45 lr = 0.001 optimizer_instance = torch.optim.Adadelta #------------------stack memory RNN model for generative model---------- my_generator = StackAugmentedRNN(input_size=n_characters, hidden_size=hidden_size, output_size=n_characters, layer_type=layer_type, n_layers=1, is_bidirectional=False, has_stack=True, stack_width=stack_width, stack_depth=stack_depth, use_cuda=use_cuda, optimizer_instance=optimizer_instance, lr=lr) # + id="DlpYsWeKeuZO" colab_type="code" colab={} gen_model_path = "/content/drive/My Drive/Synbiolic Files/Synbiolic's Generative Platform: Final Saved Models/generative_max_model" #colab is the max_biased_generative_model --> biggest_rnn is the unbiased_generative_model; change this accordingly # + id="FNDpUIADnkjH" colab_type="code" colab={} #-----Train model 
code----------------------- losses = my_generator.fit(gen_data, 1500000) plt.plot(losses) my_generator.evaluate(gen_data) my_generator.save_model(model_path) # + id="elNUH6p2e7--" colab_type="code" colab={} #load the model without training ^ skipping above cell my_generator.load_model(gen_model_path) # + id="vFmkDhjFfwZf" colab_type="code" colab={} # !pip install mordred from mordred import Calculator, descriptors # + id="iCpFUbXkgxYu" colab_type="code" colab={} calc = Calculator(descriptors, ignore_3D=True) # + id="syckk45rgymY" colab_type="code" colab={} import csv pred_data = PredictorData(path='/content/drive/My Drive/Synbiolic Files/Datasets for JAK2 Synbiolic/jak2_data.csv', get_features=get_fp) # + id="Dwh42YxcgyyX" colab_type="code" colab={} from sklearn.ensemble import RandomForestRegressor as RFR # + id="WIeeNd5Lgy9D" colab_type="code" colab={} model_instance = RFR model_params = {'n_estimators': 250, 'n_jobs': 10} # + id="YrVtfpBbgzN_" colab_type="code" colab={} #my_predict = predictor model my_predictor = VanillaQSAR(model_instance=model_instance, model_params=model_params, model_type='regressor') # + id="uF5ag9oggzGe" colab_type="code" colab={} my_predictor.fit_model(pred_data, cv_split='random') # + id="XCtv7ZYYxBLB" colab_type="code" colab={} #only run if you want to save model import pickle filename='predict_model.sav' pickle.dump(my_predictor, open(filename, 'wb')) from google.colab import files files.download('/content/predict_model.sav') # + id="ulIrMW3Xy3dj" colab_type="code" colab={} #only run to save model from google.colab import files files.download('/content/predict_model.sav') # + id="oH3QjvLFunpd" colab_type="code" colab={} import pickle my_predict_path = "/content/drive/My Drive/Synbiolic Files/Synbiolic's Generative Platform: Final Saved Models/predict_model.sav" my_predictor = pickle.load(open(my_predict_path, 'rb')) # + id="NegySOi_hu-8" colab_type="code" colab={} # non-essential --> generates unbiased data (data prior to training the 
models) smiles_unbiased, prediction_unbiased = estimate_and_update(my_generator, my_predictor, n_to_generate=100) # + id="Ix0saZ5ehvKa" colab_type="code" colab={} #-------------Making a copy of the generator that will be optimized - maximized---------------- my_generator_max = StackAugmentedRNN(input_size=gen_data.n_characters, hidden_size=hidden_size, output_size=gen_data.n_characters, layer_type=layer_type, n_layers=1, is_bidirectional=False, has_stack=True, stack_width=stack_width, stack_depth=stack_depth, use_cuda=use_cuda, optimizer_instance=optimizer_instance, lr=lr) my_generator_max.load_model(model_path) # + id="E5lMz5oYhvWF" colab_type="code" colab={} n_to_generate = 200 n_policy_replay = 10 n_policy = 15 n_iterations = 5 # + id="Wk_RANBAhvgk" colab_type="code" colab={} def simple_moving_average(previous_values, new_value, ma_window_size=10): value_ma = np.sum(previous_values[-(ma_window_size-1):]) + new_value value_ma = value_ma/(len(previous_values[-(ma_window_size-1):]) + 1) return value_ma # + id="dQIDitH7iUhR" colab_type="code" colab={} def get_reward_max(smiles, predictor, invalid_reward=0.0, get_features=get_fp): mol, prop, nan_smiles = predictor.predict([smiles], get_features=get_features) if len(nan_smiles) == 1: return invalid_reward return np.exp(prop[0]/3) # + id="2MLt7hKqiWSf" colab_type="code" colab={} x = np.linspace(0, 12) y = np.exp(x/3) plt.plot(x, y) plt.xlabel('pIC50 value') plt.ylabel('Reward value') plt.title('Reward function for JAK2 activity maximization') plt.show() # + id="sXsaRs7niZGY" colab_type="code" colab={} """defining RL model which takes in the generative model, property prediction model & custom reward function""" RL_max = Reinforcement(my_generator_max, my_predictor, get_reward_max) # + id="YvH6uVI9iaRx" colab_type="code" colab={} rewards_max = [] rl_losses_max = [] # + id="DAw8jvwXiaaI" colab_type="code" colab={} #Training RL model for i in range(n_iterations): for j in trange(n_policy, desc='Policy gradient...'): 
cur_reward, cur_loss = RL_max.policy_gradient(gen_data, get_features=get_fp) rewards_max.append(simple_moving_average(rewards_max, cur_reward)) rl_losses_max.append(simple_moving_average(rl_losses_max, cur_loss)) plt.plot(rewards_max) plt.xlabel('Training iteration') plt.ylabel('Average reward') plt.show() plt.plot(rl_losses_max) plt.xlabel('Training iteration') plt.ylabel('Loss') plt.show() #Generating new molecules + predict pic50 smiles_cur, prediction_cur = estimate_and_update(RL_max.generator, my_predictor, n_to_generate, get_features=get_fp) print('Sample trajectories:') for sm in smiles_cur[:5]: print(sm) # + id="8cwXrb1DkT4J" colab_type="code" colab={} #Testing RL model - large number of generate molecules (10 000) smiles_biased_max, prediction_biased_max = estimate_and_update(RL_max.generator, my_predictor, n_to_generate=10000) # + id="RTWTnfKboTZS" colab_type="code" colab={} #Testing RL model smiles_biased_max, prediction_biased_max = estimate_and_update(RL_max.generator, my_predictor, n_to_generate=100) # + id="PQJHSDQXiah2" colab_type="code" colab={} #----------------DEPLOYING FINAL MODEL TESTING GROUND------------------ #smiles_biased_max, prediction_biased_max = estimate_and_update(my_generator_max, my_predictor, n_to_generate= 100) smiles_biased_max = estimate_and_update(my_generator_max, n_to_generate= 100) # + id="9NeqfPvHpZz5" colab_type="code" colab={} print(smiles_biased_max[0:5]) # + id="ztepvz0JiatR" colab_type="code" colab={} sns.kdeplot(prediction_biased_max,label='Maximized', shade=True, color='red') sns.kdeplot(prediction_unbiased, label='Unbiased', shade=True, color='grey') plt.xlabel('pIC50 values') plt.show() # + id="sc_UAgBKhNpN" colab_type="code" colab={} #saving array with maximized smiles strings to file data.csv locally from numpy import asarray from numpy import savetxt # define data data = asarray(smiles_biased_max) # save to csv file savetxt('maximimum_biased_generated_molecules.csv', data, delimiter=',', fmt='%s') # + 
id="A7CO-w9rjG_U" colab_type="code" colab={} #saving array with maximized smiles strings to file data.csv locally from numpy import asarray from numpy import savetxt # define data data = asarray(prediction_biased_max) # save to csv file savetxt('pred.csv', data, delimiter=',', fmt='%s') # + id="zTXZa0hSL69A" colab_type="code" colab={} my_generator_max.save_model(path = "gen_model") # + id="cabncJiAVF3W" colab_type="code" colab={} from google.colab import drive drive.mount('/content/drive') # + id="1bZqQz-7h-oB" colab_type="code" colab={} from google.colab import files files.download('/content/gen_model') # + id="9kgJqPf3jNrL" colab_type="code" colab={} from google.colab import files files.download('/content/pred.csv')
ml_models/train_models/Synbiolic_jak2_Stack_RNN_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# ## Calculate a cube of a number
#
# Function named `cube` that calculates a cube of a number:

# Return the argument raised to the third power.
cube <- function(x) x^3

cube(2)
cube.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""
The MIT License (MIT)

Copyright (c) 2021 NVIDIA

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

# This code example demonstrates how to use a neural network to solve a
# regression problem, using the Boston housing dataset. More context for this
# code example can be found in the section "Programming Example: Predicting
# House Prices with a DNN" in Chapter 6 in the book Learning Deep Learning by
# <NAME> (ISBN: 9780137470358).
#
# Like the MNIST, the Boston Housing dataset is included in Keras, so it is
# simple to access using keras.datasets.boston_housing. We standardize both the
# training and test data by using the mean and standard deviation from the
# training data. The parameter axis=0 ensures that we compute the mean and
# standard deviation for each input variable separately. The resulting mean
# (and standard deviation) is a vector of means instead of a single value.
# That is, the standardized value of the nitric oxides concentration is not
# affected by the values of the per capita crime rate or any of the other
# variables.

# +
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np
import logging
tf.get_logger().setLevel(logging.ERROR)

EPOCHS = 500
BATCH_SIZE = 16

# Read and standardize the data. The standardization statistics are computed
# from the training set only, so no test-set information leaks into training.
boston_housing = keras.datasets.boston_housing
(raw_x_train, y_train), (raw_x_test, y_test) = boston_housing.load_data()
x_mean = np.mean(raw_x_train, axis=0)
x_stddev = np.std(raw_x_train, axis=0)
x_train = (raw_x_train - x_mean) / x_stddev
x_test = (raw_x_test - x_mean) / x_stddev
# -

# We then create the model. Here we use a different syntax than in previous
# examples. We first instantiate the model object without any layers, and then
# add them one by one using the member method add().
#
# We define our network to have two hidden layers, so we are now officially
# doing DL! The two hidden layers in our network implementation have 64 ReLU
# neurons each, where the first layer is declared with as many inputs as the
# dataset has features (13 for Boston housing). The output layer consists of a
# single neuron with a linear activation function. We use MSE as the loss
# function and use the Adam optimizer. We tell the compile method that we are
# interested in seeing the metric mean absolute error. We print out a summary
# of the model with model.summary() and then start training.

# Create and train model. The input dimension is derived from the data rather
# than hard-coded, so the same code generalizes to other tabular datasets.
n_features = raw_x_train.shape[1]  # 13 for the Boston housing dataset
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=[n_features]))
model.add(Dense(64, activation='relu'))  # We are doing DL!
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error', optimizer='adam',
              metrics=['mean_absolute_error'])
model.summary()
history = model.fit(x_train, y_train,
                    validation_data=(x_test, y_test),
                    epochs=EPOCHS, batch_size=BATCH_SIZE,
                    verbose=2, shuffle=True)

# After the training is done, we use our model to predict the price for the
# entire test set and then print out the first four predictions and the
# correct values so we can get an idea of how correct the model is.

# Print first 4 predictions.
predictions = model.predict(x_test)
for i in range(4):
    print('Prediction: ', predictions[i], ', true value: ', y_test[i])
tf_framework/c6e1_boston.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Boosting y Bagging # De forma general, pueden definirse como combinaciones de algoritmos más simples. Son algoritmos ensamblados con el fin de generar un algoritmo más potente. # # Hay diversas formas de ensamblar algoritmos. Las más usadas son el bagging y el boosting. Existen otras como el stacking y voting. # # Ensambles - Bagging # Entrenar distintos modelos donde cada uno vea distintas porciones del set de entrenamiento. Acá se dice que los modelos son usados en paralelo, y su uso (el de cada modelo) es independiente (independencia de algoritmos). La idea esencial del bagging es entonces promediar muchos modelos ruidosos pero aproximadamente imparciales, y por tanto reducir la variación. Los árboles son los candidatos ideales para el bagging, dado que ellos pueden registrar estructuras de interacción compleja en los datos, y si crecen suficientemente profundo, tienen relativamente baja parcialidad. Producto de que los árboles son notoriamente ruidosos, ellos se benefician enormemente al promediar. # # El principal objetivo de Bagging es reducir la varianza. # # Para obtener resultados en bagging se puede usar la votación para los métodos de clasificación y el promedio para los métodos de regresión. # # Modelos de bagging comúnmente usados: # * Bagging meta-estimator # * Random forest # **Ejemplo 1:** clasificador Random Forest (RF) para dataset de cáncer de seno (o BreastCancer dataset). 
# +
from sklearn import datasets

data = datasets.load_breast_cancer()
# -

print(data.keys())

print(data.DESCR)

import pandas as pd

# Build a DataFrame from the feature data.
df = pd.DataFrame(data.data, columns=data.feature_names)

# Append a column holding the target.
df['target'] = data.target
df.head()

# Predictor variables and the variable to predict.
X = data.data
y = data.target

# +
# Rescale the data.
from sklearn.preprocessing import StandardScaler

scl = StandardScaler()
X = scl.fit_transform(X)
# -

# Split the data.
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Instantiate the model, providing an explicit n_estimators value.
from sklearn.ensemble import RandomForestClassifier

algoritmo = RandomForestClassifier(n_estimators=10, criterion='entropy')

# Fit the model.
algoritmo.fit(X_train, y_train)

X_train.shape

# Predict.
y_pred = algoritmo.predict(X_test)

# Evaluate - confusion matrix.
from sklearn.metrics import confusion_matrix

matriz = confusion_matrix(y_test, y_pred)
print('Matriz de Confusión:')
print(matriz)

# + Correctly classified samples: main diagonal.
# + Misclassified samples: secondary diagonal.

# Evaluate - precision.
from sklearn.metrics import precision_score

precision = precision_score(y_test, y_pred)
print('Precisión del modelo:')
print(precision)

# Next, the dataset is loaded in a different way, and the RF model is
# instantiated without explicit hyperparameter values.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()

import warnings
warnings.filterwarnings("ignore")

from sklearn.datasets import load_breast_cancer

data = load_breast_cancer()
# -

df = pd.DataFrame(np.c_[data['data'], data['target']],
                  columns=np.append(data['feature_names'], ['target']))
df.head()

features_mean = list(df.columns[0:10])
features_mean

df = df[features_mean + ['target']]
df.head()

# Predictor variables and the variable to predict.
X = data.data
y = data.target

# +
# Rescale the data.
from sklearn.preprocessing import StandardScaler

scl = StandardScaler()
X = scl.fit_transform(X)
# -

# Split the data.
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Instantiate the model with explicit hyperparameter values.
from sklearn.ensemble import RandomForestClassifier

algoritmo = RandomForestClassifier(n_estimators=10, criterion='entropy')

# Fit the model.
algoritmo.fit(X_train, y_train)

# Predict.
y_pred = algoritmo.predict(X_test)

# Evaluate - confusion matrix.
from sklearn.metrics import confusion_matrix

matriz = confusion_matrix(y_test, y_pred)
print('Matriz de Confusión:')
print(matriz)

# Evaluate - precision.
from sklearn.metrics import precision_score

precision = precision_score(y_test, y_pred)
print('Precisión del modelo:')
print(precision)

# Now a new fit is performed with RF, this time without scaling the data. This
# fit uses the RF default hyperparameter values.
# Instantiate the model with its default hyperparameters.
algoritmo2 = RandomForestClassifier()

# Fit.
algoritmo2.fit(X_train, y_train)

# Predict.
y_pred2 = algoritmo2.predict(X_test)

# Evaluate - confusion matrix.
from sklearn.metrics import confusion_matrix

matriz = confusion_matrix(y_test, y_pred2)
print('Matriz de Confusión:')
print(matriz)

# Evaluate - precision.
from sklearn.metrics import precision_score

precision = precision_score(y_test, y_pred2)
print('Precisión del modelo:')
print(precision)

# Below are some validation strategies that build on the out-of-bag error.

# Searching for the optimal n_estimators value.

# +
from sklearn.ensemble import RandomForestClassifier

train_scores = []
oob_scores = []

# Values to evaluate.
estimator_range = range(1, 150, 5)

# Train one model per n_estimators value and record its training score and its
# out-of-bag score.
for n_estimators in estimator_range:
    modelo = RandomForestClassifier(
        n_estimators=n_estimators,
        criterion='entropy',
        max_depth=None,
        max_features='auto',
        oob_score=True,
        n_jobs=-1,
        random_state=123,
    )
    modelo.fit(X_train, y_train)
    train_scores.append(modelo.score(X_train, y_train))
    oob_scores.append(modelo.oob_score_)

# Plot.
fig, ax = plt.subplots(figsize=(7, 4))
ax.plot(estimator_range, train_scores, label="train scores")
ax.plot(estimator_range, oob_scores, label="out-of-bag scores")
ax.plot(estimator_range[np.argmax(oob_scores)], max(oob_scores),
        marker='o', color="red", label="max score")
ax.set_ylabel("R^2")
ax.set_xlabel("n_estimators")
ax.set_title("Evolución del out-of-bag-error vs número árboles")
plt.legend();
print(f"Valor óptimo de n_estimators: {estimator_range[np.argmax(oob_scores)]}")
# -

# Validation using k-fold cross-validation and neg_root_mean_squared_error.

# +
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error

train_scores = []
cv_scores = []

# Range of values to evaluate.
estimator_range = range(1, 150, 5)

# Train one model per n_estimators value and record its training error and its
# cross-validation error.
for n_estimators in estimator_range:
    modelo = RandomForestClassifier(
        n_estimators=n_estimators,
        criterion='entropy',
        max_depth=None,
        max_features='auto',
        oob_score=False,
        n_jobs=-1,
        random_state=123,
    )

    # Training error.
    modelo.fit(X_train, y_train)
    predicciones = modelo.predict(X=X_train)
    rmse = mean_squared_error(y_true=y_train, y_pred=predicciones,
                              squared=False)
    train_scores.append(rmse)

    # Cross-validation error.
    scores = cross_val_score(estimator=modelo, X=X_train, y=y_train,
                             scoring='neg_root_mean_squared_error', cv=5)
    # cross_val_score() returns negated errors; flip the sign back.
    cv_scores.append(-1*scores.mean())

# Plot the evolution of the errors.
fig, ax = plt.subplots(figsize=(6, 3.84))
ax.plot(estimator_range, train_scores, label="train scores")
ax.plot(estimator_range, cv_scores, label="cv scores")
ax.plot(estimator_range[np.argmin(cv_scores)], min(cv_scores),
        marker='o', color="red", label="min score")
ax.set_ylabel("root_mean_squared_error")
ax.set_xlabel("n_estimators")
ax.set_title("Evolución del cv-error vs número árboles")
plt.legend();
print(f"Valor óptimo de n_estimators: {estimator_range[np.argmin(cv_scores)]}")
# -

# Searching for the number of features that maximizes performance.

# +
train_scores = []
oob_scores = []

# Values to evaluate.
max_features_range = range(1, X_train.shape[1] + 1, 1)

# Train one model (fixed n_estimators) per max_features value and record its
# training score and its out-of-bag score.
for max_features in max_features_range:
    modelo = RandomForestClassifier(
        n_estimators=3,
        criterion='entropy',
        max_depth=None,
        max_features=max_features,
        oob_score=True,
        n_jobs=-1,
        random_state=123,
    )
    modelo.fit(X_train, y_train)
    train_scores.append(modelo.score(X_train, y_train))
    oob_scores.append(modelo.oob_score_)

# Plot the evolution of the errors.
fig, ax = plt.subplots(figsize=(6, 3.84))
ax.plot(max_features_range, train_scores, label="train scores")
ax.plot(max_features_range, oob_scores, label="out-of-bag scores")
ax.plot(max_features_range[np.argmax(oob_scores)], max(oob_scores),
        marker='o', color="red")
ax.set_ylabel("R^2")
ax.set_xlabel("max_features")
ax.set_title("Evolución del out-of-bag-error vs número de predictores")
plt.legend();
print(f"Valor óptimo de max_features: {max_features_range[np.argmax(oob_scores)]}")
# -

# The validations above are examples of different alternatives for evaluating
# hyperparameter values. Even so, the first one is the recommended choice.
X_train.shape # **Ejemplo 2:** Regresión - Ventas según gasto en Pauta # + import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib import style import seaborn as sns from scipy.stats import pearsonr from sklearn.model_selection import train_test_split from scipy import stats style.use('ggplot') or plt.style.use('ggplot') import warnings warnings.filterwarnings('ignore') # + # Datos tv = [230.1, 44.5, 17.2, 151.5, 180.8, 8.7, 57.5, 120.2, 8.6, 199.8, 66.1, 214.7, 23.8, 97.5, 204.1, 195.4, 67.8, 281.4, 69.2, 147.3, 218.4, 237.4, 13.2, 228.3, 62.3, 262.9, 142.9, 240.1, 248.8, 70.6, 292.9, 112.9, 97.2, 265.6, 95.7, 290.7, 266.9, 74.7, 43.1, 228.0, 202.5, 177.0, 293.6, 206.9, 25.1, 175.1, 89.7, 239.9, 227.2, 66.9, 199.8, 100.4, 216.4, 182.6, 262.7, 198.9, 7.3, 136.2, 210.8, 210.7, 53.5, 261.3, 239.3, 102.7, 131.1, 69.0, 31.5, 139.3, 237.4, 216.8, 199.1, 109.8, 26.8, 129.4, 213.4, 16.9, 27.5, 120.5, 5.4, 116.0, 76.4, 239.8, 75.3, 68.4, 213.5, 193.2, 76.3, 110.7, 88.3, 109.8, 134.3, 28.6, 217.7, 250.9, 107.4, 163.3, 197.6, 184.9, 289.7, 135.2, 222.4, 296.4, 280.2, 187.9, 238.2, 137.9, 25.0, 90.4, 13.1, 255.4, 225.8, 241.7, 175.7, 209.6, 78.2, 75.1, 139.2, 76.4, 125.7, 19.4, 141.3, 18.8, 224.0, 123.1, 229.5, 87.2, 7.8, 80.2, 220.3, 59.6, 0.7, 265.2, 8.4, 219.8, 36.9, 48.3, 25.6, 273.7, 43.0, 184.9, 73.4, 193.7, 220.5, 104.6, 96.2, 140.3, 240.1, 243.2, 38.0, 44.7, 280.7, 121.0, 197.6, 171.3, 187.8, 4.1, 93.9, 149.8, 11.7, 131.7, 172.5, 85.7, 188.4, 163.5, 117.2, 234.5, 17.9, 206.8, 215.4, 284.3, 50.0, 164.5, 19.6, 168.4, 222.4, 276.9, 248.4, 170.2, 276.7, 165.6, 156.6, 218.5, 56.2, 287.6, 253.8, 205.0, 139.5, 191.1, 286.0, 18.7, 39.5, 75.5, 17.2, 166.8, 149.7, 38.2, 94.2, 177.0, 283.6, 232.1] radio = [37.8, 39.3, 45.9, 41.3, 10.8, 48.9, 32.8, 19.6, 2.1, 2.6, 5.8, 24.0, 35.1, 7.6, 32.9, 47.7, 36.6, 39.6, 20.5, 23.9, 27.7, 5.1, 15.9, 16.9, 12.6, 3.5, 29.3, 16.7, 27.1, 16.0, 28.3, 17.4, 1.5, 20.0, 1.4, 4.1, 43.8, 49.4, 26.7, 37.7, 22.3, 33.4, 
27.7, 8.4, 25.7, 22.5, 9.9, 41.5, 15.8, 11.7, 3.1, 9.6, 41.7, 46.2, 28.8, 49.4, 28.1, 19.2, 49.6, 29.5, 2.0, 42.7, 15.5, 29.6, 42.8, 9.3, 24.6, 14.5, 27.5, 43.9, 30.6, 14.3, 33.0, 5.7, 24.6, 43.7, 1.6, 28.5, 29.9, 7.7, 26.7, 4.1, 20.3, 44.5, 43.0, 18.4, 27.5, 40.6, 25.5, 47.8, 4.9, 1.5, 33.5, 36.5, 14.0, 31.6, 3.5, 21.0, 42.3, 41.7, 4.3, 36.3, 10.1, 17.2, 34.3, 46.4, 11.0, 0.3, 0.4, 26.9, 8.2, 38.0, 15.4, 20.6, 46.8, 35.0, 14.3, 0.8, 36.9, 16.0, 26.8, 21.7, 2.4, 34.6, 32.3, 11.8, 38.9, 0.0, 49.0, 12.0, 39.6, 2.9, 27.2, 33.5, 38.6, 47.0, 39.0, 28.9, 25.9, 43.9, 17.0, 35.4, 33.2, 5.7, 14.8, 1.9, 7.3, 49.0, 40.3, 25.8, 13.9, 8.4, 23.3, 39.7, 21.1, 11.6, 43.5, 1.3, 36.9, 18.4, 18.1, 35.8, 18.1, 36.8, 14.7, 3.4, 37.6, 5.2, 23.6, 10.6, 11.6, 20.9, 20.1, 7.1, 3.4, 48.9, 30.2, 7.8, 2.3, 10.0, 2.6, 5.4, 5.7, 43.0, 21.3, 45.1, 2.1, 28.7, 13.9, 12.1, 41.1, 10.8, 4.1, 42.0, 35.6, 3.7, 4.9, 9.3, 42.0, 8.6] periodico = [69.2, 45.1, 69.3, 58.5, 58.4, 75.0, 23.5, 11.6, 1.0, 21.2, 24.2, 4.0, 65.9, 7.2, 46.0, 52.9, 114.0, 55.8, 18.3, 19.1, 53.4, 23.5, 49.6, 26.2, 18.3, 19.5, 12.6, 22.9, 22.9, 40.8, 43.2, 38.6, 30.0, 0.3, 7.4, 8.5, 5.0, 45.7, 35.1, 32.0, 31.6, 38.7, 1.8, 26.4, 43.3, 31.5, 35.7, 18.5, 49.9, 36.8, 34.6, 3.6, 39.6, 58.7, 15.9, 60.0, 41.4, 16.6, 37.7, 9.3, 21.4, 54.7, 27.3, 8.4, 28.9, 0.9, 2.2, 10.2, 11.0, 27.2, 38.7, 31.7, 19.3, 31.3, 13.1, 89.4, 20.7, 14.2, 9.4, 23.1, 22.3, 36.9, 32.5, 35.6, 33.8, 65.7, 16.0, 63.2, 73.4, 51.4, 9.3, 33.0, 59.0, 72.3, 10.9, 52.9, 5.9, 22.0, 51.2, 45.9, 49.8, 100.9, 21.4, 17.9, 5.3, 59.0, 29.7, 23.2, 25.6, 5.5, 56.5, 23.2, 2.4, 10.7, 34.5, 52.7, 25.6, 14.8, 79.2, 22.3, 46.2, 50.4, 15.6, 12.4, 74.2, 25.9, 50.6, 9.2, 3.2, 43.1, 8.7, 43.0, 2.1, 45.1, 65.6, 8.5, 9.3, 59.7, 20.5, 1.7, 12.9, 75.6, 37.9, 34.4, 38.9, 9.0, 8.7, 44.3, 11.9, 20.6, 37.0, 48.7, 14.2, 37.7, 9.5, 5.7, 50.5, 24.3, 45.2, 34.6, 30.7, 49.3, 25.6, 7.4, 5.4, 84.8, 21.6, 19.4, 57.6, 6.4, 18.4, 47.4, 17.0, 12.8, 13.1, 41.8, 20.3, 35.2, 23.7, 17.6, 8.3, 27.4, 29.7, 71.8, 30.0, 
19.6, 26.6, 18.2, 3.7, 23.4, 5.8, 6.0, 31.6, 3.6, 6.0, 13.8, 8.1, 6.4, 66.2, 8.7] ventas = [22.1, 10.4, 9.3, 18.5, 12.9, 7.2, 11.8, 13.2, 4.8, 10.6, 8.6, 17.4, 9.2, 9.7, 19.0, 22.4, 12.5, 24.4, 11.3, 14.6, 18.0, 12.5, 5.6, 15.5, 9.7, 12.0, 15.0, 15.9, 18.9, 10.5, 21.4, 11.9, 9.6, 17.4, 9.5, 12.8, 25.4, 14.7, 10.1, 21.5, 16.6, 17.1, 20.7, 12.9, 8.5, 14.9, 10.6, 23.2, 14.8, 9.7, 11.4, 10.7, 22.6, 21.2, 20.2, 23.7, 5.5, 13.2, 23.8, 18.4, 8.1, 24.2, 15.7, 14.0, 18.0, 9.3, 9.5, 13.4, 18.9, 22.3, 18.3, 12.4, 8.8, 11.0, 17.0, 8.7, 6.9, 14.2, 5.3, 11.0, 11.8, 12.3, 11.3, 13.6, 21.7, 15.2, 12.0, 16.0, 12.9, 16.7, 11.2, 7.3, 19.4, 22.2, 11.5, 16.9, 11.7, 15.5, 25.4, 17.2, 11.7, 23.8, 14.8, 14.7, 20.7, 19.2, 7.2, 8.7, 5.3, 19.8, 13.4, 21.8, 14.1, 15.9, 14.6, 12.6, 12.2, 9.4, 15.9, 6.6, 15.5, 7.0, 11.6, 15.2, 19.7, 10.6, 6.6, 8.8, 24.7, 9.7, 1.6, 12.7, 5.7, 19.6, 10.8, 11.6, 9.5, 20.8, 9.6, 20.7, 10.9, 19.2, 20.1, 10.4, 11.4, 10.3, 13.2, 25.4, 10.9, 10.1, 16.1, 11.6, 16.6, 19.0, 15.6, 3.2, 15.3, 10.1, 7.3, 12.9, 14.4, 13.3, 14.9, 18.0, 11.9, 11.9, 8.0, 12.2, 17.1, 15.0, 8.4, 14.5, 7.6, 11.7, 11.5, 27.0, 20.2, 11.7, 11.8, 12.6, 10.5, 12.2, 8.7, 26.2, 17.6, 22.6, 10.3, 17.3, 15.9, 6.7, 10.8, 9.9, 5.9, 19.6, 17.3, 7.6, 9.7, 12.8, 25.5, 13.4] datos = pd.DataFrame({'tv': tv, 'radio': radio, 'periodico':periodico, 'ventas': ventas}) datos.head() # + corr_matrix = datos.select_dtypes(include=['float64', 'int']).corr(method='pearson') fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4, 4)) sns.heatmap( corr_matrix, annot = True, cbar = False, annot_kws = {"size": 8}, vmin = -1, vmax = 1, center = 0, cmap = sns.diverging_palette(20, 220, n=200), square = True, ax = ax ) ax.set_xticklabels( ax.get_xticklabels(), rotation = 45, horizontalalignment = 'right', ) ax.tick_params(labelsize = 10) # + # Variables predictoras y variable a predecir X = datos[['tv', 'radio', 'periodico']] y = datos['ventas'] from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import 
train_test_split from sklearn.metrics import mean_absolute_error as mae # Separación de datos X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=0) # Instanciar el modelo model = RandomForestRegressor(random_state=1) # Entrenar el modelo model.fit(X_train, y_train) # Predecir pred = model.predict(X_test) # Revisión de Feature Importances feat_importances = pd.Series(model.feature_importances_, index=X.columns) feat_importances.nlargest(25).plot(kind='barh',figsize=(10,10)) # - print('Score del modelo:') print(model.score(X_train, y_train)) # Predecir para todo el conjunto y_pred = model.predict(X) # Gráfico del nivel de ajuste labels = datos['ventas'] df_temp = pd.DataFrame({'Actual': labels, 'Predicted':y_pred}) from matplotlib.pyplot import figure figure(num=None, figsize=(15, 6), dpi=80, facecolor='w', edgecolor='k') y1 = df_temp['Actual'] y2 = df_temp['Predicted'] plt.plot(y1, label = 'Real') plt.plot(y2, label = 'Predicción') plt.legend() plt.show() # **Ejemplo 3:** regresión. Boston dataset. 
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
import pandas as pd

# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2; this cell only runs on older scikit-learn versions.
boston = datasets.load_boston()
dataset = pd.DataFrame(boston.data, columns = boston.feature_names)
# -

# Inspect the bundle returned by the loader.
print('Información en el dataset:')
print(boston.keys())
print()
print('Características del dataset:')
print(boston.DESCR)

print('Cantidad de datos:')
print(boston.data.shape)
print()
print('Nombres columnas:')
print(boston.feature_names)

dataset.head()

# Append the target (median house value) to the DataFrame as column MEDV.
dataset['MEDV'] = boston.target
dataset.head()

# Predictor variables and target.
X = dataset.iloc[:, 0:13].values
y = dataset.iloc[:, 13].values.reshape(-1,1)

# Train/test split (unseeded, so results vary between runs).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 25)

# Instantiate the model.
from sklearn.ensemble import RandomForestRegressor
bar = RandomForestRegressor(n_estimators = 300, random_state = 0)

# Train the model (ravel() flattens y to the 1-D shape sklearn expects).
bar.fit(X_train, y_train.ravel())

# Predict on the held-out set.
Y_pred = bar.predict(X_test)

# Evaluate.
# BUG FIX: the original printed bar.score(X_train, y_train) — the R^2 on the
# *training* data — which overstates performance; report the held-out score.
print('Precisión del modelo:')
print(bar.score(X_test, y_test))

# # Ensambles - Boosting

# Generar un modelo fuerte a partir de entrenar sucesivamente (uso secuencial) modelos débiles y combinar sus resultados. La idea es que cada modelo débil agregado se enfoque en las instancias que fueron clasificadas erroneamente hasta el momento. Por esto se llaman algoritmos dependientes. El rendimiento general puede ser mejorado haciendo que un modelo simple posterior le de más importancia a los errores cometidos por un modelo simple previo.
#
# El principal objetivo de Boosting es reducción del sesgo de los modelos individuales a partir de los cuales está construido.
#
# Las predicciones de cada modelo simple se combinan por votación (clasificación) o por una suma ponderada (regresión).
# # Modelos de boosting comúnmente usados: # * AdaBoost # * Gradient Boosting Machine (GBM) # * Extreme Gradient Boosting Machine (XGBM) # * Light GBM # * CatBoost # Ejemplo: Adadboost para Breast Cancer Dataset. from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import load_breast_cancer import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.preprocessing import LabelEncoder breast_cancer = load_breast_cancer() # Variables predictoras y variable a predecir X = pd.DataFrame(breast_cancer.data, columns=breast_cancer.feature_names) y = pd.Categorical.from_codes(breast_cancer.target, breast_cancer.target_names) # Encoding encoder = LabelEncoder() binary_encoded_y = pd.Series(encoder.fit_transform(y)) # Separación de datos train_X, test_X, train_y, test_y = train_test_split(X, binary_encoded_y, random_state=1) # Instanciar el modelo classifier = AdaBoostClassifier( DecisionTreeClassifier(max_depth=1), n_estimators=200 ) # Entrenar classifier.fit(train_X, train_y) # Predecir predictions = classifier.predict(test_X) # Evaluar confusion_matrix(test_y, predictions) # Evaluar from sklearn.metrics import precision_score precision4 = precision_score(test_y, predictions) print('Precisión del modelo:') print(precision4) # Material adicional: # # + [Ensemble methods: bagging, boosting and stacking](https://towardsdatascience.com/ensemble-methods-bagging-boosting-and-stacking-c9214a10a205) # + [Intro a los sistemas multiclasificadores](https://www.tel.uva.es/descargar.htm;jsessionid=A05C98DFAB70A62FBE4AC2FB7BF7CB49?id=13808) # *Anexo: Random Forest* # # Random forest (Bosque aleatorio) es una combinación de árboles predictores no correlacionados que luego son promediados. Este modelo además de aplicar Bagging, también selecciona features al azar, así reduce la correlación de los distintos modelos de árbol creados. 
El objetivo es obtener una predicción más precisa y estable que con un modelo simple (por ejemplo un Árbol de decisión).
#
# * RF agrega aleatoriedad adicional al modelo mientras crecen los árboles.
# * En lugar de buscar la característica más importante al dividir un nodo, busca la mejor característica entre un subconjunto aleatorio de características. Esto da como resultado una diversidad que generalmente resulta en un mejor modelo.
# * Incluso puede hacer que los árboles sean más aleatorios, mediante el uso adicional de umbrales aleatorios para cada función en lugar de buscar los mejores umbrales posibles, como lo hace un árbol de decisión normal.
#
# De forma general, mientras más árboles tenga el bosque, más robusto es el bosque.
#
# Ventajas
# + Ser uno de los algoritmos de ML más certeros que hay disponible. Para un set de datos lo suficientemente grande produce un clasificador muy certero. A diferencia de los árboles de decisión, la clasificación hecha por RF es difícil de interpretar.
# + Puede manejar fácilmente muchas variables de entrada
# + Es un método eficaz para estimar datos perdidos y mantener la exactitud cuando una gran proporción de los datos está perdida.
#
# Desventajas
# + RF sobreajusta en ciertos grupos de datos con tareas de clasificación o regresión ruidosas.
# + En datos con variables categóricas con diferentes niveles, RF tiende a irse en favor de esos atributos con más niveles. Por ende, la posición que marca la variable no es muy fiable para este tipo de datos.
# + Tiende a ser un mejor modelo para predecir que para explicar.
#
# Por ejemplo, si deseas predecir si una persona hará clic en un anuncio en línea, puedes recopilar información sobre el anuncio, la persona que hizo clic en el pasado y algunas características que describen su decisión. Si colocamos estas características en un Árbol de Decisión, generará algunas reglas para luego predecir si se hará clic en el anuncio o no. Por su parte un Bosque Aleatorio selecciona al azar las observaciones y características para construir varios Árboles de Decisión y luego promedia los resultados.
#
# Otra diferencia es que cuando los Árboles de Decisión son muy profundos pueden sufrir de sobreajuste. Por su parte los Bosques Aleatorios evitan el exceso de adaptación la mayor parte del tiempo, creando subconjuntos aleatorios de las características y construyendo árboles más pequeños utilizando estos subconjuntos; posteriormente combinan los subárboles. Ten en cuenta que esto no funciona todas las veces y que también hace que el cálculo sea más lento, dependiendo de cuántos árboles genere el bosque al azar.
#
# Ventajas y Desventajas de los Bosques Aleatorios
# Los Bosques Aleatorios se consideran un algoritmo muy útil y fácil de usar ya que los parámetros predeterminados a menudo producen un buen resultado de predicción. De igual forma, el número de parámetros tampoco es tan alto y son fáciles de entender.
#
# Uno de los grandes problemas en Machine Learning es el sobreajuste, pero la mayoría de las veces esto no será tan fácil para un algoritmo de Bosques Aleatorios, esto se debe a que, si hay suficientes árboles en el bosque, el algoritmo no se sobreajustará a los datos.
#
# La principal limitación de los Bosques Aleatorios es que una gran cantidad de árboles puede hacer que el algoritmo sea lento e ineficiente para las predicciones en tiempo real. En general, estos algoritmos son rápidos para entrenar, pero bastante lentos para crear predicciones una vez que están entrenados. Una predicción más precisa requiere más árboles, lo que resulta en un modelo más lento.
En la mayoría de las aplicaciones del mundo real, el algoritmo de Bosque Aleatorio es lo suficientemente rápido, pero ciertamente puede haber situaciones en las que el rendimiento en tiempo de ejecución es importante y se prefieran otros enfoques.
#
# Adicionalmente, los Bosques Aleatorios son una herramienta de modelado predictivo y no una herramienta descriptiva. Eso significa que, si estás buscando una descripción de las relaciones en los datos, deberás elegir otro algoritmo.
Clustering y Ensambles/Notebook_20_Arboles_Bosques_aleatorios.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
from netCDF4 import *
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import calendar
import matplotlib.lines as lines
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# BUG FIX: these two imports were commented out, yet maskoceans, smooth2d,
# getvar, get_basemap, latlon_coords and to_np are used throughout the
# notebook (the code raised NameError as written).
from mpl_toolkits.basemap import maskoceans
from wrf import to_np, getvar, smooth2d, get_basemap, latlon_coords, destagger
#from mpl_toolkits.basemap import Basemap, cm, shiftgrid, addcyclic
import sys, os

# +
# One-off plot: vertically integrated QV moisture flux, PGW wet season,
# July 2012.
dir_fi = "/home/sak298/WRF_water_budget/Make_plots_Final/PGW/Wet_season/"
dir_const = "/home/sak298/WRF_water_budget/"

nc = Dataset(dir_fi + 'QV_Daily_momnean_2012_07.nc', mode='r')
nc_cont = Dataset(dir_const + './wrfout_d01_2003-10-30_00:00:00', mode='r')

lon = nc_cont['XLONG'][0, :, :]
lat = nc_cont['XLAT'][0, :, :]

# HGT is read only to borrow the model grid and map projection.
slp = getvar(nc_cont, "HGT")
lats, lons = latlon_coords(slp)
bm = get_basemap(slp)
x, y = bm(to_np(lons), to_np(lats))

# Total flux = integrated advection term + integrated divergence term.
ADV = nc['QV_AdvInt_day'][0, :, :]
GRD = nc['QV_divInt_day'][0, :, :]
mflux = ADV + GRD

DIR = "/home/sak298/WRF_CTRL_MFC/"
plt.figure(figsize=(12, 9))
levels = np.arange(-0.009, 0.009, 0.0001)
#levels = np.arange(mflux.min(),mflux.max(),( mflux.max() - mflux.min() )/20 )
mdata = maskoceans(lons, lats, smooth2d(mflux, 10))
bm.drawcoastlines(color='0.11')
bm.contourf(x, y, mdata, levels=levels, cmap=get_cmap("RdBu"))
plt.title(r'Integrated MFLUX PGW', fontsize=16.5)
plt.colorbar()
parallels = np.arange(0., 81, 10.)
# labels = [left,right,top,bottom]
bm.drawparallels(parallels, labels=[False, True, True, False])
meridians = np.arange(10., 351., 20.)
bm.drawmeridians(meridians, labels=[True, False, False, True])

# Overlay the SRB and MRB basin outlines.
bm.readshapefile(DIR + "SRB_shapefile/srb2", 'srb2')
for info, shape in zip(bm.srb2, bm.srb2):
    x1, y1 = zip(*shape)
    bm.plot(x1, y1, marker=None, color='b', linewidth=2)
bm.readshapefile(DIR + "Canada_map/mrb", 'mrb', drawbounds=False)
for info, shape in zip(bm.mrb, bm.mrb):
    x1, y1 = zip(*shape)
    bm.plot(x1, y1, marker=None, color='b', linewidth=2)
# -

# !ls -lshrt /home/sak298/WRF_water_budget/wrfout_d01_2003-10-30_00:00:00

# +
# Symmetric contour bound per hydrometeor species; levels are 20 equal steps
# over [-bound, bound), exactly as in the original per-species np.arange calls.
_LEVEL_BOUND = {'QV': 0.01, 'QC': 0.0003, 'QS': 0.0009,
                'QR': 0.0005, 'QI': 0.00001, 'QG': 0.0006}


def _species_levels(para):
    """Return the contour levels used for species `para`."""
    bound = _LEVEL_BOUND[para]
    return np.arange(-bound, bound, 2 * bound / 20)


def _draw_basins(bm, shape_dir):
    """Overlay the SRB and MRB basin outlines on basemap `bm`."""
    bm.readshapefile(shape_dir + "SRB_shapefile/srb2", 'srb2')
    for info, shape in zip(bm.srb2, bm.srb2):
        x1, y1 = zip(*shape)
        bm.plot(x1, y1, marker=None, color='b', linewidth=2)
    bm.readshapefile(shape_dir + "Canada_map/mrb", 'mrb', drawbounds=False)
    for info, shape in zip(bm.mrb, bm.mrb):
        x1, y1 = zip(*shape)
        bm.plot(x1, y1, marker=None, color='b', linewidth=2)


def _draw_map(field, para, title):
    """Draw a smoothed, land-only contour map of `field` with basin outlines.

    Uses the constant wrfout file only for the model grid/projection.
    """
    dir_const = "/home/sak298/WRF_water_budget/"
    nc_cont = Dataset(dir_const + 'wrfout_d01_2003-10-30_00:00:00', mode='r')
    DIR_SHAPE = "/home/sak298/WRF_CTRL_MFC/"
    slp = getvar(nc_cont, "HGT")
    lats, lons = latlon_coords(slp)
    bm = get_basemap(slp)
    x, y = bm(to_np(lons), to_np(lats))
    plt.figure(figsize=(12, 9))
    mdata = maskoceans(lons, lats, smooth2d(field, 10))
    bm.drawcoastlines(color='0.11')
    bm.contourf(x, y, mdata, levels=_species_levels(para), cmap=get_cmap("RdBu"))
    plt.title(title, fontsize=16.5)
    plt.colorbar()
    bm.drawparallels(np.arange(0., 81, 10.), labels=[False, True, True, False])
    bm.drawmeridians(np.arange(10., 351., 20.), labels=[True, False, False, True])
    _draw_basins(bm, DIR_SHAPE)


def plot_figures(PP, para, year, expt=None, season=None):
    """Plot the integrated moisture flux (advection + divergence) for one
    species/month file `PP` and save it as a PNG.

    `expt`/`season` are optional for backward compatibility: the original
    notebook defined this function twice, a 3-argument version saving under
    "SEASON_MFLUX_*" and a 5-argument version saving under
    "<season>_<expt>_MFLUX_*".  Both call patterns still work.
    """
    nc = Dataset(PP, mode='r')
    ADV = nc[para + '_AdvInt_day'][0, :, :]
    GRD = nc[para + '_divInt_day'][0, :, :]
    mflux = ADV + GRD
    # BUG FIX: the title used to say 'PGW' even for CTRL runs.
    label = 'PGW' if expt is None else str(expt)
    _draw_map(mflux, para, r'{} {} {}'.format(para, year, label))
    if expt is None:
        plt.savefig("SEASON_MFLUX_" + str(year) + "_" + para + ".png", format='png')
    else:
        plt.savefig(str(season) + "_" + str(expt) + "_MFLUX_" + str(year) + "_" + para + ".png",
                    format='png')


# Driver: PGW wet-season files in the current directory (original 3-arg calls).
#season = 'Dry'
#expt = ['PGW', 'CTRL']
years = [2010, 2011, 2012]
Para = ['QC', 'QV', 'QR', 'QI', 'QS', 'QG']
Dir = "./"
for yr in range(len(years)):
    for pr in range(len(Para)):
        PP = Dir + Para[pr] + "_Daily_momnean_" + str(years[yr]) + "_07.nc"
        plot_figures(PP, Para[pr], years[yr])
# -

# +
# Driver: every experiment x season x year x species (original 5-arg calls).
import glob

expt = ['PGW', 'CTRL']
seasons = ['Dry', 'Wet']
home_dir = "/home/sak298/WRF_water_budget/Make_plots_Final/"
years_wet = [2010, 2011, 2012]
years_dry = [2001, 2002, 2003, 2004]
Para = ['QC', 'QV', 'QR', 'QI', 'QS', 'QG']
for ex in range(len(expt)):
    for ses in range(len(seasons)):
        Dir = home_dir + str(expt[ex]) + "/" + str(seasons[ses]) + "_season/"
        print(Dir)
        years = years_dry if str(seasons[ses]) == 'Dry' else years_wet
        for yr in range(len(years)):
            for pr in range(len(Para)):
                PP = Dir + Para[pr] + "_Daily_momnean_" + str(years[yr]) + "_07.nc"
                plot_figures(PP, Para[pr], years[yr], str(expt[ex]), str(seasons[ses]))
# -

# +
def plot_figures_diff(PP_ctrl, PP_pgw, para, year, season):
    """Plot the PGW minus CTRL difference of the integrated moisture flux.

    Saving is disabled (kept commented out), matching the original behaviour.
    """
    nc_ctrl = Dataset(PP_ctrl, mode='r')
    nc_pgw = Dataset(PP_pgw, mode='r')
    mflux_ctrl = nc_ctrl[para + '_AdvInt_day'][0, :, :] + nc_ctrl[para + '_divInt_day'][0, :, :]
    mflux_pgw = nc_pgw[para + '_AdvInt_day'][0, :, :] + nc_pgw[para + '_divInt_day'][0, :, :]
    flux_diff = mflux_pgw - mflux_ctrl
    # BUG FIX: the title used to say 'PGW' although this is a difference map.
    _draw_map(flux_diff, para, r'{} {} PGW-CTRL'.format(para, year))
    # plt.savefig("Diff_"+str(season)+"_MFLUX_"+str(year)+"_"+para+".png",format='png')


# Driver for the difference plots.  (The original notebook contained this
# exact cell twice; the duplicate was removed.)
seasons = ['Dry', 'Wet']
home_dir = "/home/sak298/WRF_water_budget/Make_plots_Final/"
years_wet = [2010, 2011]
years_dry = [2001, 2002]
Para = ['QC', 'QV']
for ses in range(len(seasons)):
    Dir_ctrl = home_dir + "CTRL/" + str(seasons[ses]) + "_season/"
    print(Dir_ctrl)
    Dir_pgw = home_dir + "PGW/" + str(seasons[ses]) + "_season/"
    print(Dir_pgw)
    years = years_dry if str(seasons[ses]) == 'Dry' else years_wet
    for yr in range(len(years)):
        for pr in range(len(Para)):
            PP_ctrl = Dir_ctrl + Para[pr] + "_Daily_momnean_" + str(years[yr]) + "_07.nc"
            PP_pgw = Dir_pgw + Para[pr] + "_Daily_momnean_" + str(years[yr]) + "_07.nc"
            plot_figures_diff(PP_ctrl, PP_pgw, Para[pr], years[yr], str(seasons[ses]))
# -

# +
# Peek at one basin-averaged file.
ds = xr.open_mfdataset('/home/sak298/WRF_water_budget/Make_plots_Final/PGW/Wet_season/SRB_PGW_Wet_QC_Avg_2010_01.nc')
df = ds.to_dataframe()
var = df['CTRL_Mflux_wrf_avg'][1]
# -

# +
# Print the basin-averaged flux for every region/year/species combination.
region = ['SRB', 'MRB']
seasons = ['Dry', 'Wet']
home_dir = "/home/sak298/WRF_water_budget/Make_plots_Final/"
years_wet = [2010, 2011]
years_dry = [2001, 2002]
Para = ['QC', 'QV']
expt = ['PGW', 'CTRL']
for ses in range(len(seasons)):
    Dir_ctrl = home_dir + "CTRL/" + str(seasons[ses]) + "_season/"
    Dir_pgw = home_dir + "PGW/" + str(seasons[ses]) + "_season/"
    years = years_dry if str(seasons[ses]) == 'Dry' else years_wet
    for rg in region:
        for yr in range(len(years)):
            for pr in range(len(Para)):
                # NOTE(review): the filename embeds "_PGW_" although it lives
                # under the CTRL directory — looks suspicious; confirm the
                # file-naming convention before relying on this.
                PP_ctrl = Dir_ctrl + rg + "_PGW_" + str(seasons[ses]) + "_" + Para[pr] + "_Avg_" + str(years[yr]) + "_07.nc"
                print(PP_ctrl)
                ds = xr.open_mfdataset(PP_ctrl)
                df = ds.to_dataframe()
                var = df['CTRL_Mflux_wrf_avg'][1]
                print(var)
                # PP_ctrl=Dir_ctrl+Para[pr]+"_Daily_momnean_"+str(years[yr])+"_07.nc"
                PP_pgw = Dir_pgw + Para[pr] + "_Daily_momnean_" + str(years[yr]) + "_07.nc"
# -

# +
# Build a tidy table with one row per (region, year, experiment, species).
region = ['SRB', 'MRB']
seasons = ['Dry', 'Wet']
home_dir = "/home/sak298/WRF_water_budget/Make_plots_Final/"
years_wet = [2010, 2011]
years_dry = [2001, 2002, 2003]
Para = ['QC', 'QV']
expt = ['PGW', 'CTRL']

dfObj = pd.DataFrame(columns=['Region', 'Year', 'Exp', 'Variable', 'Mflux_avg', 'Mflux_sum'])
print("Empty Dataframe ", dfObj, sep='\n')

# Seed row kept from the original notebook (shows the expected row format).
dfObj = dfObj.append({'Year': '01-2010', 'Exp': 'PGW', 'Variable': 'QC',
                      'Mflux_avg': '-4.632014e-06', 'Mflux_sum': '0.1986'},
                     ignore_index=True)
dfObj

# NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
# collecting dicts in a list and building the frame once is the modern form.
for exp1 in expt:
    dir1 = home_dir + ("PGW/" if exp1 == 'PGW' else "CTRL/")
    for ses in range(len(seasons)):
        Dir_ctrl = dir1 + str(seasons[ses]) + "_season/"
        years = years_dry if str(seasons[ses]) == 'Dry' else years_wet
        for rg in region:
            for yr in range(len(years)):
                for pr in range(len(Para)):
                    PP_ctrl = Dir_ctrl + rg + "_" + exp1 + "_" + str(seasons[ses]) + "_" + Para[pr] + "_Avg_" + str(years[yr]) + "_07.nc"
                    print(PP_ctrl)
                    ds = xr.open_mfdataset(PP_ctrl)
                    df = ds.to_dataframe()
                    var1 = df['CTRL_Mflux_wrf_avg'][1]
                    var2 = df['CTRL_Mflux_wrf_sum'][1]
                    # BUG FIX: the 'Region' column was declared but never
                    # filled, although later analysis filters on it.
                    dfObj = dfObj.append({'Region': rg,
                                          'Year': '01-' + str(years[yr]),
                                          'Exp': exp1,
                                          'Variable': Para[pr],
                                          'Mflux_avg': var1,
                                          'Mflux_sum': var2},
                                         ignore_index=True)
# -

# df = pd.read_pickle('monthly_avg')
# dfy1 = df.set_index('Year')
# df_mpd=dfy1.loc[(dfy1['Region'] == 'MRB') & (dfy1['Exp'] == 'PGW') & (dfy1['Season'] == 'Dry')]
# df_mcd=dfy1.loc[(dfy1['Region'] == 'MRB') & (dfy1['Exp'] == 'CTRL') & (dfy1['Season'] == 'Dry')]
# df_mpd = df_mpd[['Variable','Region','Mflux_avg']].groupby(['Variable','Region'])
# df_mcd = df_mcd[['Variable','Region','Mflux_avg']].groupby(['Variable','Region'])
#df_mcd.plot.bar()

# +
# df = pd.read_pickle('monthly_avg')
# dfy1 = df.set_index('Year')
# df_mpd=dfy1.loc[(dfy1['Region'] == 'MRB') & (dfy1['Season'] == 'Dry')]
# df_mpd = df_mpd[['Variable','Exp','Mflux_avg']].groupby(['Exp','Variable']).plot.bar()
# -

# df_mpd = df_mpd[['Variable','Region','Mflux_avg']].groupby(['Variable'])

# +
def _compare_mflux_bars(pickle_name, region, season):
    """Bar-plot CTRL vs PGW monthly-average moisture flux per species.

    Reads the pickled summary table, selects `region`/`season`, joins the
    CTRL and PGW averages side by side and draws one bar chart per species.
    Returns the joined frame (the original cells left it in `df_new`).
    """
    df = pd.read_pickle(pickle_name)
    dfy1 = df.set_index('Year')
    sel = (dfy1['Region'] == region) & (dfy1['Season'] == season)
    df_ctrl = dfy1.loc[sel & (dfy1['Exp'] == 'CTRL')]
    df_pgw = dfy1.loc[sel & (dfy1['Exp'] == 'PGW')]
    dfc = df_ctrl[['Variable', 'Mflux_avg']]
    dfp = df_pgw[['Mflux_avg']].rename(columns={'Mflux_avg': 'Mflux_avg_pgw'})
    df_new = pd.concat([dfc, dfp], axis=1, sort=False)
    for grp_name, sub_df in df_new.groupby('Variable'):
        df_new.loc[df_new.Variable == grp_name].plot.bar(rot=0, title=grp_name)
    return df_new


### DRY ## PERIOD
df_new = _compare_mflux_bars('monthly_avg', 'MRB', 'Dry')
# -

# +
### WET ## PERIOD
df_new = _compare_mflux_bars('monthly_avg', 'MRB', 'Wet')
# -

### Winter #### Wet Season ####
df_new = _compare_mflux_bars('monthly_avg_01', 'MRB', 'Wet')

# +
### Winter #### Dry Season ####
df_new = _compare_mflux_bars('monthly_avg_01', 'MRB', 'Dry')
# -

# NOTE(review): df2 and df_g are never defined in this notebook; the first and
# last lines below raise NameError when run and look like leftovers from an
# interactive session.
df2.indices

df_new.Variable.unique()

df_new.groupby('Variable')

df_g.indices.keys
plot_final_figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import Overlay
# Load the FFT overlay (bitstream) and grab its two DMA engines.

# +
import pynq.lib.dma
import numpy as np

dftol = pynq.Overlay("fft.bit")
dma0 = dftol.axi_dma_0   # streams the real part
dma1 = dftol.axi_dma_1   # streams the imaginary part
# -

# ![Diagram.JPG](attachment:Diagram.JPG)

# ![Address%20Editor.JPG](attachment:Address%20Editor.JPG)

# +
# Generate the input data, push it through the FFT core and print the output.
from pynq import Xlnk

xlnk = Xlnk()
# Physically contiguous buffers, as required by the AXI DMA engines.
samplereal = xlnk.cma_array(shape=(1024,), dtype=np.float32)
sampleimag = xlnk.cma_array(shape=(1024,), dtype=np.float32)
outreal = xlnk.cma_array(shape=(1024,), dtype=np.float32)
outimag = xlnk.cma_array(shape=(1024,), dtype=np.float32)

# Test signal: a real-valued ramp with zero imaginary part.
for i in range(1024):
    samplereal[i] = i
for j in range(1024):
    sampleimag[j] = 0

dma0.sendchannel.transfer(samplereal)
dma1.sendchannel.transfer(sampleimag)
dma0.recvchannel.transfer(outreal)
dma1.recvchannel.transfer(outimag)
# BUG FIX: DMA transfers are asynchronous; without waiting for completion the
# output buffers may be printed before the FFT results are written back.
dma0.sendchannel.wait()
dma1.sendchannel.wait()
dma0.recvchannel.wait()
dma1.recvchannel.wait()

print(outreal)
print(outimag)
# -

# +
# Plot the results.
from pynq import Overlay
import pynq.lib.dma
import pylab as py
import scipy as scipy
import matplotlib.pyplot as plt
import scipy.fftpack
import numpy.fft

actualreal = samplereal[0:128]

fig1 = plt.figure()
ax1 = fig1.gca()
plt.plot(outreal)

fig2 = plt.figure()
ax2 = fig2.gca()
plt.plot(outimag)
# -
boards/Pynq-Z2/notebooks/03-FFT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# coding: utf-8

# <img src='dados.png'>

# <img src='var-retas.png'>

# <img src='margem.png'>

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# -

# Linear SVM on the breast-cancer data: fit on a random split and report
# the held-out accuracy.
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)

linear_clf = SVC(kernel='linear', C=1.0)
linear_clf.fit(X_train, y_train)
linear_clf.score(X_test, y_test)

# +
# <img src='insep.png'>

# <img src='sep.png'>

from sklearn.datasets import load_wine
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
# -

# Wine data: compare an iteration-capped linear SVM against polynomial and
# RBF kernels on the same random split.
X, y = load_wine(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)

wine_linear = SVC(kernel='linear', max_iter=100)
wine_linear.fit(X_train, y_train)
wine_linear.score(X_test, y_test)

wine_poly = SVC(kernel='poly', degree=3, gamma='auto')
wine_poly.fit(X_train, y_train)
wine_poly.score(X_test, y_test)

wine_rbf = SVC(kernel='rbf')
wine_rbf.fit(X_train, y_train)
wine_rbf.score(X_test, y_test)
Material, Exercicios/Codigos/iadell11_svm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/ import os from os.path import exists ####################you will need to change some paths here!##################### #output files filename_out_kml='F:/data/cruise_data/saildrone/baja-2018/data_so_far.kml' ################################################################################# import datetime as dt import xarray as xr from datetime import datetime import pandas import matplotlib as mpl #import openpyxl #from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt import numpy as np #from math import cos, radians # %matplotlib inline from matplotlib import style style.use('ggplot') import numpy as np import numpy.ma as ma from netCDF4 import Dataset, date2index, num2date from palettable import colorbrewer from datetime import datetime, timedelta from bs4 import BeautifulSoup import requests def listFD(url, ext=''): page = requests.get(url).text #print(page) soup = BeautifulSoup(page, 'html.parser') return [url + node.get('href') for node in soup.find_all('a') if (node.get('href').endswith(ext) and node.get('href').startswith('2'))] import datetime as dt from datetime import datetime from math import atan2 date_1970 = dt.datetime(1970,1,1,0,0,0) # start date is1/1/1970 filename_out_nc='F:/data/cruise_data/saildrone/baja-2018/data_so_far.nc' dataset = xr.open_dataset(filename_out_nc) ilen=(len(dataset.LAT['obs'])) print(dataset.LAT[0,1].values*1) print(type(dataset)) lats_usv=dataset.LAT[0,:].values lons_usv=dataset.LON[0,:].values print(lats_usv.shape) tdim=len(lats_usv) tem_date=[0]*tdim #np.zeros(tdim) tem_dy_from=np.zeros(tdim) for i in range(0,tdim): tem_dy=float(dataset.TIME[0,i].values)/86400000000000. 
tem_dy_from[i]=float(dataset.TIME[0,i].values)/86400000000000.-float(dataset.TIME[0,0].values)/86400000000000. tem_date[i]=date_1970+dt.timedelta(days=tem_dy) #create new time array that can be queried for year etc # + from palettable import colorbrewer from copy import copy import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.mlab as mlaba palette = copy(plt.cm.jet) palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad(alpha = 0.0) tem_dyr=np.zeros(tdim) for i in range(0,tdim): tem_dyr[i] = int(tem_date[i].timetuple().tm_yday) #print(tem_dyr[tem_dyr==day_of_year]) istart_data=1 for incr_day in range(-57,1): d = datetime.today() + timedelta(days=incr_day) day_of_year = d.timetuple().tm_yday url = 'https://opendap.jpl.nasa.gov/opendap/OceanTemperature/ghrsst/data/GDS2/L3U/VIIRS_NPP/OSPO/v2.41/' \ + str(d.year) + '/' + str(day_of_year).zfill(3) + '/' # url = 'https://opendap.jpl.nasa.gov/opendap/OceanTemperature/ghrsst/data/GDS2/L2P/VIIRS_NPP/OSPO/v2.41/' \ # + str(d.year) + '/' + str(day_of_year).zfill(3) + '/' ext = 'nc' filenames=listFD(url, ext) ilen=len(filenames) inew_data=1 for ic in range(1,ilen): file = filenames[ic] print(file) #save which files have been processed idyj=int(file[101:104]) ihr=int(file[113:115]) imin=int(file[115:117]) print(idyj,ihr,imin) nc = Dataset(file) sst = nc.variables['sea_surface_temperature'][:,2500:3250,2500:3500] sst = np.flip(sst, 2) cnt = ma.count(sst) lat = nc.variables['lat'][2500:3250] lat = np.flip(lat, 0) lon = nc.variables['lon'][2500:3500] nc.close() if istart_data==1: sst_new_all = ma.copy(sst) istart_data = 0 if inew_data==1: sst_new = ma.copy(sst) inew_data = 0 if cnt<5: continue print(ic,cnt,file) mask = sst_new < -10 sst_new[mask] = sst[mask] sst_new_all[mask] = sst[mask] sst_new2 = sst_new[0,:,:] sstx = ma.masked_values (sst_new2, -9999.) 
sst3x = np.flip(sstx, 1) sst4x = np.flip(sst3x, 0) sst5x = ma.swapaxes(sst4x,0,1) cmap = colorbrewer.get_map('Spectral', 'diverging', 11, reverse=True).mpl_colormap fig = plt.figure(figsize=(11.7,8.3)) m = Basemap(projection='merc', lat_0 = 27, lon_0 = -120, resolution = 'l', area_thresh = 0.1, llcrnrlon=-126.0, llcrnrlat=25.0,urcrnrlon=-114.0, urcrnrlat=38.) m.bluemarble() # m.fillcontinents(color='grey',lake_color='white') lat_grid, lon_grid = np.meshgrid(lat, lon) x,y = m(lon_grid,lat_grid) cs = m.pcolormesh(x,y, sst5x - 273.15, cmap=cmap, vmin = 12, vmax = 19) sub_lons=lons_usv[tem_dyr==day_of_year] sub_lats=lats_usv[tem_dyr==day_of_year] x,y = m(sub_lons,sub_lats) m.plot(x,y,'w') m.colorbar(location="bottom",label='SST [C]') # draw colorbar plt.show() filename_png='F:/data/cruise_data/saildrone/baja-2018/figs/cruise_sst_'+str(day_of_year)+'.png' fig.savefig(filename_png, transparent=False, format='png') print('done') sub_lons=lons_usv[tem_dyr==day_of_year] sub_lats=lats_usv[tem_dyr==day_of_year] min_lon=min(sub_lons)-3 max_lon=min(sub_lons)+3 min_lat=min(sub_lats)-3 max_lat=min(sub_lats)+3 center_lon=.5*(max_lon-min_lon)+min_lon center_lat=.5*(max_lat-min_lat)+min_lat print(min_lon,max_lon,min_lat,max_lat,center_lon,center_lat) fig = plt.figure(figsize=(11.7,8.3)) m = Basemap(projection='merc', lat_0 = center_lat, lon_0 = center_lon, resolution = 'l', area_thresh = 0.1, llcrnrlon=min_lon, llcrnrlat=min_lat,urcrnrlon=max_lon, urcrnrlat=max_lat) m.bluemarble() # m.fillcontinents(color='grey',lake_color='white') lat_grid, lon_grid = np.meshgrid(lat, lon) x,y = m(lon_grid,lat_grid) cs = m.pcolormesh(x,y, sst5x - 273.15, cmap=cmap, vmin = 12, vmax = 19) x,y = m(sub_lons,sub_lats) m.plot(x,y,'w') m.colorbar(location="bottom",label='SST [C]') # draw colorbar plt.show() filename_png='F:/data/cruise_data/saildrone/baja-2018/figs/cruise_sst_'+str(day_of_year)+'_zoom.png' fig.savefig(filename_png, transparent=False, format='png') print('done') sst_new2 = 
sst_new_all[0,:,:] sstx = ma.masked_values (sst_new2, -9999.) sst3x = np.flip(sstx, 1) sst4x = np.flip(sst3x, 0) sst5x = ma.swapaxes(sst4x,0,1) sub_lons=lons_usv sub_lats=lats_usv fig = plt.figure(figsize=(11.7,8.3)) m = Basemap(projection='merc', lat_0 = 27, lon_0 = -120, resolution = 'l', area_thresh = 0.1, llcrnrlon=-126.0, llcrnrlat=25.0,urcrnrlon=-114.0, urcrnrlat=38.) m.bluemarble() #m.fillcontinents(color='grey',lake_color='white') lat_grid, lon_grid = np.meshgrid(lat, lon) x,y = m(lon_grid,lat_grid) cs = m.pcolormesh(x,y, sst5x - 273.15, cmap=cmap, vmin = 12, vmax = 19) x,y = m(sub_lons,sub_lats) m.plot(x,y,'w') m.colorbar(location="bottom",label='SST [C]') # draw colorbar plt.show() filename_png='F:/data/cruise_data/saildrone/baja-2018/figs/cruise_all_sst_track'+str(day_of_year)+'.png' fig.savefig(filename_png, transparent=False, format='png') print('done') # - sub_lons=lons_usv sub_lats=lats_usv fig = plt.figure(figsize=(11.7,8.3)) m = Basemap(projection='merc', lat_0 = 27, lon_0 = -120, resolution = 'l', area_thresh = 0.1, llcrnrlon=-126.0, llcrnrlat=25.0,urcrnrlon=-114.0, urcrnrlat=38.) 
m.bluemarble() #m.fillcontinents(color='grey',lake_color='white') lat_grid, lon_grid = np.meshgrid(lat, lon) x,y = m(lon_grid,lat_grid) cs = m.pcolormesh(x,y, sst5x - 273.15, cmap=cmap, vmin = 12, vmax = 19) x,y = m(sub_lons,sub_lats) m.plot(x,y,'k') m.colorbar(location="bottom",label='SST [C]') # draw colorbar plt.show() filename_png='F:/data/cruise_data/saildrone/baja-2018/figs/cruise_sst_track.png' fig.savefig(filename_png, transparent=False, format='png') print('done') # + from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt fig = plt.figure(figsize=(11.7,8.3)) #Custom adjust of the subplots plt.subplots_adjust(left=0.05,right=0.95,top=0.90,bottom=0.05,wspace=0.15,hspace=0.05) ax = plt.subplot(111) #Let's create a basemap of the world m = Basemap(projection='merc', lat_0 = 27, lon_0 = -120, resolution = 'l', area_thresh = 0.1, llcrnrlon=-126.0, llcrnrlat=25.0,urcrnrlon=-114.0, urcrnrlat=38.) m.fillcontinents(color='coral',lake_color='white') x,y = m(lons_usv,lats_usv) m.plot(x, y, 'bo-', markersize=1, linewidth = 1) #plt.show() fig_fname="f:/data/cruise_data/saildrone/baja-2018/figs/track_sst.pdf" plt.savefig(fig_fname,dpi=300) print(fig_fname) # - # + # + from palettable import colorbrewer from copy import copy import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.mlab as mlab palette = copy(plt.cm.jet) palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad(alpha = 0.0) #fig = plt.figure(figsize=(6, 5.4)) # plot using 'continuous' color map print(sst.shape) fig, ax = plt.subplots() im = ax.imshow(sst[0,:,:].T-273.15, interpolation='bilinear',cmap=palette,norm=colors.Normalize(vmin=10, vmax=17.0),aspect='auto',origin='lower') # We want to show all ticks... #ax.set_xticks(lon2) #ax.set_yticks(lat2) # ... 
and label them with the respective list entries #ax.set_xticklabels(farmers) #extent=[x0, x1, y0, y1]) #fig.set_title('SST') #cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=im) #cbar.set_label('uniform') #for ticklabel in ax1.xaxis.get_ticklabels(): # ticklabel.set_visible(False) plt.show() # + if inew_data==1: sst_new = ma.copy(sst) inew_data = 0 if cnt<5: continue print(ic,cnt,file) mask = sst_new < -10 sst_new[mask] = sst[mask] if itotal_proc>0: lat, lon = np.meshgrid(lat, lon) mask = (sst_new < -10) sst_new[mask]=-9999. sst_new2 = sst_new[0,:,:] sstx = ma.masked_values (sst_new2, -9999.) print(lon.shape,lat.shape,sstx.shape) sst3x = np.flip(sstx, 1) sst4x = np.flip(sst3x, 0) sst5x = ma.swapaxes(sst4x,0,1) print(lon.shape,lat.shape,sst5x.shape) pixels = 1024 * 10 cmap = colorbrewer.get_map('Spectral', 'diverging', 11, reverse=True).mpl_colormap fig, ax = gearth_fig(llcrnrlon=lon.min(), llcrnrlat=lat.min(), urcrnrlon=lon.max(), urcrnrlat=lat.max(), pixels=pixels) cs = ax.pcolormesh(lon, lat, sst5x - 273.15, cmap=cmap, vmin = 10, vmax = 17) ax.set_axis_off() fig.savefig(filename_png, transparent=False, format='png') print('done') print(lat[1,1],lat[1,-1],lon[1,1],lon[-1,1]) make_kml(llcrnrlon=lon[1,1], llcrnrlat=lat[1,1], urcrnrlon=lon[-1,1], urcrnrlat=lat[1,-1], figs= [filename_png], colorbar='legend.png', kmzfile=filename_kmz, name='VIIRS Sea Surface Temperature') np.save(filename_viirs_proc, proc_save) sst_new.dump(filename_viirs_sst) fig = plt.figure(figsize=(1.0, 4.0), facecolor=None, frameon=False) ax = fig.add_axes([0.0, 0.05, 0.2, 0.9]) cb = fig.colorbar(cs, cax=ax) cb.set_label('SST [C]', rotation=-90, color='k', labelpad=20) fig.savefig('legend.png', transparent=False, format='png') # Change transparent to True if your colorbar is not on space :) # + # # -
.ipynb_checkpoints/viirs_l2p_image_mission_data-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Load/Build NEURON model # Load NEURON from neuron import h, gui # + # Build cells soma1 = h.Section(name='soma1') soma2 = h.Section(name='soma2') soma1.insert('hh') soma1.insert('pas') soma2.insert('hh') soma2.insert('pas') # Add some 3D points h.pt3dadd(-100, 0, 0, 10, sec=soma1) h.pt3dadd(-110, 0, 0, 10, sec=soma1) h.pt3dadd(100, 0, 0, 10, sec=soma2) h.pt3dadd(110, 0, 0, 10, sec=soma2) # + # Add synapses between the cells syn1 = h.Exp2Syn(0.5, sec=soma1) syn2 = h.Exp2Syn(0.5, sec=soma2) syn1.tau2 = syn2.tau2 = 0.1 # The synapses will be reciprocal excitatory # Triggering a spike in one cell will result in a spike in the second cell # Which in turn will excite the first cell # Resulting in an self-excitatory loop nc1 = h.NetCon(soma2(0.5)._ref_v, syn1, 0, 20, 1, sec=soma2) nc2 = h.NetCon(soma1(0.5)._ref_v, syn2, 0, 40, 1, sec=soma1) # + # Trigger first cell ns = h.NetStim() ns.number = 1 ns.start = 10 ns.noise = 0 nc_start = h.NetCon(ns, syn1, 0, 0, 1) # + # Record voltage t = h.Vector() t.record(h._ref_t) v1 = h.Vector() v1.record(soma1(0.5)._ref_v) v2 = h.Vector() v2.record(soma2(0.5)._ref_v) # - # Run sim h.tstop = 150 h.run() from matplotlib import pyplot as plt # + # Plot plt.plot(t.as_numpy(), v1.as_numpy()) plt.plot(t.as_numpy(), v2.as_numpy()) plt.show() # - # # Insert LFP electrode from LFPsimpy import LfpElectrode le = LfpElectrode(x=0, y=10, z=10, sampling_period=h.dt) h.run() # Plot the LFP signal plt.plot(le.times, le.values) plt.show() # # Multiple electrodes # + # Insert a second electrode le2 = LfpElectrode(x=105, y=20, z=20, sampling_period=h.dt) h.run() # Plot both plt.plot(le.times, le.values) plt.plot(le2.times, le2.values) plt.show() # - # # Different estimation methods # + # Insert a third electrode - use 'Point' method 
(see Parasuram et. al. (2016)) # Default is 'Line' le3 = LfpElectrode(x=105, y=20, z=20, sampling_period=h.dt, method='Point') # Insert a fourth electrode - use 'RC' method le4 = LfpElectrode(x=105, y=20, z=20, sampling_period=h.dt, method='RC') h.run() # Plot all three methods plt.plot(le2.times, le2.values) plt.plot(le3.times, le3.values) plt.plot(le4.times, le4.values) plt.show() # - # Close-up plt.plot(le2.times, le2.values,label='Line', color='black') plt.plot(le3.times, le3.values,label='Point',color='yellow') plt.plot(le4.times, le4.values,label='RC', color='red') plt.xlim(50,53.5) plt.ylim(-0.21e-14, 0.3e-14) plt.legend() plt.show() # # MPI usage # # - Insert electrodes as above on all ranks # - Run psolve() # - Rank 0 contains aggregated electrode .times and .values # - Other ranks contain only .values from the sections on those ranks
examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Accuracy as a function of mag for stars/galaxies # # In this notebook we show how the "update" RF model classifies both point sources and extended sources as a function of magnitude. # + import sys,os,math import numpy as np import pandas as pd from matplotlib import pyplot as plt from matplotlib import rcParams rcParams["font.family"] = "sans-serif" rcParams['font.sans-serif'] = ['DejaVu Sans'] from matplotlib import gridspec as grs from matplotlib import cm from matplotlib.legend import Legend from mpl_toolkits.axes_grid1.inset_locator import inset_axes from astropy.table import Table import seaborn as sns import statsmodels.nonparametric.api as smnp from statsmodels.nonparametric.kernel_density import KDEMultivariate from scipy import stats from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import roc_curve, accuracy_score, auc, roc_auc_score from sklearn.model_selection import StratifiedKFold, train_test_split from numpy import interp # - # %matplotlib notebook # ## Preload the data hst_tab = Table.read("HST_COSMOS_Forcefeatures_adamamiller.fit").to_pandas() fnames = ['E1', 'E2', 'FPSFKronDist', 'FPSFApDist', 'FPSFApRatio', 'FPSFKronRatio', 'FPSFflxR5Ratio', 'FPSFflxR6Ratio', 'FPSFflxR7Ratio'] fil = 'ww' features = [fil + feat for feat in fnames] classifier = RandomForestClassifier(n_estimators = 900, min_samples_leaf = 2, max_features= 3, n_jobs=-1, random_state = 20) # + labels_GT = np.array(hst_tab.MU_CLASS - 1, dtype=int) hst_det_mask = (hst_tab.nDetections > 0) print("There are {:d} sources in the PS1 subset from HST".format(sum(hst_det_mask))) hst_GT = labels_GT[hst_det_mask] # + # get simple model classification threshold gridsize=100 grid = np.linspace(0.5e-6, 3e-6, gridsize) acc_arr = np.zeros_like(grid) for 
th_num, thrsh in enumerate(grid): smpl_labels = np.logical_not(np.array(hst_tab["wwFPSFApDist"].loc[hst_det_mask]) < thrsh).astype(int) acc_arr[th_num] = accuracy_score(hst_GT, smpl_labels) print('The optimal threshold for the simple model is: {:.5e}'.format(grid[np.argmax(acc_arr)])) # - def get_CV_preds(hst_tab, hst_det_mask, feats, n_splits=10, rs=23): ps1_preds = np.array(hst_tab["iFPSFminusFKron"].loc[hst_det_mask]) simple_preds = np.array(hst_tab["wwFPSFApDist"].loc[hst_det_mask]) X_hst = np.array(hst_tab[feats].loc[hst_det_mask]) y_hst = np.array(hst_tab["MU_CLASS"].loc[hst_det_mask] - 1, dtype=int) rf_preds = np.empty_like(simple_preds) cv = StratifiedKFold(n_splits=n_splits, random_state=rs) for train, test in cv.split(X_hst, y_hst): rf_preds[test] = classifier.fit(X_hst[train], y_hst[train]).predict_proba(X_hst[test])[:,1] return y_hst, ps1_preds, simple_preds, rf_preds y_hst, ps1_preds, simple_preds, rf_preds = get_CV_preds(hst_tab, hst_det_mask, features, n_splits=10, rs=23) def calc_fom(fpr, tpr, thresh): return interp(0.005, fpr, tpr) def calc_summary_stats(y_hst, ps1_preds, simple_preds, rf_preds, ps1_ct = 0.05, simple_ct = 1.48e-06, rf_ct = 0.5, make_plot = False, fold_plot = True): ps1_fpr, ps1_tpr, ps1_thresh = roc_curve(y_hst, -1*ps1_preds) ps1_fom = calc_fom(ps1_fpr, ps1_tpr, ps1_thresh) ps1_auc = roc_auc_score(y_hst, -1*ps1_preds) ps1_acc = accuracy_score(y_hst, ps1_preds <= ps1_ct) simple_fpr, simple_tpr, simple_thresh = roc_curve(y_hst, simple_preds) simple_fom = calc_fom(simple_fpr, simple_tpr, simple_thresh) simple_auc = roc_auc_score(y_hst, simple_preds) simple_acc = accuracy_score(y_hst, simple_preds >= simple_ct) rf_fpr, rf_tpr, rf_thresh = roc_curve(y_hst, rf_preds) rf_fom = calc_fom(rf_fpr, rf_tpr, rf_thresh) rf_auc = roc_auc_score(y_hst, rf_preds) rf_acc = accuracy_score(y_hst, rf_preds >= rf_ct) if make_plot: lw = 0.5 if fold_plot else 2 alpha = 0.4 if fold_plot else 1 for ax in [main_ax, axins]: ax.plot(rf_fpr, rf_tpr, lw=lw, 
color="#7570b3", alpha=alpha) ax.plot(simple_fpr, simple_tpr, lw=lw, color="#1b9e77", alpha=alpha) ax.plot(ps1_fpr, ps1_tpr, lw=lw, color="#34495e", alpha=alpha) return ps1_auc, ps1_acc, ps1_fom, simple_auc, simple_acc, simple_fom, rf_auc, rf_acc, rf_fom ps1_det_mask = ~np.isnan(hst_tab[hst_det_mask].iFPSFminusFKron) # + jupyter={"outputs_hidden": false} fig, main_ax = plt.subplots(figsize=(7,5)) axins = inset_axes(main_ax, width="58.5%", height="60%", loc=3, bbox_to_anchor=(0.4, 0.28, 1., 1.), bbox_transform=main_ax.transAxes) cv = StratifiedKFold(n_splits=10, random_state=23) X_hst = np.array(hst_tab[features].loc[hst_det_mask]) cv_summary_stats = calc_summary_stats(y_hst[ps1_det_mask], ps1_preds[ps1_det_mask], simple_preds[ps1_det_mask], rf_preds[ps1_det_mask], make_plot=True, fold_plot=False) for fold_num, (train, test) in enumerate(cv.split(X_hst[ps1_det_mask], y_hst[ps1_det_mask])): fold_results = calc_summary_stats(y_hst[ps1_det_mask][test], ps1_preds[ps1_det_mask][test], simple_preds[ps1_det_mask][test], rf_preds[ps1_det_mask][test], make_plot=True) if fold_num == 0: each_fold_sum_stats = fold_results else: each_fold_sum_stats = np.vstack((each_fold_sum_stats, fold_results)) main_ax.set_xlim(0,1) main_ax.set_ylim(0,1.02) main_ax.grid(alpha=0.5, lw=0.5, c='grey', linestyle=':') main_ax.tick_params(which="both", top=True, right=True, labelsize = 15) main_ax.minorticks_on() main_ax.set_xlabel('False Positive Rate', fontsize=15) main_ax.set_ylabel('True Positive Rate', fontsize=15) # legend stuff main_ax.plot([1e6,1e6], [1e6,1e6], lw=2, color="#7570b3", alpha=1, label='RF model') main_ax.plot([1e6,1e6], [1e6,1e6], lw=2, color="#1b9e77", alpha=1, label='simple model') main_ax.plot([1e6,1e6], [1e6,1e6], lw=2, color="#34495e", alpha=1, label='PS1 model') main_ax.legend(loc=3, borderaxespad=0, fontsize=13, bbox_to_anchor=(0.13, 0.01, 1., 0.102), ) # SDSS color = #7570b3 # axins.vlines([5e-3], 1e-3, 1, color='grey', lw=0.5) axins.vlines([5e-3], 1e-3, 1, 
linestyles=":", color='DarkSlateGrey', lw=2) axins.text(5e-3, 0.5, 'FoM', color='DarkSlateGrey', rotation=90, ha="right", fontsize=14) axins.set_xlim(1e-3, 9e-3) axins.set_ylim(0.45, 0.72) axins.tick_params(labelsize = 15) axins.minorticks_on() fig.subplots_adjust(right=0.97,top=0.98,bottom=0.11,left=0.1) fig.savefig("../paperII/figures/CV_ROC_FHST.pdf") # + jupyter={"outputs_hidden": false} print(r""" RF & {0:.3f} $\pm$ {9:.3f} & {1:.3f} $\pm$ {10:.3f} & {2:.3f} $\pm$ {11:.3f} \\ simple & {3:.3f} $\pm$ {12:.3f} & {4:.3f} $\pm$ {13:.3f} & {5:.3f} $\pm$ {14:.3f} \\ PS1 & {6:.3f} $\pm$ {15:.3f} & {7:.3f} $\pm$ {16:.3f} & {8:.3f} $\pm$ {17:.3f} \\ """.format(*np.append(cv_summary_stats[::-1], np.std(each_fold_sum_stats, ddof=1, axis=0)[::-1]))) # - # + kron_mag = np.array(-2.5*np.log10(hst_tab['wwFKronFlux'].loc[hst_det_mask]/3631)) binwidth = 0.5 mag_array = np.arange(15 , 23.5+binwidth, binwidth) # - ml_labels = rf_preds simple_decision_thresh = 1.48e-06 # maximize acc on training set simple_labels = np.logical_not(np.array(hst_tab["wwFPSFApDist"].loc[hst_det_mask]) < simple_decision_thresh).astype(int) ps1_labels = np.array(hst_tab["iFPSFminusFKron"].loc[hst_det_mask] < 0.05).astype(int) # ## New stuff # Below –– Calculate the accuracy of stars and galaxies in individual mag bins # + Nboot = 100 # bootstrap acc arrays simple_acc_arr = np.zeros_like(mag_array) ps1_acc_arr = np.zeros_like(mag_array) ml_acc_arr = np.zeros_like(mag_array) simple_boot_scatt = np.vstack((np.zeros_like(mag_array), np.zeros_like(mag_array))) ps1_boot_scatt = np.vstack((np.zeros_like(mag_array), np.zeros_like(mag_array))) ml_boot_scatt = np.vstack((np.zeros_like(mag_array), np.zeros_like(mag_array))) # bootstrap star arrays simple_star_acc_arr = np.zeros_like(mag_array) ml_star_acc_arr = np.zeros_like(mag_array) simple_star_boot_scatt = np.vstack((np.zeros_like(mag_array), np.zeros_like(mag_array))) ml_star_boot_scatt = np.vstack((np.zeros_like(mag_array), np.zeros_like(mag_array))) # 
bootstrap galaxy arrays simple_gal_acc_arr = np.zeros_like(mag_array) ml_gal_acc_arr = np.zeros_like(mag_array) simple_gal_boot_scatt = np.vstack((np.zeros_like(mag_array), np.zeros_like(mag_array))) ml_gal_boot_scatt = np.vstack((np.zeros_like(mag_array), np.zeros_like(mag_array))) for bin_num, binedge in enumerate(mag_array): bin_sources = np.where((kron_mag >= binedge) & (kron_mag < binedge + binwidth) & (np.isfinite(hst_tab["iFPSFminusFKron"].loc[hst_det_mask]))) simple_acc_arr[bin_num] = accuracy_score(hst_GT[bin_sources], simple_labels[bin_sources]) ps1_acc_arr[bin_num] = accuracy_score(hst_GT[bin_sources], ps1_labels[bin_sources]) ml_acc_arr[bin_num] = accuracy_score(hst_GT[bin_sources], np.round(ml_labels[bin_sources])) ### STARS bin_stars = np.where((kron_mag >= binedge) & (kron_mag < binedge + binwidth) & (hst_GT == 1)) simple_star_acc_arr[bin_num] = accuracy_score(hst_GT[bin_stars], simple_labels[bin_stars]) ml_star_acc_arr[bin_num] = accuracy_score(hst_GT[bin_stars], np.round(ml_labels[bin_stars])) ### GALAXIES bin_gals = np.where((kron_mag >= binedge) & (kron_mag < binedge + binwidth) & (hst_GT == 0)) simple_gal_acc_arr[bin_num] = accuracy_score(hst_GT[bin_gals], simple_labels[bin_gals]) ml_gal_acc_arr[bin_num] = accuracy_score(hst_GT[bin_gals], np.round(ml_labels[bin_gals])) # get the bootstrap accuracies simple_boot_acc = np.empty(Nboot) ps1_boot_acc = np.empty_like(simple_boot_acc) ml_boot_acc = np.empty_like(simple_boot_acc) simple_star_boot_acc = np.empty_like(simple_boot_acc) ml_star_boot_acc = np.empty_like(simple_boot_acc) simple_gal_boot_acc = np.empty_like(simple_boot_acc) ml_gal_boot_acc = np.empty_like(simple_boot_acc) for i in range(Nboot): boot_sources = np.random.choice(bin_sources[0], len(bin_sources[0]), replace=True) simple_boot_acc[i] = accuracy_score(hst_GT[boot_sources], simple_labels[boot_sources]) ps1_boot_acc[i] = accuracy_score(hst_GT[boot_sources], ps1_labels[boot_sources]) ml_boot_acc[i] = accuracy_score(hst_GT[boot_sources], 
np.round(ml_labels[boot_sources])) star_boot_sources = np.random.choice(bin_stars[0], len(bin_stars[0]), replace=True) simple_star_boot_acc[i] = accuracy_score(hst_GT[star_boot_sources], simple_labels[star_boot_sources]) ml_star_boot_acc[i] = accuracy_score(hst_GT[star_boot_sources], np.round(ml_labels[star_boot_sources])) gal_boot_sources = np.random.choice(bin_gals[0], len(bin_gals[0]), replace=True) simple_gal_boot_acc[i] = accuracy_score(hst_GT[gal_boot_sources], simple_labels[gal_boot_sources]) ml_gal_boot_acc[i] = accuracy_score(hst_GT[gal_boot_sources], np.round(ml_labels[gal_boot_sources])) simple_boot_scatt[:,bin_num] = np.percentile(simple_boot_acc, [16, 84]) ps1_boot_scatt[:,bin_num] = np.percentile(ps1_boot_acc, [16, 84]) ml_boot_scatt[:,bin_num] = np.percentile(ml_boot_acc, [16, 84]) simple_star_boot_scatt[:,bin_num] = np.percentile(simple_star_boot_acc, [16, 84]) ml_star_boot_scatt[:,bin_num] = np.percentile(ml_star_boot_acc, [16, 84]) simple_gal_boot_scatt[:,bin_num] = np.percentile(simple_gal_boot_acc, [16, 84]) ml_gal_boot_scatt[:,bin_num] = np.percentile(ml_gal_boot_acc, [16, 84]) # + # get the KDEs star_norm = np.sum(hst_GT==1)/len(hst_GT) gal_norm = np.sum(hst_GT==0)/len(hst_GT) All_kde = stats.gaussian_kde(kron_mag) Star_kde = stats.gaussian_kde(kron_mag[hst_GT==1]) Gal_kde = stats.gaussian_kde(kron_mag[hst_GT==0]) n = (np.arange(12,25,0.01)) All_kde_pdf = All_kde(n) Star_kde_pdf = Star_kde(n)*star_norm Gal_kde_pdf = Gal_kde(n)*gal_norm # - # + cmap = plt.get_cmap("Dark2") color_dict = {'ml': cmap(0.33), 'sdss': cmap(0.66), 'simple': cmap(0.), 'ps1': cmap(1.)} color_dict = {'ml': cmap(0.33), 'sdss': cmap(0.66), 'simple': cmap(0.), 'ps1': cmap(1.)} # apple colors color_dict = {'ml': "#0072c6", #"#1C1858", 'sdss': "#5BC236", #"#00C78E", 'simple': "#C864AF", #"#C70039", 'ps1': "#C65400"} # color blind friendly color_dict = {'ml': '#0072b2', 'sdss': '#d55e00', 'simple': '#cc79a7', 'ps1': '#009e73'} # color brewer color_dict = {'ml': '#7570b3', 
'sdss': '#d95f02', 'simple': '#1b9e77', 'ps1': '#34495e'} ls_dict = {'ml': '-', 'sdss': '-.', 'simple': '--', 'ps1': '--'} lw_dict = {'ml': .75, 'sdss': .5, 'simple': .5, 'ps1': .5} bias = 0.5 cmap_star = sns.cubehelix_palette(rot=0.5, light=0.7,dark=0.3,as_cmap=True) cmap_gal = sns.cubehelix_palette(start=0.3,rot=-0.5,light=0.7,dark=0.3,as_cmap=True) mag_bin_centers = mag_array + binwidth/2 fig, (ax, ax_sg) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [7, 3]}, figsize=(7,9)) ax.vlines(mag_bin_centers, ps1_boot_scatt[0], ps1_boot_scatt[1], color=color_dict['ps1'], alpha=0.5, linewidth=1, zorder=3) ax.vlines(mag_bin_centers, simple_boot_scatt[0], simple_boot_scatt[1], color=color_dict['simple'], alpha=0.5, linewidth=1, zorder=4) ax.vlines(mag_bin_centers, ml_boot_scatt[0], ml_boot_scatt[1], color=color_dict['ml'], alpha=0.5, linewidth=1, zorder=5) ax.errorbar(mag_bin_centers, ml_acc_arr, color=color_dict['ml'], yerr=np.zeros_like(mag_array), fmt='o', ms=10, zorder=10, mew=0.4, mec="0.2", alpha=0.9, ls=ls_dict['ml'], lw = lw_dict['ml'], label='RF model') ax.errorbar(mag_bin_centers, simple_acc_arr, color=color_dict['simple'], yerr=np.zeros_like(mag_array), fmt='^',ms=8, zorder=5, mew=0.4, mec="0.2", alpha=0.9, ls=ls_dict['simple'], lw = lw_dict['simple'], label='Simple model') ax.errorbar(mag_bin_centers, ps1_acc_arr, color=color_dict['ps1'], yerr=np.zeros_like(mag_array), fmt='p', ms=7, zorder=25, mew=0.4, mec="0.2", alpha=0.9, ls=ls_dict['ps1'], lw = lw_dict['ps1'], dashes=(8, 4), label='PS1 model') ax.fill(n, All_kde_pdf + bias, alpha=0.4, color="0.7", zorder=0, label="HST training set") ax.fill(n, Gal_kde_pdf + bias, alpha=0.5, color=cmap_gal(0.25), zorder=1, label="HST resolved") ax.fill(n, Star_kde_pdf + bias, alpha=0.5, color=cmap_star(0.25), zorder=2, label="HST unresolved") ax.plot(n, All_kde_pdf + bias, lw=2, color="0.7", zorder=3) ax.plot(n, Gal_kde_pdf + bias, lw=2, color=cmap_gal(0.25), zorder=4) ax.plot(n, Star_kde_pdf + bias, lw=2, 
color=cmap_star(0.25), zorder=5) ax.set_ylim(bias,1.01) ax.set_xlim(15, 24.0) handles, labels = ax.get_legend_handles_labels() leg_lines = ax.legend(handles[3:], labels[3:], bbox_to_anchor=(0.225, 0.375, 1., 0.102), loc=3, fontsize=13, handlelength=3.5) leg_kde = Legend(ax, handles[:3], labels[:3], bbox_to_anchor=(0.01, 0.03, 1., 0.102), loc=3,fontsize=10, frameon=False) ax.add_artist(leg_kde) ax.tick_params(labelsize = 15, which="both", top=True, right=True) ax.minorticks_on() ax_sg.set_xlabel('$\mathtt{whiteFKronMag}$', fontsize=15) ax.set_ylabel('Accuracy', fontsize=15) ax_sg.errorbar(mag_bin_centers+0.0, ml_star_acc_arr, yerr=np.abs(ml_star_boot_scatt - ml_star_acc_arr), ls =ls_dict['ml'], lw=.75, fmt='*', ms=10, mec="0.2", mew=0.5, color=color_dict['ml'], label="RF unresolved") # ax_sg.errorbar(mag_bin_centers, simple_star_acc_arr, # yerr=np.abs(simple_star_boot_scatt - simple_star_acc_arr), # ls =ls_dict['simple'], lw=.5, fmt='*', ms=10, # mec="0.2", mew=0.5, # color=color_dict['simple'], label="Simple model") ax_sg.scatter(mag_bin_centers-0.0, ml_gal_acc_arr, marker=r'$\S$', s=150, edgecolor='k', linewidths=0.3, color=color_dict['ml'], label="RF resolved") ax_sg.errorbar(mag_bin_centers-0.0, ml_gal_acc_arr, yerr=np.abs(ml_gal_boot_scatt - ml_gal_acc_arr), ls =ls_dict['ml'], lw=.75, fmt=',', color=color_dict['ml']) # ax_sg.scatter(mag_bin_centers-0.1, simple_gal_acc_arr, # marker=r'$\S$', s=150, edgecolor='k', linewidths=0.3, # color=color_dict['simple'], label="simple galaxies") # ax_sg.errorbar(mag_bin_centers-0.1, simple_gal_acc_arr, # yerr=np.abs(simple_star_boot_scatt - simple_star_acc_arr), # ls =ls_dict['simple'], lw=.75, fmt=',', # color=color_dict['simple']) ax_sg.legend() ax_sg.tick_params(labelsize = 15, which="both", top=False, right=True) ax_sg.minorticks_on() fig.subplots_adjust(right=0.975,top=0.985,bottom=0.115,left=0.1, hspace=0.05) fig.savefig("../paperII/figures/CV_Accuracy_FHST.pdf") # - faint = np.where(kron_mag > 22.5) print('{} sources 
are faint; {:.2f}% are galaxies'.format(len(faint[0]), 100*(1-sum(hst_GT[faint]/len(hst_GT[faint]))))) # ### Make table with TPR as function of TM18 thresholds nDet = hst_tab.nDetections.loc[hst_det_mask] det3 = np.where(nDet >= 3) rf_fpr, rf_tpr, rf_thresh = roc_curve(hst_GT[det3], rf_preds[det3]) from scipy import interpolate tpr_interp = interpolate.interp1d(rf_thresh, rf_tpr) tpr_tex = tpr_interp([0.829, 0.724, 0.597, 0.397, 0.224]) fpr_interp = interpolate.interp1d(rf_thresh, rf_fpr) fpr_tex = fpr_interp([0.829, 0.724, 0.597, 0.397, 0.224]) # + Nboot = 100 tpr_boot_arr = np.zeros((Nboot,5)) fpr_boot_arr = np.zeros((Nboot,5)) for i in range(Nboot): boot_samp = np.random.choice(det3[0], len(det3[0]), replace=True) rf_fpr, rf_tpr, rf_thresh = roc_curve(hst_GT[boot_samp], rf_preds[boot_samp]) tpr_interp = interpolate.interp1d(rf_thresh, rf_tpr) tpr_boot_arr[i] = tpr_interp([0.829, 0.724, 0.597, 0.397, 0.224]) fpr_interp = interpolate.interp1d(rf_thresh, rf_fpr) fpr_boot_arr[i] = fpr_interp([0.829, 0.724, 0.597, 0.397, 0.224]) # - tpr_scatt = np.percentile(tpr_boot_arr, (10,90), axis=0) fpr_scatt = np.percentile(fpr_boot_arr, (10,90), axis=0) with open('../paperII/tables/thresholds.tex', 'w') as fw: print(r'\begin{deluxetable*}{l|lccccc}', file=fw) print(r' \tablecolumns{7}', file=fw) print(r' \tablewidth{0pt}', file=fw) print(r' \tablecaption{TPR and FPR for TM18 Thresholds\label{tbl:thresh}}', file=fw) print(r' \tablehead{', file=fw) print(r' \colhead{Catalog} & \colhead{Threshold} & \colhead{0.829} & \colhead{0.724} & \colhead{0.597} & \colhead{0.397} & \colhead{0.224}', file=fw) print(r' }', file=fw) print(r' \startdata', file=fw) print(r' \multirow{2}{*}{TM18} & TPR & 0.734 & 0.792 & 0.843 & 0.0904 & 0.947 \\', file=fw) print(r' & FPR & 0.005 & 0.01 & 0.02 & 0.05 & 0.1 \\', file=fw) print(r' \hline', file=fw) plus = tpr_scatt[1]-tpr_tex minus = tpr_tex - tpr_scatt[0] print(r' \multirow{2}{*}{This work}'+r' & TPR & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & 
{:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$\\'.format(tpr_tex[0],plus[0], minus[0], tpr_tex[1],plus[1], minus[1], tpr_tex[2],plus[2], minus[2], tpr_tex[3],plus[3], minus[3], tpr_tex[4],plus[4], minus[4]), file=fw) plus = fpr_scatt[1]-fpr_tex minus = fpr_tex - fpr_scatt[0] print(r' & FPR & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$ & {:.3f}$^{{+{:.3f}}}_{{-{:.3f}}}$\\'.format(fpr_tex[0],plus[0], minus[0], fpr_tex[1],plus[1], minus[1], fpr_tex[2],plus[2], minus[2], fpr_tex[3],plus[3], minus[3], fpr_tex[4],plus[4], minus[4]), file=fw) print(r' \enddata', file=fw) print(r''' \tablecomments{The table reports the TPR and FPR for different classification thresholds given in Table~3 in \citet{Tachibana18}. To estimate the TPR and FPR we perform 10-fold CV on the entire training set, but only include sources with $\mathtt{nDetections} \ge 3$ in the final TPR and FPR calculations. The first row (TM18) summarizes the results from \citet{Tachibana18}, while the second row uses the RF model from this study. The reported uncertainties represent the central 90\% interval from 100 bootstrap resamples of the training set.}''', file=fw) print(r'\end{deluxetable*}', file=fw)
catalog_update/sg_accuracy_mag.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys
sys.path.insert(0, '../Libraries/')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.constants as csts
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d
import thermodynamics as thermo
import HT_external_convection as extconv
import HT_natural_convection as natconv
# -

# ## Adding Lake Placid, NY Weather Data

# +
xl = pd.ExcelFile('Climate/April-October-2020.xlsx')
loc = xl.sheet_names[6]
weather = pd.read_excel("Climate/April-October-2020.xlsx", sheet_name=loc, header=0,
                        converters={'Date time': pd.to_datetime},)
weather = weather.set_index('Date time')
# weather['Name'].head(10)
# -

# Missing observations are treated as zero before interpolation.
weather['Solar Radiation'] = weather['Solar Radiation'].fillna(0.0)
weather['Cloud Cover'] = weather['Cloud Cover'].fillna(0.0)
weather['Precipitation'] = weather['Precipitation'].fillna(0.0)
weather['Relative Humidity'] = weather['Relative Humidity'].fillna(0.0)
# weather['Name'].head(-2)

ax0 = weather.plot(y='Solar Radiation')
ax0 = weather.plot(y='Temperature')
ax0 = weather.plot(y='Cloud Cover')
ax0 = weather.plot(y='Wind Speed')
ax0 = weather.plot(y='Dew Point')
ax0 = weather.plot(y='Precipitation')
ax0 = weather.plot(y='Relative Humidity')

# +
# Samples are 15 min apart; build continuous interpolants of time in seconds.
t_data = np.arange(0, weather.shape[0]*15*60, 15*60)
weather['Wind Speed'] = weather['Wind Speed'].interpolate('cubic')
weather['Temperature'] = weather['Temperature'].interpolate('cubic')
weather['Solar Radiation'] = weather['Solar Radiation'].interpolate('cubic')
weather['Cloud Cover'] = weather['Cloud Cover'].interpolate('cubic')
weather['Dew Point'] = weather['Dew Point'].interpolate('cubic')

U_atm = np.abs(weather['Wind Speed'][:].to_numpy()/3.6)   # km/h -> m/s
T_atm = weather['Temperature'][:].to_numpy()              # deg C
q_sun = weather['Solar Radiation'][:].to_numpy()          # W/m^2
cc = weather['Cloud Cover'][:].to_numpy()/100             # % -> fraction in [0, 1]
cc = np.clip(cc, 0.0, 1.0)                                # guard against out-of-range data
T_dp = weather['Dew Point'][:].to_numpy()
RH = weather['Relative Humidity'][:].to_numpy()

f_U_atm = interp1d(t_data, U_atm, kind='cubic')
f_T_atm = interp1d(t_data, T_atm, kind='cubic')
f_q_sun = interp1d(t_data, q_sun, kind='cubic')
f_CC = interp1d(t_data, cc, kind='cubic')
f_T_dp = interp1d(t_data, T_dp, kind='cubic')
f_RH = interp1d(t_data, RH, kind='cubic')
# -

# ## Numerical Method Simulation for Warehouse Cooling Requirements

# +
rho = 20.        # density of air
C_p = 2500.      # specific heat of air
R_wall = 7.27    # m^3K/W
H = 0.3048       # thickness of concrete warehouse walls: m
# NOTE(review): for a slab, R = H/k, so k = H/R_wall would be expected here;
# k = R_wall*H is kept from the original — confirm the definition of R_wall.
k = R_wall*H     # thermal conductivity of the wall
alpha_s = 0.5    # solar absorptivity of the surface
eps_sky = 1.     # emissivity of the sky
L_w = 70.        # length of the square warehouse: m
h_w = 6.         # height of the warehouse: m
A_wall = 4*(L_w*h_w)   # total surface area of the walls
A_roof = L_w**2        # total surface area of the roof
A = A_wall + A_roof    # total area exposed to the atmosphere
T_s_lower = 20   # room temp: C


def f(t, T):
    """RHS of dT/dt for the upper-surface temperature T[0] (deg C).

    Surface energy balance per unit area: absorbed solar + convection
    + net sky radiation - conduction into the slab, all over rho*C_p.
    (The original also computed an unused wet-bulb temperature and Grashof
    number; both removed.)
    """
    # Ambient conditions at time t
    Tinf = f_T_atm(t)
    Uinf = np.max([f_U_atm(t), 0.])
    q_irr = alpha_s*f_q_sun(t)

    # Film temperature between ambient and upper surface
    T_f = (T[0] + Tinf)/2
    air_f = thermo.Fluid('air', T_f, "C")

    # Sky radiation: clear-sky emissivity corrected for cloud cover.
    Tdp = f_T_dp(t)
    eps_clear = 0.711 + 0.56*(Tdp/100.) + 0.73*(Tdp/100.)**2
    # FIX: f_CC already returns the cloud-cover *fraction* (scaled by 1/100
    # and clipped to [0, 1] above); the original divided by 100 again here,
    # which collapsed the cloud correction Ca to ~1 regardless of cloud cover.
    cc = f_CC(t)
    Ca = 1. + 0.02224*cc + 0.0035*cc**2 + 0.00028*cc**3
    Tsky = (Ca*eps_clear)**0.25*thermo.C2K(Tinf)
    qsky = eps_sky*csts.sigma*(Tsky**4 - thermo.C2K(T[0])**4)

    # Forced convection over the surface (external flat plate).
    Re = np.abs(Uinf)*L_w/air_f.nu
    if Re > 0 and (Re <= 5e5):
        airflow = extconv.FlatPlate('laminar', 'isothermal', U_infty=Uinf,
                                    nu=air_f.nu, alpha=air_f.alpha,
                                    L=L_w, xi=0, Re_xc=5e5)
        airflow.average(L_w)
        hconv_f = airflow.Nu_ave*air_f.k/L_w
    elif Re > 5e5:
        airflow = extconv.FlatPlate('mixed', 'isothermal', U_infty=Uinf,
                                    nu=air_f.nu, alpha=air_f.alpha,
                                    L=L_w, xi=0, Re_xc=5e5)
        airflow.average(L_w)
        hconv_f = airflow.Nu_ave*air_f.k/L_w
    else:
        hconv_f = 0

    # Natural convection from the upper surface of a horizontal plate.
    Ra = natconv.Ra(beta=air_f.beta, DT=np.abs(T[0] - T_f), D=L_w,
                    nu=air_f.nu, alpha=air_f.alpha)
    if Ra > 1e4:
        if T[0] >= Tinf:
            airflow = natconv.FlatPlate(Ra, air_f.Pr, 'upper', 'hot')
        else:
            airflow = natconv.FlatPlate(Ra, air_f.Pr, 'upper', 'cold')
        hconv_n = airflow.Nu*air_f.k/L_w
    else:
        hconv_n = 0

    # Total convection flux (here not a function of Ri)
    h = hconv_n + hconv_f
    qpp_conv = h*(Tinf - T[0])
    # Conduction flux into the conditioned space
    qpp_cond = k*(T[0] - T_s_lower)/H
    return (q_irr + qpp_conv + qsky - qpp_cond)/(rho*C_p)


# Simulation over the full weather record
tmax = t_data[-1]
mask = np.where(t_data <= tmax)
sol = solve_ivp(f, [0, tmax], [10.], method='LSODA', t_eval=t_data[mask], max_step=5*60.)

# +
# Ts upper, q out, q irr are assigned to columns in weather panda data
weather['Ts upper'] = sol.y[0]
# NOTE(review): qpp_cond inside f uses (Ts - T_s_lower); the offset is
# omitted here — confirm which definition of 'q out' is intended.
weather['q out'] = k*A*sol.y[0]/H
weather['q irr'] = alpha_s*weather['Solar Radiation'][:]*A

# Plotting cooling requirement: q out vs solar irradiation heat flux: q irr
ax0 = weather.plot(y='q irr')
weather.plot(y='q out', ax=ax0)

# Total cooling load over the record, in MJ (W x 15-min steps x 1e-6)
weather['q out'].sum()*15*60*1e-6

# +
# Scrubbing Atmospheric Temperature Data
np.argwhere(np.isnan(weather['Temperature'][:].to_numpy()))

# Plotting Ts upper versus Atmospheric temperature
ax0 = weather.plot(y='Ts upper')
weather.plot(y='Temperature', ax=ax0)

# +
# Resampling weather data
weather_1H = weather.resample("1H").mean()
weather_1H['Energy slab'] = k*A/H*weather_1H['Ts upper']
ax0 = weather_1H.plot(y='Ts upper')
weather_1H.plot(y='Temperature', ax=ax0)
weather_1H.plot(y='Energy slab')
weather_1H['Energy slab'].sum()
# -
Snow-Cooling/.ipynb_checkpoints/Final Project Outline-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6
#     language: python
#     name: python36
# ---

# ### 1. Reading the CSV file into a pandas dataframe.

# +
import pandas as pd

data = pd.read_csv("../data/Fremont_Bridge_Hourly_Bicycle_Counts_by_Month_October_2012_to_present.csv")
# -

data.head()  # to validate the data that has been read into the session.

# ### 2. Add columns to the dataframe containing:
# #### The total (East + West) bicycle count
# #### The hour of the day
# #### The year

data['Total (East + West) bicycle count'] = (data['Fremont Bridge East Sidewalk']
                                             + data['Fremont Bridge West Sidewalk'])
# Parse the timestamp once instead of once per derived column.
timestamps = pd.to_datetime(data['Date'])
data['Hour of the day'] = timestamps.dt.hour
data['year'] = timestamps.dt.year

data.head()  # to validate the new columns that have been added

# ### 3. Create a dataframe with the subset of data from the year 2016

# Create a new dataframe with just the data from 2016
data_2016 = data[data['year'] == 2016]
# Print the first few rows to quickly validate the result
data_2016.head()

# ### 4. Use pandas + matplotlib to plot the counts by hour. (i.e. hour of the day on the x-axis, total daily counts on the y-axis)

# +
from matplotlib import pyplot as plt
# %matplotlib inline

plt.bar('Hour of the day', 'Total (East + West) bicycle count', data=data_2016)
plt.xlabel('Hour of the day')
plt.ylabel('Total (East + West) bicycle count')
plt.title('fremont bicycle count by hour')
plt.grid(True)
plt.show()
# -

# ### 5. Use pandas to determine what is (on average) the busiest hour of the day

# Mean count for each hour of the day
data_2016_hourly_mean_series = data_2016.groupby(['Hour of the day'])['Total (East + West) bicycle count'].mean()
# idxmax() returns the hour with the largest mean directly (the original built
# an intermediate DataFrame and filtered it against max() to get the same value).
busiest_hour = data_2016_hourly_mean_series.idxmax()
# Print the result
print('The busiest hour of a day is: ' + str(busiest_hour) + ':00')
hw1/analysis/sumabh_hw1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Introduction to Loops

# (Author's note, translated: the text avoids Turkish characters because the
# keyboard it was written on has none.)
#
# Before introducing loops, let's see why we need them at all. With what we
# have learned so far, let's read 10 inputs from the user, compute their
# average and print it to standard output:

input1 = int(input("Data 1: "))
input2 = int(input("Data 2: "))
input3 = int(input("Data 3: "))
input4 = int(input("Data 4: "))
input5 = int(input("Data 5: "))
input6 = int(input("Data 6: "))
input7 = int(input("Data 7: "))
input8 = int(input("Data 8: "))
input9 = int(input("Data 9: "))
input10 = int(input("Data 10: "))

print((input1 + input2 + input3 + input4 + input5 + input6 + input7 + input8 + input9 + input10) / 10)

# If there were no easier way to do this, programming would be little more
# than copy-paste. Loops exist to make this easier; in Python we will cover
# two of them, for and while. Let's rewrite the code above with each,
# starting with the while loop:

sayac = 0
girdiler = []
while sayac < 10:
    girdiler.append(int(input(f'Data {sayac + 1}: ')))
    sayac += 1

sayac = 0
toplam = 0
while sayac < 10:
    toplam += girdiler[sayac]
    sayac += 1
print(toplam / 10)

# Much simpler already. The line count is not very different yet, but imagine
# we had to read 1000 inputs (reading them from a file would be more sensible,
# but that is a topic for later). In the while version only "sayac < 10"
# becomes "sayac < 1000", whereas the copy-paste version would be over a
# thousand lines of the same thing repeated.

# Now the same with a for loop:

toplam = 0
for k in range(10):
    toplam += int(input(f'Data {k + 1}: '))
print(toplam / 10)

# This time the value is added straight to the running total instead of first
# building a list and looping over it again. The list was built above only to
# demonstrate a very common operation; a single while loop adding directly to
# the total would have worked just as well.

# ### Functions

# Do we really have to spell everything out line by line every time, or can we
# package a piece of code and call it when needed? Of course we can — by
# defining a function. Note that only general-purpose code used at least two
# or three times is worth turning into a function; otherwise the only benefit
# is guarding against potential future reuse. Let's define a function that
# takes a list and computes the average of its elements:

# +
def average(myList):
    """Return the arithmetic mean of the numbers in myList."""
    return sum(myList) / len(myList)


avg1 = average([10, 12, 15, 22, 17, 14, 15, 16, 18])
avg2 = average([55, 44, 22, 11, 55, 65, 76, 88, 11, 45, 66, 21, 55, 11, 18, 99])
print(avg1, avg2)
# -

# ### Examples

# Let's finish the topic by solving two examples with what we have learned.
# First example: read pairs of space-separated numbers, treat each pair as an
# (x, y) point on the Cartesian plane and compute the slope through the
# origin. Then count how many distinct lines through the origin are needed to
# cover all of the points:

# +
noktalar = [input(f'Nokta {n + 1}: ').split(' ') for n in range(5)]


def egim(noktalar):
    """Slope of the line from the origin through each (x, y) point."""
    return [int(nokta[1]) / int(nokta[0]) for nokta in noktalar]


def dogruSayisi(egimler):
    """Number of distinct origin lines needed to cover all the points."""
    tekrarlanan = 0
    for i in range(len(egimler)):
        for j in range(i + 1, len(egimler)):
            print(egimler[i], egimler[j])  # debug trace kept from the original
            if egimler[i] == egimler[j]:
                tekrarlanan += 1
                break
    return len(egimler) - tekrarlanan


print(egim(noktalar))
print(dogruSayisi(egim(noktalar)))
# -

# Now let's code a robot moving on a matrix. Whenever the robot hits an
# obstacle it turns left; the edges of the matrix also count as obstacles:

# +
l = [[True, False, True, True],
     [False, True, True, False],
     [True, True, True, True],
     [False, False, True, True]]

# For each heading: (row step, column step, heading after a left turn).
_YONLER = {'K': (-1, 0, 'B'),
           'G': (1, 0, 'D'),
           'B': (0, -1, 'G'),
           'D': (0, 1, 'K')}


def hareket(myList, konum, yon):
    """One robot step: advance along `yon` if the next cell is free,
    otherwise stay put and turn left (grid edges count as obstacles)."""
    dr, dc, sola = _YONLER[yon]
    r, c = konum[0] + dr, konum[1] + dc
    if 0 <= r < len(myList) and 0 <= c < len(myList) and myList[r][c]:
        return (r, c), yon
    return (konum[0], konum[1]), sola


myHareket = hareket(l, (2, 3), 'K')
for adim in range(254):
    myHareket = hareket(l, myHareket[0], myHareket[1])
    print(myHareket)
# -

# # Closing
# That is the end of the lesson — I hope everything was clear. If anything is
# not, you can always reach me by email and I will help as much as I can.
lesson2/jupyter2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import pandas as pd
import seaborn as sbn
sbn.set()


def read_monthly(prefix, value_name):
    """Read 12 monthly whitespace-delimited tables named ``<prefix><MM>.txt``.

    Each row of a table is a year followed by one value per day of the month.
    Returns a DataFrame with columns ('date', value_name), one row per
    (year, month, day).
    """
    records = []
    for m in range(1, 13):
        monthstr = str(m).zfill(2)
        table = pd.read_table(prefix + monthstr + '.txt', sep=r"\s+", header=None)
        for _, row in table.iterrows():
            year = row[0]
            # Column 0 is the year; column d (d >= 1) holds the value for day d.
            for day, value in enumerate(row):
                if day == 0:
                    continue
                date = pd.to_datetime(str(year) + monthstr + str(day).zfill(2),
                                      format="%Y%m%d")
                records.append({'date': date, value_name: value})
    return pd.DataFrame(records, columns=('date', value_name))


highs = read_monthly('shtmax', 'high')
highs.head()

lows = read_monthly('shtmin', 'low')
lows

highsandlows = pd.merge(highs, lows, on='date')
highsandlows

# FIX: the original precipitation loop read months >= 10 into the wrong
# variable (``low = pd.read_table('shtpre'...)``), silently re-processing
# September's table for October-December; read_monthly removes the
# copy-pasted loops and with them that bug.
precips = read_monthly('shtpre', 'pre')
precips

snows = read_monthly('shtsno_b', 'snow')

snows[(snows.date.dt.year == 2011) & (snows.date.dt.month == 12)]

precipsandsnows = pd.merge(precips, snows, on='date')
precipsandsnows

weather = pd.merge(highsandlows, precipsandsnows, on='date')
weather

weather.to_csv('weather.csv')
Weather/Weather.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# import standard libraries
import numpy as np
import matplotlib.pyplot as plt
import pyblock
from pymbar import timeseries
# -

job_name = '50558343'  # '50557478', '50558343', '50559635'
data_dir = '/Volumes/_work/genius/scratch/'


def _blocked_mean_std(series, err_ax):
    """Reblock ``series``, plot the blocked standard error vs block size on
    ``err_ax``, and return (mean, std-error) at the optimal block size."""
    reblock_data = pyblock.blocking.reblock(series)
    for blk in reblock_data:
        err_ax.errorbar(blk[0], blk[4], yerr=blk[5], fmt='ko')
    err_ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    err_ax.set_ylabel('Standard error')
    err_ax.set_xlabel('block')
    opt = pyblock.blocking.find_optimal_block(len(series), reblock_data)
    return reblock_data[opt[0]][2], reblock_data[opt[0]][4]


def _trace_panel(ax, time, trace, mean, ylabel, green_lines=()):
    """Plot a time series with its blocked mean (red) and optional reference
    values (green) as horizontal lines."""
    ax.plot(time, trace)
    ax.ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
    ax.axhline(y=mean, color='tab:red')
    for ref in green_lines:
        ax.axhline(y=ref, color='tab:green')
    ax.set_ylabel(ylabel)
    ax.set_xlabel('time')


# +
cutoff = 2**17  # trailing samples kept per observable (dead cutoff=200000 removed)
time = np.arange(0.0, cutoff*0.005*100, 0.005*100)
shear_vel = 1.0

with open('visc_le.txt', 'a') as visc_le:
    for i in range(0, 40):
        j = i + 1
        with open('./params_dpd.csv') as f:
            line = f.readlines()[j]
        line = line.split(',')
        rho, a_ij = float(line[0]), float(line[1])
        print(rho, a_ij)
        l = (10000/rho) ** (1/3)             # box edge for N = 10000 particles
        gamma_dot = shear_vel/l              # imposed shear rate
        eta_theo = 2*np.pi*4.5*rho**2/1575   # predicted dissipative viscosity

        run_dir = data_dir + 'le_visc_' + job_name + '[' + str(j) + ']/'
        kin_stress = -1.0 * np.genfromtxt(run_dir + 'kin_stress.txt', delimiter=';')[-cutoff:]
        old_stress = -1.0 * np.genfromtxt(run_dir + 'old_stress.txt', delimiter=';')[-cutoff:]
        dpd_stress = -1.0 * np.genfromtxt(run_dir + 'dpd_stress.txt', delimiter=';')[-cutoff:, 1]
        temp = np.genfromtxt(run_dir + 'temp.txt', delimiter=';')[-cutoff:] / (10000 * 3/2)

        # NOTE(review): the original referenced undefined names here
        # (``A_t_equil = A_t[t0:]``) — a leftover from a pymbar
        # timeseries.detectEquilibration() step that was never wired up.
        # Removed so the loop actually runs.

        plt.figure(figsize=(20, 20*2/3))

        # Kinetic viscosity
        mean_kin_visc, std_kin_visc = _blocked_mean_std(kin_stress, plt.subplot(245))
        mean_kin_visc /= gamma_dot
        std_kin_visc /= gamma_dot
        _trace_panel(plt.subplot(241), time, kin_stress/gamma_dot,
                     mean_kin_visc, 'Kinematic viscosity')

        # Total (old) viscosity
        mean_old_visc, std_old_visc = _blocked_mean_std(old_stress, plt.subplot(246))
        mean_old_visc /= gamma_dot
        std_old_visc /= gamma_dot
        _trace_panel(plt.subplot(242), time, old_stress/gamma_dot,
                     mean_old_visc, 'Total viscosity')

        # DPD (dissipative) viscosity, with the theoretical value in green
        mean_dpd_visc, std_dpd_visc = _blocked_mean_std(dpd_stress, plt.subplot(247))
        mean_dpd_visc /= gamma_dot
        std_dpd_visc /= gamma_dot
        _trace_panel(plt.subplot(243), time, dpd_stress/gamma_dot,
                     mean_dpd_visc, 'DPD viscosity', green_lines=(eta_theo,))

        # Temperature, with the target kT = 1 in green
        mean_temp, std_temp = _blocked_mean_std(temp, plt.subplot(248))
        _trace_panel(plt.subplot(244), time, temp,
                     mean_temp, 'Temperature', green_lines=(1.0,))

        plt.tight_layout()

        print("Temperature:", mean_temp, '+/-', std_temp)
        print("Predicted dissipative viscosity:", eta_theo)
        print("Dissipative viscosity:", mean_dpd_visc, '+/-', std_dpd_visc)
        print("Kinetic viscosity:", mean_kin_visc, '+/-', std_kin_visc)
        print("Total viscosity:", mean_old_visc, '+/-', std_old_visc)
        print("\n")

        visc_le.write('{}; {}; {}; {}; {}; {}; {}; {}; {}; {} \n'.format(
            rho, a_ij,
            mean_kin_visc, std_kin_visc,
            mean_old_visc, std_old_visc,
            mean_dpd_visc, std_dpd_visc,
            mean_temp, std_temp))
# -
notebooks/05-evaluate_Lees-Edwards_viscosity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Outline # - DataFrames # - sorting & subsetting # - creating new columns # - Aggregating Data # - summary stattistics # - counting # - grouped summary statistics # - Slicing & indexing data # - subsetting using slicing # - indexes & subsetting using indexes # - Creating & Visualizing Data # - plotting # - handling missing values # - reading data into DataFrame # - Pandas is built on NumPy and Matplotlib # ## DataFrames import numpy as np import pandas as pd titanic = pd.read_csv('titanic.csv') titanic.head() titanic.info() titanic.shape titanic.describe() titanic.values titanic.columns titanic.index # ##### 1- sorting & subsetting # ###### sorting titanic.sort_values(by='PassengerId') titanic.sort_values(by='Age', ascending='False', na_position='first') titanic.sort_values(['Survived', 'Sex']) titanic.sort_values(['Survived', 'Sex'], ascending=[True,False]) # ###### Subsetting titanic['Name'] titanic[['Name', 'Survived']] # name_survived = ['Name', 'Survived'] # titanic[name_survived] titanic['Age'] > 20 titanic[titanic['Age']> 30] titanic[titanic['Sex'] == 'male'] # Similarly subsetting is possible on date # we can pass variable also # variable = titanic['Sex'] == 'male' # titanic[variable] sex_male = titanic['Sex'] == 'male' not_survived = titanic['Survived'] == 0 titanic[sex_male & not_survived] titanic[titanic['Pclass'].isin([2,1])] # #### 2- Creating New Columns titanic['Discounted_Fare'] = titanic['Fare']/100 titanic Age_lt_20 = titanic[titanic.Age < 20] desending_order = Age_lt_20.sort_values('Age', ascending=False) desending_order[['Name', 'Age', 'Sex']] # ## Aggregating Data # #### Summary statistics
03 Data Manipulation with Pandas/practise work/pandas_class1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys
eff_root = "/home/haridas/projects/opensource/automl/efficientdet/"
sys.path.insert(0, eff_root)

# +
import numpy as np
import pandas as pd
import os
from PIL import Image

# efficientdet related packages.
import tensorflow.compat.v1 as tf
import hparams_config
import dataloader
import det_model_fn
from keras import train, train_lib

os.environ["CUDA_VISIBLE_DEVICES"] = ""  # force CPU-only execution

# +
# print_tensors_in_checkpoint_file(file_name="/home/haridas/projects/opensource/automl/efficientdet/efficientdet-d1/", tensor_name="resample_p1", all_tensors=True)
# -

# ## TFRecord analysis

tfr_file = "/home/haridas/projects/opensource/automl/efficientdet/data/val/val-00000-of-00001.tfrecord"
raw_dataset = tf.data.TFRecordDataset(tfr_file)
raw_record = raw_dataset.take(1)
example = tf.train.Example()

# +
# example.ParseFromString(raw_record.numpy())
# -

# ## Model Inference Experiment

min_score_threh = 0.35
max_boxes_to_draw = 100
line_thickness = 2

img_path = "/home/haridas/projects/mystique/data/pic2card_dataset_01_apr_2021/test/2.png"
img = Image.open(img_path)
img.size

# +
# # !python /home/haridas/projects/opensource/automl/efficientdet/model_inspect.py --help
# tf.estimator.tpu.RunConfig(
#     model_dir="path",
#     evaluation_master="",
#     cluster=None
# )
# -

# ## Model Inspect

checkpoint = tf.train.get_checkpoint_state(
    os.path.join(eff_root, "out/efficientdet-d1-finetuned-2020-11-04-1604487101/"))
checkpoint.model_checkpoint_path

model_fn_instance = det_model_fn.get_model_fn("efficientdet-d1")
config = hparams_config.get_detection_config("efficientdet-d1")
# FIX: the original read ``checkpoint.mo0jd`` (a stray edit) which raises
# AttributeError; the intended attribute is model_checkpoint_path.
params = dict(config.as_dict(),
              model_name="efficientdet-d1",
              iterations_per_loop=100,
              model_dir=checkpoint.model_checkpoint_path)

# ## Saved Model Inspection

saved_model_path = "/home/haridas/projects/opensource/automl/efficientdet/out/efficientdet-d1-finetuned-2021-04-29-1619710132/saved_model"
# FIX: ``imported`` was used before being defined (its load line was commented
# out below); load the SavedModel before inspecting its signatures.
imported = tf.saved_model.load_v2(saved_model_path)
imported.signatures["serving_default"]

# ## Frozen Model Inference
#
# The model is trained with custom configuration and augmentation, the changes are commited here: https://github.com/haridas/automl/tree/pic2card
#
# Reference: https://github.com/google/automl/issues/933
#
# Export the saved models from the training checkpoints using the following command,
#
# ```bash
# $ python model_inspect.py --runmode=saved_model \
#     --model_name=efficientdet-d1 \
#     --ckpt_path out/efficientdet-d1-finetuned-2021-04-29-1619710132 \
#     --saved_model_dir out/efficientdet-d1-finetuned-2021-04-29-1619710132/
# ```
#
# Above command will save both `saved_model` and `frozen_graph` version of the trained checkpoint.
# We are using frozen_graph for our deployments.

frozen_model_path = "/home/haridas/projects/opensource/automl/efficientdet/out/efficientdet-d1-finetuned-2021-04-29-1619710132/saved_model/efficientdet-d1_frozen.pb"


def get_image_array(im_path):
    """Load an image file as a uint8 array of shape (1, height, width, 3)."""
    im = Image.open(im_path).convert("RGB")
    im_arr = np.frombuffer(im.tobytes(), dtype=np.uint8)
    # FIX: removed the stray "jkk" characters after this call that made the
    # original a syntax error.
    im_arr = im_arr.reshape((1, im.size[1], im.size[0], 3))
    return im_arr


with tf.io.gfile.GFile(frozen_model_path, 'rb') as f:
    graph_def = tf.compat.v1.GraphDef()
    loaded = graph_def.ParseFromString(f.read())


# +
def wrap_frozen_graph(graph_def, inputs, outputs, print_graph=False):
    """Wrap a frozen GraphDef into a ConcreteFunction mapping inputs -> outputs."""
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")

    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    # if print_graph == True:
    #     print("-" * 50)
    #     print("Frozen model layers: ")
    #     layers = [op.name for op in import_graph.get_operations()]
    #     for layer in layers:
    #         print(layer)
    #     print("-" * 50)
    return wrapped_import.prune(
        tf.nest.map_structure(import_graph.as_graph_element, inputs),
        tf.nest.map_structure(import_graph.as_graph_element, outputs))
# -

frozen_func = wrap_frozen_graph(graph_def=graph_def,
                                inputs=["image_arrays:0"],
                                outputs=["detections:0"])

# +
# get_image_array(img_path).shape
# -

detections = frozen_func(tf.constant(get_image_array(img_path)))[0].numpy().squeeze()
detections[:, 5]

# # Keras implementation of EfficientDet

config_d1 = hparams_config.get_detection_config("efficientdet-d1")
config_d1.model_dir

train_lib.EfficientDetNetTrain(config=config)

# +
# with tf.Session(graph=tf.Graph()) as sess:
#     saver = tf.train.import_meta_graph(checkpoint.model_checkpoint_path + '.meta', clear_devices=True)
# -

# # References
#
# 1. [https://ml6.eu/a-practical-guide-to-fine-tuning-efficientdet-for-object-detection/](https://ml6.eu/a-practical-guide-to-fine-tuning-efficientdet-for-object-detection/)
source/pic2card/notebooks/EfficientDet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import torch
from fastai.text import *
from pathlib import Path
import news_utils.plot
import pymongo
from collections import defaultdict
import sklearn.metrics

# print all available databases
client = pymongo.MongoClient('localhost', 27017)
cursor = client.list_databases()
for db in cursor:
    print(db)

import altair as alt
alt.renderers.enable('notebook')
# -


def get_mets(db, conf=None):
    """Return metrics of the best run (by max kappa_score) in Mongo db ``db``.

    If ``conf`` is given, only runs matching that query are considered.
    Returns (best kappa, accuracy, F1_macro, best epoch, #epochs, exp_id,
    drop_mult, mod).
    """
    mydb = client[db]
    res = mydb["metrics"].aggregate([
        {"$match": {"name": 'kappa_score'}},  # only consider this metric
        {"$unwind": "$values"},
        # per run, keep the best (max) kappa value seen over all epochs
        {"$group": {'_id': '$_id',
                    'val': {'$max': "$values"},
                    'run_id': {'$first': '$run_id'}}},
        {"$sort": {"val": -1}},  # best run first
    ])
    if conf is not None:  # idiom fix: was ``not conf is None``
        runs = mydb['runs'].find(conf)
        runs = [r['_id'] for r in list(runs)]
        res = [r for r in res if r['run_id'] in runs]
    best = list(res)[0]
    # Locate the (1-based) epoch at which the best kappa was reached.
    epoch = None
    max_epochs = 0
    for x in mydb['metrics'].find({'run_id': best['run_id'], 'name': 'kappa_score'}):
        max_epochs = len(x['values'])
        for i, v in enumerate(x['values']):
            if v == best['val'] and epoch is None:
                epoch = i + 1
    for x in mydb['metrics'].find({'run_id': best['run_id'], 'name': 'F1_macro'}):
        f1_macro = x['values'][epoch - 1]
    for x in mydb['metrics'].find({'run_id': best['run_id'], 'name': 'accuracy'}):
        f1_micro = x['values'][epoch - 1]
    run = list(mydb['runs'].find({'_id': best['run_id']}))[0]
    mod = ''
    if 'mod' in run['config']:
        mod = run['config']['mod']
    return best['val'], f1_micro, f1_macro, epoch, max_epochs, run['config']['exp_id'], run['config']['drop_mult'], mod


all_cols = ['claudience', 'clpersuasive', 'clsentiment', 'clagreement',
            'cldisagreement', 'clinformative', 'clmean', 'clcontroversial', 'cltopic']

choos_cat = 'dat_false_par_true_hea_false30000_cl_cltopic'

get_mets(db=choos_cat, conf={})

model_id = '2019_ 2_26_16_36_45_362323'
p = list(Path('/home/group7/data/ynacc_proc').glob(f'**/{model_id}.pth'))[0]
p_fixed = '/'.join(p.parts[:-2])[1:]
exp = p.parts[-3]

data_lm = TextLMDataBunch.load(Path('/mnt/data/group07/johannes/ynacc_proc/proper_baseline/exp/' + exp))


def setup_data(clas):
    """Build a TextClasDataBunch for label column ``clas`` from the baseline split."""
    # UT = Path('~/data/ynacc_proc/proper_threads/data/cls/' + args.exp)
    # UT = Path('~/data/ynacc_proc/proper_threads/data/cls/only_threads_unlimited_30000_cut')
    UT = Path('~/data/ynacc_proc/proper_baseline/cls/dat_false_par_true_hea_false')
    data_clas_train = pd.read_csv(UT/'train.csv')
    data_clas_val = pd.read_csv(UT/'val.csv')
    print(data_clas_val.shape)
    data_clas_train = data_clas_train[[clas, 'text_proc']]
    data_clas_val = data_clas_val[[clas, 'text_proc']]
    data_clas_train = data_clas_train.dropna()
    data_clas_val = data_clas_val.dropna()
    data_clas_train[clas] = data_clas_train[clas].astype(int)
    data_clas_val[clas] = data_clas_val[clas].astype(int)
    data_clas = TextClasDataBunch.from_df(
        p_fixed, data_clas_train, data_clas_val,
        vocab=data_lm.train_ds.vocab, bs=64,
        text_cols=['text_proc'], label_cols=[clas],
        tokenizer=Tokenizer(cut_n_from_behind=1398))
    return data_clas


data = setup_data('cltopic')
del data_lm  # only the vocab was needed; free the LM data

learn = text_classifier_learner(data).load(p.stem)

news_utils.plot.all(learn)

preds, y_true = learn.get_preds(ordered=True)
_, y_pred = preds.max(dim=1)

str(learn.data.valid_ds[0][0])

# +
# Text lengths of misclassified (lens) vs correctly classified (lens_true)
# validation items, restricted to texts of at most 600 characters.
lens = []
lens_true = []
for i, d in enumerate(learn.data.valid_ds):
    if len(str(d[0])) > 600:
        continue
    if y_true[i] != y_pred[i]:
        lens.append(len(str(d[0])))
    else:
        lens_true.append(len(str(d[0])))

# +
# Accuracy per 20-character length bucket.
d_l = [x//20 for x in lens]
d_lt = [x//20 for x in lens_true]
for i in range(1, 20):
    print(str(i*20) + '-' + str((i+1)*20),
          d_lt.count(i) / (d_lt.count(i) + d_l.count(i)),
          ' Context-agnostic')
# -

d_l

alt.Chart(pd.DataFrame({'x': lens})).mark_bar().encode(
    alt.X("x:Q", bin=alt.Bin(maxbins=60)),
    y='count()',
)

alt.Chart(pd.DataFrame({'x': lens_true})).mark_bar().encode(
    alt.X("x:Q", bin=alt.Bin(maxbins=60)),
    y='count()',
)

# +
# Split validation indices by text length.
root_i = []
other_i = []
for i, d in enumerate(learn.data.valid_ds):
    if len(str(d[0])) > 100:
        # if 'xxreply_false' in str(d[0]):
        root_i.append(i)
    else:
        other_i.append(i)
# -

# Set membership is O(1); the original tested ``i in root_i`` on a list,
# which is O(n) per element.
root_set = set(root_i)

sklearn.metrics.classification_report(
    [x for i, x in enumerate(y_true) if i in root_set],
    [x for i, x in enumerate(y_pred) if i in root_set],
    output_dict=True)

sklearn.metrics.classification_report(
    [x for i, x in enumerate(y_true) if i not in root_set],
    [x for i, x in enumerate(y_pred) if i not in root_set],
    output_dict=True)

learn.data.valid_ds[2]


def cut(text):
    """Split off the last comment: (everything before it, the comment body)."""
    start_token = 'xx_comment_start'
    end = 'xx_comment_end xx_thread_end'
    i = text.rindex(start_token)
    return text[:i - 1], text[i + len(start_token):-len(end)]


# +
idx = 0
conf = []
# FIX: the original iterated over an undefined name ``res``; (preds, y_true)
# from learn.get_preds() above is the (prediction, label) pair in scope.
for i, (x, y) in enumerate(zip(preds, y_true)):
    if np.argmax(x) != y:
        conf.append([i, max(x), y.item()])
# -

learn.data.valid_ds[0][0].text
code/ynacc/12/Verify-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""
Explore speaker relationship with network analysis:
nodes: speakers
    - color: node['profession'] = 'entertainment','tech','education','author'...? TBD
    - size: degree centrality
edge: defined by overlap of words used
"""

# +
import re, itertools
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations, count
# -

# ## 1. Load data

speech_df = pd.read_pickle('../dump/speech_clean_lemma')
speaker_info = pd.read_pickle('../dump/speaker_info')

speech_df
speaker_info

# ## 2. Fill missing values
# ### Profession

# Hand-curated professions for the speakers whose `profession` is missing.
update_profession = [
    'writer', 'actor', 'Educator', 'Professor', 'CEO of Right to Start',
    'CEO of Microsoft', 'Professor', 'Poet', 'spiritual leader', 'Attorney',
    'Finance', 'Educator', 'Biologist', 'Tech (Cisco)',
    'Environmental diplomat', 'Pulitzer Prize winner author', 'Professor',
    'Professor', 'Student', 'Tech (Instagram)', 'spiritual (pastor)',
    'Educator', 'Finance (investor)', 'Finance (investor)', 'Politician',
    'student', 'Culture (911 museum)', 'spiritual (pastor)', 'writer',
    'writer', '??', 'Tech (Instagram)', 'Tech, educator', 'Tech, educator',
    'Science (NASA)', 'CEO (NanoHorizons)', 'writer', 'writer',
    'CEO (Coca-cola)', 'CEO (Coca-cola)', 'student', 'Judge',
]
len(update_profession)  # sanity check: must match the number of missing rows (42)

# NOTE(review): in the original notebook `test_df` was used before the cell
# that defined it (cells run out of order). It is defined first here so the
# script runs top to bottom. `.copy()` keeps later column assignments from
# writing to a view of `speaker_info`.
test_df = speaker_info[speaker_info['profession'].isna()].copy()
test_df.shape

test_df['profession_update'] = update_profession
test_df

update = ['writer'] * 42
pd.Series(update)

# Bug fix: `speaker_info[mask]['profession'] = 'writer'` is chained indexing —
# it writes to a temporary copy and leaves `speaker_info` unchanged. Assign
# through a single .loc call instead.
speaker_info.loc[speaker_info['profession'].isna(), 'profession'] = 'writer'

# --- scratch experiments with pandas assignment semantics, kept from the
# --- notebook but annotated:
test_list = ['hi'] * 42
# NOTE(review): assigning a fresh pd.Series aligns on index labels, and
# test_df keeps speaker_info's (sparse) index — use .values to assign by
# position, which is almost certainly what was intended here.
test_df['profession'] = pd.Series(test_list).values
# Bug fix: `test_df.iloc[10]['profession'] = ...` is chained indexing on a
# row copy and has no effect; write through a single positional indexer.
test_df.iloc[10, test_df.columns.get_loc('profession')] = "writer"

# ### Year born

# The scrape stored missing years as the literal string "nan".
speaker_info['born'] = speaker_info['born'].replace("nan", np.nan)
speaker_info[speaker_info['born'].isna()]
speaker_info.info()
speaker_info[speaker_info['born'].isnull()]
speaker_info['born'].fillna(0).unique()

# Some rows scraped only the label "Born:"; derive the year from the age.
wrong_born = speaker_info[speaker_info['born'] == "Born:"]


def correct_born(row):
    """Replace a bad 'born' value with (2021 - age) and return the row."""
    row['born'] = 2021 - row['age']
    return row


# Bug fix: the original `wrong_born.apply(correct_born)` passed columns (no
# axis=1) and discarded the result; apply row-wise and keep the output.
wrong_born = wrong_born.apply(correct_born, axis=1)

speaker_info['age_now'] = 2020 - speaker_info['born'].fillna(0).astype(int)

# ## 3. Network analysis
notebook/scratchpad_missing-values.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Numerical Integration and Reaction Kinetics # # In addition to processing experimental data from the real world, Python can also be used to generate simulations of physical systems that change over time. In this notebook, we will practice performing numerical integration to solve systems of differential equations that describe chemical systems. # To simulate a physical system, we need to describe how that system changes over time as a function of its current state. This description often takes the form of a system of ordinary differential equations (ODEs). Although solving ODEs analytically is sometimes difficult or impossible, their solutions can be approximated by numerically integrating them over time, given some initial conditions. Python provides a collection of powerful general-purpose numeral integration tools that can be used for solving an initial value problem (IVP) of this kind. We will be using the `solve_ivp` function for this purpose. The `solve_ivp` function takes three inputs: # # 1. An arbitrary function describing the derivative of the variable(s) # 2. A time span on which to compute the solution to the IVP # 3. The initial conditions at the beginning of the time span # # The function returns a bundle of information to us. In particular it gives us the following: # # 1. An array of times within the range specified in the input # 2. The value of the function at every time in the array # # Learn more about how `solve_ivp` works here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html # ## Example 1: Radioactive Decay # # You have successfully synthesized a 10 mg sample of yttrium-87 and want to how much will be left after a month. 
Since $^{87}\text{Y}$ undergoes beta decay to $^{87}\text{Sr}$ with a half-life of about $t_{1/2} \approx 3.4\ \text{days}$, we can describe the amount of $^{87}\text{Y}$ over time with the following initial value problem. # # $$ \frac{\text{d}y}{\text{d}t} = - \frac{\ln(2)}{t_{1/2}}y \qquad \qquad y(0) = y_0 $$ # # Here $y$ is the mass of yttrium-87 that changes over time $t$, while and $y_0 = 10\ \text{mg}$ is the initial amount at $t=0$. Here's how we compute the solution in Python: # + import numpy as np from scipy.integrate import solve_ivp import matplotlib.pyplot as plt # define constants t12 = 3.4 # half-life of 3.4 days y0 = [10] # starting with 10 mg (has to be in a list or array) # the derivitive of y as a function of t and y def yprime(t, y): return - (np.log(2) / t12) * y # we want to see how the system changes over one month t_span = [0, 31] # compute the solution sol = solve_ivp(yprime, t_span, y0) # unpack the pieces we want t = sol.t # an array of times y = sol.y[0] # the value of the function at each time # plot the results plt.figure(figsize=(10,3)) plt.plot(t, y) plt.title("Mass of yttrium-87 over time") plt.xlabel("time (days)") plt.ylabel("mass (mg)") plt.show() # - # The solution makes sense because if we solve this IVP analytically by normal methods of solving differential equations, we obtain a decaying exponential function. Try modifying $t_{1/2}$ and $y_0$ to see how the output changes. Although an analytical solution is easy to obtain for this system, using Python is much easier for more complex IVPs. # # You may have noticed a couple of strange things in the example above. When specifying the initial value `y0 = [10]` it was required to contain it inside a list or array. Additionally, we extracted the solution with `sol.y[0]`. The reason for both is that `solve_ivp` is designed to work for IVPs with any number of variables. Next we will explore an example of a such a multi-variable IVP. 
# ## Example 2: Predator-Prey Dynamics # # In the nearby area there are populations of both hawks and rabbits. When there are lots of rabbits, the hawks thrive on an abundance of food, decimating the rabbit population. But as their food source dwindles, the hawk population falls, leading to a resurgence of rabbits as they freely reproduce. We can use the [Lotka-Volterra Model](https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations) to simulate this behavior. If $r$ represents the number of rabbits and $h$ represents the number of hawks, then the population dynamics are described by the following IVP. # # \begin{align*} # \frac{\text{d}r}{\text{d}t} &= a r - b rh & r(0) &= r_0 \\ # \frac{\text{d}h}{\text{d}t} &= -c h + d rh & h(0) &= h_0 \\ # \end{align*} # # For this simulation, let $a=8$, $b=2$, $c=3$, and $d=1$. Assume we start with $r_0 = 50$ rabbits and $h_0 = 50$ hawks. # + from scipy.integrate import solve_ivp import matplotlib.pyplot as plt # define constants a = 8 b = 2 c = 3 d = 1 # array of initial conditions [r0, h0] y0 = [50, 50] # the derivatives of both r0 and h0 over time def yprime(t, y): r = y[0] # unpack arguments h = y[1] rprime = a*r - b*r*h # compute derivatives hprime = -c*h + d*r*h return [rprime, hprime] # pack them up again # specify time span of solution t_span = [0, 20] # compute the solution sol = solve_ivp(yprime, t_span, y0) # unpack the pieces we want t = sol.t # an array of times r = sol.y[0] # unpack both variables h = sol.y[1] # plot the results plt.figure(figsize=(10,3)) plt.plot(t, r) plt.plot(t, h) plt.title("Lotka-Volterra Model") plt.xlabel("time (years)") plt.ylabel("population (individuals)") plt.legend(["rabbits", "hawks"]) plt.show() # - # As expected, the rabbit and hawk populations oscillate over time. # **Python Question 1** # # You can now apply these concepts to simulate a chemical reaction with first-order kinetics. Consider the following reversible association/dissociation reaction. 
This could represent an acid-base or solubility process, for example. # # $$ \text{A} + \text{B} \quad {}_{\xleftarrow[k_2]{}}^{ \xrightarrow{k_1}} \quad \text{AB} \\[0.5em] $$ # # Assuming a first order kinetics mechanism, the system is described by the following IVP (make sure you understand how this was derived). # # $$ \begin{align*} # \frac{\text{d}[\text{A}]}{\text{d}t} &= - k_1 [\text{A}][B] + k_2[\text{AB}] & \left [\text{A}] \right |_{t=0} &= [\text{A}]_0 \\ # \frac{\text{d}[\text{B}]}{\text{d}t} &= - k_1 [\text{A}][\text{B}] + k_2[\text{AB}] & \left [\text{B}] \right |_{t=0} &= [\text{B}]_0 \\ # \frac{\text{d}[\text{AB}]}{\text{d}t} &= k_1 [\text{A}][\text{B}] - k_2[\text{AB}] & \left [\text{AB}] \right |_{t=0} &= [\text{AB}]_0 # \end{align*} $$ # # Assume the initial conditions $[\text{A}]_0 = 0.1\ \text{M}$, $[\text{B}]_0 = 0.2\ \text{M}$, and $[\text{AB}]_0 = 0\ \text{M}$. Let the rate constants be $k_1 = 0.5 \ \text{M}^{-1}\text{s}^{-1}$ and $k_2 = 0.01 \ \text{s}^{-1}$. Complete the code below to simulate the reaction over the course of 120 seconds. # + from scipy.integrate import solve_ivp import matplotlib.pyplot as plt # define constants k1 = ??? k2 = ??? # define initial conditions [A0, B0, AB0] y0 = [???, ???, ???] # the derivatives of all chemical species over time def yprime(t, y): A, B, AB = y[0], y[1], y[2] # unpack arguments Aprime = ??? # compute derivatives Bprime = ??? ABprime = ??? return [Aprime, Bprime, ABprime] # pack them up again # specify time span of solution t_span = [0, 20] # compute the solution sol = solve_ivp(yprime, t_span, y0) # unpack the pieces we want t = sol.t # an array of times A = sol.y[0] # unpack both variables B = sol.y[1] C = sol.y[2] # plot the results plt.figure(figsize=(10,3)) plt.plot(t, A) plt.plot(t, B) plt.plot(t, C) plt.title("First Order Kinetics") plt.xlabel("time (s)") plt.ylabel("concentration (M)") plt.legend(["[A]", "[B]", "[C]"]) plt.show()
hw4-kinetics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Market-timing strategies based on the one-sided volatility spread
#
# Following the Guosen Securities research report
# "20151022 Applying the one-sided volatility spread under relative strength".
# By decomposing volatility into an upside and a downside component, the
# spread between the two can be used to time the market.

import OneSideVolatilityStrategy as st
import odo
import pandas as pd
import matplotlib
# %matplotlib inline

# df_price = odo.odo('index_sh.csv', pd.DataFrame)
import Quandl
df_price = Quandl.get("YAHOO/INDEX_SSEC", trim_start="1970-01-01")
df_price = df_price.loc['2004-04-20'::]


def AbsoluteVol(data, n_ma=60):
    """Add one-sided volatility columns to ``data`` and return it sans NaNs.

    po_std = High - Open  (upside range), ne_std = Open - Low (downside
    range); delta_std is their spread and ma_delta_std its n_ma-day moving
    average. NOTE: mutates ``data`` in place (``df`` aliases it); the
    returned frame is the dropna() copy.
    """
    df = data
    df['po_std'] = df['High'] - df['Open']
    df['ne_std'] = df['Open'] - df['Low']
    df['delta_std'] = df['po_std'] - df['ne_std']
    # fixed: pd.rolling_mean was removed in pandas >= 0.23
    df['ma_delta_std'] = df['delta_std'].rolling(n_ma).mean()
    df = df.dropna()
    return df


# ## Strategy 1.1: time the market with the one-sided volatility spread

# +
def PosNegVolatilityStrategy(df_price, n_vol=20, n_ma=60):
    """Long-only timing signals from the one-sided volatility spread.

    'ma'    : long while the moving average of the spread is positive.
    'cross' : long while the raw spread is above its moving average.
    Returns a frame of raw and signal-filtered returns (cc/oo/co/oc =
    close-close, open-open, close-open, open-close).
    """
    # calculate the returns
    df_ret_co = df_price['Close'] / df_price['Open'] - 1
    df_ret_cc = df_price['Close'] / df_price['Close'].shift(1) - 1
    df_ret_oo = df_price['Open'] / df_price['Open'].shift(1) - 1
    df_ret_oc = df_price['Open'] / df_price['Close'].shift(1) - 1

    # calculate the vol
    df_spread_vol = AbsoluteVol(df_price, n_ma)
    # df_ret_co_vol = GetOneSideVol(df_ret_co, n_ma)

    # long-only signals; the short leg is kept disabled as in the original:
    #   + (... < 0) * -1
    signal = pd.DataFrame()
    signal['ma'] = (df_spread_vol['ma_delta_std'] > 0) * 1
    signal['cross'] = (df_spread_vol['delta_std'] > df_spread_vol['ma_delta_std']) * 1

    # (removed `ret.index.tz = None`: on a freshly created empty DataFrame
    # the index is a RangeIndex, so the assignment raised on modern pandas
    # and had no effect anyway — the index is set by the column assignments)
    ret = pd.DataFrame()
    ret['cc'] = df_ret_cc
    ret['oo'] = df_ret_oo
    ret['co'] = df_ret_co
    ret['oc'] = df_ret_oc
    # trade on yesterday's signal (shift(1)); open-open needs one extra day
    ret['cross_cc'] = signal['cross'].shift(1) * df_ret_cc
    ret['cross_co'] = signal['cross'].shift(1) * df_ret_co
    ret['cross_oo'] = signal['cross'].shift(1) * df_ret_oo
    ret['ma_cc'] = signal['ma'].shift(1) * df_ret_cc
    ret['ma_co'] = signal['ma'].shift(1) * df_ret_co
    ret['ma_oo'] = signal['ma'].shift(2) * df_ret_oo
    ret = ret.dropna()
    return ret


ret = PosNegVolatilityStrategy(df_price)
# -

(1 + ret[['cc', 'cross_cc', 'ma_cc']]).cumprod().plot(figsize=[15, 10])

# ## Strategy 1.2: timing with the relative-strength (RPS) indicator
#
# RPS measures whether the market is in a strong regime: it takes values in
# [0, 1], with 1 the strongest. RPS is the current price minus the 250-day
# low, divided by the 250-day high-low range. Because the raw indicator is
# noisy, its 10-day moving average is used.

# +
df_close = df_price['Close']


def RPS(price, n_days=250, n_ma=10):
    """Relative price strength of ``price`` over a rolling n_days window.

    'value' = (price - rolling min) / (rolling max - rolling min), in [0, 1];
    'ma' is its n_ma-day moving average to smooth the noise.
    """
    # fixed: pd.rolling_max/rolling_min/rolling_mean removed in pandas >= 0.23
    max_price = price.rolling(n_days).max()
    min_price = price.rolling(n_days).min()
    rps = pd.DataFrame()
    rps['value'] = (price - min_price) / (max_price - min_price)
    rps['ma'] = rps['value'].rolling(n_ma).mean()
    return rps
# -


# +
def RPSStrategy(df_price, n_days=250, n_ma=1):
    """Long-only timing: hold while smoothed RPS exceeds 0.8."""
    # calculate the returns
    df_ret_co = df_price['Close'] / df_price['Open'] - 1
    df_ret_cc = df_price['Close'] / df_price['Close'].shift(1) - 1
    df_ret_oo = df_price['Open'] / df_price['Open'].shift(1) - 1
    df_ret_oc = df_price['Open'] / df_price['Close'].shift(1) - 1

    # calculate the RPS
    df_rps = RPS(df_price['Close'], n_days, n_ma)

    # long-only signal; the short leg is kept disabled as in the original:
    #   + (df_rps['ma'] < 0.2) * -1
    signal = pd.DataFrame()
    signal['rps'] = (df_rps['ma'] > 0.8) * 1

    ret = pd.DataFrame()
    ret['cc'] = df_ret_cc
    # ret['oo'] = df_ret_oo
    # ret['co'] = df_ret_co
    # ret['oc'] = df_ret_oc
    ret['rps_cc'] = signal['rps'].shift(1) * df_ret_cc
    # ret['rps_co'] = signal['rps'].shift(1) * df_ret_co
    ret = ret.dropna()
    return ret


ret = RPSStrategy(df_price)
(1 + ret).cumprod().plot(figsize=[15, 7])
# -

# ## Strategy 1.3: combining RPS with the one-sided volatility spread
#
# Strategy outline:
# 1. Compute the index's relative strength (RPS).
# 2. Compute the upside and downside volatility and their spread.
# 3. Compute a moving average of the spread whose window length is set by
#    the RPS value — the larger the RPS, the longer the window.
# 4. If yesterday's moving average of the spread is positive, hold (or buy);
#    otherwise stay flat (or sell).
# 5. Note: transaction costs should be taken into account.

# +
def CombinedStrategy(df_price):
    """One-sided volatility signals with an RPS-dependent smoothing window.

    NOTE(review): the window length is derived only from the LAST RPS value,
    not day by day as the outline above suggests — confirm against the
    report before relying on this backtest.
    """
    # calculate the returns
    df_ret_co = df_price['Close'] / df_price['Open'] - 1
    df_ret_cc = df_price['Close'] / df_price['Close'].shift(1) - 1
    df_ret_oo = df_price['Open'] / df_price['Open'].shift(1) - 1
    df_ret_oc = df_price['Open'] / df_price['Close'].shift(1) - 1

    # calculate the RPS
    df_rps = RPS(df_price['Close'])

    # calculate the one-sided vol with an RPS-dependent window
    # fixed: int(Series.tail(1)) is deprecated/removed — take the scalar
    n_ma = int((1 - df_rps['ma'].iloc[-1]) * 100) + 1
    print(n_ma)
    df_vol = AbsoluteVol(df_price, n_ma)

    # long-only signals as in Strategy 1.1
    signal = pd.DataFrame()
    signal['ma'] = (df_vol['ma_delta_std'] > 0) * 1
    signal['cross'] = (df_vol['delta_std'] > df_vol['ma_delta_std']) * 1

    ret = pd.DataFrame()
    ret['cc'] = df_ret_cc
    ret['ma_cc'] = signal['ma'].shift(1) * df_ret_cc
    # ret['ma_co'] = signal['ma'].shift(1) * df_ret_co
    ret['cross_cc'] = signal['cross'].shift(1) * df_ret_cc
    # ret['cross_co'] = signal['cross'].shift(1) * df_ret_co
    ret = ret.dropna()
    return ret


ret = CombinedStrategy(df_price)
# -

(1 + ret).cumprod().plot(figsize=[15, 7])

# ## Results
#
# From the return curves above, combining RPS with the one-sided volatility
# spread produces a more stable timing signal. Among the combinations,
# ma_cc yields the highest return:

ret.groupby(ret.index.year).sum()
Trading_Strategies/TimingStrategy/.ipynb_checkpoints/UpDownVolatilityStrategy-checkpoint.ipynb