code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Iris - Logistic Regression # This example applies Logistic Regression in order to predict the type of iris plant based on sepal and petal length and width. # # ## Import some useful libraries and get Spark Context # + from pyspark import SparkConf, SparkContext from pyspark.sql import SQLContext import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline sc= SparkContext() sqlContext = SQLContext(sc) # - # ## Load the database, convert it to Spark and split data into train and test sets # # [Iris Data Set](https://archive.ics.uci.edu/ml/datasets/iris) # # Attribute Information: # # - sepal length in cm # - sepal width in cm # - petal length in cm # - petal width in cm # - class: # - Iris-Setosa # - Iris-Versicolour # - Iris-Virginica pdf = pd.read_csv('iris.data') pdf.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'] data = sqlContext.createDataFrame(pdf) pdf.head() # ## Data exploration pdf.describe() data.printSchema() data.show(3) # + from pyspark.ml.feature import StringIndexer indexer = StringIndexer(inputCol="species", outputCol="label") indexed = indexer.fit(data).transform(data) indexed.select(['label']).distinct().show() # + from pyspark.ml.feature import VectorAssembler vectorAssembler = VectorAssembler(inputCols = ['sepal_length', "sepal_width", "petal_length", "petal_width"], outputCol = 'features') vData = vectorAssembler.transform(indexed) vData = vData.select(['features', 'label']) vData.show(3) # + train, test = vData.randomSplit([0.7, 0.3]) print("Training Dataset Count: " + str(train.count())) print("Test Dataset Count: " + str(test.count())) # - # ## Logistic Regression # + from pyspark.ml.classification import LogisticRegression lr = 
LogisticRegression(featuresCol = 'features', labelCol = 'label', maxIter=10) lrModel = lr.fit(train) # - print(lrModel.coefficientMatrix) predictions = lrModel.transform(test) predictions.select('prediction', 'label', 'rawPrediction', 'probability').show(10) # + from pyspark.ml.evaluation import MulticlassClassificationEvaluator evaluator = MulticlassClassificationEvaluator() print('Test Area Under ROC', evaluator.evaluate(predictions)) print('Higher the AUC, better the model is at predicting') # -
notebooks/Logistic Regression/Iris-Logistic_Regression-Spark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/zahraDehghanian97/DenseNet_Vs._ResNet/blob/master/DenseNet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="TWheA6CSj3ft" # # **Mount google drive** # + colab={"base_uri": "https://localhost:8080/"} id="9r3444wwj6Mr" outputId="fbabe007-518c-4fa7-ec7a-4102e0477dcc" from google.colab import drive drive.mount('/content/drive') # + [markdown] id="YTSvl-QYjyo3" # # **prerequisit** # + id="s3bPfRvshs1o" # %tensorflow_version 2.x import tensorflow as tf from tensorflow import keras import numpy as np from sklearn.metrics import confusion_matrix, accuracy_score from tensorflow.keras.preprocessing import image from keras.layers import Flatten, Dense, Input, Dropout import glob from sklearn.model_selection import train_test_split from keras.models import Sequential # + [markdown] id="AqTjhr0Rj0RW" # # **call back** # + id="XZmRtsodjztK" class EarlyStoppingCallback(keras.callbacks.Callback): def __init__(self,patience=0): super(EarlyStoppingCallback,self).__init__() self.patience=patience self.best=np.Inf self.wait=0 self.stopped_epoch=0 def on_epoch_end(self,epoch,logs=None): current_loss=logs.get("val_loss") if np.less(current_loss,self.best): self.best=current_loss self.wait=0 self.best_weights=self.model.get_weights() else: self.wait+=1 print("\nwait mode, step: %d"% self.wait) if self.wait>=self.patience: self.stopped_epoch=epoch self.model.stop_training=True self.model.set_weights(self.best_weights) print("epoch: %d : early stopping."% self.stopped_epoch) self.wait = 0 es_callback=EarlyStoppingCallback(patience=5) # + [markdown] id="fjMMgNOLkSzX" # # **Make dataset ready** # + 
id="97QoNMEWkXoZ" def load_photos(type,dir_name): photo_list =[] y = [] for file_name in (glob.glob(dir_name+'/*')): img = image.load_img(file_name, target_size=input_size) img = np.array(img) photo_list.append(img) y.append(type) return photo_list , y input_size = (250,250) input_shape = (250,250,3) dir_name_indoor = "/content/drive/MyDrive/Colab Notebooks/indoor" dir_name_outdoor = "/content/drive/MyDrive/Colab Notebooks/outdoor" X1 , y1= load_photos(0,dir_name_indoor) X2 , y2= load_photos(1,dir_name_outdoor) X1.extend(X2) y1.extend(y2) X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.1, random_state=42) X_train = np.array(X_train) X_test = np.array(X_test) y_test = np.array(y_test) y_train = np.array(y_train) # + [markdown] id="A1R7jCWK8lg2" # # **DenseNet** # + id="xBRmXnpA8lhf" colab={"base_uri": "https://localhost:8080/", "height": 77} outputId="9185587e-727b-466e-e7b0-4f0d860569d8" keras.backend.clear_session() base_model = keras.applications.DenseNet201(include_top=False, weights="imagenet", input_shape=input_shape) base_model.trainable = False model_dense = Sequential() model_dense.add(base_model) model_dense.add(Flatten()) model_dense.add(Dense(2500,activation='relu')) model_dense.add(Dropout(0.2)) model_dense.add(Dense(1000,activation='relu')) model_dense.add(Dropout(0.1)) model_dense.add(Dense(250,activation='relu')) model_dense.add(Dense(50,activation='relu')) model_dense.add(Dense(10,activation='relu')) model_dense.add(Dense(2)) tf.keras.utils.plot_model(model_dense,show_shapes=True,expand_nested=True) # + id="QQtHFjQm8lhn" colab={"base_uri": "https://localhost:8080/"} outputId="58e6159a-69f7-48df-8a20-62d72713f0e8" model_dense.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),metrics=['accuracy']) tb_callback = keras.callbacks.TensorBoard(log_dir="./logs",histogram_freq=1) 
model_dense.fit(X_train,y_train,epochs=1000,validation_split=0.2,callbacks=[es_callback,tb_callback]) prediction_without_fine_tune_dense =np.argmax(model_dense.predict(X_test),axis=1) # + [markdown] id="fk1l3F_88lhq" # ##fine tune # + id="N7k4FSuL8lhr" colab={"base_uri": "https://localhost:8080/"} outputId="6f94197e-5780-45f8-d641-2357d0a39254" layers = base_model.layers for layer in layers[-50:-1]: layer.trainable = True model_dense_fine=keras.models.Sequential() model_dense_fine.add(base_model) for i in range(1,len(model_dense.layers)): model_dense_fine.add(model_dense.layers[i]) model_dense_fine.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),metrics=['accuracy']) model_dense_fine.fit(X_train,y_train,epochs=1000,validation_split=0.2,callbacks=[es_callback]) # + id="n2ywWEFM8lhs" colab={"base_uri": "https://localhost:8080/"} outputId="8c1fccd7-8db2-4759-994f-842856e781e3" prediction_fine_tune_dense=np.argmax(model_dense_fine.predict(X_test),axis=1) print("DenseNet Result =====================>>>>") print('accuracy without fine tuning = '+ str(accuracy_score(y_test,prediction_without_fine_tune_dense))) print('confusion matrix without fine tuning'+ str( confusion_matrix(y_test,prediction_without_fine_tune_dense))) print('accuracy with fine tuning = '+ str(accuracy_score(y_test,prediction_fine_tune_dense))) print('confusion matrix with fine tuning'+ str( confusion_matrix(y_test,prediction_fine_tune_dense)))
DenseNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Statistical Physics # ## Exercise 1.2.6 # Import relevant packages # + import numpy as np import matplotlib.pyplot as plt import tqdm.notebook # Progress bar for loops # Import user written module import ising # %matplotlib inline # Ensure to reload the module whenever it is updated # %load_ext autoreload # %autoreload 2 # - # Some description of the Ising model and the Metropolis Hastings algorithm # Define parameter values - maybe move N = 200 n_max = 100000 n_0 = 80000 # Create and plot and initial state for visualization # + # Define initial state config = ising.initialstate(N) # Test plot plt.figure(dpi=100) plt.imshow(config, cmap='binary') plt.colorbar() # - # Run the Metroplis-Hastings algorithm for the Ising model. # For each Monte Carlo step, I save the resulting average energy and magnetization per site. 
# + # Initialize config = ising.initialstate(N) energy_vec = np.empty(n_max) mag_vec = np.empty(n_max) beta = 0.5 # Perform n_max Monte Carlo steps for i in range(n_max) : # Energy per site and average magnetization energy_vec[i] = ising.calcEnergy(config) mag_vec[i] = ising.calcMag(config) # Perform MC move ising.mcmove(config, beta) # - # Plot the resulting lattice plt.figure(dpi=100) plt.imshow(config, cmap='binary') plt.colorbar() plt.show() # Plot average energy and magnetization is a function of the iteration number # + # Initialize figure fig,(ax1,ax2) = plt.subplots(1,2,figsize=(12,3),dpi=100) # Energy ax1.plot(energy_vec, linewidth = 0.5) ax1.set_title('Average Energy') ax1.set_xlabel('Iteration nr') ax1.set_ylabel('$\\langle E \\rangle$') # Magnetization ax2.plot(mag_vec, linewidth = 0.5) ax2.set_title('Magnetization per Site') ax2.set_ylabel('$M$') ax2.set_xlabel('Iteration nr') plt.show() # - # Compute internal energy $\langle E \rangle$ and magnetization per site $\langle s \rangle$ as a function of $k_B T = \beta^{-1}$. 
# + ################ ## Initialize ## ################ kbT_vec = np.linspace(0.5, 3.5, 20) # Vector of values of inverse beta energy_vec_kbT = np.empty(len(kbT_vec)) mag_vec_kbT = np.empty(len(kbT_vec)) repetitions = 5 ##################### ## Run Monte Carlo ## ##################### for i,kbT in enumerate(kbT_vec): # Initialize config = ising.initialstate(N) mag_temp = np.zeros(n_max - n_0 - 1) + np.nan # Beta is the inverse temperature beta_val = 1.0/kbT # run MC-MC for it in range(n_max): # Run Monte Carlo ising.mcmove(config, beta_val) # Begin to store data after n0 mc steps if it > n_0: mag_temp[it - n_0 - 1] = config.mean() # Save the resulting energy and magnetization mag_vec_kbT[i] = np.abs(np.mean(mag_temp)) # Calculate the absolute magnetization as the solution is symmetric # - # Plot the results and compare with Onsagers exact solution # Compute function of Onsager's solution x_onsager = np.linspace(0.5, 2.2691853, 200) # Check these values out y_onsager = (1 - np.sinh(2 / x_onsager)**(-4))**(1/8) # + # Initialize figure fig = plt.figure(figsize=(6,4)) ax = fig.add_subplot(1,1,1) # ax.scatter(kbT_vec, mag_vec_kbT, color = '0.3', s = 10) ax.plot(kbT_vec, mag_vec_kbT, linewidth = 0.5, color = 'blue', linestyle = '-') # ax.scatter(kbT_vec, mag_vec_kbT) ax.plot(x_onsager, y_onsager, linewidth = 1, color = 'green', linestyle = ':') ax.set_title('Magnetization per Site') ax.set_xlabel('$k_B T$') ax.set_ylabel('$\\langle M \\rangle$') ax.set_xlim(1,3) plt.show() # - # Try to determine $\beta$ around $k_BT \sim 2.2$ numerically # Consider writing the below code as a function instead. # # I solve this by simulating the model in the area around the critical point $T = T_c$. # Theory tells that $\log |M| \propto (-t)^\beta$ where $t = \frac{T- T_c}{T_c}$ is the temperature relative to the critical temperature. # # The critical exponent can therefore be estimated by the equation # $$ \log |M| = \alpha + \beta \log(-t) + \varepsilon $$ # using OLS. 
# + ################ ## Initialize ## ################ kbT_vec_2 = np.linspace(2.2, 2.3, 30) # Vector of values of inverse beta energy_vec_kbT_2 = np.empty(len(kbT_vec_2)) mag_vec_kbT_2 = np.empty(len(kbT_vec_2)) ##################### ## Run Monte Carlo ## ##################### for i,kbT in enumerate(kbT_vec_2): # Specify system size here N = 80 # Initialize config = ising.initialstate(N) mag_temp = np.zeros(n_max - n_0 - 1) + np.nan # Beta is the inverse temperature beta_val = 1.0/kbT # run MC-MC for it in range(n_max): # Run Monte Carlo ising.mcmove(config, beta_val) # Begin to store data after n0 mc steps if it > n_0: mag_temp[it - n_0 - 1] = config.mean() # Save the resulting energy and magnetization mag_vec_kbT_2[i] = np.abs(np.mean(mag_temp)) # Calculate the absolute magnetization as the solution is symmetric # - # Check how the system behaves plt.scatter(kbT_vec_2, mag_vec_kbT_2) # Then estimate ovre the region $T \in (2.25 , 2.3) $. # I assume $T_c = 2.3$ # Relative temperature T_c = 2.35 + 10e-5 T_vec = kbT_vec_2 t_vec = (T_vec - T_c) / T_c # + # Estimate log_t = np.log(-t_vec) log_M = np.log(mag_vec_kbT_2) # Estimate the coefficient using OLS beta_1 = np.cov(log_M,-log_t)[1,0] / np.var(-log_t) beta_0 = np.mean(log_M - beta_1*(-log_t)) # Fitted values fitted = beta_0 + beta_1*(-log_t) # - # Plot the results # + # Initialize figure fig = plt.figure(figsize=(8,6)) ax = fig.add_subplot(1,1,1) ax.scatter(-log_t, log_M, color = 'blue', alpha = 0.4, edgecolor = 'k', s = 10, label = 'Data') ax.plot(-log_t, fitted, linewidth = 0.5, color = '0.2', linestyle = '--', label = 'Fitted Values') ax.set_title('Magnetization per Site') ax.set_xlabel('$\log(-t)$') ax.set_ylabel('$ \log |M|$') ax.legend() plt.show() # - print('The value of beta is ' + str(-beta_1) + ' the true value is ' + str(1/8))
ising_monte_carlo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: twitter_analysis # language: python # name: twitter_analysis # --- # + # change to root directory of project import os os.chdir('/home/tm/sciebo/corona/twitter_analysis/') from bld.project_paths import project_paths_join as ppj # - from IPython.display import display # + import numpy as np import pandas as pd from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.preprocessing import LabelEncoder from textblob import TextBlob from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer #import requests #import json #import argparse #from google.cloud import language #from google.oauth2 import service_account #from google.cloud.language import enums #from google.cloud.language import types # - # ## Data management # + data = pd.read_csv( ppj("IN_DATA", "training_data/data_clean_translated.csv") ).iloc[:, 1:] data_processed = pd.read_csv( ppj("IN_DATA", "training_data/data_processed_translated.csv"), ).iloc[:, 1:] df = data.copy() df["processed"] = data_processed.text df['sentiment_score'] = df.sentiment.replace({'neutral': 0, 'negative': -1, 'positive': 1}) df = df.dropna() # - # ## Functions def classify_sentiment(list_of_text, method): """Classify sentiment for each item in ``list_of_text``. Args: list_of_text (list): List of strings for which the sentiment should be classified. method (str): Name of method that should be used. Possible values are 'google', 'vader', 'textblob'. Returns: sentiments (list): List of respective sentiment score for each item in ``list_of_text``. """ analyzer = return_sentiment_analyzer(method) sentiments = analyzer(list_of_text) return sentiments def return_sentiment_analyzer(method): """Return specific sentiment analyzer function. Args: method (str): Name of method that should be used. 
Possible values are 'google', 'vader', 'textblob'. Returns: analyzer (function): Function which return a sentiment score given text input. Inner workings depend on ``method``. """ functions = { 'google': analyze_google, 'textblob': analyze_textblob, 'vader': analyze_vader, } analyzer = functions[method] return analyzer def analyze_google(list_of_text): """Return sentiment for each text in ``list_of_text``. Sentiments are analyzed using googles cloud natural language api. Args: list_of_text (list): List of strings for which the sentiment should be classified. Returns: sentiments (list): List of respective sentiment score for each item in ``list_of_text``, where the sentiment score is computed using google cloud natural language. """ client = language.LanguageServiceClient.from_service_account_json( 'src/keys/ose-twitter-analysis-8508806b2efb.json' ) sentiments = [] for text in list_of_text: document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT ) annotations = client.analyze_sentiment(document=document) sentiments.append(annotations.document_sentiment.score) return sentiments def analyze_textblob(list_of_text): """Return sentiment for each text in ``list_of_text`` using ``textblob``. Args: list_of_text (list): List of strings for which the sentiment should be classified. Returns: sentiments (list): List of respective sentiment score for each item in ``list_of_text``, where the sentiment score is computed using the package ``textblob``. """ sentiments = [ TextBlob(text).sentiment.polarity for text in list_of_text ] return sentiments def analyze_vader(list_of_text): """Return sentiment for each text in ``list_of_text`` using ``vaderSentiment``. Args: list_of_text (list): List of strings for which the sentiment should be classified. Returns: sentiments (list): List of respective sentiment score for each item in ``list_of_text``, where the sentiment score is computed using the package ``vaderSentiment``. 
""" analyzer = SentimentIntensityAnalyzer() sentiments = [ analyzer.polarity_scores(text)['compound'] for text in list_of_text ] return sentiments # ## Analysis analyzers = ['textblob', 'vader'] #, 'google'] for col in ['text', 'processed']: for m in analyzers: df[m + "_" + col] = classify_sentiment(df[col].to_list(), method=m) def continuous_to_class(score): new_score = np.zeros(score.shape) new_score[score < -0.33] = -1 new_score[score > 0.33] = 1 new_score = pd.Series(new_score).replace( {-1: 'negative', 0: 'neutral', 1: 'positive'} ) return new_score def confusion_matrix_to_readable(cmat, labels): columns = ['pred_' + lab for lab in labels] rows = ['true_' + lab for lab in labels] df = pd.DataFrame(cmat, columns=columns, index=rows) return df def absolute_to_freq(cmat): total = cmat.sum(axis=1) return cmat / total[:, np.newaxis] # + le = LabelEncoder() le = le.fit(df["sentiment"]) y_true = le.transform(df["sentiment"]) columns = [ 'textblob_text', 'vader_text', 'textblob_processed', 'vader_processed' ] predictions = [ le.transform(continuous_to_class(df[col])) for col in columns ] cmats = [ confusion_matrix(y_true, pred) for pred in predictions ] cmats_freq = [absolute_to_freq(cmat) for cmat in cmats] df_cmats = [ confusion_matrix_to_readable(cmat, le.classes_) for cmat in cmats_freq ] # - # ## Benchmark weights = pd.Series(y_true).value_counts() / len(y_true) weights = weights.reindex(le.transform(['negative', 'neutral', 'positive'])) weights # ### Evaluation for col, df_tmp in zip(columns, df_cmats): print(col) display(df_tmp) print(f"Percent correctly classified: {df_tmp.values.diagonal().dot(weights)}")
src/model/pretrained.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_tensorflow_p36) # language: python # name: conda_tensorflow_p36 # --- # + import keras from keras.models import Sequential, Model, load_model from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, BatchNormalization, LocallyConnected2D, Permute from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback from keras import regularizers from keras import backend as K import keras.losses import tensorflow as tf from tensorflow.python.framework import ops import isolearn.keras as iso import numpy as np import tensorflow as tf import logging logging.getLogger('tensorflow').setLevel(logging.ERROR) import pandas as pd import os import pickle import numpy as np import scipy.sparse as sp import scipy.io as spio import matplotlib.pyplot as plt import random import isolearn.io as isoio import isolearn.keras as isol from genesis.visualization import * from genesis.generator import * from genesis.predictor import * from genesis.optimizer import * from definitions.generator.aparent_deconv_conv_generator_concat_trainmode import load_generator_network from definitions.predictor.aparent import load_saved_predictor class IdentityEncoder(iso.SequenceEncoder) : def __init__(self, seq_len, channel_map) : super(IdentityEncoder, self).__init__('identity', (seq_len, len(channel_map))) self.seq_len = seq_len self.n_channels = len(channel_map) self.encode_map = channel_map self.decode_map = { nt: ix for ix, nt in self.encode_map.items() } def encode(self, seq) : encoding = np.zeros((self.seq_len, self.n_channels)) for i in range(len(seq)) : if seq[i] in self.encode_map : channel_ix = 
self.encode_map[seq[i]] encoding[i, channel_ix] = 1. return encoding def encode_inplace(self, seq, encoding) : for i in range(len(seq)) : if seq[i] in self.encode_map : channel_ix = self.encode_map[seq[i]] encoding[i, channel_ix] = 1. def encode_inplace_sparse(self, seq, encoding_mat, row_index) : raise NotImplementError() def decode(self, encoding) : seq = '' for pos in range(0, encoding.shape[0]) : argmax_nt = np.argmax(encoding[pos, :]) max_nt = np.max(encoding[pos, :]) seq += self.decode_map[argmax_nt] return seq def decode_sparse(self, encoding_mat, row_index) : raise NotImplementError() from keras.backend.tensorflow_backend import set_session def contain_tf_gpu_mem_usage() : config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) set_session(sess) contain_tf_gpu_mem_usage() import editdistance def compute_edit_distance(seqs, opt_len=None) : shuffle_index = np.arange(len(seqs)) shuffle_index = shuffle_index[::-1]#np.random.shuffle(shuffle_index) seqs_shuffled = [seqs[shuffle_index[i]] for i in range(len(seqs))] edit_distances = np.ravel([float(editdistance.eval(seq_1, seq_2)) for seq_1, seq_2 in zip(seqs, seqs_shuffled)]) if opt_len is not None : edit_distances /= opt_len return edit_distances # + #Define target isoform loss function def get_isoform_loss(target_isos, isoform_start=80, isoform_end=115, use_start=0, use_end=70, use_target_bits=1.8, cse_start=70, cse_end=76, cse_target_bits=1.8, dse_start=76, dse_end=125, dse_target_bits=1.8, entropy_weight=0.0, similarity_weight=0.0, similarity_margin=0.5, punish_dn_cse=0.0, punish_up_c=0.0, punish_dn_c=0.0, punish_up_g=0.0, punish_dn_g=0.0, punish_up_aa=0.0, punish_dn_aa=0.0) : entropy_anneal_coeff = K.variable(0.) entropy_anneal_func = lambda alpha, epoch: 1. 
# - 0.95 ** epoch target_iso = np.zeros((len(target_isos), 1)) for i, t_iso in enumerate(target_isos) : target_iso[i, 0] = t_iso masked_use_entropy_mse = get_target_entropy_sme_masked(pwm_start=use_start, pwm_end=use_end, target_bits=use_target_bits) cse_entropy_mse = get_target_entropy_sme(pwm_start=cse_start, pwm_end=cse_end, target_bits=cse_target_bits) masked_dse_entropy_mse = get_target_entropy_sme_masked(pwm_start=dse_start, pwm_end=dse_end, target_bits=dse_target_bits) punish_dn_cse_func = get_punish_cse(pwm_start=74, pwm_end=dse_end) punish_up_c_func = get_punish_c(pwm_start=use_start, pwm_end=use_end) punish_dn_c_func = get_punish_c(pwm_start=dse_start, pwm_end=dse_end) punish_up_g_func = get_punish_g(pwm_start=use_start, pwm_end=use_end) punish_dn_g_func = get_punish_g(pwm_start=use_start, pwm_end=use_end) punish_up_aa_func = get_punish_aa(pwm_start=use_start, pwm_end=use_end) punish_dn_aa_func = get_punish_aa(pwm_start=dse_start, pwm_end=dse_end) pwm_sample_entropy_func = get_pwm_margin_sample_entropy_masked(pwm_start=70-60, pwm_end=76+60, margin=similarity_margin, shift_1_nt=True) extra_sim = np.ones((len(target_isos), 1, 205, 4, 1)) for i in range(len(target_isos)) : extra_sim[i, 0, 70-4:76, :, 0] = 0.0 def loss_func(loss_tensors) : _, _, _, sequence_class, pwm_logits_1, pwm_logits_2, pwm_1, pwm_2, sampled_pwm_1, sampled_pwm_2, mask, sampled_mask, iso_pred, cut_pred, iso_score_pred, cut_score_pred = loss_tensors #Create target isoform with sample axis iso_targets = K.constant(target_iso) iso_true = K.gather(iso_targets, sequence_class[:, 0]) iso_true = K.tile(K.expand_dims(iso_true, axis=-1), (1, K.shape(sampled_pwm_1)[1], 1)) #Specify costs iso_loss = 2.0 * K.mean(symmetric_sigmoid_kl_divergence(iso_true, iso_pred), axis=1) seq_loss = 0.0 seq_loss += punish_dn_cse * K.mean(punish_dn_cse_func(sampled_pwm_1), axis=1) seq_loss += punish_up_c * K.mean(punish_up_c_func(sampled_pwm_1), axis=1) seq_loss += punish_dn_c * 
K.mean(punish_dn_c_func(sampled_pwm_1), axis=1) seq_loss += punish_up_g * K.mean(punish_up_g_func(sampled_pwm_1), axis=1) seq_loss += punish_dn_g * K.mean(punish_dn_g_func(sampled_pwm_1), axis=1) seq_loss += punish_up_aa * K.mean(punish_up_aa_func(sampled_pwm_1), axis=1) seq_loss += punish_dn_aa * K.mean(punish_dn_aa_func(sampled_pwm_1), axis=1) extra_sims = K.constant(extra_sim) extra_sim_mask = K.gather(extra_sims, sequence_class[:, 0]) extra_sim_mask = K.tile(extra_sim_mask, (1, K.shape(sampled_pwm_1)[1], 1, 1, 1)) entropy_loss = entropy_anneal_coeff * entropy_weight * (masked_use_entropy_mse(pwm_1, mask) + cse_entropy_mse(pwm_1) + masked_dse_entropy_mse(pwm_1, mask)) entropy_loss += similarity_weight * K.mean(pwm_sample_entropy_func(sampled_pwm_1, sampled_pwm_2, sampled_mask * extra_sim_mask), axis=1) #Compute total loss total_loss = iso_loss + seq_loss + entropy_loss return total_loss def val_loss_func(loss_tensors) : _, _, _, sequence_class, pwm_logits_1, pwm_logits_2, pwm_1, pwm_2, sampled_pwm_1, sampled_pwm_2, mask, sampled_mask, iso_pred, cut_pred, iso_score_pred, cut_score_pred = loss_tensors #Create target isoform with sample axis iso_targets = K.constant(target_iso) iso_true = K.gather(iso_targets, sequence_class[:, 0]) iso_true = K.tile(K.expand_dims(iso_true, axis=-1), (1, K.shape(sampled_pwm_1)[1], 1)) #Specify costs iso_loss = 2.0 * symmetric_sigmoid_kl_divergence(iso_true, iso_pred)[:, 0, ...] seq_loss = 0.0 seq_loss += punish_dn_cse * punish_dn_cse_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_up_c * punish_up_c_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_dn_c * punish_dn_c_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_up_g * punish_up_g_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_dn_g * punish_dn_g_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_up_aa * punish_up_aa_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_dn_aa * punish_dn_aa_func(sampled_pwm_1)[:, 0, ...] 
extra_sims = K.constant(extra_sim) extra_sim_mask = K.gather(extra_sims, sequence_class[:, 0]) extra_sim_mask = K.tile(extra_sim_mask, (1, K.shape(sampled_pwm_1)[1], 1, 1, 1)) entropy_loss = entropy_anneal_coeff * entropy_weight * (masked_use_entropy_mse(pwm_1, mask) + cse_entropy_mse(pwm_1) + masked_dse_entropy_mse(pwm_1, mask)) entropy_loss += similarity_weight * pwm_sample_entropy_func(sampled_pwm_1, sampled_pwm_2, sampled_mask * extra_sim_mask)[:, 0, ...] #Compute total loss total_loss = iso_loss + seq_loss + entropy_loss return total_loss def val_loss_func_noentropy(loss_tensors) : _, _, _, sequence_class, pwm_logits_1, pwm_logits_2, pwm_1, pwm_2, sampled_pwm_1, sampled_pwm_2, mask, sampled_mask, iso_pred, cut_pred, iso_score_pred, cut_score_pred = loss_tensors #Create target isoform with sample axis iso_targets = K.constant(target_iso) iso_true = K.gather(iso_targets, sequence_class[:, 0]) iso_true = K.tile(K.expand_dims(iso_true, axis=-1), (1, K.shape(sampled_pwm_1)[1], 1)) #Specify costs iso_loss = 2.0 * symmetric_sigmoid_kl_divergence(iso_true, iso_pred)[:, 0, ...] seq_loss = 0.0 seq_loss += punish_dn_cse * punish_dn_cse_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_up_c * punish_up_c_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_dn_c * punish_dn_c_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_up_g * punish_up_g_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_dn_g * punish_dn_g_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_up_aa * punish_up_aa_func(sampled_pwm_1)[:, 0, ...] seq_loss += punish_dn_aa * punish_dn_aa_func(sampled_pwm_1)[:, 0, ...] extra_sims = K.constant(extra_sim) extra_sim_mask = K.gather(extra_sims, sequence_class[:, 0]) extra_sim_mask = K.tile(extra_sim_mask, (1, K.shape(sampled_pwm_1)[1], 1, 1, 1)) entropy_loss = similarity_weight * pwm_sample_entropy_func(sampled_pwm_1, sampled_pwm_2, sampled_mask * extra_sim_mask)[:, 0, ...] 
# +
#Sequence optimization monitor during training
class GenesisMonitor(Callback):
    """Keras callback that snapshots generated sequences after every epoch.

    At construction time (recorded as epoch 0) and at the end of each epoch
    it samples sequences from ``generator_model``, writes them to a text file
    under ``run_dir`` and records pairwise edit distances between the samples
    in ``edit_distance_samples``.
    """

    def __init__(self, generator_model, sequence_encoder, run_dir="", run_prefix="", val_steps=1, batch_size=32) :
        self.generator_model = generator_model
        self.batch_size = batch_size
        self.val_steps = val_steps
        self.sequence_encoder = sequence_encoder
        self.run_prefix = run_prefix
        self.run_dir = run_dir
        # One (1, n_pairs) row of pairwise edit distances per epoch.
        self.edit_distance_samples = []

        if not os.path.exists(self.run_dir):
            os.makedirs(self.run_dir)

        # Take an initial snapshot before any training happens (epoch 0).
        seqs = self._sample_sequences()
        self._store_sequences(seqs, 0)
        self._compute_edit_distances(seqs, 0)

    def _sample_sequences(self) :
        """Run the generator and decode the first sample of each PWM to a string."""
        gen_bundle = self.generator_model.predict(x=None, steps=self.val_steps)
        _, _, _, _, _, sampled_pwm, _, _, _ = gen_bundle

        # NOTE(review): assumes sampled_pwm is indexed as
        # (batch, sample, length, alphabet, 1) -- confirm against the generator.
        seqs = [ self.sequence_encoder.decode(sampled_pwm[i, 0, :, :, 0]) for i in range(sampled_pwm.shape[0]) ]
        return seqs

    def _compute_edit_distances(self, seqs, epoch) :
        """Append the pairwise edit distances of ``seqs`` as a (1, n_pairs) row."""
        edit_dists = compute_edit_distance(seqs)
        self.edit_distance_samples.append(edit_dists.reshape(1, -1))

    def _store_sequences(self, seqs, epoch) :
        """Write the sampled sequences, one per line, to a per-epoch text file."""
        #Save sequences to file
        with open(self.run_dir + self.run_prefix + "_epoch_" + str(epoch) + "_" + str(self.val_steps) + "_steps.txt", "wt") as f:
            for i in range(len(seqs)) :
                f.write(seqs[i] + "\n")

    def on_epoch_end(self, epoch, logs={}) :
        # NOTE(review): the mutable default ``logs={}`` is never mutated here,
        # so it is harmless, but ``logs=None`` would be the safer convention.
        seqs = self._sample_sequences()
        self._store_sequences(seqs, epoch)
        self._compute_edit_distances(seqs, epoch)


class ValidationCallback(Callback):
    """Evaluates a validation loss model every epoch and records the history.

    ``val_loss_model.predict`` is run once at construction time (pre-training
    baseline) and once at the end of every epoch; each result is appended to
    ``val_loss_history``.
    """

    def __init__(self, val_name, val_loss_model, val_steps) :
        self.val_name = val_name
        self.val_loss_model = val_loss_model
        self.val_steps = val_steps
        self.val_loss_history = []

        #Track val loss
        # Baseline evaluation before training starts.
        self.val_loss_history.append(self.val_loss_model.predict(x=None, steps=self.val_steps))

    def on_epoch_end(self, epoch, logs={}) :
        #Track val loss
        val_loss_values = self.val_loss_model.predict(x=None, steps=self.val_steps)
        self.val_loss_history.append(val_loss_values)

        print("mean(" + self.val_name + ") = " + str(np.mean(val_loss_values)))


class EpochVariableCallback(Callback):
    """Updates a Keras backend variable at the end of every epoch.

    ``my_func(current_value, epoch)`` computes the new value; used e.g. for
    annealing schedules on loss-weight variables such as the entropy
    coefficient.
    """

    def __init__(self, my_variable, my_func):
        self.my_variable = my_variable
        self.my_func = my_func

    def on_epoch_end(self, epoch, logs={}):
        K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch))
build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=n_samples, eval_mode=sample_mode) _, val_predictor = build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=n_samples, eval_mode='sample') for layer in val_predictor.layers : if 'aparent' in layer.name : layer.name += "_valversion" #Build Loss Model (In: Generator seed, Out: Loss function) _, loss_model = build_loss_model(predictor, loss_func) _, val_loss_model = build_loss_model(val_predictor, val_loss_func) _, val_loss_noentropy_model = build_loss_model(val_predictor, val_loss_func_noentropy) _, val_loss_onlyisoform_model = build_loss_model(val_predictor, val_loss_func_onlyisoform) #Specify Optimizer to use #opt = keras.optimizers.SGD(lr=0.1) opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999) #Compile Loss Model (Minimize self) loss_model.compile(loss=lambda true, pred: pred, optimizer=opt) validation_history = ValidationCallback('val_loss', val_loss_model, val_steps) validation_noentropy_history = ValidationCallback('val_loss_no_entropy', val_loss_noentropy_model, val_steps) validation_onlyisoform_history = ValidationCallback('val_loss_only_isoform', val_loss_onlyisoform_model, val_steps) #Standard sequence decoder acgt_encoder = IdentityEncoder(205, {'A':0, 'C':1, 'G':2, 'T':3}) #Build callback for printing intermediate sequences genesis_monitor = GenesisMonitor(generator, acgt_encoder, run_dir="./samples/" + run_prefix + "/", run_prefix="intermediate", val_steps=val_steps, batch_size=batch_size) #Specify callback entities callbacks =[ EpochVariableCallback(entropy_anneal_coeff, entropy_anneal_func), validation_history, validation_noentropy_history, validation_onlyisoform_history, genesis_monitor ] #Fit Loss Model train_history = loss_model.fit( [], np.ones((1, 1)), epochs=n_epochs, steps_per_epoch=steps_per_epoch, callbacks=callbacks ) validation_history_dict = { 
'val_name' : validation_history.val_name, 'val_loss_history' : validation_history.val_loss_history } validation_history.val_loss_model = None validation_noentropy_history_dict = { 'val_name' : validation_noentropy_history.val_name, 'val_loss_history' : validation_noentropy_history.val_loss_history } validation_noentropy_history.val_loss_model = None validation_onlyisoform_history_dict = { 'val_name' : validation_onlyisoform_history.val_name, 'val_loss_history' : validation_onlyisoform_history.val_loss_history } validation_onlyisoform_history.val_loss_model = None edit_distance_dict = { 'edit_distance_samples' : genesis_monitor.edit_distance_samples } genesis_monitor.generator_model = None return generator, predictor, train_history, [validation_history_dict, validation_noentropy_history_dict, validation_onlyisoform_history_dict, edit_distance_dict] def _run_both_genesis(run_prefix, sequence_templates, loss_funcs, library_contexts, batch_size=32, n_samples=1, n_epochs=10, steps_per_epoch=100, val_steps=100, normalize_logits=False) : loss_func, val_loss_func, val_loss_func_noentropy, val_loss_func_onlyisoform, entropy_anneal_coeff, entropy_anneal_func = loss_funcs #Build Generator Network _, generator = build_generator(batch_size, len(sequence_templates[0]), load_generator_network, n_classes=len(sequence_templates), n_samples=n_samples, sequence_templates=sequence_templates, batch_normalize_pwm=normalize_logits, validation_sample_mode='sample') #Build Predictor Network and hook it on the generator PWM output tensor _, pwm_predictor = build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=1, eval_mode='pwm') _, sample_predictor = build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=n_samples, eval_mode='sample') for layer in pwm_predictor.layers : if 'aparent' in layer.name : layer.name += "_pwmversion" _, val_predictor = 
build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=n_samples, eval_mode='sample') for layer in val_predictor.layers : if 'aparent' in layer.name : layer.name += "_valversion" #Build Loss Model (In: Generator seed, Out: Loss function) _, pwm_loss_model = build_loss_model(pwm_predictor, loss_func) _, sample_loss_model = build_loss_model(sample_predictor, loss_func) dual_loss_out = Lambda(lambda x: 0.5 * x[0] + 0.5 * x[1])([pwm_loss_model.outputs[0], sample_loss_model.outputs[0]]) loss_model = Model(inputs=pwm_loss_model.inputs, outputs=dual_loss_out) _, val_loss_model = build_loss_model(val_predictor, val_loss_func) _, val_loss_noentropy_model = build_loss_model(val_predictor, val_loss_func_noentropy) _, val_loss_onlyisoform_model = build_loss_model(val_predictor, val_loss_func_onlyisoform) #Specify Optimizer to use #opt = keras.optimizers.SGD(lr=0.1) opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999) #Compile Loss Model (Minimize self) loss_model.compile(loss=lambda true, pred: pred, optimizer=opt) validation_history = ValidationCallback('val_loss', val_loss_model, val_steps) validation_noentropy_history = ValidationCallback('val_loss_no_entropy', val_loss_noentropy_model, val_steps) validation_onlyisoform_history = ValidationCallback('val_loss_only_isoform', val_loss_onlyisoform_model, val_steps) #Standard sequence decoder acgt_encoder = IdentityEncoder(205, {'A':0, 'C':1, 'G':2, 'T':3}) #Build callback for printing intermediate sequences genesis_monitor = GenesisMonitor(generator, acgt_encoder, run_dir="./samples/" + run_prefix + "/", run_prefix="intermediate", val_steps=val_steps, batch_size=batch_size) #Specify callback entities callbacks =[ EpochVariableCallback(entropy_anneal_coeff, entropy_anneal_func), validation_history, validation_noentropy_history, validation_onlyisoform_history, genesis_monitor ] #Fit Loss Model train_history = loss_model.fit( [], np.ones((1, 1)), 
epochs=n_epochs, steps_per_epoch=steps_per_epoch, callbacks=callbacks ) validation_history_dict = { 'val_name' : validation_history.val_name, 'val_loss_history' : validation_history.val_loss_history } validation_history.val_loss_model = None validation_noentropy_history_dict = { 'val_name' : validation_noentropy_history.val_name, 'val_loss_history' : validation_noentropy_history.val_loss_history } validation_noentropy_history.val_loss_model = None validation_onlyisoform_history_dict = { 'val_name' : validation_onlyisoform_history.val_name, 'val_loss_history' : validation_onlyisoform_history.val_loss_history } validation_onlyisoform_history.val_loss_model = None edit_distance_dict = { 'edit_distance_samples' : genesis_monitor.edit_distance_samples } genesis_monitor.generator_model = None return generator, sample_predictor, train_history, [validation_history_dict, validation_noentropy_history_dict, validation_onlyisoform_history_dict, edit_distance_dict] # + #Specfiy file path to pre-trained predictor network save_dir = os.path.join(os.getcwd(), '../../../aparent/saved_models') model_name = 'aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5' model_path = os.path.join(save_dir, model_name) # + #Maximize isoform proportions for all native minigene libraries sequence_templates = [ 'TCCCTACACGACGCTCTTCCGATCTNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNANTAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAATTGTTCGTTGGTCGGCTTGAGTGCGTGTGTCTCGTTTAGATGCTGCGCCTAACCCTAAGCAGATTCTTCATGCAATTG' ] library_contexts = [ 'simple' ] target_isos = [ 1.0 ] # - def set_seed(seed_value) : # 1. Set the `PYTHONHASHSEED` environment variable at a fixed value os.environ['PYTHONHASHSEED']=str(seed_value) # 2. Set the `python` built-in pseudo-random generator at a fixed value random.seed(seed_value) # 3. Set the `numpy` pseudo-random generator at a fixed value np.random.seed(seed_value) # 4. 
Set the `tensorflow` pseudo-random generator at a fixed value tf.set_random_seed(seed_value) # 5. Configure a new global `tensorflow` session session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) K.set_session(sess) # + #Train APA Cleavage GENESIS Network print("Training GENESIS (Max Isoform)") #Sampling conditions to evaluate sampling_conds = [ ['pwm', 1, False], ['sample', 1, False], ['sample', 10, False], ['both', 1, False], ['both', 10, False] ] #Number of PWMs to generate per objective batch_size = 32 #Number of epochs per objective to optimize n_epochs = 100 #Number of steps (grad updates) per epoch steps_per_epoch = 50 #Number of validation steps val_steps = 50 #Number of independent trial runs n_runs = 5 seeds = [51125, 222842, 325484, 475737, 727322] for sampling_cond_ix, sampling_cond in enumerate(sampling_conds) : print("Sampling mode = '" + str(sampling_cond[0]) + "', n_samples = '" + str(sampling_cond[1]) + "'") if len(sampling_cond) <= 3 : sampling_dict = {'train' : [], 'val' : []} sampling_cond.append(sampling_dict) for run_ix in range(n_runs) : K.clear_session() set_seed(seeds[run_ix]) losses = get_isoform_loss( target_isos, use_start=22, use_end=70, use_target_bits=1.95, cse_start=70, cse_end=76, cse_target_bits=1.95, dse_start=76, dse_end=121, dse_target_bits=1.95, entropy_weight=0.0, similarity_weight=5.0, similarity_margin=0.5, punish_dn_cse=1.0, punish_up_c=0.0015, punish_dn_c=0.0001, punish_up_g=0.0001, punish_dn_g=0.0001, punish_up_aa=0.00025, punish_dn_aa=0.005 ) run_prefix = "genesis_apa_max_isoform_simple_eval_sampling_modes_no_entropy_penalty_longtrain_cond_" + str(sampling_cond_ix) + "_run_" + str(run_ix) genesis_generator, genesis_predictor, train_history, validation_histories = run_genesis(run_prefix, sequence_templates, losses, library_contexts, batch_size, sampling_cond[0], sampling_cond[1], n_epochs, steps_per_epoch, 
val_steps, sampling_cond[2]) sampling_dict['train'].append(train_history.history) sampling_dict['val'].append(validation_histories) # + #Store generated results experiment_name = "apa_max_isoform_eval_sampling_modes_no_entropy_penalty_longtrain_results_" + str(len(seeds)) + "_seeds" pickle.dump({ 'sampling_conds' : sampling_conds }, open(experiment_name + ".pickle", 'wb')) # + #Load cached results n_epochs = 100 n_runs = 5 experiment_name = "apa_max_isoform_eval_sampling_modes_no_entropy_penalty_longtrain_results_" + str(n_runs) + "_seeds" sampling_conds = pickle.load(open(experiment_name + ".pickle", 'rb'))['sampling_conds'] # + f = plt.figure(figsize=(10, 6)) summary_mode = 'median' fig_suffix = "_" + str(n_epochs) + '_epochs_no_entropy_eval_pwm_vs_sample' ls = [] min_y = 10000 max_y = 0 save_figs = True for sampling_cond in sampling_conds : norm_prefix = '' if sampling_cond[2] == True : norm_prefix = '-IN' label = 'PWM' + norm_prefix linestyle = '-' if sampling_cond[0] == 'sample' : label = "Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = '--' elif sampling_cond[0] == 'both' : label = "PWM" + norm_prefix + " + Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = ':' train_hists = np.concatenate([np.array(sampling_cond[3]['train'][i]['loss']).reshape(-1, 1) for i in range(len(np.array(sampling_cond[3]['train'])))], axis=1) mean_train_hist = np.mean(train_hists, axis=-1) if summary_mode == 'mean' else np.median(train_hists, axis=-1) l1 = plt.plot(np.arange(mean_train_hist.shape[0]), mean_train_hist, linewidth=3, linestyle=linestyle, label=label) ls.append(l1[0]) if np.max(mean_train_hist) > max_y : max_y = np.max(mean_train_hist) if np.min(mean_train_hist) < min_y : min_y = np.min(mean_train_hist) plt.xlim(0, n_epochs-1) plt.xticks([0, n_epochs-1], [1, n_epochs], fontsize=14) plt.ylim(min_y, max_y) plt.yticks(fontsize=14) plt.xlabel("Epoch", fontsize=16) plt.ylabel("Training Loss", fontsize=16) plt.legend(handles=ls, 
fontsize=14) plt.tight_layout() if save_figs : plt.savefig('apa_genesis_max_isoform_learning_loss_curves_training_' + summary_mode + fig_suffix + '.png', dpi=150, transparent=True) plt.savefig('apa_genesis_max_isoform_learning_loss_curves_training_' + summary_mode + fig_suffix + '.eps') plt.savefig('apa_genesis_max_isoform_learning_loss_curves_training_' + summary_mode + fig_suffix + '.svg') plt.show() # + start_from_epoch = 0 summary_mode = 'median' fig_suffix = "_" + str(n_epochs) + '_epochs_no_entropy_eval_pwm_vs_sample_from_epoch_' + str(start_from_epoch) save_figs = True for val_ix, val_name in enumerate(['Total', 'No Entropy', 'Only Isoform']) : f = plt.figure(figsize=(10, 6)) ls = [] min_y = 10000 max_y = -10000 for sampling_cond in sampling_conds : val_hists = np.concatenate([np.array([np.mean(sampling_cond[3]['val'][i][val_ix]['val_loss_history'][j]) for j in range(n_epochs + 1)] ).reshape(-1, 1) for i in range(len(np.array(sampling_cond[3]['train'])))], axis=1) mean_val_hist = np.mean(val_hists, axis=-1) if summary_mode == 'mean' else np.median(val_hists, axis=-1) norm_prefix = '' if sampling_cond[2] == True : norm_prefix = '-IN' label = 'PWM' + norm_prefix linestyle = '-' if sampling_cond[0] == 'sample' : label = "Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = '--' elif sampling_cond[0] == 'both' : label = "PWM" + norm_prefix + " + Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = ':' l1 = plt.plot(np.arange(mean_val_hist.shape[0]), mean_val_hist, linewidth=3, linestyle=linestyle, label=label) ls.append(l1[0]) if np.max(mean_val_hist[start_from_epoch:]) > max_y : max_y = np.max(mean_val_hist[start_from_epoch:]) if np.min(mean_val_hist[start_from_epoch:]) < min_y : min_y = np.min(mean_val_hist[start_from_epoch:]) plt.xlim(start_from_epoch, n_epochs) plt.xticks([start_from_epoch, n_epochs], [start_from_epoch, n_epochs], fontsize=14) plt.ylim(min_y, max_y) plt.yticks(fontsize=14) plt.xlabel("Epoch", 
fontsize=16) plt.ylabel(val_name, fontsize=16) plt.legend(handles=ls, fontsize=14) plt.tight_layout() if save_figs : plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.png', dpi=150, transparent=True) plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.eps') plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.svg') plt.show() #Plot edit distance curves opt_len = 90 f = plt.figure(figsize=(10, 6)) ls = [] min_y = 10000 max_y = -10000 for sampling_cond in sampling_conds : val_hists = np.concatenate([np.array([np.mean(sampling_cond[3]['val'][i][3]['edit_distance_samples'][j]) for j in range(n_epochs + 1)] ).reshape(-1, 1) for i in range(len(np.array(sampling_cond[3]['train'])))], axis=1) mean_val_hist = np.mean(val_hists, axis=-1) if summary_mode == 'mean' else np.median(val_hists, axis=-1) if opt_len is not None : mean_val_hist /= opt_len norm_prefix = '' if sampling_cond[2] == True : norm_prefix = '-IN' label = 'PWM' + norm_prefix linestyle = '-' if sampling_cond[0] == 'sample' : label = "Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = '--' elif sampling_cond[0] == 'both' : label = "PWM" + norm_prefix + " + Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = ':' l1 = plt.plot(np.arange(mean_val_hist.shape[0]), mean_val_hist, linewidth=3, linestyle=linestyle, label=label) ls.append(l1[0]) if np.max(mean_val_hist[start_from_epoch:]) > max_y : max_y = np.max(mean_val_hist[start_from_epoch:]) if np.min(mean_val_hist[start_from_epoch:]) < min_y : min_y = np.min(mean_val_hist[start_from_epoch:]) plt.xlim(start_from_epoch, n_epochs) plt.xticks([start_from_epoch, n_epochs], [start_from_epoch, n_epochs], fontsize=14) plt.ylim(min_y, max_y) plt.yticks(fontsize=14) plt.xlabel("Epoch", fontsize=16) plt.ylabel("Edit 
distance", fontsize=16) plt.legend(handles=ls, fontsize=14) plt.tight_layout() if save_figs : plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.png', dpi=150, transparent=True) plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.eps') plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.svg') plt.show() # + start_from_epoch = 1 summary_mode = 'median' fig_suffix = "_" + str(n_epochs) + '_epochs_no_entropy_eval_pwm_vs_sample_from_epoch_' + str(start_from_epoch) save_figs = True for val_ix, val_name in enumerate(['Total', 'No Entropy', 'Only Isoform']) : f = plt.figure(figsize=(10, 6)) ls = [] min_y = 10000 max_y = -10000 for sampling_cond in sampling_conds : val_hists = np.concatenate([np.array([np.mean(sampling_cond[3]['val'][i][val_ix]['val_loss_history'][j]) for j in range(n_epochs + 1)] ).reshape(-1, 1) for i in range(len(np.array(sampling_cond[3]['train'])))], axis=1) mean_val_hist = np.mean(val_hists, axis=-1) if summary_mode == 'mean' else np.median(val_hists, axis=-1) norm_prefix = '' if sampling_cond[2] == True : norm_prefix = '-IN' label = 'PWM' + norm_prefix linestyle = '-' if sampling_cond[0] == 'sample' : label = "Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = '--' elif sampling_cond[0] == 'both' : label = "PWM" + norm_prefix + " + Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = ':' l1 = plt.plot(np.arange(mean_val_hist.shape[0]), mean_val_hist, linewidth=3, linestyle=linestyle, label=label) ls.append(l1[0]) if np.max(mean_val_hist[start_from_epoch:]) > max_y : max_y = np.max(mean_val_hist[start_from_epoch:]) if np.min(mean_val_hist[start_from_epoch:]) < min_y : min_y = np.min(mean_val_hist[start_from_epoch:]) plt.xlim(start_from_epoch, n_epochs) plt.xticks([start_from_epoch, n_epochs], [start_from_epoch, 
n_epochs], fontsize=14) plt.ylim(min_y, max_y) plt.yticks(fontsize=14) plt.xlabel("Epoch", fontsize=16) plt.ylabel(val_name, fontsize=16) plt.legend(handles=ls, fontsize=14) plt.tight_layout() if save_figs : plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.png', dpi=150, transparent=True) plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.eps') plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.svg') plt.show() #Plot edit distance curves opt_len = 90 f = plt.figure(figsize=(10, 6)) ls = [] min_y = 10000 max_y = -10000 for sampling_cond in sampling_conds : val_hists = np.concatenate([np.array([np.mean(sampling_cond[3]['val'][i][3]['edit_distance_samples'][j]) for j in range(n_epochs + 1)] ).reshape(-1, 1) for i in range(len(np.array(sampling_cond[3]['train'])))], axis=1) mean_val_hist = np.mean(val_hists, axis=-1) if summary_mode == 'mean' else np.median(val_hists, axis=-1) if opt_len is not None : mean_val_hist /= opt_len norm_prefix = '' if sampling_cond[2] == True : norm_prefix = '-IN' label = 'PWM' + norm_prefix linestyle = '-' if sampling_cond[0] == 'sample' : label = "Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = '--' elif sampling_cond[0] == 'both' : label = "PWM" + norm_prefix + " + Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = ':' l1 = plt.plot(np.arange(mean_val_hist.shape[0]), mean_val_hist, linewidth=3, linestyle=linestyle, label=label) ls.append(l1[0]) if np.max(mean_val_hist[start_from_epoch:]) > max_y : max_y = np.max(mean_val_hist[start_from_epoch:]) if np.min(mean_val_hist[start_from_epoch:]) < min_y : min_y = np.min(mean_val_hist[start_from_epoch:]) plt.xlim(start_from_epoch, n_epochs) plt.xticks([start_from_epoch, n_epochs], [start_from_epoch, n_epochs], fontsize=14) 
plt.ylim(min_y, max_y) plt.yticks(fontsize=14) plt.xlabel("Epoch", fontsize=16) plt.ylabel("Edit distance", fontsize=16) plt.legend(handles=ls, fontsize=14) plt.tight_layout() if save_figs : plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.png', dpi=150, transparent=True) plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.eps') plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.svg') plt.show() # + start_from_epoch = 50 summary_mode = 'median' fig_suffix = "_" + str(n_epochs) + '_epochs_no_entropy_eval_pwm_vs_sample_from_epoch_' + str(start_from_epoch) save_figs = True for val_ix, val_name in enumerate(['Total', 'No Entropy', 'Only Isoform']) : f = plt.figure(figsize=(10, 6)) ls = [] min_y = 10000 max_y = -10000 for sampling_cond in sampling_conds : val_hists = np.concatenate([np.array([np.mean(sampling_cond[3]['val'][i][val_ix]['val_loss_history'][j]) for j in range(n_epochs + 1)] ).reshape(-1, 1) for i in range(len(np.array(sampling_cond[3]['train'])))], axis=1) mean_val_hist = np.mean(val_hists, axis=-1) if summary_mode == 'mean' else np.median(val_hists, axis=-1) norm_prefix = '' if sampling_cond[2] == True : norm_prefix = '-IN' label = 'PWM' + norm_prefix linestyle = '-' if sampling_cond[0] == 'sample' : label = "Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = '--' elif sampling_cond[0] == 'both' : label = "PWM" + norm_prefix + " + Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = ':' l1 = plt.plot(np.arange(mean_val_hist.shape[0]), mean_val_hist, linewidth=3, linestyle=linestyle, label=label) ls.append(l1[0]) if np.max(mean_val_hist[start_from_epoch:]) > max_y : max_y = np.max(mean_val_hist[start_from_epoch:]) if np.min(mean_val_hist[start_from_epoch:]) < min_y : min_y = np.min(mean_val_hist[start_from_epoch:]) 
plt.xlim(start_from_epoch, n_epochs) plt.xticks([start_from_epoch, n_epochs], [start_from_epoch, n_epochs], fontsize=14) plt.ylim(min_y, max_y) plt.yticks(fontsize=14) plt.xlabel("Epoch", fontsize=16) plt.ylabel(val_name, fontsize=16) plt.legend(handles=ls, fontsize=14) plt.tight_layout() if save_figs : plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.png', dpi=150, transparent=True) plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.eps') plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_' + str(val_ix) + '_' + summary_mode + fig_suffix + '.svg') plt.show() #Plot edit distance curves opt_len = 90 f = plt.figure(figsize=(10, 6)) ls = [] min_y = 10000 max_y = -10000 for sampling_cond in sampling_conds : val_hists = np.concatenate([np.array([np.mean(sampling_cond[3]['val'][i][3]['edit_distance_samples'][j]) for j in range(n_epochs + 1)] ).reshape(-1, 1) for i in range(len(np.array(sampling_cond[3]['train'])))], axis=1) mean_val_hist = np.mean(val_hists, axis=-1) if summary_mode == 'mean' else np.median(val_hists, axis=-1) if opt_len is not None : mean_val_hist /= opt_len norm_prefix = '' if sampling_cond[2] == True : norm_prefix = '-IN' label = 'PWM' + norm_prefix linestyle = '-' if sampling_cond[0] == 'sample' : label = "Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = '--' elif sampling_cond[0] == 'both' : label = "PWM" + norm_prefix + " + Sampled" + norm_prefix + " (" + str(sampling_cond[1]) + "x)" linestyle = ':' l1 = plt.plot(np.arange(mean_val_hist.shape[0]), mean_val_hist, linewidth=3, linestyle=linestyle, label=label) ls.append(l1[0]) if np.max(mean_val_hist[start_from_epoch:]) > max_y : max_y = np.max(mean_val_hist[start_from_epoch:]) if np.min(mean_val_hist[start_from_epoch:]) < min_y : min_y = np.min(mean_val_hist[start_from_epoch:]) plt.xlim(start_from_epoch, n_epochs) 
plt.xticks([start_from_epoch, n_epochs], [start_from_epoch, n_epochs], fontsize=14) plt.ylim(min_y, max_y) plt.yticks(fontsize=14) plt.xlabel("Epoch", fontsize=16) plt.ylabel("Edit distance", fontsize=16) plt.legend(handles=ls, fontsize=14) plt.tight_layout() if save_figs : plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.png', dpi=150, transparent=True) plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.eps') plt.savefig('apa_genesis_max_isoform_learning_loss_curves_validation_edit_distance_' + summary_mode + fig_suffix + '.svg') plt.show() # -
# analysis/apa/apa_max_isoform_genesis_eval_sampling_modes_no_entropy_with_edit_distances.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Coordinates usage in ctapipe # + slideshow={"slide_type": "fragment"} import astropy.units as u import copy import numpy as np import matplotlib.pyplot as plt from ctapipe.io import EventSource from ctapipe.calib import CameraCalibrator from ctapipe.utils import get_dataset_path from ctapipe.visualization import ArrayDisplay # %matplotlib inline # + slideshow={"slide_type": "fragment"} from astropy.coordinates import SkyCoord, AltAz from ctapipe.coordinates import ( GroundFrame, TiltedGroundFrame, NominalFrame, TelescopeFrame, CameraFrame, ) # - # make plots and fonts larger plt.rcParams['figure.figsize'] = (12, 8) plt.rcParams['font.size'] = 16 # + [markdown] slideshow={"slide_type": "subslide"} # ## Open test dataset # + slideshow={"slide_type": "fragment"} filename = get_dataset_path("gamma_prod5.simtel.zst") source = EventSource(filename) events = [copy.deepcopy(event) for event in source] event = events[4] layout = set(source.subarray.tel_ids) # - # ### Choose event with LST # This ensures that the telescope is not "parked" (as it would be in an event where it is not triggered) but is actually pointing to a source. # + slideshow={"slide_type": "subslide"} print(f'Telescope with data: {event.r1.tel.keys()}') tel_id = 3 # + [markdown] slideshow={"slide_type": "slide"} # ## AltAz # # See [Astropy Docs on AltAz](http://docs.astropy.org/en/stable/api/astropy.coordinates.AltAz.html). # # Pointing direction of telescopes or the origin of a simulated shower are described in the `AltAz` frame. # This is a local, angular coordinate frame, with angles `altitude` and `azimuth`. # Altitude is the measured from the Horizon (0°) to the Zenith (90°). 
# For the azimuth, there are different conventions. In Astropy and thus ctapipe, Azimuth is oriented East of North (i.e., N=0°, E=90°).
# -

from astropy.time import Time
from astropy.coordinates import EarthLocation

# + slideshow={"slide_type": "fragment"}
obstime = Time('2013-11-01T03:00')
location = EarthLocation.of_site('Roque de los Muchachos')

altaz = AltAz(location=location, obstime=obstime)

# Array pointing direction of this event as a SkyCoord in the AltAz frame.
# BUG FIX: the original passed `array_azimuth` to `alt=` and `array_altitude`
# to `az=` — the keywords were swapped, silently producing a wrong pointing
# coordinate for everything derived from it.
array_pointing = SkyCoord(
    alt=event.pointing.array_altitude,
    az=event.pointing.array_azimuth,
    frame=altaz,
)

print(array_pointing)

# + [markdown] slideshow={"slide_type": "subslide"}
# ## CameraFrame
#
# Camera coordinate frame.
#
# The camera frame is a 2d cartesian frame, describing position of objects in the focal plane of the telescope.
#
# The frame is defined as in H.E.S.S., starting at the horizon, the telescope is pointed to magnetic north in azimuth and then up to zenith.
#
# Now, x points north and y points west, so in this orientation, the camera coordinates line up with the CORSIKA ground coordinate system.
#
# MAGIC and FACT use a different camera coordinate system: Standing at the dish, looking at the camera, x points right, y points up.
# To transform MAGIC/FACT to ctapipe, do x' = -y, y' = -x.
#
# **Typical usage**: Position of pixels in the focal plane.
# + slideshow={"slide_type": "fragment"} geometry = source.subarray.tel[tel_id].camera.geometry pix_x = geometry.pix_x pix_y = geometry.pix_y focal_length = source.subarray.tel[tel_id].optics.equivalent_focal_length # + slideshow={"slide_type": "fragment"} telescope_pointing = SkyCoord( alt=event.pointing.tel[tel_id].altitude, az=event.pointing.tel[tel_id].azimuth, frame=altaz, ) camera_frame = CameraFrame( focal_length=focal_length, rotation=0 * u.deg, telescope_pointing=telescope_pointing, ) cam_coords = SkyCoord(x=pix_x, y=pix_y, frame=camera_frame) print(cam_coords) # + slideshow={"slide_type": "fragment"} plt.scatter(cam_coords.x, cam_coords.y) plt.title(f'Camera type: {geometry.camera_name}') plt.xlabel(f'x / {cam_coords.x.unit}') plt.ylabel(f'y / {cam_coords.y.unit}') plt.axis('square'); # - # The implementation of the coordinate system with astropy makes it easier to use time of the observation and location of the observing site, to understand, for example which stars are visible during a certain night and how they might be visible in the camera. 
# # + from ctapipe.visualization import CameraDisplay from ctapipe.instrument import CameraGeometry location = EarthLocation.of_site('Roque de los Muchachos') obstime = Time('2018-11-01T04:00') crab = SkyCoord.from_name("crab nebula") altaz = AltAz(location=location, obstime=obstime) pointing = crab.transform_to(altaz) camera_frame = CameraFrame( telescope_pointing=pointing, focal_length=focal_length, obstime=obstime, location=location, ) cam = CameraGeometry.from_name('LSTCam') fig, ax = plt.subplots() display = CameraDisplay(cam, ax=ax) ax.set_title( f'La Palma, {obstime}, az={pointing.az.deg:.1f}°, zenith={pointing.zen.deg:.1f}°, camera={geometry.camera_name}' ) for i, name in enumerate(['crab nebula', 'o tau', 'zet tau']): star = SkyCoord.from_name(name) star_cam = star.transform_to(camera_frame) x = star_cam.x.to_value(u.m) y = star_cam.y.to_value(u.m) ax.plot(x, y, marker='*', color=f'C{i}') ax.annotate( name, xy=(x, y), xytext=(5, 5), textcoords='offset points', color=f'C{i}', ) plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # ## TelescopeFrame # # Telescope coordinate frame. # A `Frame` using a `UnitSphericalRepresentation`. # # This is basically the same as a `HorizonCoordinate`, but the origin is at the telescope's pointing direction. # This is what astropy calls a `SkyOffsetFrame`. # # The axis of the telescope frame, `fov_lon` and `fov_lat`, are aligned with the horizontal system's azimuth and altitude respectively. # # Pointing corrections should applied to the transformation between this frame and the camera frame. 
# + slideshow={"slide_type": "fragment"} telescope_frame = TelescopeFrame( telescope_pointing=pointing, obstime=pointing.obstime, location=pointing.location, ) telescope_coords = cam_coords.transform_to(telescope_frame) # + slideshow={"slide_type": "fragment"} wrap_angle = telescope_pointing.az + 180* u.deg plt.axis('equal') plt.scatter( telescope_coords.fov_lon.deg, telescope_coords.fov_lat.deg, alpha=0.2, color='gray' ) for i, name in enumerate(['crab nebula', 'o tau', 'zet tau']): star = SkyCoord.from_name(name) star_tel = star.transform_to(telescope_frame) plt.plot(star_tel.fov_lon.deg, star_tel.fov_lat.deg, '*', ms=10) plt.annotate( name, xy=(star_tel.fov_lon.deg, star_tel.fov_lat.deg), xytext=(5, 5), textcoords='offset points', color=f'C{i}', ) plt.xlabel('fov_lon / {}'.format(telescope_coords.altaz.az.unit)) plt.ylabel('fov_lat / {}'.format(telescope_coords.altaz.alt.unit)) # + [markdown] slideshow={"slide_type": "subslide"} # ## NominalFrame # # Nominal coordinate frame. # A Frame using a `UnitSphericalRepresentation`. # This is basically the same as a `HorizonCoordinate`, but the # origin is at an arbitray position in the sky. # This is what astropy calls a `SkyOffsetFrame` # If the telescopes are in divergent pointing, this `Frame` can be # used to transform to a common system. 
# - 2D reconstruction (`HillasIntersector`) is performed in this frame # - 3D reconstruction (`HillasReconstructor`) doesn't need this frame # + [markdown] slideshow={"slide_type": "fragment"} # Let's play a bit with 3 LSTs with divergent pointing # + location = EarthLocation.of_site('Roque de los Muchachos') obstime = Time('2018-11-01T02:00') altaz = AltAz(location=location, obstime=obstime) crab = SkyCoord.from_name("crab nebula") # let's observe crab array_pointing = crab.transform_to(altaz) # let the telescopes point to different positions alt_offsets = u.Quantity([1, -1, -1], u.deg) az_offsets = u.Quantity([0, -2, +2], u.deg) tel_pointings = SkyCoord( alt=array_pointing.alt + alt_offsets, az=array_pointing.az + az_offsets, frame=altaz, ) camera_frames = CameraFrame( telescope_pointing=tel_pointings, # multiple pointings, so we get multiple frames focal_length=focal_length, obstime=obstime, location=location, ) nom_frame = NominalFrame(origin=array_pointing, obstime=obstime, location=location) # + slideshow={"slide_type": "fragment"} fig, ax = plt.subplots(figsize=(15, 10)) ax.set_aspect(1) for i in range(3): cam_coord = SkyCoord(x=pix_x, y=pix_y, frame=camera_frames[i]) nom_coord = cam_coord.transform_to(nom_frame) ax.scatter( x=nom_coord.fov_lon.deg, y=nom_coord.fov_lat.deg, label=f'Telescope {i + 1}', s=30, alpha=0.15, ) for i, name in enumerate(['Crab', 'o tau', 'zet tau']): s = SkyCoord.from_name(name) s_nom = s.transform_to(nom_frame) ax.plot( s_nom.fov_lon.deg, s_nom.fov_lat.deg, '*', ms=10, ) ax.annotate( name, xy=(s_nom.fov_lon.deg, s_nom.fov_lat.deg), xytext=(5, 5), textcoords='offset points', color=f'C{i}', ) ax.set_xlabel(f'fov_lon / deg') ax.set_ylabel(f'fov_lat / deg') ax.legend() plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # ## GroundFrame # # # Ground coordinate frame. 
The ground coordinate frame is a simple # cartesian frame describing the 3 dimensional position of objects # compared to the array ground level in relation to the nomial # centre of the array. Typically this frame will be used for # describing the position on telescopes and equipment # # **Typical usage**: positions of telescopes on the ground (x, y, z) # + slideshow={"slide_type": "fragment"} source.subarray.peek() # + [markdown] slideshow={"slide_type": "fragment"} # In case a layout is selected, the following line will produce a different output from the picture above. # + slideshow={"slide_type": "fragment"} source.subarray.select_subarray(layout, name="Prod3b layout").peek() # - # ![Ground Frame](ground_frame.png) # In this image all the telescope from the `gamma_test.simtel.gz` file are plotted as spheres in the GroundFrame. # ## TiltedGroundFrame # Tilted ground coordinate frame. # # The tilted ground coordinate frame is a cartesian system describing the 2 dimensional projected positions of objects in a tilted plane described by pointing_direction. The plane is rotated along the z_axis by the azimuth of the `pointing_direction` and then it is inclined with an angle equal to the zenith angle of the `pointing_direction`. # # This frame is used for the reconstruction of the shower core position. # ![Tilted Ground Frame](tilted_ground_frame.png) # This image picture both the telescopes in the GroundFrame (red) and in the TiltedGroundFrame (green) are displayed: in this case since the azimuth of the `pointing_direction` is 0 degrees, then the plane is just tilted according to the zenith angle. # # For playing with these and with more 3D models of the telescopes themselves, have a look at the [CREED_VTK](https://github.com/thomasgas/CREED_VTK) library.
docs/tutorials/coordinates_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np # ## Matrix power for stationary distribution P = np.array([[0.9, 0.1, 0, 0], [ 0, 0, 0.5, 0.5], [0.6, 0.4, 0, 0], [ 0, 0, 0.3, 0.7]]) v = np.array([0.25, 0.25, 0.25, 0.25]) v = np.array([0, 1, 0, 0]) pi = v@np.linalg.matrix_power(P, 1000000) pi pi@P # ## Numpy eig to solve w, v = np.linalg.eig(P.T) w v v[:, 0] / sum(v[:, 0]) P = np.array([[3/4, 0, 0, 1/4], [1/2, 1/2, 0, 0], [ 0, 3/4, 1/4, 0], [ 0, 0, 1, 0]]) w, v = np.linalg.eig(P.T) w v v[:, 1] / sum(v[:, 1]) 12/25 # ##### 3/25 # 4/25 from tqdm import tqdm dice_outputs = [1, 2, 3, 4, 5, 6] N = 1000000 groups = [] for _ in tqdm(range(N)): group = [] for _ in range(5): group.append(np.random.choice(dice_outputs)) groups.append(group) groups # + counter = 0 for group in groups: group_set = set(group) if len(group_set) == len(group): counter += 1 print(f"{counter/N}") # + counter = 0 for group in groups[:N//2]: group_set = set(group) if len(group_set) == 2: counter += 1 print(f"{counter/(N//2)}") # + counter = 0 for group in groups: group_set = set(group) if len(group_set) == 2: counter += 1 print(f"{counter/(N)}") # - NUM = 100000000 ps = np.random.binomial(n=1000, p=0.5, size=(NUM)) np.where(ps >=550)[0].shape[0] print(np.where(ps >=550)[0].shape[0]/NUM) summer = 0 for group in groups: summer += sum(group) print(summer/N)
rough_work/Testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Identifying Diversification Opportunities # # Employee share schemes have clear benefits for the employer, but for employees there are benefits and risks. The benefits are the incentive towards long term savings and possibly a starting point into share ownership. The main downside is the concentration of risk. If your employer hits a rough patch you may find yourself laid off and at the same time that a chunk of your savings have evaporated. More likely, you find yourself with a need to unlock some money urgently, but the share price has dipped and you really don't want to sell right now. # # Diversification would avoid some of these risks. Selling some of your employers shares and purchasing others. But what is the best strategy? Would it be purchasing shares in the main competitor, thinking that if your company does badly they will do better, or in a completely unrelated industry, and if so which? # # One possibility would be to look at historical correlation. If another company's price has historically moved in the opposite direction, then there's a good chance it will in the future. The downside here is that it only captures situations similar to those in the past. # # In this analysis we will look at using Singular Value Decomposition (specifically Funk SVD) to calculate latent features. The hope is that this would be more robust than simply using correlation. Rather than simply looking for companies which have moved differently, we will look for companies whose prices have responded differently to these latent factors. That way if in future we see these factors move in unusual ways, we will still have protection. # # For example, oil prices and transportation costs would often move together. 
If your company benefits from higher oil prices and lower transportation costs, correlation is unlikely to find good matches. If our model is able to identify these as latent features then we can identify other companies which have the opposite relationship. This would of course depend on them moving differently often enough to be picked up by the model, and we would never know whether or not these specific features were actually picked up. # # The aim is to identify companies who may respond differently in the short term to external impacts. We won't be saying anything about the value of a stock or if it is likely to grow - there is already a lot of research on those topics and there isn't a clear answer. The main aim is to identify a group of stocks such that if I urgently need to sell and my main stock has taken a dip, I have some others in my pocket which are likely to have gone up or at least stayed the same. # # Obviously there are still risks with either of these approaches - if your company has been growing steadily, it is likely that companies which have been declining steadily will be pulled out as comparisons. Judgement is therefore still required. # ### References # # https://www.pythonpool.com/python-tqdm/ # # http://theautomatic.net/2018/01/25/coding-yahoo_fin-package/ # # https://joblib.readthedocs.io/en/latest/parallel.html # # https://stackoverflow.com/questions/9390126/pythonic-way-to-check-if-something-exists # # https://www.geeksforgeeks.org/python-intersection-two-lists/ # TODO # - get details on recommended stocks - name, industry etc # - compare to correlation # - clustering on groups - what companies tend to move together # - add group features to web app? # - calculate the number of iterations required based on rate of change - if there is a log decay, estimate number of iterations to get to an acceptable error # - should we use grid search to optimize benefits of results? 
# + import pandas as pd import matplotlib.pyplot as plt import numpy as np import random from sklearn.metrics import mean_squared_error from lenskit.algorithms.funksvd import FunkSVD from joblib import Parallel, delayed import pickle import time import itertools from tqdm import tqdm import yahoo_fin.stock_info as si # - # # Data Cleaning # # Data is downloaded from the yahoo_fin api, and weekly stock price movements are calculated and stored in a dataframe. # # We want stocks which move in opposite directions. Aggregating to weekly removes some noise such as extreme daily fluctuations and day of week seasonality which aren't of interest here. We are interested in medium term movements. # + def download_new_prices(ticker_groups, start_date=None, end_date=None, filename=None): ''' Downloads historical stock price data from Yahoo Finance using the yahoo_fin package, saving the results if a filename is provided. INPUT ticker_group - a string representing the group of tickers to load ('sp500', 'ftse100' or 'ftse250') filename - an optional string providing a location for the results to be saved for future use OUTPUT price_data - a dictionary of stock prices ''' tickers = [] if not isinstance(ticker_groups, list): ticker_groups = [ticker_groups] for ticker_group in ticker_groups: if ticker_group == 'sp500': tickers.extend(si.tickers_sp500()) elif ticker_group == 'ftse250': tickers.extend(si.tickers_ftse250()) elif ticker_group == 'ftse100': tickers.extend(si.tickers_ftse100()) elif ticker_group == 'other': tickers.extend(si.tickers_other()) elif ticker_group == 'dow': tickers.extend(si.tickers_dow()) elif ticker_group == 'nasdaq': tickers.extend(si.tickers_nasdaq()) else: print("Ticker group not recognized. 
Options are 'sp500', 'ftse100', 'ftse250', 'other','dow','nasdaq'") # remove duplicates tickers = list(set(tickers)) print(f'getting data for {len(tickers)} stocks') def get_data_skip_missing(ticker, start_date, end_date): '''Gets price data, skipping any tickers which aren't found ''' try: return si.get_data(ticker, start_date=start_date, end_date=end_date) except: pass price_data = {ticker : get_data_skip_missing( ticker, start_date=start_date, end_date=end_date) for ticker in tqdm(tickers)} if filename is not None: with open(filename, "wb") as tf: pickle.dump(price_data,tf) return price_data def get_price_data(ticker_group, start_date=None, end_date=None, filename=None, update=False): ''' Downloads historical stock price data from Yahoo Finance using the yahoo_fin package. If no filename is provided this simply downloads and returns the price data. If a filenam is provided then: If update is set to False (the default) we will first try to load data from that file. If update is set to True or the file can't be opened, new prices are downloaded and saved to the filename provided. 
INPUT ticker_group - a string representing the group of tickers to load ('sp500', 'ftse100' or 'ftse250') filename - an optional string providing a location for the results to be saved for future use update - boolean specifying whether we should try to load from file OUTPUT price_data - a dictionary of stock prices ''' if update: price_data = download_new_prices(ticker_group, start_date, end_date, filename) else: try: with open(filename, "rb") as tf: price_data = pickle.load(tf) except: price_data = download_new_prices(ticker_group, start_date, end_date, filename) return price_data # + # get historical pricing data for S&P start_date = '2009-01-01' update = False ticker_groups = ['sp500','ftse100','ftse250'] price_data_sp_ftse = get_price_data( ticker_groups,filename='Outputs//price_data_sp_ftse.pkl', start_date=start_date, update=update) # + # add all the valid prices # todo - implement without a loop, use sets and the stock tickers valid_prices = {} #for i in [price_data_nasdaq, price_data_sp_ftse, price_data_other]: for i in [price_data_sp_ftse]: for j, k in i.items(): if (k is None) or (k['adjclose'].count() <= 100): pass elif k['adjclose'].count() <= 100: pass else: valid_prices[j] = k # - len(valid_prices) # + # create a dataframe from the dictionary avoiding duplicates # todo - can this be implemented without a loop, to speed up? 
prices_df = pd.DataFrame(columns = ['ticker','open', 'high', 'low', 'close', 'adjclose', 'volume']) for i in tqdm(valid_prices.keys()): if i not in prices_df['ticker'].values: prices_df = prices_df.append(valid_prices[i]) prices_df # + #resample to weekly logic = {'open' : 'first', 'high' : 'max', 'low' : 'min', 'close' : 'last', 'adjclose' : 'last', 'volume': 'sum'} weekly_prices = prices_df.groupby('ticker').resample('W').apply(logic) weekly_prices = weekly_prices.reset_index().rename(columns={'level_1':'week_ending'}).set_index('week_ending') # + # remove low volume outliers (less that 5% of records) # weekly_prices = weekly_prices[weekly_prices['volume']>10] # calculate weekly movements weekly_prices['movement'] = np.where( weekly_prices['ticker'].shift()==weekly_prices['ticker'], weekly_prices['adjclose']/weekly_prices['adjclose'].shift()-1, None) weekly_prices.head() # - # save weekly prices filename='Outputs//weekly_prices.pkl' with open(filename, "wb") as tf: pickle.dump(weekly_prices,tf) # reshape weekly_movements = weekly_prices.reset_index().groupby(['ticker', 'week_ending'])['movement'].max().unstack() # save weekly movements filename='Outputs//weekly_movements.pkl' with open(filename, "wb") as tf: pickle.dump(weekly_movements,tf) # + # read weekly prices filename='Outputs//weekly_prices.pkl' with open(filename, "rb") as tf: weekly_prices = pickle.load(tf) # remove low volume outlier stocks volume_by_ticker = weekly_prices.groupby('ticker')['volume'].sum() big_tickers = volume_by_ticker[volume_by_ticker > volume_by_ticker.quantile(0.10)].index.values price_data = weekly_prices[weekly_prices['ticker'].isin(big_tickers)] # - # read weekly movements filename='Outputs//weekly_movements.pkl' with open(filename, "rb") as tf: weekly_movements = pickle.load(tf) # ## Data Exploration # # The example below shows some weekly movements for a few stocks. 
We can see that in the first few weeks there are several drops in the AMZL share price, whereas JNJ is flat and AMD increases. This would be a bad time to sell AMZN shares. In this example holding a few AMD shares would have provided some protection against the AMZN price fall - but is this generally the case? Can we find shares which are expected to reliably protect against falls in a particular stock? # + from_date = '2021-07-01' to_date = '2021-10-11' stock_list = ['AMZN','GOOG','JNJ','AMD'] plt.figure(figsize=(15,5)) for stock in stock_list: plt.plot(weekly_prices[(weekly_prices['ticker']==stock) & (weekly_prices.index>=from_date) & (weekly_prices.index<=to_date)]['movement'], label=stock) plt.legend() plt.show() # - # ## Modelling # # We use the FunkSVD functions from Lenskit as this is faster than manually calculating and allows us to run more iterations and test more parameter combinations. This requires us to rename the columns to match the required inputs, which assume we are working with movie ratings. The stock ticker replaces the movie, the week replaces the user and the price movement replaces the rating. # # To find the recommended stock we look for the one with the smallest MSE compared to the inverse of the target stock. So if our target stock responded +2 to an increase in feature 1 and -3 to an increase in feature to, we would be looking for a stock which responded -2 and +3. # # The MSE is averaged over three periods of three months. This should give us the stock which is likely to perform best over the coming three months. # # Performance is evaluated by calculating the volatility of the recommended portfolio made up of the target stock and the n recommended stocks, and comparing it to portfolios made up of the target stock and randomly selected stocks. If a recommended portfolio has lower volatility than 50% of simulated portfolios then that was a successful recommendation. 
We then look at the average performance of all stocks, and if we perform better than 50% overall then the approach works. # + def train_test_split(input_df, train_start, test_start, test_end): ''' Prepares data for FunkSVD analysis As the function we are using was set up for movie ratings, it expects inputs in a specific format i.e. columns called user, item and rating INPUT input_df - a DataFrame with three columns - the first column containing the stock ticker, the second the time period, and the third the metric of interest (e.g. stock price, price movement) train_start, train_end, test_end - dates to slice the data into train and test sets OUTPUT train_data - a DataFrame ready for the FunkSVD function, with the date in the "user" column, ticker in "item" and the movement in the "rating" column test_data - a DataFrame of stock price movements with stock tickers on the rows and time periods on the columns ''' input_df = input_df.sort_index() # slice to the training period, drop any which aren't in the data for at least 100 weeks (there are four) train_data = input_df.loc[slice(train_start, test_start)].reset_index() # remove any rows or columns with no data train_data = train_data.dropna(axis=1,how='all') train_data = train_data.dropna(axis=0,how='all') # rename to the required column names for FunkSVD function train_data.rename(columns={'week_ending':'user','ticker':'item', train_data.columns[2]:'rating'}, inplace=True) train_data['rating'] = train_data['rating'].astype('float') train_data['rating'] = round(train_data['rating']*100,1) train_data.dropna(inplace=True) train_data['rating'] = np.where(train_data['rating'].isna(),np.nan,np.where((train_data['rating']<0),-1,1)) # slice to the test period test_data = input_df.loc[slice(test_start, test_end)].reset_index().set_index('ticker') # remove any stocks from the test data with incomplete test data test_summ = test_data.groupby('ticker')['movement'].count() test_data = test_data.loc[test_summ[test_summ == 
test_summ.max()].index.values] return train_data, test_data def random_stocks(stocks_list, target=None, n=5): '''Randomly select n stocks. If a target stock is provided, exclude this from the list.''' if target is not None: try: stocks_list.remove(target) except: pass rands = random.sample(stocks_list, k=n) return rands def inverse_mse(test_df, target, div_stocks): ''' Calculates the accuracy (MSE) of selected stocks vs. the inverse of the target For example if our target stock increases by 5%, ideally our recommended stock would decrease by 5% ''' errs = pd.DataFrame(columns=['ticker','error','inv_error']) for stock in div_stocks: #print(target, stock) err = mean_squared_error(test_df.loc[target]['movement'], test_df.loc[stock]['movement']) inv_err = mean_squared_error(-test_df.loc[target]['movement'], test_df.loc[stock]['movement']) errs = errs.append(pd.DataFrame([[stock, err, inv_err]], columns=['ticker','error','inv_error']), ignore_index=True) return errs def measure_random_mse(stocks_df, test_data, target, n): '''Finds random stocks and the MSE to the inverse of the target to create a baseline''' divs = random_stocks(stocks_df.index.values.tolist(), target, n) err = inverse_mse(test_data, target, divs) err['target']=target return err def measure_distances(stock_df, target): ''' Measures the euclidean distance between the price movements of stocks and the inverse of the target''' # create an empty dataframe to store the results distances = pd.DataFrame(columns=['ticker','dist']) # iterate through all of the stocks for stock in stock_df.index: # measure the euclidean distance to the opposite of the target dist = np.linalg.norm((-stock_df.loc[target].values)-stock_df.loc[stock].values) dist = pd.DataFrame([[stock,dist]], columns=['ticker','dist']) distances = distances.append(dist) distances = distances.sort_values('dist') return distances def diverse_stocks(stock_df, target, n=5): '''Uses the FunkSVD matrix to find the stocks closest to the inverse of the 
target stocks movements''' distances = measure_distances(stock_df, target) similar = distances[distances['ticker'] != target].sort_values('dist')[:n] divs = similar['ticker'].values.tolist() return divs def measure_recommended_mse(stock_df, test_data, target, n): '''Finds recommended diverse stocks and the MSE to the inverse of the target''' divs = diverse_stocks(stock_df, target, n) err = inverse_mse(test_data, target, divs) err['target']=target return err # http://techflare.blog/how-to-calculate-historical-volatility-and-sharpe-ratio-in-python/ def calc_portfolio_volatility(test_df, target, target_weight, div_stocks): '''Calculates volatility of a portfolio created using our recommended stocks''' # create a list of the portfolio stocks stocks = [target] stocks.extend(div_stocks) # create a list with the target portfolio weights weights = [target_weight] n_recs = len(div_stocks) # calculate the average volatility movements_pivot = test_data.loc[stocks].reset_index().pivot( index='week_ending',columns='ticker',values='movement') w_mat = movements_pivot.copy() w_mat = w_mat - w_mat w_mat = w_mat + (1-target_weight)/n_recs w_mat[target] = target_weight avg_vol = np.average( movements_pivot, weights=w_mat, axis=0).std()*np.sqrt(movements_pivot.shape[0]) return avg_vol # + # train model # iterate train test splits for multiple time periods results = {} param_list=[ # datesdiv [ ['2019-01-01','2021-01-01','2021-04-01'], ['2019-04-01','2021-04-01','2021-07-01'], ['2019-07-01','2021-07-01','2021-10-01'] ], # features [1, 2, 3], # iterations [200, 500, 1000], # learning rate [0.001, 0.0005], # n [1, 3, 5] ] parameters = list(itertools.product(*param_list)) # train the model across multiple time periods and parameters print('Training models') for i, param in enumerate(tqdm(parameters)): train_start = param[0][0] test_start = param[0][1] test_end = param[0][2] features = param[1] iterations = param[2] lrate = param[3] n = param[4] train_data, test_data = 
train_test_split(price_data[['ticker','movement']], train_start, test_start, test_end) # train the FunkSVD model with a range of parameters fsvd = FunkSVD(features=features, iterations=iterations, lrate=lrate) model = fsvd.fit(train_data) stock_df = pd.DataFrame(model.item_features_, index=model.item_index_) # find the opposite stocks and measure mean standard error # only calculate for stocks which are in both test and train targets = list(set(test_data.index.values).intersection(set(stock_df.index.values))) stock_df = stock_df.loc[targets] errs = Parallel(n_jobs=-1)( delayed(measure_recommended_mse)(stock_df, test_data, target, n) for target in targets ) avg_inv_mse = np.mean([x['inv_error'] for x in errs]) # save the output with parameter details results[i] = {'train_start':train_start, 'test_start':test_start, 'test_end':test_end, 'features':features, 'iterations':iterations, 'lrate':lrate, 'n':n, 'avg_inv_mse':avg_inv_mse, 'stock_df':stock_df } # calculate the MSE for the different models results_df = pd.DataFrame.from_dict(results).stack().unstack(0) # save the results dataframe result_df_filename = 'Outputs//results_df' + time.strftime('%d%b%Y_%H%M',time.localtime()) + '.pkl' results_df.to_pickle(result_df_filename) print(f'Saved results_df to {result_df_filename}') # save the results result_filename = 'Outputs//results' + time.strftime('%d%b%Y_%H%M',time.localtime()) + '.pkl' pickle.dump(results, open( result_filename, "wb" )) print(f'Saved results to {result_filename}') results_summary = results_df.groupby(['features','iterations','lrate','n'])['avg_inv_mse'].sum() results_summary # + # to load an existing model from file, update load_from_file to true and update the filenames load_from_file = False results_df_filename='Outputs//results_df07May2022_1824.pkl' results_filename='Outputs//results07May2022_1824.pkl' if load_from_file: with open(results_df_filename, "rb") as tf: results_df = pickle.load(tf) with open(results_filename, "rb") as tf: results = 
pickle.load(tf) results_summary = results_df.groupby(['features','iterations','lrate','n'])['avg_inv_mse'].sum() results_summary # - winner = results_summary[results_summary == results_summary.min()] winner # identify the best performing model winner_idx = results_df[(results_df['test_end']==results_df['test_end'].max()) & (results_df['features']==winner.index.values[0][0]) & (results_df['iterations']==winner.index.values[0][1]) & (results_df['lrate']==winner.index.values[0][2]) & (results_df['n']==winner.index.values[0][3])].index.values[0] winner_idx # + # select and save the best performing model stock_df = results[8]['stock_df'] winner_filename='Models//FunkSVD_matrix.pkl' stock_df.to_pickle(winner_filename) print(f'Saved winning model to {winner_filename}') # for testing, drop stocks not in the test data - these will generally be stocks which are no longer available stock_df = stock_df[stock_df.index.isin(test_data.index.values)] # - # ## Measuring effectiveness # # To assess whether this strategy works we will look at the reduction in volatility between the target stock, and a diversified portfolio made up of the recommended stocks. We will compare this to randomly selected portfolios. This approach will be successful if we can generate portfolios which perform on average better than the median of randomly selected portfolios. # # To start off we will compare a portfolio of 50% the target stock and 10% each of the top 5 most different stocks. In future we may adjust this to allow different portfolio constructions, for example having greater or fewer stocks, or weighting them based on their similarity to the target. # # Volatility is a useful metric as the more volatile a stock is, the more likely that it will have fallen (or increased) significantly at the time you need to sell. Another useful metric which we may add later would be max drawdown which specifically looks at the greatest drop in price. 
# 
# We want to find stocks that are most likely NOT to move similarly to our target stock given the same external conditions. Therefore we want to find those which are closest to the opposite of the values for our target stock. We achieve this by measuring the difference between the inverse of the target stock values and the candidate stock.
#
# For example, if our target stock (the one we are seeking to diversify against) moves positively with a value of +2 against the first latent feature and negatively with -3 against the second, we would ideally want to find a stock which was the exact opposite, i.e. -2 and +3.

# +
def model_portfolios(stock_df, price_data, train_start, test_start, test_end,
                     n=3, targets=None, target_weight=0.5):
    '''Compare the volatility of recommended portfolios with randomly generated ones

    For each target stock, builds a portfolio of the target plus its n most
    "opposite" stocks and measures its volatility against 100 randomly chosen
    portfolios of the same shape.

    OUTPUT
    pred_results - DataFrame with the recommendations, the volatility reduction
                   vs. the random average, and the fraction of random portfolios beaten
    '''
    # collect one single-row frame per target; DataFrame.append is deprecated
    # (removed in pandas 2.0), so we concatenate once at the end instead
    result_rows = []

    train_data, test_data = train_test_split(price_data[['ticker','movement']],
                                             train_start, test_start, test_end)

    # default to every stock present in both the model and the test period
    if targets is None:
        targets = list(set(test_data.index.values).intersection(set(stock_df.index.values)))

    for target in tqdm(targets):
        # volatility of the recommended portfolio
        divs = diverse_stocks(stock_df=stock_df, target=target, n=n)
        targeted_volatility = calc_portfolio_volatility(
            test_df=test_data, target=target,
            target_weight=target_weight, div_stocks=divs)

        # baseline: volatilities of 100 random portfolios (NaN results skipped)
        vols = []
        for i in range(100):
            recs = random_stocks(stocks_list=stock_df.index.values.tolist(),
                                 target=target, n=n)
            vol = calc_portfolio_volatility(
                test_df=test_data, target=target,
                target_weight=target_weight, div_stocks=recs)
            if not np.isnan(vol):
                vols.append(vol)
        mean_volatility = np.mean(vols)

        # calculate the reduction in volatility compared to the random average
        volatility_reduction = (mean_volatility-targeted_volatility)/mean_volatility

        # in what percentile are our picks in terms of volatility reduction
        # (explicit asarray: the old `vols > targeted_volatility` list/scalar
        # comparison only worked because the scalar happened to be np.float64)
        percentage_beat = np.sum(np.asarray(vols) > targeted_volatility)/len(vols)

        result_rows.append(pd.DataFrame(
            [[target, divs, volatility_reduction, percentage_beat]],
            columns=['ticker','recs','reduction','percentile']))

    if result_rows:
        pred_results = pd.concat(result_rows)
    else:
        pred_results = pd.DataFrame(columns=['ticker','recs','reduction','percentile'])

    return pred_results


model_portfolios(stock_df = stock_df,
                 price_data = price_data,
                 n = 1,
                 train_start = '2019-07-01',
                 test_start = '2021-10-01',
                 test_end = '2022-03-01',
                 targets = ['AMZN','NVDA','O'],
                 target_weight = 0.5)

# +
portfolio_results = model_portfolios(stock_df = stock_df,
                                     price_data = price_data,
                                     n = 1,
                                     train_start = '2019-07-01',
                                     test_start = '2022-01-01',
                                     test_end = '2022-04-01',
                                     target_weight = 0.5)

print(portfolio_results['percentile'].mean())
print(portfolio_results)
# -

# looking at O - Realty Income Corp - similar stocks are mostly other real estate holding firms.
# Recommendations are mostly industrial and materials - e.g. steel manufacturing
o_comps = measure_distances(stock_df, 'O')
print(o_comps[:10])
print(o_comps[-10:])

# for NVDA similar companies include competitor AMD; Synopsys, a silicon design company; and tech/internet stocks
# Recommendations include insurance, financial services, chemical and materials companies
nvda_comps = measure_distances(stock_df, 'NVDA')
print(nvda_comps[:10])
print(nvda_comps[-10:])

amzn_comps = measure_distances(stock_df, 'AMZN')
print(amzn_comps[:10])
print(amzn_comps[-10:])
DiversificationRecommender.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Import Libraries

from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
import os
import seaborn as sns
from sklearn.metrics import confusion_matrix
# NOTE(review): `plot_confusion_matrix` is unused below and was removed from
# scikit-learn in 1.2 -- confirm before upgrading scikit-learn.
from sklearn.metrics import plot_confusion_matrix

# # Set Data Directory

# root directory of the image dataset (one sub-folder per class)
folder_path = r"C:\Users\<NAME>\OneDrive - Universiti Teknologi PETRONAS\Desktop\ML Project\dataset"
categories = ["mask", "no_mask"]  # sub-folder names; each folder name doubles as its class label

# # Assign Images to Array

# ## Declare Arrays to Store Image Data

data = []    # preprocessed image arrays, one entry per image
labels = []  # class label ("mask" / "no_mask") for the matching entry in `data`

# ## Loop to Transform and Append Image Data into the Arrays

for sub_folders in categories:  # iterate over the two class folders
    file_location = os.path.join(folder_path, sub_folders)  # path of the current class folder
    for image_names in os.listdir(file_location):  # every image file inside that folder
        img_path = os.path.join(file_location, image_names)  # full path to one image
        image = load_img(img_path, target_size=(224, 224))  # load and resize to the network input size
        image = img_to_array(image)  # PIL image -> numeric array
        image = preprocess_input(image)  # apply MobileNetV2's expected input preprocessing
        data.append(image)
        labels.append(sub_folders)  # the folder name is the ground-truth label

# ## Preprocess Each Array into Numpy Arrays

labels = LabelBinarizer().fit_transform(labels)  # string labels -> 0/1 column
labels = to_categorical(labels)  # 0/1 -> one-hot rows, matching the 2-unit softmax head below
data = np.array(data, dtype="float32")  # stack all images into one float32 array
labels = np.array(labels)

# # Split Training and Testing Data

# 80:20 train/test split; `stratify` keeps the mask/no-mask ratio equal in both subsets
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.20,
                                                    stratify=labels, random_state=42)

# # CNN

# ## Distort Image Dataset

# Data augmentation: random, label-preserving distortions generated on the fly during training
augment_image = ImageDataGenerator(
    rotation_range=20,       # rotate randomly by up to +/-20 degrees
    zoom_range=0.15,         # zoom in/out by up to 15%
    width_shift_range=0.2,   # horizontal shift by up to 20% of the width
    height_shift_range=0.2,  # vertical shift by up to 20% of the height
    shear_range=0.15,        # shear the image along an axis by up to 0.15
    horizontal_flip=True,    # randomly mirror images left/right
    fill_mode="nearest"      # fill pixels exposed by a shift with the nearest pixel value
)

# ## Declare Training Hyperparameters and Optimizer

learn_rate = 0.0001  # Adam learning rate
epoch = 20           # number of passes through the training dataset
batch_sizes = 32     # samples processed before each weight update
opt = Adam(learning_rate=learn_rate, decay=learn_rate / 20)  # decay the rate over the 20 epochs

# ## Prepare MobileNetV2 Model

# ### Declare Input
image_size = (224, 224, 3) # declare size of image input_model = MobileNetV2(weights="imagenet", include_top=False,input_tensor=Input(shape=image_size)) # construct the model # ### Declare Output output_model = input_model.output # duplicate the output of model output_model = AveragePooling2D(pool_size=(7, 7))(output_model) # calculate average for each patch of feature map output_model = Flatten(name="flatten")(output_model) # convert into single dimension output_model = Dense(128, activation="relu")(output_model) # use relu activation function output_model = Dropout(0.5)(output_model) # randomly set input unit to 0.5 output_model = Dense(2, activation="softmax")(output_model) # use softmax activation function # ### Compile Model input_model.trainable = False # freeze the convolutional layers to use the base model as a feature extractor model = Model(inputs=input_model.input, outputs=output_model) # place feature class on top of input_model model.compile(loss="binary_crossentropy", optimizer=opt,metrics=["accuracy"]) # compile the model using the declared variables # ## Train the Model cnn_model = model.fit( augment_image.flow(X_train, y_train), # fit in distorted data into the model steps_per_epoch=len(X_train) // batch_sizes, #divide training dataset based on batch size validation_data=(X_test, y_test), #divide validation dataset based on batch size validation_steps=len(X_test) // batch_sizes,#divide validation dataset based on batch size epochs=epoch) #declare number of epochs # # Evaluate the Model # ## Validation and Loss acc = cnn_model.history['accuracy'] # assign accuracy to variable val_acc = cnn_model.history['val_accuracy'] # assign validation accuracy to variable loss = cnn_model.history['loss'] # assign training loss to variable val_loss = cnn_model.history['val_loss'] # assign validation loss to variable epochs_range = range(epoch) #set epoch range plt.figure(figsize=(8, 8)) # set chart size plt.subplot(1, 2, 1) # declare subplot 
plt.plot(epochs_range, acc, label='Training Accuracy') # plot training accuracy plt.plot(epochs_range, val_acc, label='Validation Accuracy') # plot validation accuracy plt.legend(loc='lower right') #place legend on bottom right corner plt.title('Training and Validation Accuracy') # name the first chart plt.subplot(1, 2, 2) # create a subplot plt.plot(epochs_range, loss, label='Training Loss' )# plot training loss plt.plot(epochs_range, val_loss, label='Validation Loss') # plot validation loss plt.legend(loc='upper right') #place legend on top right corner plt.title('Training and Validation Loss') # name the first chart plt.show() #show the charts rounded_predictions = np.argmax(model.predict(X_test), axis=1) # round off prediction values rounded_labels = np.argmax(y_test, axis=1) # round off test dataset values cm = confusion_matrix(rounded_labels, rounded_predictions) # ## Confusion Matrix ax = sns.heatmap(cm, annot=True, cmap='Blues',fmt='g') # setup chart ax.set_title('Confusion Matrix'); # set chart title ax.set_xlabel('\nPredicted Values') #set x axis labels ax.set_ylabel('Actual Values '); #set y axis labels ax.xaxis.set_ticklabels(['Mask','No Mask']) #set labels ax.yaxis.set_ticklabels(['Mask','No Mask']) #set labels plt.show() #display the chart # # Export Model model.save("mask_detector.model", save_format="h5") #save model in h5 format
Training Code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="LcVMEKIOSIa0" # <div> # <img src="https://drive.google.com/uc?export=view&id=1vK33e_EqaHgBHcbRV_m38hx6IkG0blK_" width="350"/> # </div> # # #**Artificial Intelligence - MSc** # CS6501 - MACHINE LEARNING APPLICATIONS # # ###Instructor: <NAME> # ###CS6501_Exercise_2.1 # + [markdown] id="MuZ0F5C9qUmr" # ## Imports # + id="H4759neSP_Kh" # import libraries from sklearn.datasets import make_blobs from sklearn.linear_model import LogisticRegression from pandas import DataFrame from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt import numpy as np # + [markdown] id="XLp5xCeOqMEV" # ## Synthetic Dataset # + [markdown] id="wTSNAvMfRYFn" # Generate isotropic Gaussian blobs using "[make_blobs](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html)" # # + id="JmQAB80HQxfC" ## randomly generate a 2d, 2 classes dataset # using: n_samples=200 X4, y4 = # create a data frame df = DataFrame(dict(x=X1[:,0], y=X1[:,1], label=y1)) # use: 'green', 'cyan' colors = # figure fig, ax = plt.subplots() grouped = df.groupby('label') # scatter plot for key, group in grouped: group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key]) # show the plot plt.show() # + id="bFLeyfH0UeW2" # show first 7 rows #df.head() # + id="LrM6v2A6pMfb" # count the data elements for each class df['label'].value_counts() # + [markdown] id="_ZZS9l69qwVJ" # ## Logistic Regresion # + [markdown] id="IIgx7MOprsnT" # * [Logistic regression](https://en.wikipedia.org/wiki/Logistic_regression) is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. 
# * In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). # # # # + id="HsRi3LDyNyKI" # Logistic Regression model LR_model = LogisticRegression() # + id="cyjd46yftFug" # change the vars names to correctly run this cell LR_model.fit(X1, y1) # + [markdown] id="HhoTcXVzqOHl" # Decision boundary from [Wikipedia](https://en.wikipedia.org/wiki/Decision_boundary): # # * A decision boundary is the region of a problem space in # which the output label of a classifier is ambiguous. # # * If the decision surface is a hyperplane, then the classification problem is linear, and the classes are linearly separable. # # * In a statistical-classification problem with two classes, a decision boundary or decision surface is a hypersurface that partitions the underlying vector space into two sets, one for each class. # # * You can find a related information to this topic in this [publication](https://towardsdatascience.com/classification-problems-and-exploring-decision-boundaries-3317e03afcdb) # # # # + id="8jMzZwuqgyYv" # Plotting the decision boundary # change the vars names to correctly run this cell plot_decision_regions(X1, y1, clf=LR_model, legend=2) # + id="PDhJEg0LtsOk" # Plotting the decision boundary plot_decision_regions(X1, y1, clf=LR_model, legend=2) # + id="4QoKXFJtt5vF" # vary the value of pos from 1 to 10 (1,2,3,4,5,6,7,8,9,10) # and report your findings in the next cell pos = 1 plot_decision_regions(X1, y1, clf=LR_model, legend=pos) # + [markdown] id="_mCJAPNltzai" # Here your report: # # * List item # * List item # # # + [markdown] id="-1Ns97SgxHRP" # ## New Data # + id="dTvTkAdzyQge" # new data instances, use 50 samples # and add the missing part of the code X_new, _ = make_blobs(, centers=2, n_features=2) # + [markdown] id="34ZOLzB_rFrK" # ## Predictions # + id="Rn2NQt61O7nA" # make predictions (assign class labels) # add the missing code in this cell y_pred = 
LR_model.predict() # show the inputs and predicted outputs for i in range(len()): print("X{0} = {1}, Class Predicted = {2}".format(i, [i], y_pred[i])) # + id="ej4fLLOa2tth" # create a data frame # add the missing code in this cell df_new = DataFrame(dict(X_new[:,0], X_new[:,1], label=y_pred)) # + id="wbEc5UPp0mmH" # show 7 rows df_new.head() # + id="wqyLkxR4xYLP" # use: 'red', 'blue' colors = {} # figure fig, ax = plt.subplots() # new data grouped = df_new.groupby('label') # scatter plot # solve the issues you find with vars name for key2, group2 in grouped2: group2.plot(ax=ax2, kind='scatter', x='x', y='y', label=key2, color=colors2[key2]) # show the plot plt.show() # + id="gUfxHcbG3r4p" # Plotting the new decision boundary # from the LogisticRegression model plot_decision_regions(X_new, y_pred, clf=LR_model, legend=2)
Week-2/CS6501_Exercise_2_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import json
from pathlib import Path
from pymatgen.core.composition import Composition
from unflatten import unflatten
from pandas import DataFrame
# pip install mpcontribs-client
# this notebook tested with version 3.11.2
from mpcontribs.client import Client, Attachment
# -

# ## load raw data

name = "genesis_efrc_minipipes"  # MPContribs project name
# local directory holding the raw pipeline output files read below
indir = Path(f"/Users/patrick/GoogleDriveLBNL/MaterialsProject/gitrepos/mpcontribs-data/{name}")

# +
config_path = indir / "basic.mpj"
config = json.loads(config_path.read_bytes())  # pipeline configuration (JSON)

# adding project name and API key to config (TODO: set through minipipes UI)
# the key is read from the environment, never hard-coded
config["meta"]["mpcontribs"] = {
    "project": name,
    "apikey": os.environ["MPCONTRIBS_API_KEY"]
}

# PDF data file: ped["data"][0] holds the r values, ped["data"][1] the G(r) values (see below)
ped_path = indir / "PED of BMG for PDF 1-29-20_0035-0070.gr"
ped = json.loads(ped_path.read_bytes())
png_path = indir / "DP_spotty_92x70.png"  # image file attached as-is to the contribution
# -

# ## init client

# retrieve MPContribs config and init client
# using pop here to avoid saving API key in attachment (the full `config` dict is
# attached to the contribution further below)
mpcontribs_config = config["meta"].pop("mpcontribs")
name = mpcontribs_config["project"]
client = Client(
    host = "lightsources-api.materialsproject.org",
    apikey = mpcontribs_config["apikey"]
)

# ## prep project

# +
# TODO init MPContribs columns and its units for project (see client.init_columns())
# TODO update `other` in project with columns legend
# -

client.get_project(name)  # sanity check: fetch the project we are about to contribute to

# ## prep contribution

runs_meta = config["runs"]["meta"]  # one metadata entry per pipeline step (3 expected, see keys_maps)
composition = runs_meta[-1]["config"]["composition"]  # composition recorded by the last step
formula = Composition(composition).get_integer_formula_and_factor()[0]  # reduced integer formula

contrib = {
    "project": name,
    "identifier": "TODO",  # usually mp-id, can be custom
    "formula": formula,
    "is_public": True,  # will make this contribution public automatically when project is set to public
    # data, tables and attachments added explicitly below
}
# FYI submitting a contribution with its ID triggers update of this contribution

# ### data

# +
# map pipeline step names to the short root keys used in the flattened data dict
names_map = {
    "i_Reduce_Data.Mask_Images.Mask_f": "mask",
    "i_Reduce_Data.Image_to_IQ.Integrate_f": "integrate",
    "i_Reduce_Data.IQ_to_PDF.Transform_f": "transform"
}
# per-step key renames; keys_maps[i] applies to runs_meta[i]
keys_maps = [  # len(runs_meta) = 3
    {
        "alpha": "α",
        "edge": "edge",
        "lower_threshold": "thresholds.lower",
        "upper_threshold": "thresholds.upper",
        "smoothing function": "smoothing",
        "vmin": "v.min",
        "vmax": "v.max"
    },
    {
        "wavelength (A)": "λ",  # TODO unit Angstrom
        "polarization": "polarization",
        "detector": "detector"
    },
    {
        "processor": "processor",
        "mode": "mode",
        "qmax": "q.max",
        "qmin": "q.min",
        "rpoly": "r.poly",
        "rmin": "r.min",
        "rmax": "r.max",
        "step": "step",
        "shift": "shift"
    }
]

# flatten each step's config into "root.sub.key" entries, then nest them again
flat_data = {}
for idx, meta in enumerate(runs_meta):
    root_key = names_map[meta["name"]]
    keys_map = keys_maps[idx]
    for old_key, new_key in keys_map.items():
        key = f"{root_key}.{new_key}"  # dotted keys become nested dicts via unflatten()
        value = meta["config"][old_key]
        # TODO add units
        flat_data[key] = value

contrib["data"] = unflatten(flat_data)
# -

# ### tables

x, y = "r", "G(r)"
df = DataFrame(data={x: ped["data"][0], y: ped["data"][1]})
df.set_index(x, inplace=True)
df.index.name = f"{x} [Å]"  # index label carries the unit
df.columns.name = "spectral type"
# DataFrame.attrs carry the plot metadata MPContribs uses for display
df.attrs["name"] = y
df.attrs["title"] = "Radial Distribution Function"
df.attrs["labels"] = {"value": f"{y} [Å⁻²]"}
# df.plot(**df.attrs)
contrib["tables"] = [df]

# ### attachments

# attach the full config (API key already popped above) plus the raw image
config_attachment = Attachment.from_data("config", config)
contrib["attachments"] = [config_attachment, png_path]

# ## submit contributions

client.submit_contributions([contrib])
mpcontribs-portal/notebooks/lightsources.materialsproject.org/genesis_efrc_minipipes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- import panel as pn import pandas as pd import hvplot.pandas # Defer loading of the data and populating the widgets until the page is rendered: # + stocks_url = 'https://raw.githubusercontent.com/vega/datalib/master/test/data/stocks.csv' select_ticker = pn.widgets.Select(name='Stock Ticker') def load_data(): if 'stocks' not in pn.state.cache: pn.state.cache['stocks'] = df = pd.read_csv(stocks_url, parse_dates=['date']).set_index('symbol') else: df = pn.state.cache['stocks'] symbols = list(df.index.unique()) select_ticker.options = symbols select_ticker.value = symbols[0] pn.state.onload(load_data) # - # If `'stocks'` is not yet in cache we show a spinner widget, otherwise let us plot the data: # + @pn.depends(select_ticker) def plot_ticker(ticker): if 'stocks' not in pn.state.cache or not ticker: return pn.indicators.LoadingSpinner(value=True) return pn.state.cache['stocks'].loc[ticker].hvplot.line('date', 'price') pn.Row(select_ticker, plot_ticker).servable()
examples/gallery/simple/defer_data_load.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 1.5 Exercise 5 - Code on a quantum computer with cloud platform IBM Q

# As you may have noticed, all our tests up until now have only been running on simulators being either 'qasm_simulator' or the 'statevector_simulator'.<br>
#
# The simulators are fine for running basic Quantum Circuits or for testing your code, before sending it off to a real Quantum Computer. So, now that we have gone through several examples using simulators, let's do some examples with real Quantum Computers via our cloud platform, IBM Q!<br>

# ---

# ## 1.5.0 Import of libraries for the program

# +
# Full import of Qiskit library
from qiskit import *

# Method import to find the least busy quantum computer
from qiskit.providers.ibmq import least_busy

# Method import to monitor Quantum Execution in real time
from qiskit.tools.monitor import job_monitor
# -

# <div class="alert alert-info" role="alert">
# <strong>Notice!</strong> Before you can connect to the IBM Q cloud platform and use the Quantum Computers, you need to load your API token from your Account:
#
# 1. Follow this link, <a href="https://quantumexperience.ng.bluemix.net/qx/account/advanced">QuantumExperience</a>, to either sign-up/sign-in to retrieve your API token.
# <br> [**!! Here is a guide !!**](./Appendix%20-%20Create%20user%20%26%20Get%20API%20Token.ipynb)
# 2. Insert the API token in the section marked Your_Key_Here in the cell below.
# 3. Don't delete the "".
#
# </div>

# placeholder: paste your personal IBM Q API token here (never commit a real token)
API_Token = "<PASSWORD>"

# 4. If this is your **first time** using this API token, please uncomment
#
#     IBMQ.save_account(API_Token, overwrite=True)
#
# by deleting the # symbol in front of the line.
#
# For future runs you should comment out the line again, by adding the # symbol again. The line should then read:
#
#     #IBMQ.save_account(API_Token, overwrite=True)

# Save the account so we can use it with IBMQ
# (this is the answers notebook, so the line ships uncommented)
IBMQ.save_account(API_Token, overwrite=True)

# 5. When the step above has been executed, we move on to loading the account.
#
# To avoid errors when the page first loads (aka. before you have entered your API Token), we have commented out the command here as well. Go ahead and remove the # symbol here as well. The line in question should read:
#
#     IBMQ.load_accounts()

# This will register which account we are using
provider =IBMQ.get_provider();

# 6. To use the least busy Quantum Computer for our experiments below, we define the device like this
#
# This might fail if your account wasn't loaded properly or if there is a problem with the API Token.

# pick whichever real (non-simulator) backend currently has the shortest queue
device = least_busy(provider.backends(simulator=False))

# ---

# ## 1.5.1 The Quantum Circuit

# This step is just business as usual with initiating our Quantum Circuit.
#
# <strong style="color: orange;">Firstly</strong>, we initiate our quantum program with the three main components:

# +
# Create a Quantum Register with 2 qubits.
qr = QuantumRegister(2)

# Create a classical register with 2 bits
cr = ClassicalRegister(2)

# Create a Quantum Circuit containing our QR and CR.
circuit = QuantumCircuit(qr,cr) # Prepare the method to draw our quantum program circuit.draw(); # - # --- # ## 1.5.2 Adding operations to the Quantum Circuit # <strong style="color: orange;">Secondly</strong>, as an example, we will use a [x]-gate - _but feel free to use whichever gate you want to try on the real Quantum Computer!_ # + # Adding a single [x]-gate to one of the two Quantum Registers circuit.x(qr[0]); # Adding a barrier for visualising purposes circuit.barrier() # Adding the measurement operation to all Quantum Registers circuit.measure(qr, cr); # - # --- # ## 1.5.3 Visualising the Quantum Circuit # <strong style="color: orange;">Thirdly</strong>, we want to view our Quantum Circuit to see, if it resembles what we had in mind: One [x]-gate first, followed by two measurement operations. circuit.draw(output='mpl') # --- # ## 1.5.4 Run the Quantum Program # <strong style="color: orange;">Fourthly</strong>, as mentioned earlier we run our Quantum Program on the least busy Quantum Computer. Unfortunatly, there you might not be the only one in line. After running the code below, you should see an output with the status of your job. This might take a little while, depending on how busy the machines are. So grab a cup of coffee, reflect on what you have learned so far or ask the person sitting next to you, what you have found to be the most challenging part of this workshop so far. # + # We excecute the job on the choosen backend job = execute(circuit, device) # The job monitor makes it possible for us to monitor our job in real time job_monitor(job) # - # When the Job Status says "job has successfully run", we will use the code below to retrieve the output. job_result = job.result() counts = job_result.get_counts(circuit) # --- # ## 1.5.5 Visualize the Result! # <strong style="color: orange;">Lastly</strong>, let's use the histogram to, yet again, visualize the output from our Quantum Device. 
from qiskit.tools.visualization import plot_histogram # Which then can be used to create our visualization: plot_histogram(counts) # If you followed our example and chose to add a single [x]-gate to the first Qubit, you should see a strong tendency towards the result 01. # # Because Quantum Computing is about statistics, you might also see some weak tendencys towards other answers. This is part of Quantum Computing, the results we get are rarely 100% certain, instead we're looking for tendencies and how to amplify them. # --- # <div class="alert alert-success" role="alert"> # <h1 class="alert-heading">Well done!</h1> # <p><b>Aww yeah</b>, you should now have successfully created a few Quantum Circuits, used different types of Quantum Gates and even have tested your code on a real Quantum Computer in the cloud!<br><br> # # **But!** We are not done yet!<br><br> # # We will move on to making a minor Quantum Program and then move on to fiddling with some of the larger and best-known Quantum Algorithms. # </p> # </div> # --- # ## Let's move on to the last Notebook we go through together! # [1.6 Exercise 6 - Random Number Generator](1.6%20-%20Random%20Number%20Generator.ipynb)
notebook-answers/1.5 - Run on a real quantum computer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Privacy & Anonymization
#
# A lot of data, perhaps the vast majority of data typically used in data science, is, directly or indirectly, about people.
#
# Individuals have privacy rights regarding who can know or share information about specifically identified individuals. This is true in particular about certain classes of sensitive information. For example, health-related information has special protections. Regardless of the data type, data privacy and security should also be a key concern when analyzing human data.

# ## Information Privacy

# <div class="alert alert-success">
# Information (or Data) Privacy refers to the legal, ethical, and practical issues of collecting, using and releasing data in which there is identifiable information about people included in the dataset. It also covers when and how to address data privacy issues, and how to protect users' privacy.
# </div>
#
# <div class="alert alert-info">
# <a href=https://en.wikipedia.org/wiki/Information_privacy class="alert-link">Wikipedia</a>
# has an overview of information privacy.
# </div>

# ## Anonymization

# <div class="alert alert-success">
# Data Anonymization is a type of information sanitization - that is, the removal of sensitive information - for the purpose of privacy protection. It is a procedure to modify a data set such that the individuals it reflects remain anonymous. Typically this means the removal of personally identifiable information from data sets such that the identity of individuals contained in the data set remains anonymous.
# </div>
#
# <div class="alert alert-info">
# <a href="https://en.wikipedia.org/wiki/Data_anonymization" class="alert-link">Wikipedia</a>
# also has an overview of data anonymization.
# </div> # Data protection and anonymization are interdisciplinary components of data science and data practice. Data protection includes everything from considerations of the ethics & legalities of data use, to the practical and technical challenges of protecting and anonymizing data. # # Anonymizing data typically comes down to removing any personally identifiable data from a dataset, or, if this information must be kept, separating the identifiable data from sensitive information. # # Part of the difficulty of data anonymization is that while we can provably demonstrate that a given dataset is anonymized, this rests on particular assumptions. Most notably, datasets are only provably anonymized under the assumption that no extra external information is available to be used to attempt to de-identify it. In practice, this means that de-anonymization of data can often be done by combining multiple datasets. By using information from multiple information sources, one can often use processes of elimination to decode the individuals included in a particular dataset. # # Regulations # # There are many official guidelines, rules and standards for data privacy and user identity protection, although much of it is case specific. # # At the minimum, what is legally required in terms of data protection depends, amongst other things, on: # - What the data is / contains, and who it is about, # - Certain data types, and/or populations may have special protections, for example health-related information. # - Who owns the data and in what capacity they are acting (company, university, etc.) # - For example, regulations for scientific research are different than those for companies # - User agreements / consent procedures that were in place when the data were collected. # - Individuals have a right to self-determination in terms of what their data is used for. Data should only be used for things that are covered by it's terms of use / terms of collection / consent procedures. 
# - What the data is to be used for. # - Often a combination of the what and the who, there may be specific regulations about how you must deal with, and what you can do, based on the goal of having and using the data. # - Where the data was collected and where it is stored, and who it is about. # - Different regions (countries, etc) often have different regulations. # # Much of these regulations apply more directly to the collection, storage, and release of datasets (rather than analysis), but aspects also apply to the use of datasets, including publicly available datasets. Available datasets often have a user agreement for using the data, and, in particular, attempting to identify individuals from datasets may at a minimum break user agreements, and/or (depending on the nature of the data) be illegal based on consumer and research subject protection laws. # ## Research Standards # <div class="alert alert-success"> # Data collected and used for research purposes has it's own set of guidelines and requirements regarding the treatment of human subjects, and the collection, storage, use, and dissemination of data. These regulations focus, among other things, on the right to self-determination of human subjects to consent to what data is collected, and how it is used. Data collected for research purposes must follow restrictions based on these consent procedures. # </div> # # <div class="alert alert-info"> # Research protections under the # <a href="https://en.wikipedia.org/wiki/Declaration_of_Helsinki" class="alert-link">Declaration of Helsinki</a>. # </div> # ## HIPAA - Protection for Health Related Information # <div class="alert alert-success"> # The Health Insurance Portability and Accountability Act (HIPAA) is a US federal government regulation that standardizes and protects individuals medical records and health related data. It includes terms for how data must be stored, and how they can be used & shared. 
# </div> # # <div class="alert alert-info"> # The official US federal government HIPAA information # <a href="https://www.hhs.gov/hipaa/" class="alert-link">guidelines</a> # include an overview of HIPAA. # </div> # ## Safe Harbour Method # <div class="alert alert-success"> # Safe Harbor is an official agreement regarding how to deal with datasets that have personal data. It describes specific guidelines on what information to remove from datasets in order to anonymize them. It is a single set of data protection requirements shared across many contexts and countries. # </div> # # <div class="alert alert-info"> # The # <a href="https://www.hhs.gov/hipaa/for-professionals/privacy/special-topics/de-identification/" class="alert-link">official documentation</a> # for Safe Harbour includes guidelines on how to anonymize data. # </div> # The Safe Harbor method requires that the following identifiers of the individuals be removed: # - Names # - Geographic Subdivisions smaller than a state** # - Dates (such as birth dates, etc), and all ages above 90 # - Telephone Numbers # - Vehicle Identification Numbers # - Fax numbers # - Device identifiers and serial numbers # - Email addresses # - Web Universal Resource Locators (URLs) # - Social security numbers # - Internet Protocol (IP) addresses # - Medical record numbers # - Biometric identifiers, including finger and voice prints # - Health plan beneficiary numbers # - Full-face photographs and any comparable images # - Account numbers # - Certificate/license numbers # - Any other unique identifying number, characteristic, or code # # ** The first three numbers of the zip code can be kept, provided that more than 20,000 people live in the region covered by all the zip codes that share the same initial three digits (the same geographic subdivision). # ### Unique Identifiers # # The goal of Safe Harbor, and Data Anonymization in general, is to remove any unique information that could be used to identify you. 
# # This is perhaps most obvious for things like names. Other, perhaps less obvious specifications of Safe Harbour, are also based on the that this information being in a dataset creates a risk for identification of individuals contained in the dataset. # # For example, while it may be innocuous to talk about a 37 year old male who lives in Los Angeles (as there are many candidates, such that the specific individual is not revealed), it might actually be quite obvious who the person is when talking about a 37 year old male who lives in Potrero, California, a town of about 700 people. This is the same reason ages above 90 have to be removed - even in a fairly large area, say San Diego, it may be fairly obvious who the 98 year old female participant is. # # Basically - any information that makes you stand out is liable to identify you. Anonymization attempts to remove these kinds of indications from the data, such that individuals do not stand out in a way that lets someone figure out who they are. # # This also underlies the difficulty in protecting data in the face of multiple data sources, since collecting observations together makes it much easier to start to pick out people more uniquely. It may still be relatively easy to identify the 37 year old male from LA if you also happen to know (or figure out) that he has a poodle, is 5'6", works at UCLA, and was at Griffith Park on Saturday, April 15th. All of this extra information may be relatively easy to figure out by combining publicly available, or easily obtainable, data.
08-DataPrivacy&Anonymization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Quick exploratory pass over the APS failure training set.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

# The raw file encodes missing values as the literal string "na",
# so map those to NaN while loading.
data = pd.read_csv("../Data/aps_failure_training_set.csv", na_values="na")

# Inspect the first rows, the summary statistics, the dtypes /
# non-null counts, and the full column list.
data.head()
data.describe()
data.info()
data.columns

# Pairwise relationships between the numeric columns.
sns.pairplot(data)
Codes/.ipynb_checkpoints/initialization-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Db2 Jupyter: Using Prepared Statements # Normal the `%sql` magic command is used to execute SQL commands immediately to get a result. If this statement needs to be executed multiple times with different variables, the process is inefficient since the SQL statement must be recompiled every time. # # The use of the `PREPARE` and `EXECUTE` command allow the user to optimize the statement once, and then re-execute the statement using different parameters. # # In addition, the commit scope can be modified so that not every statement gets committed immediately. By managing the commit scope, overhead in the database engine can be avoided. # %run db2.ipynb # ## Autocommit and Commit Scope # By default, any SQL statements executed with the `%sql` magic command are immediately commited. This means that the log file has the transaction details and the results are committed to disk. In other words, you can't change your mind after the statement finishes execution. # # This behavior is often referred to as `AUTOCOMMIT` and adds a level of overhead to statement execution because at the end of every statement the results must be "hardened". On the other hand, autocommit means you don't have to worry about explicitly committing work or causing potential locking issues because you are holding up resources. When a record is updated, no other user will be able to view it (unless using "dirty read") until you commit. Holding the resource in a lock means that other workloads may come to a halt while they wait for you to commit your work. 
# # Here is a classic example of wanting a commit scope that is based on a series of statements: # ``` # withdrawal = 100 # # %sql update checking set balance = balance - withdrawal # # %sql update savings set balance = balance + withdrawal # ``` # If autocommit is `ON`, you could have a problem with the transaction if the system failed after the first update statement. You would have taken money out of the checking account, but have not updated the savings account. To make sure that this transaction is run successfully: # ``` # # %sql autocommit off # withdrawal = 100 # # %sql update checking set balance = balance - withdrawal # # %sql update savings set balance = balance + withdrawal # # %sql commit work # ``` # If the transaction fails before the `COMMIT WORK`, all changes to the database will be rolled back to its original state, thus protecting the integrity of the two tables. # ### AUTOCOMMIT # Autocommit can be turned on or off using the following syntax: # ``` # # %sql AUTOCOMMIT ON | OFF # ``` # If you turn `AUTOCOMMIT OFF` then you need to make sure that you COMMIT work at the end of your code. If you don't there is possible you lose your work if the connection is lost to Db2. # ### COMMIT, ROLLBACK # To `COMMIT` all changes to the database you must use the following syntax: # ``` # # %sql COMMIT [WORK | HOLD] # ``` # The command `COMMIT` or `COMMIT WORK` are identical and will commit all work to the database. Issuing a `COMMIT` command also closes all open cursors or statements that are open. If you had created a prepared statement (see section below) then the compiled statement will be no longer valid. By issuing a `COMMIT` you are releasing all of the resources and locks that your application may be holding. # # `COMMIT HOLD` will allow you to commit your work to disk, but keeps all of the resources open for further execution. 
This is useful for situations where you are inserting or updating 1000's of records and do not want to tie up log space waiting for a commit to occur. The following pseudocode gives you an example how this would be used: # ``` # # %sql autocommit off # for i = 1 to 1000 # # %sql insert into x values i # if (i / 100 == 0) # print i "Records inserted" # # %sql commit work # end if # end for # # %sql commit work # # %sql autocommit on # ``` # You should always remember to turn `AUTOCOMMIT ON` at the end of any code block or you will have to issue `COMMIT` at the end of any SQL command to commit it to the database. # ## PREPARE and EXECUTE # The `PREPARE` and `EXECUTE` commands are useful in situations where you want to repeat an SQL statement multiple times while just changing the parameter values. There isn't any benefit from using these statements for simple tasks that may only run occassionally. The benefit of `PREPARE/EXECUTE` is more evident when dealing with a large number of transactions that are the same. # # The `PREPARE` statement can be used against many types of SQL, but in this implementation, only the following SQL statements are supported: # * SELECT # * INSERT # * UPDATE # * DELETE # * MERGE # # To prepare a statement, you must use the following syntax: # ```Python # # stmt = %sql PREPARE sql .... # ``` # # The `PREPARE` statement always returns a statement handle. You must assign the results of the `PREPARE` statement to a variable since it will be required when you `EXECUTE` the statement. # # The SQL statement must have any variables replaced with a question mark `?`. For instance, if you wanted to insert a single value into a table you would use the following syntax: # ```Python # # stmt = %sql PREPARE insert into x values (?) # ``` # # One important note with parameter markers. If you require the parameter to have a specific data type (say INTEGER) then you may want to place a `CAST` statement around it to force the proper conversion. 
Usually strings, integers, decimals, etc... convert fine when using this syntax, but occasionally you may run across a data type issue. For the previous example we could modify it to: # ```Python # # stmt = %sql PREPARE insert into x values (CAST(? AS INTEGER)) # ``` # Once you have prepared a statement, you can execute it using the following syntax: # ```Python # # %sql EXECUTE :stmt USING :v1,:v2,:v3,.... # ``` # # You must provide the variable names with a colon `:` in front of them and separate each one with a comma. This allows the SQL parser to differentiate between a host variable and a column or SQL keyword. You can also use constants as part of the `EXECUTE` statement: # ```Python # # %sql EXECUTE :stmt USING 3,'asdsa',24.5 # ``` # # Using variables are more convenient when dealing with strings that may contain single and double quotes. # ### Using Arrays and Multiple Parameters # When using the `PREPARE` statement, it can become cumbersome when dealing with many parameter markers. For instance, in order to insert 10 columns into a table the code would look similar to this: # ``` # # stmt = %sql PREPARE INSERT INTO X VALUES (?,?,?,?,?,?,?,?,?,?) # ``` # The `%sql` command allows you to use the shortform `?*#` where `#` is an integer representing the number of columns you want in the list. The above statement could be written as: # ``` # # stmt = %sql PREPARE INSERT INTO X VALUES (?*10) # ``` # The syntax can also be used to create groups of parameter markers: # ``` # # stmt = %sql PREPARE INSERT INTO X VALUES (?*3,?*7) # ``` # While this may seem a strange way of providing parameters, this becomes more useful when we use the `EXECUTE` command. # # The `EXECUTE` command can use Python arrays (lists) as input arguments. 
For the previous example with 10 parameters you could issue the following command: # ``` # # %sql EXECUTE :stmt USING :v1,:v2,:v3,:v4,:v5,:v6,:v7,:v8,:v9,:v10 # ``` # If you placed all of these values into an array, you could also do the following: # ``` # # %sql EXECUTE :stmt USING :v[0],:v[1],:v[2],:v[3],:v[4],:v[5],:v[6],:v[7],:v[8],:v[9] # ``` # That isn't much simpler but shows that you could use items within an array (one dimensional only). The easiest syntax is: # ``` # # %sql EXECUTE :stmt USING :v # ``` # The only requirement is that the array `v` has exactly the number of values required to satisfy the parameter list required by the prepared statement. # # When you split the argument list into groups, you can use multiple arrays to contain the data. Given the following prepare statement: # ``` # # stmt = %sql PREPARE INSERT INTO X VALUES (?*3,?*7) # ``` # You could execute the statement using two arrays: # ``` # # %sql EXECUTE :stmt USING :name, :details # ``` # This would work as long as the total number of parameters supplied by the `name` array and `details` array is equal to 10. # ## Performance Comparisons # The following examples will show the use of `AUTOCOMMIT` and `PREPARE/EXECUTE` when running SQL statements. # # This first SQL statement will load the EMPLOYEE and DEPARTMENT tables (if they don't already exist) and then return an array of all of the employees in the company using a SELECT statement. # %sql -sampledata # employees = %sql -r select * from employee # The `employees` variable contains all of the employee data as a Python array. The next statement will retrieve the contents of the first row only (Remember that row 0 contains the name of the columns). print(employees[1]) # We now will create another `EMPLOYEE` table that is an exact duplicate of what we already have. 
# + magic_args="-q" language="sql" # DROP TABLE EMPLOYEE2; # CREATE TABLE EMPLOYEE2 AS (SELECT * FROM EMPLOYEE) DEFINITION ONLY; # - # ### Loop with INSERT Statements # One could always use SQL to insert into this table, but we will use a loop to execute insert statements. The loop will be timed so we can get a sense of the cost of running this code. In order to make the loop run a longer the insert block is run 50 times. # %sql -q DELETE FROM EMPLOYEE2 print("Starting Insert") start_time = time.time() i = 0 for k in range(0,50): for record in employees[1:]: i += 1 empno,firstnme,midinit,lastname,workdept,phoneno,hiredate,job,edlevel,sex,birthdate,salary,bonus,comm = record # %sql -q INSERT INTO EMPLOYEE2 VALUES ( \ # :empno,:firstnme,:midinit, \ # :lastname,:workdept,:phoneno, \ # :hiredate,:job,:edlevel, \ # :sex,:birthdate,:salary, \ # :bonus,:comm) end_time = time.time() print('Total load time for {:d} records is {:.2f} seconds'.format(i,end_time-start_time)) time_insert = end_time-start_time # ### Loop with PREPARE Statement # An alternative method would be to use a prepared statement that allows us to compile the statement once in Db2 and then reuse the statement in Db2's memory. This method uses the individual column values as input into the `EXECUTE` statement. # %sql -q DELETE FROM EMPLOYEE2 print("Starting Insert") start_time = time.time() i = 0 # prep = %sql prepare INSERT INTO EMPLOYEE2 VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
for k in range(0,50): for record in employees[1:]: i += 1 empno,firstnme,midinit,lastname,workdept,phoneno,hiredate,job,edlevel,sex,birthdate,salary,bonus,comm = record # %sql execute :prep using :empno,:firstnme,:midinit,:lastname,:workdept,:phoneno,:hiredate,:job,:edlevel,:sex,:birthdate,:salary,:bonus,:comm end_time = time.time() print('Total load time for {:d} records is {:.2f} seconds'.format(i,end_time-start_time)) time_prepare = end_time-start_time # ### Loop with PREPARE Statement and Arrays # You will notice that it is a bit tedious to write out all of the columns that are required as part of an `INSERT` statement. A simpler option is to use a Python list or array to and assign it directly in the `EXECUTE` statement. So rather than: # ``` # # %sql execute :prep using :empno, :firstnme, ... # ``` # We can just use the array variable generated as part of the for loop: # ``` # # %sql execute :prep using :record # ``` # The following SQL demonstrates this approach. # + # %sql -q DELETE FROM EMPLOYEE2 print("Starting Insert") start_time = time.time() i = 0 # prep = %sql prepare INSERT INTO EMPLOYEE2 VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?) for k in range(0,50): for record in employees[1:]: i += 1 # %sql execute :prep using :record end_time = time.time() print('Total load time for {:d} records is {:.2f} seconds'.format(i,end_time-start_time)) time_array = end_time-start_time # - # ### Loop with PREPARE Statement, Arrays and AUTOCOMMIT OFF # Finally, we can turn `AUTOCOMMIT` off and then commit the work at the end of the block to improve the total time required to insert the data. Note the use of the parameter shortform `?*14` in the code. 
# %sql -q DELETE FROM EMPLOYEE2 # %sql autocommit off print("Starting Insert") start_time = time.time() i = 0 # prep = %sql prepare INSERT INTO EMPLOYEE2 VALUES (?*14) for k in range(0,50): for record in employees[1:]: i += 1 # %sql execute :prep using :record # %sql commit work end_time = time.time() print('Total load time for {:d} records is {:.2f} seconds'.format(i,end_time-start_time)) # %sql autocommit on time_commit = end_time-start_time # ### Performance Comparison # You may have noticed that the performance of the last method is substantially faster than the other examples. The primary reason for this is the `COMMIT` only occuring at the end of the code. # + magic_args="-pb" language="sql" # WITH RESULT(RUN,ELAPSED) AS ( # VALUES # ('INSERT',CAST(:time_insert AS DEC(5,2))), # ('PREPARE',CAST(:time_prepare AS DEC(5,2))), # ('ARRAY ',CAST(:time_array AS DEC(5,2))), # ('COMMIT ',CAST(:time_commit AS DEC(5,2))) # ) # SELECT RUN, ELAPSED FROM RESULT # ORDER BY ELAPSED DESC # - # #### Credits: IBM 2018, <NAME> [<EMAIL>]
Db2 Using Prepared Statements.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="yBNbqanW4jQy" # #Importing libraries # + [markdown] id="SBM9O2LSRrmF" # **Definition** - Imorting Pandas, Numpy, Matplotlib, Seaborn, Glob and OS, for accessing, assesing, cleaning, Engineering, Analysing and Visualising Given Data. # + id="M3ZLR0znerQI" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sb import glob import os from datetime import datetime, date # %matplotlib inline # + [markdown] id="n0C801QY3jZl" # # Gathering Data # + [markdown] id="W1AldLm2SJYx" # **Definition** - Reading file from cloud stored csv file using file drive path . # **Function Used** : pandas.read_csv() # + id="TvQltDVEe_ma" df = pd.read_csv('/content/drive/MyDrive/Utkarsh doc/201902-fordgobike-tripdata.csv') # + [markdown] id="uctQkz9E3m6D" # #Assessing Data # + [markdown] id="7c4nzJdmShcV" # **Definition** - Assessing given Raw database using functions such as: # * dataframe.head() # * dataframe.tail() # * dataframe.shape - It is an attribute not a function. # * dataframe.info() # * dataframe.dtypes - It is an attribute not a function. # * dataframe.describe() - It is an attribute not a function. 
# + colab={"base_uri": "https://localhost:8080/", "height": 392} id="Enz1CEVGfDPt" outputId="af4fc87e-1a07-44af-f472-aa7b8d31bcd7" df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 374} id="XBZHfbLVik0E" outputId="0c568dfc-31e0-4ab5-f228-d7886afc58c2" df.tail() # + colab={"base_uri": "https://localhost:8080/"} id="HEt5kSp9fFar" outputId="8245506b-d197-4e70-c26e-579d34f4dfeb" df.shape # + colab={"base_uri": "https://localhost:8080/"} id="dHvhsU-ahwev" outputId="b76e9d8d-b540-4f13-f0ba-6cef816c26fa" df.dtypes # + colab={"base_uri": "https://localhost:8080/", "height": 364} id="5CGHXWxPhz3K" outputId="c15e30ea-9df0-4258-e5f0-81de9f081e25" df.describe() # + [markdown] id="DgrzCPKZi6E_" # **What is the structure of your dataset?** # # The dataset includes 183,412 trips 'rows' with 16 features 'columns'. Out of 16 features, seven are float64, two are int64, and seven objects. Also, start and end time have the wrong datatype( string instead of date-time object). # + [markdown] id="0UEgMOEWjfdr" # **What is/are the main feature(s) of interest in your dataset?** # # According to me, the main features are the duration of the trip, origination and conclusion of trips, the user-type and most used stations. # # # + [markdown] id="WVUfCbohkFBk" # **What features in the dataset do you think will help support your investigation into your feature(s) of interest?** # # * duration_sec # * start_time # * end_time # * user_type # * start_station_name # * end_station_name # # # # + [markdown] id="n6PbG_Br3v4h" # #Cleaning Data # + [markdown] id="FYybulEZbOXT" # ###**1.Definition** - Making a copy of provided dataframe to retain original values for future comparision. # + [markdown] id="WHX9Tgfibbxn" # ###**Code** # + id="Kg2hjamxbd5B" df_copy = df.copy() # + [markdown] id="SvsQccLCWgDx" # ###**2. 
Definition** - Start and End times of trips are given in String format and thus need to be changed into Date-time object as it would further facilitate the calsulations and visualisation process. # + [markdown] id="uki1CwYoXPEj" # ###**Code** # + id="bLE_eOiUicol" #changing datatype of 'start_time' & 'end_time' into datetime object df.start_time = pd.to_datetime(df.start_time) df.end_time = pd.to_datetime(df.end_time) # + [markdown] id="55u1juoGW43v" # ###**testing acquired Changes** # + colab={"base_uri": "https://localhost:8080/"} id="1s6IiSoGW8-N" outputId="922d85c2-38b6-4726-fd5a-b5d34d9506dd" df.dtypes # + [markdown] id="c2hla49OXA27" # ###**3.Definition** - Ahead, Checking for data redundancy and duplicacy in particular. # # # + [markdown] id="FGUhN9q_XkO9" # ###**Code** # + colab={"base_uri": "https://localhost:8080/"} id="tuwAazbpiUZl" outputId="14f5ddc8-8da3-4e6b-b6e2-c213297183e5" #check if any rows are duplicated sum(df.duplicated()) # + [markdown] id="ulQA-GgKXovw" # ### **Test Result** - # + [markdown] id="oYeG7LygFPJY" # No Duplicacy Found # + [markdown] id="1mSWbEVIXuEo" # ###**4.Definition** - Extracting Hours and minutes from Date-Time object("start_time" and "end_time") into new added respectible columns for further extraction of data into subpart for reducing complexity and better visualization. 
# + [markdown] id="AUtmgEj_YTYn"
# ###**Code**

# + id="NAu2MLrmjzYE"
# Extract the hour of day from the start timestamp, then express the start
# time as minutes since midnight (minute-of-hour + hour * 60).
df['start_time_hours'] = df['start_time'].dt.hour
df['start_time_minutes'] = df['start_time'].dt.minute + df['start_time_hours']*60

# + id="C8KxObg7j2v8"
# Same derivation for the end timestamp.
df['end_time_hours'] = df['end_time'].dt.hour
# BUG FIX: the original added df['start_time_hours']*60 here, so
# 'end_time_minutes' carried the start hour instead of the end hour and was
# wrong for every trip that crossed an hour boundary.
df['end_time_minutes'] = df['end_time'].dt.minute + df['end_time_hours']*60

# + [markdown] id="rLQKaCZfYeqf"
# ###**Testing above made changes**

# + colab={"base_uri": "https://localhost:8080/"} id="nDWfL214YizV" outputId="8f9e9800-bd22-4f7b-8778-cf2c194b4ec7"
df.info()

# + [markdown] id="cKHjZfEEY-ys"
# ###**5.Definition** - Finally extracting the month number from the given
# timestamps.
# (We do not need to extract the year as this data is collected from 2019 only.)

# + [markdown] id="zlEBHBRSZ-u6"
# ###**Code**

# + id="5aYaXIFdj2sk"
df['month'] = df.start_time.dt.month

# + [markdown] id="wsZiqZB9aEoi"
# ###**Testing above made changes**

# + colab={"base_uri": "https://localhost:8080/"} id="KiP1-aZtaGkF" outputId="18aba1c7-bf26-4c16-da0a-dec29cee63c7"
df.info()

# + [markdown] id="C0aF5OXqaKCZ"
# ###**6.Definition** -
# * Dropping irrelevant columns from our Data Base.
# * Dropping Rows with Null values for removing all NAN values from our database.
# # + [markdown] id="OZIW1ttdaXG3" # ###**Code** # + id="_X5d55eesaUd" #dropping irrelevant columns df.drop(['start_station_latitude', 'start_station_longitude','end_station_latitude', 'end_station_longitude'] ,axis =1 , inplace = True) # + id="C7xtWEgnbKXk" df=df.dropna() # + [markdown] id="00e6TNE8anhH" # ###**Testing above made alteration.** # + colab={"base_uri": "https://localhost:8080/"} id="ZUgw22cih8dz" outputId="4ba735e6-f26e-40b9-ba37-885eca5409c4" df.isna().sum() # + [markdown] id="-ZE1SfM030CJ" # #Storing Data # + [markdown] id="kCs6niVMcGnR" # ###**Saving Engineered Database in a csv file** # + id="epO_6AHX4Cbe" df.to_csv("fordbike_engineered.csv") # + [markdown] id="U_bUd7G933mQ" # #Analyzing Data # + [markdown] id="dE7fQGa1keOE" # ##**Univariate Exploration** # + [markdown] id="doazy9VOSz8b" # ### Analyse the ratio of User Type : Customer/Subscriber # + colab={"base_uri": "https://localhost:8080/", "height": 349} id="9oTcjBMawe0y" outputId="ecf92723-2350-4d23-8cec-baf8c05fc8ed" #BoxPlot representing User Type : Customer/Subscriber plt.figure(figsize = (10, 5)) sb.countplot(data=df,x='user_type') plt.title("Type of Users") plt.xlabel('User Type') plt.ylabel('Number of Users') plt.show() # + [markdown] id="B0NDN2M8S_eK" # Above chart suggests that majority of users are Subscribers while, a lesser number of usrs fall under 'Customer' category # + [markdown] id="g9Q8d7WATPla" # ### Which Age group comprises most of our User Database? # # + colab={"base_uri": "https://localhost:8080/", "height": 349} id="xNVvfxIQrdVL" outputId="92aa3920-e479-4ecc-aa26-942934b92d23" #Histogram Plot depicting member's spread on the basis of their Age df['Age'] = 2019- df['member_birth_year'] plt.figure(figsize=(10,5)) plt.hist(data=df,x='Age') plt.title('User Age Analysis') plt.xlabel('Age') plt.ylabel('Number of Users') plt.show() # + [markdown] id="w_IA6bhVThVU" # Above histogram clearly shows, # * People of age '18' to '25', comprise most of our Users. 
# * We see clear fall in user count as their Age progress. # + [markdown] id="HHU3G9y1UVqQ" # ###Function to plot Univariate Barplots with specified X-Label. # + id="8YLXLMZY2stF" #Function to plot Univariate Barplots with specified X-Label. def funcbarplot(df, x): mg=df.groupby(x).size() plt.figure(figsize = (10, 5)) sb.barplot(x = mg.index, y = mg.values) plt.title(f"{x} Ratio") plt.xlabel(f'{x} Type') plt.ylabel('Number of Members') plt.show() # + [markdown] id="RrkqMcHjUadB" # ### Which Gender type comprises most of our Users? # + colab={"base_uri": "https://localhost:8080/", "height": 349} id="LQSGsj8vuW-I" outputId="f869b40f-4eac-408d-c9fa-6c55c66e1acc" #BarPlot representing member gender ratio funcbarplot(df,'member_gender') # + [markdown] id="Kw6CQEtgUslq" # Above BarPlot shows, # * "Male" users dominate in number, with a total of 135000(approx) Users. # * "Female" users fall 2nd in count ratio, with a total number close to 40000. # * Users of "Other" gender type are least in number. Their total number lies close to 3000 users. # + [markdown] id="fNU-hi88V1Sq" # ### Do users preffer to share their ride with other users? # # + colab={"base_uri": "https://localhost:8080/", "height": 349} id="yQPjL4I4xfOv" outputId="4225c5ba-1cea-4981-ccb3-7561abd288ae" #BarPlot representing member gender ratio funcbarplot(df,'bike_share_for_all_trip') # + [markdown] id="7RXd4F9gV-CT" # No, We can clearly see, majority of our users do not preffer sharing their rides with other users. # + [markdown] id="cIMAYy13WNEy" # ### Function to plot barchart of Station on basis of trips onboarded and deboarded. # + id="mCSVTCTD5jTM" def funcbarh(df,x): df[x].value_counts()[:12].plot(kind='barh') plt.title(f'Most Used {x}') plt.show() # + [markdown] id="ubcx0_kAWdWu" # ### A Barchart for depicting journey origination frquency of all stations. 
# + colab={"base_uri": "https://localhost:8080/", "height": 362} id="kbUdX7tCkTOD" outputId="5507eabb-c538-44cb-868b-5a82081b4238" #A Barchart for depicting journey origination frquency of all stations. funcbarh(df,'start_station_name') # + [markdown] id="M3OmcBScW0vk" # Market st at 10th station is the most used as trip origination station. # + [markdown] id="ZFKqk4rDWodI" # ### A Barchart for depicting journey conclusion frquency of all stations. # # + colab={"base_uri": "https://localhost:8080/", "height": 362} id="l8Kh245XkTKa" outputId="70ce74d6-a756-4bef-adb8-575007a364d4" #A Barchart for depicting journey conclusion frquency of all stations. funcbarh(df,'end_station_name') # + [markdown] id="GJp58SCuWxDp" # San Francisco Caltrain Station 2 is most used as trip conclusion station. # + [markdown] id="ZuFtcLvfXHP8" # ###Histogram plot for plotting Frequency Vs time duration in seconds # + colab={"base_uri": "https://localhost:8080/", "height": 350} id="mYuNDGDNxNRP" outputId="0b10027d-688d-40f7-e70c-ceeb95a4565f" #Histogram plot for plotting Frequency Vs time duration in seconds plt.figure(figsize=(10,5)) duration_bins = np.arange(1,df.duration_sec.max()+300,300) plt.hist(data=df,x='duration_sec',bins=duration_bins) plt.title('Trip Durations Vs Trip Frequency') plt.xlabel('Duration in "seconds"') plt.ylabel('Trip Frequency') plt.axis([-500, 10000, 0, 90000]) plt.show() # + [markdown] id="gsAWxkXTmpel" # ###Discuss the distribution(s) of your variable(s) of interest. Were there any unusual points? Did you need to perform any transformations? # + [markdown] id="Lu6oCcDPmpVs" # The graph shows the most common trip durationz take 0-2000 seconds. The number of subscribers are significantly higher than customers in User-types. also, Market st at 10th street is the most used as trip origination station. # And, San Francisco Caltrain Station 2 is most used as trip conclusion station. 
# + [markdown] id="sciyBLHampYr" # ###Of the features you investigated, were there any unusual distributions? Did you perform any operations on the data to tidy, adjust, or change the form of the data? If so, why did you do this? # + [markdown] id="RA1y0K3QmpS2" # **YES!** There were some data redundancy and distribution issues in the used dataset and needed some cleaning. The problems adressed are as follows: # # * Start and End times of trips are given in String format and thus need to be changed into Date-time object as it would further facilitate the calsulations and visualisation process. # * Extracting Hours and minutes from Date-Time object("start_time" and "end_time") into new added respectible columns for further extraction of data into subpart for reducing complexity and better visualization. # * Extracting Month Number from given timestamps. # (We do not need to extract Year as this data in collected from 2019 only.) # * Dropping irrelevant columns from our Data Base. # * Dropping Rows with Null values for removing all NAN values from our database. # + [markdown] id="3pcsGzmfko8R" # ##Bivariate Exploration # + [markdown] id="rH7A6F43QBEO" # ### Discuss trip sharing tendency of Users of different gender types. # + id="LQEQVJ2JJRwu" #Function to plot Bike Sharing Tendency of users def funcMemberG(df,x): df['Memberg'] = df['member_gender'].mask(df['member_gender'].ne(x)) df.groupby(['bike_share_for_all_trip'])['Memberg'].count().plot.bar() # + colab={"base_uri": "https://localhost:8080/", "height": 367} id="U8pIsORSH-zU" outputId="1de47aee-0568-4fa1-eba2-ac03efa3dd4f" #Function to Plot Bike Sharing Tendency of MALE users funcMemberG(df,"Male") # + [markdown] id="SSSYYoTANYHC" # We can clearly infer, # * Users with "Male" gender type are dominant in number. # * Their total number is close to 120000 users. # * Approximately 17500 Users of "Male" gender preffer to share their ride. 
# * Above given point indicates that such users have medium tendancy of all three genders to share their ride. Since approximately 1/6th of them share their ride. # + colab={"base_uri": "https://localhost:8080/", "height": 367} id="3Lq7muDkIkUn" outputId="4615e46f-1c88-462f-f44c-5d975890576d" #Function to Plot Bike Sharing Tendency of FEMALE users funcMemberG(df,"Female") # + [markdown] id="phZj5uVqMrdv" # We can clearly infer, # * Users with "Female" gender type are less in number than "Male" users. # * Their total number is close to 37500 users. # * Approximately 4000 Users of "Female" gender preffer to share their ride. # * Above given point indicates that such users have least tendancy to share their ride. Since approximately only 1/9th of them share their ride. # + colab={"base_uri": "https://localhost:8080/", "height": 367} id="ic8w43QtIkQ-" outputId="a8b0b585-9750-4f83-8332-04bc2dec2112" #Function to Plot Bike Sharing Tendency of OTHER users funcMemberG(df,"Other") # + [markdown] id="Gj2j5mbyLOAK" # We can clearly infer, # * Users with "Other" gender type are least in number. # * Their total number is close to 3000 users. # * Approximately 600 Users of "Other" gender preffer to share their ride. # * Above given point indicates that such users have maximum tendancy to share their ride. Since approximately 1/5th of them share their ride. # + [markdown] id="cWJp4UHuPitS" # ### Discuss trip duration tendency of Users of different gender types. 
# + id="HQZthDsU9ckA"
# Plot the mean of a numeric trip column per rider gender as a bar chart.
# `df` is the (already loaded and cleaned) trips DataFrame; `x` is the column
# to average, e.g. 'duration_sec'. Shows the figure as a side effect.
def funcplot(df, x):
    plt.figure(figsize = (10, 5))
    df.groupby(['member_gender'])[x].mean().plot.bar()
    plt.title(f'{x} VS Member Gender Bar Graph')
    plt.xlabel('Member Gender')
    plt.ylabel(f'{x}')
    plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 372} id="DmE7OiDp-DGb" outputId="73f1205f-e606-4b16-c092-c50cdb5fedac"
funcplot(df,'duration_sec')

# + [markdown] id="HjwjyL1cOehP"
# We can clearly infer:
# * Users of the "Other" gender type tend to have the longest trip durations.
# * Female users travel longer than male users.
# * Men tend to travel for the least duration.

# + [markdown] id="SnhpbGhbP4Tj"
# ### Discuss the trip-duration tendency of users of different user types.

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="f8QKaySjxhV8" outputId="e8467e57-90e7-4465-9bd4-7732490ae40e"
# Boxplot showing trip duration per user type, i.e. Customer/Subscriber.
# ylim clips the long-duration outlier tail so the boxes stay readable.
sb.boxplot(data=df, x='user_type', y='duration_sec')
plt.ylim(0,3000)
plt.title("Bike Riding Duration Per User Type")
plt.xlabel('User Type')
plt.ylabel('Duration')
plt.show()

# + [markdown] id="pT8bjPlQQK30"
# We can infer here:
# * Customers tend to travel for longer durations.
# * Subscribers travel for comparatively shorter durations.

# + [markdown] id="B94cvPLKQwn9"
# ### Discuss the trip-duration tendency of users from the top 3 most-used starting stations.

# + colab={"base_uri": "https://localhost:8080/", "height": 420} id="a1o4f1jzkTH2" outputId="2ac5990f-b948-4c06-d004-82fb0e2eefd4"
# Boxplot of trip duration for journeys starting from the three most used stations.
start=df['start_station_name'].value_counts().index[:3]
s_stations = df.loc[df['start_station_name'].isin(start)]
plt.figure(figsize=(10,6))
sb.boxplot(data = s_stations, x='start_station_name', y='duration_sec')
plt.ylim(0, 2500)
# NOTE(review): the bare 'seaborn' style name is deprecated in recent
# matplotlib (use 'seaborn-v0_8') — confirm against the installed version.
plt.style.use('seaborn')
plt.title('Duration of Bike Rides from Three Onboarded Stations')
plt.xlabel('Station Names')
plt.ylabel('Duration "seconds"')

# + [markdown] id="4oRuuTR-RP9J"
# We can infer that:
# * All three stations share roughly the same trip-duration distribution.
# * Trips started from 'San Francisco Caltrain Station 2' tend to have the longest durations of the three.

# + [markdown] id="iKWmJyotRy2N"
# ### Discuss the traffic tendency of users over a 24-hour period.

# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="o7NOcLZ-kd31" outputId="1ff35d60-a16c-49e3-bfae-9497466d3d1e"
# Hourly start-time counts for the five most used start stations.
start=df['start_station_name'].value_counts().index[:5]
s_stations = df.loc[df['start_station_name'].isin(start)]
plt.figure(figsize = (15,6))
sb.countplot(data=s_stations, x='start_time_hours', hue='start_station_name')
# Label every second hour as "HH:00".
x_tick= np.arange(0,24,2)
x_label= [str(x)+":00" for x in x_tick]
plt.xticks(x_tick, x_label)
plt.xlabel('Time in Hours')
plt.ylabel('Users')
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 389} id="PtXlj3WTkdx2" outputId="41c91b1e-47e7-452d-85ef-95c12cb58af4"
# Same plot for trip END times at the five most used end stations.
end= df['end_station_name'].value_counts().index[:5]
e_stations = df.loc[df['end_station_name'].isin(end)]
plt.figure(figsize = (15,6))
sb.countplot(data=e_stations, x='end_time_hours', hue='end_station_name')
x_tick= np.arange(0,24,2)
x_label= [str(x)+":00" for x in x_tick]
plt.xticks(x_tick, x_label)
plt.xlabel('Time in Hours')
plt.ylabel('Users')
plt.show()

# + [markdown] id="aIrVnbcASbrl"
# The graphs suggest that a larger number of users prefer to start riding in the morning around 8 am, while trips most commonly end around 5 pm.

# + [markdown] id="aK1sHHsd3aui"
# ### Talk about some of the relationships you observed in this part of the investigation. Were there features that strengthened each other in terms of looking at your feature(s) of interest?

# + [markdown] id="-VcQ9f9r3fuF"
# First, the duration per user type shows that the customers' average duration is higher than the subscribers', while (as seen in the univariate plots) the subscribers' ride count is higher than the customers'. Also, the start and end stations do not strongly determine duration. Some stations are more frequently visited as starting points and others as ending points.

# + [markdown] id="BWeXaT9D3kXf"
# ### Were there any interesting or surprising interactions between features?

# + [markdown] id="aoOyberb3oRf"
# The graphs suggest that a larger number of users prefer to start riding in the morning around 8 am, while trips most commonly end around 5 pm.

# + [markdown] id="rjdVJ4YWk9ma"
# ## **Multivariate Exploration**

# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="4TFZQZos16P3" outputId="3ee7343e-1b1b-4245-ae16-db5f1d36784a"
# Scatterplot of start time (minutes) vs duration, faceted by user type.
# NOTE(review): FacetGrid's `size=` keyword was renamed `height=` in newer
# seaborn — confirm against the installed version.
ut=sb.FacetGrid(data = df, col = 'user_type', col_wrap = 2, size = 5, xlim = [10, 80], ylim = [-500, 9000])
ut.map(plt.scatter, 'start_time_minutes', 'duration_sec')
ut.set_xlabels('Start Time')
ut.set_ylabels('Duration "sec"')
plt.show()

# + [markdown] id="7W1CiDcD4Pnu"
# ### Talk about some of the relationships you observed in this part of the investigation. Were there features that strengthened each other in terms of looking at your feature(s) of interest?

# + [markdown] id="S2FjKxO_4VIV"
# The graph shows that subscribers generally ride for a shorter time than customers, who ride for longer during the period from 8 am.
# **We can clearly see a few outlier values in both cases.**
# --- end of source notebook: ford_Bikes_Exploratory_i_.ipynb ---
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# GIC (geomagnetically induced current) model for a power grid, driven by a
# uniform 1 V/km electric field. Reads cable/transformer spreadsheets, builds
# a nodal conductance network, solves for the GIC per transformer station and
# plots the result with GMT.
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import math
import geopy.distance
# NOTE(review): geopy.distance is imported but not used in this visible
# section — confirm before removing.
from geographiclib.geodesic import Geodesic
# import scipy.sparse as sps

# +
path='/usr/people/out/Documents/Finland'
kabels=pd.read_csv(path+'/spreadsheetcables.csv', delimiter = ';')
trafo=pd.read_csv(path+'/spreadsheettrafo.csv', delimiter = ';')

###### make some matrices #######
trafo_connect=np.zeros((len(trafo),len(trafo))) #connectivity trafo
trafo_all_connections=np.zeros((len(trafo),len(kabels))) #connections possible between trafo and every cable
trafo_cond=np.zeros((len(trafo),len(trafo))) # The conductivity matrix
stat_heading=np.zeros((len(trafo),len(trafo)))
stat_heading2=np.zeros((len(trafo),len(trafo))) #heading stations to another
stat_length=np.zeros((len(trafo),len(trafo))) #length between stations
stat_length2=np.zeros((len(trafo),len(trafo)))
stat_voltN=np.zeros((len(trafo),len(trafo))) #voltage in North direction between stations
stat_voltE=np.zeros((len(trafo),len(trafo))) #voltage in east direction between stations
I_GICcable=np.zeros((len(trafo),len(trafo))) #GIC between stations(= through cables)
Z_matrix=np.zeros((len(trafo),len(trafo)))
kabel_resist=np.zeros(len(kabels))
kabel_cond=np.zeros(len(kabels)) #gives total conductivity per connected cable segment per cable
total_kabel_cond=np.zeros(len(kabels)) #gives total conductivity per segment for all cables
aantal_kabels=np.zeros(len(kabels)) # number of cables (per phase) per route
station_lat=np.zeros(len(trafo)) #latitude stations in degrees
station_lon=np.zeros(len(trafo)) #longitude stations in degrees
station_rlat=np.zeros(len(trafo)) #now in radians
station_rlon=np.zeros(len(trafo))
volt_result=np.zeros(len(trafo)) # resulting voltage in nodes
ground_cond=np.zeros(len(trafo))
cable_icon=np.zeros(len(kabels)) # icon array for cable and trafo resp.
trafo_icon=np.zeros(len(trafo))

################ Determine inducted currents ######################
# heading=np.zeros(len(kabels))
# length=np.zeros(len(kabels))
J_north=np.zeros(len(trafo))
J_east=np.zeros(len(trafo))
# set electric field at 1V/1000m for now in North and East direction
E_north=1/1000.
E_east=1/1000.
###################################################################

##### connect trafo and cable number to position in matrix #####
for line in range(len(kabels)):
    cable_icon[line]=kabels.at[line,'kabelnr']
for line in range(len(trafo)):
    trafo_icon[line]=trafo.at[line,'trafonr']

##### make trafo-trafo connectivity matrix ###### CORRECT
# 'verbonden trafo' (Dutch: connected transformer) is a comma-separated list
# of transformer ids; mark each listed pair as connected.
for line in range(len(trafo)):
    temp=trafo.at[line,'verbonden trafo'] #get right column
    temp=temp.split(",") #split values
    for item in temp:
        temp2=int(item)
        trafo_connect[line,np.where(trafo_icon == temp2)[0]]=True #check for connection other trafo
        del temp2
    del temp

###### make trafo-cable connectivity matrix ###### CORRECT
# 'alle aansluitingen' (Dutch: all connections) lists every cable id that
# touches this transformer.
for line in range(len(trafo)):
    temp=trafo.at[line,'alle aansluitingen']
    temp=temp.split(",")
    for item in temp:
        temp2=int(item)
        trafo_all_connections[line,np.where(cable_icon == temp2)[0]]=True
        del temp2
    del temp

###### make total conductivity vector for cables####### CORRECT
for line in range(len(kabels)):
    kabel_cond[line]=float(kabels.at[line,'conductivity old'])
    kabel_resist[line]=1/kabel_cond[line]
for line in range(len(kabels)):
#     aantal_kabels[line]=1
    aantal_kabels[line]=kabels.at[line,'kab/3'] #get amount of cables per route
    total_kabel_cond[line]=aantal_kabels[line]*kabel_cond[line] #conductivity per cable route (parallel cables add)

##### now conductivity matrix #####
# Off-diagonal entries hold minus the line conductance between station pairs;
# the diagonal accumulates the connected line conductances (Laplacian form).
for row,line in enumerate(trafo_connect):
    Z_matrix[row,row]=trafo.loc[row,'resistance']
    station_lat[row]=trafo.at[row,'lat'] # get trafostation latitude and longitude
    station_lon[row]=trafo.at[row,'lon']
    station_rlat[row]=np.radians(station_lat[row]) #get trafostation latitude and longitude in radians
    station_rlon[row]=np.radians(station_lon[row])
    for column,item in enumerate(line):
        if item:
            temp=trafo_all_connections[row,:]+trafo_all_connections[column,:]
            temp2=0
            for counter,value in enumerate(temp):
                if value == 2: # if 2 then we have found the connecting cables
                    temp2+=1/total_kabel_cond[counter] #because of series chain we have to add 1/sigma
            if temp2==0:
                print(row,column)
            trafo_cond[row,column]=-1/temp2 #add cable conductance to off-diagonal
            trafo_cond[row,row]+=1/temp2 #add cable conductance to trace
            del temp, temp2

# add grounding conductivity to trace
for line in range(len(trafo_connect)):
    ground_cond[line]=trafo.at[line,'conductivity total']
#     trafo_cond[line,line]+=trafo.at[line,'conductivity total']

################### total induced currents for stationary field ##########################
# Two independent heading/length estimates are computed: stat_heading/stat_length
# from geographiclib's WGS84 geodesic (azi1/s12), and stat_heading2/stat_length2
# from a spherical haversine formula — presumably kept as a cross-check.
# NOTE(review): the haversine heading uses abs(atan(...)) instead of atan2, so
# it loses the bearing quadrant; the geodesic azi1 value is the trustworthy one.
for row,line in enumerate(trafo_connect): #get heading and length between stations
    for column,item in enumerate(line):
        if item and column>=row:
            temp=math.cos(station_rlat[column])*math.sin(station_rlon[column]-station_rlon[row])
            temp2=math.cos(station_rlat[row])*math.sin(station_rlat[column])-math.sin(station_rlat[row])*math.cos(station_rlat[column])*math.cos(station_rlon[column]-station_rlon[row])
            stat_heading2[row,column]=np.abs(math.degrees(math.atan(temp/temp2)))
            temp4=Geodesic.WGS84.Inverse(station_lat[row],station_lon[row],station_lat[column],station_lon[column])
#             print(temp4)
            temp3=math.sin((station_rlat[column]-station_rlat[row])/2.)**2+math.cos(station_rlat[row])*math.cos(station_rlat[column])*math.sin((station_rlon[column]-station_rlon[row])/2.)**2
            stat_length[row,column]=temp4['s12']
            stat_length2[row,column]=6371000*2*math.atan(np.sqrt(temp3)/np.sqrt(1-temp3))
            stat_heading[row,column]=temp4['azi1']
            stat_heading[column,row]=180+stat_heading[row,column]
            stat_heading2[column,row]=180+stat_heading2[row,column]
            stat_length[column,row]=stat_length[row,column]
            stat_length2[column,row]=stat_length2[row,column]

# Project the uniform field onto every connected line to get the induced
# line voltages, then sum voltage*conductance into nodal current sources.
for row,line in enumerate(trafo_connect):
    for column,item in enumerate(line):
        if item: # get voltage between stations
            stat_voltN[row,column]=E_north*np.cos(math.radians(stat_heading[row,column]))*stat_length[row,column]
            stat_voltE[row,column]=E_east*np.sin(math.radians(stat_heading[row,column]))*stat_length[row,column]
            J_north[row]+=stat_voltN[row,column]*trafo_cond[row,column]*-1*-1 #-1*-1 because trafo_cond is negative
            J_east[row]+=stat_voltE[row,column]*trafo_cond[row,column]*-1*-1 # and 1V/km N = x A in southern direction
J_total=J_north+J_east

##### Apply the inversion #####
# Solve (I + Y*Z) i = J (Lehtinen-Pirjola form) for each field direction.
# NOTE(review): the east-field solution is printed and then OVERWRITTEN by the
# north-field solution — everything saved below uses the north result only.
matrix=np.identity(len(trafo))+np.matmul(trafo_cond,Z_matrix)
I_GIC=np.linalg.solve(matrix,J_east)
print(I_GIC)
matrix=np.identity(len(trafo))+np.matmul(trafo_cond,Z_matrix)
I_GIC=np.linalg.solve(matrix,J_north)
print(I_GIC)
# -

absAMP=abs(I_GIC[0:-3])
# maxAMP=np.max(absAMP) #amperes for 1 cm
maxAMP=30
print(maxAMP)
print(I_GIC)

##### Save files #######
GIC=pd.DataFrame(columns=['lon','lat','GIC',f'GIC/{maxAMP}'])
# GIC=pd.DataFrame(columns=['lon','lat','GIC','Name',])
GICmatrix=pd.DataFrame()
for i in range(len(trafo_connect)):
    GIC.at[i,'lon']=station_lon[i]
    GIC.at[i,'lat']=station_lat[i]
    GIC.at[i,'GIC']=I_GIC[i]
    GIC.at[i,f'GIC/{maxAMP}']=I_GIC[i]/maxAMP
#     GIC.at[i,f'Name']=trafo.at[i,'naam']
    # NOTE(review): I_GICcable is allocated above but never assigned, so
    # GIC_cable.csv is written as all zeros — confirm whether the per-cable
    # current computation was dropped intentionally.
    GICmatrix[i]=I_GICcable[i,:]
GIC.to_csv(path_or_buf=f'{path}/GIC_trafo.csv', sep=';', index=False, header=True)
GICmatrix.to_csv(path_or_buf=f'{path}/GIC_cable.csv', sep=';', index=False, header=False)

# +
# Map bounds for the GMT plot; earlier (commented) bounds covered other regions.
# lim1=np.min(station_lon)-0.75
# lim2=np.max(station_lon)+0.75
# lim3=np.min(station_lat)-0.75
# lim4=np.max(station_lat)+0.75
# lim1=3.3
# lim2=9.6
# lim3=49
# lim4=54
# lim1=3.3
# lim2=7.5
# lim3=50.5
# lim4=54
lim1=19
lim2=29
lim3=59.5
lim4=67.5
legendlon=lim2-0.60
legendlat=lim4-0.1
textlon=legendlon+0.38
textlat=legendlat
legend=open('legend.txt','w+')
legend.write(f'{legendlon} {legendlat} 1 1')
legend.close()
legendtxt=open('legendtext.txt', 'w+')
legendtxt.write(f'{textlon} {textlat} {maxAMP} A')
legendtxt.close()

##### Use GMT to plot GICs ######
direction='North'
proj='-JM15C -P'
lims=f'-R{lim1}/{lim2}/{lim3}/{lim4}'
psfile=f'{path}/reduced{direction}.ps' # adjust as needed
pngfile=f'{path}/reduced{direction}.png' # adjust as needed
os.system(f'gmt pscoast {proj} {lims} -W0.5p -K -Ggrey -Slightblue -Df -N1/0.25p> {psfile}' )
os.system(f'gmt psbasemap {proj} {lims} -Ba1g1 -BWeSn+t"GIC {direction}" -O -K>> {psfile}' )
# os.system(f'gmt psxy {path}/trafo.csv {proj} {lims} -Ss0.1c -Wblack -O -K>> {psfile}')
os.system(f'gmt psxy {path}/GIC_trafo.csv {proj} {lims} -Ctopo.cpt -Scc -O -K>> {psfile}')
# os.system(f'gmt psxy {path}/cables220.csv {proj} {lims} -W0.5p -Wred -O -K>> {psfile}' )
os.system(f'gmt psxy {path}/cables.csv {proj} {lims} -W0.5p -Wred -O -K>> {psfile}' )
os.system(f'gmt psxy legend.txt {proj} {lims} -Ctopo.cpt -W -Scc -O -K>> {psfile}')
os.system(f'gmt pstext legendtext.txt {proj} {lims} -F+f20pHelvetica,black -O>> {psfile}')
os.system(f'convert -density 300 {psfile} {pngfile}')
# -

# if we would have a gridded electric field, this interpolation technique could be used
from scipy.interpolate import griddata
listt=[[1,1],[2,1],[1,2],[2,2]]
values=[0,1,1,3]
interpolationpoints=[1.3,1.2]
print(listt)
griddata(listt,values,interpolationpoints,method='linear')

# NOTE(review): `pwd` is an IPython line magic — only valid when run in a
# notebook/IPython, not as a plain Python script.
pwd

# +
# obtain real voltages per cable piece and execute total code
# Second script: time-dependent GIC driven by modelled electric-field maps
# (one CSV per timestep), parallelised over timesteps with multiprocessing.
import os
import pandas as pd
import numpy as np
import math
import logging
from scipy.interpolate import griddata
from threading import local
# NOTE(review): threading.local state combined with multiprocessing.Process —
# each process gets its own copy, so it works, but the name is misleading.
localvar=local()
from multiprocessing import Process
from multiprocessing import Lock
lock=Lock()

path='/usr/people/out/Documents/Magnetic_field/station_results/31-10-2003'
logging.basicConfig(filename=f'{path}/GIClogbook.log', level=logging.DEBUG, format='%(asctime)s %(message)s')

#################################### first reading in datasets #####################################################
# NOTE(review): the bare `except:` below also swallows errors other than
# "directory exists" (e.g. a missing parent directory).
try:
    os.mkdir(f'{path}/GIC')
except:
    logging.warning("Directory has already been created, data could be destroyed!")
    print("Directory has already been created, data could be destroyed!")
logging.info('Reading in datasets!')
# Collect the per-timestep electric-field CSV paths via a shell listing.
Electric_Xfiles=[]
Electric_Yfiles=[]
os.system(f' ls {path}/electric_field_north/*.csv > {path}/temp.txt')
f=open(f'{path}/temp.txt')
for item in f:
    item=item.strip('\n')
    Electric_Xfiles.append(item)
f.close()
os.system(f' ls {path}/electric_field_east/*.csv > {path}/temp.txt')
f=open(f'{path}/temp.txt')
for item in f:
    item=item.strip('\n')
    Electric_Yfiles.append(item)
f.close()
os.system(f'rm {path}/temp.txt')
# Read one file first only to size the (timesteps x gridpoints) matrices.
for counter,file in enumerate(Electric_Xfiles):
    Xfile=pd.read_csv(file, delimiter=' ', header=None)
    values=Xfile.to_numpy()
    break
EX_matrix=np.zeros((len(Electric_Xfiles),len(values)))
EY_matrix=np.zeros((len(Electric_Xfiles),len(values)))
# Columns of each field CSV: 0=lon, 1=lat, 2=field value.
for counter,file in enumerate(Electric_Xfiles):
    Xfile=pd.read_csv(file, delimiter=' ', header=None)
    values=Xfile.to_numpy()
    EX_matrix[counter,:]=values[:,2]
lat=values[:,1]
lon=values[:,0]
for counter,file in enumerate(Electric_Yfiles):
    Yfile=pd.read_csv(file, delimiter=' ', header=None)
    values=Yfile.to_numpy()
    EY_matrix[counter,:]=values[:,2]
del item, f, Xfile, values, Yfile

######################################### Getting the needed GIC matrices and code #################################
logging.info('Starting with the GIC code!')
path2='/usr/people/out/Documents/380+220kV_extended'
kabels=pd.read_csv(path2+'/spreadsheetcables.csv', delimiter = ';')
trafo=pd.read_csv(path2+'/spreadsheettrafo.csv', delimiter = ';')
trafo_connect=np.zeros((len(trafo),len(trafo))) #connectivity trafo
trafo_all_connections=np.zeros((len(trafo),len(kabels))) #connections possible between trafo and every cable
trafo_cond=np.zeros((len(trafo),len(trafo))) # The conductivity matrix
stat_heading=np.zeros((len(trafo),len(trafo))) #heading stations to another
stat_length=np.zeros((len(trafo),len(trafo))) #length between stations
station_lat=np.zeros(len(trafo)) #latitude stations in degrees
station_lon=np.zeros(len(trafo)) #longitude stations in degrees
ground_cond=np.zeros(len(trafo))
cable_icon=np.zeros(len(kabels)) # icon array for cable and trafo resp.
trafo_icon=np.zeros(len(trafo))

##### connect trafo and cable number to position in matrix #####
for line in range(len(kabels)):
    cable_icon[line]=kabels.at[line,'kabelnr']
for line in range(len(trafo)):
    trafo_icon[line]=trafo.at[line,'trafonr']

##### make trafo-trafo connectivity matrix ######
# 'verbonden trafo' (Dutch: connected transformer) is a comma-separated id list.
for line in range(len(trafo)):
    temp=trafo.at[line,'verbonden trafo'] #get right column
    temp=temp.split(",") #split values
    for item in temp:
        temp2=int(item)
        trafo_connect[line,np.where(trafo_icon == temp2)[0]]=True #check for connection other trafo
        del temp2
    del temp

###### make trafo-cable connectivity matrix ######
for line in range(len(trafo)):
    temp=trafo.at[line,'alle aansluitingen']
    temp=temp.split(",")
    for item in temp:
        temp2=int(item)
        trafo_all_connections[line,np.where(cable_icon == temp2)[0]]=True
        del temp2
    del temp

###### make conductivity matrix ######
# Laplacian-style network matrix: grounding conductance on the diagonal,
# minus the line conductance between connected station pairs off-diagonal.
for row,line in enumerate(trafo_connect):
    trafo_cond[row,row]=trafo.at[row,'conductivity total']
    for column,item in enumerate(line):
        if item:
            temp=trafo_all_connections[row,:]+trafo_all_connections[column,:]
            temp2=0
            for counter,value in enumerate(temp):
                if value == 2: # if 2 then we have found the connecting cables
                    temp2+=1/(float(kabels.at[counter,'conductivity'])*kabels.at[counter,'kab/3']) #because of series chain we have to add 1/sigma
#             if temp2==0:
#                 print(row,column)
            trafo_cond[row,column]=-1/temp2 #add cable conductance to off-diagonal
            trafo_cond[row,row]+=1/temp2 #add cable conductance to trace
            del temp, temp2

###### get heading and length between stations #####
# Spherical haversine distance plus a bearing estimate.
# NOTE(review): abs(atan(...)) discards the bearing quadrant (atan2 would
# keep it) — confirm whether downstream sign handling compensates.
for row,line in enumerate(trafo_connect):
    for column,item in enumerate(line):
        if item and column>=row:
            Alat=np.radians(trafo.at[row,'lat'])
            Alon=np.radians(trafo.at[row,'lon'])
            Blat=np.radians(trafo.at[column,'lat'])
            Blon=np.radians(trafo.at[column,'lon'])
            temp=math.cos(Blat)*math.sin(Blon-Alon)
            temp2=math.cos(Alat)*math.sin(Blat)-math.sin(Alat)*math.cos(Blat)*math.cos(Blon-Alon)
            stat_heading[row,column]=np.abs(math.degrees(math.atan(temp/temp2)))
            temp3=math.sin((Blat-Alat)/2.)**2+math.cos(Alat)*math.cos(Blat)*math.sin((Blon-Alon)/2.)**2
            stat_length[row,column]=6371000*2*math.atan(np.sqrt(temp3)/np.sqrt(1-temp3))
            stat_heading[column,row]=stat_heading[row,column]
            stat_length[column,row]=stat_length[row,column]
            del temp, temp2, temp3, Alat, Alon, Blat, Blon
del line, item, row, column, value, counter

######### get necessary arrays ########
for item in range(len(trafo)):
    station_lat[item]=trafo.at[item,'lat']
    station_lon[item]=trafo.at[item,'lon']
    ground_cond[item]=trafo.at[item,'conductivity total']

############################### Run the function with multiple processors ##########################################
logging.info('Start multiprocessing!')
print("New data is added now!")
# NOTE(review): n=1 with nrsteps=int(1/n)=1 processes only timestep 0 —
# presumably a debugging setting; compare the later run that uses
# nrsteps=int(1440*3/n). Confirm the intended timestep count here.
# NOTE(review): GICfunction is defined in a LATER notebook cell, so that cell
# must be executed before this one.
n=1
nrsteps=int(1/n)
threads=list()
for index in range(n):
    q=Process(target=GICfunction, args=(index+1,nrsteps*index,nrsteps*(index+1),trafo,EX_matrix,EY_matrix,lat,lon,station_lat,station_lon,trafo_connect,stat_heading,stat_length,trafo_cond,ground_cond,path,path2))
    threads.append(q)
    q.start()
for thread in threads:
    thread.join()
logging.info("Script has been completed!")
print("Script has been completed!")

# +
# Worker: compute and plot GICs for timesteps [begin, end) in one process.
# q is a 1-based worker id used only for logging; the remaining arguments are
# the precomputed network/geometry arrays from the setup cell above.
def GICfunction(q,begin,end,trafo,EX_matrix,EY_matrix,lat,lon,station_lat,station_lon,trafo_connect,stat_heading,stat_length,trafo_cond,ground_cond,path,path2):
    ######################################### interpolation ############################################################
    localvar.volt_result=np.zeros(len(trafo))
    localvar.J_total=np.zeros(len(trafo))
    localvar.grid_EX=np.zeros(len(trafo))
    localvar.grid_EY=np.zeros(len(trafo))
    logging.info(f'Thread {q} has started interpolation!')
    for localvar.time in range(begin,end):#range(len(grid_EX)):
        localvar.J_north=np.zeros(len(trafo))
        localvar.J_east=np.zeros(len(trafo))
        # Interpolate the field maps onto the station coordinates.
        localvar.grid_EX=griddata((lat,lon),EX_matrix[localvar.time,:],(station_lat,station_lon),method='cubic')
        localvar.grid_EY=griddata((lat,lon),EY_matrix[localvar.time,:],(station_lat,station_lon),method='cubic')
#         logging.debug(f'Grid X is {localvar.grid_EX}')
#         logging.debug(f'Grid Y is {localvar.grid_EY}')
        ###################################### get electric potential between stations #####################################
        logging.info(f'Thread {q} is acquiring potential for timestep {localvar.time}!')
        print(f'Thread {q} is acquiring potential for timestep {localvar.time}!')
        # Line voltage = mean field at the two endpoints, projected on the
        # line direction, times line length; sign fixed by endpoint order.
        for localvar.row,localvar.line in enumerate(trafo_connect):
            for localvar.column,localvar.item in enumerate(localvar.line):
                if localvar.item:
                    localvar.signlat=np.sign(trafo.at[localvar.row,'lat']-trafo.at[localvar.column,'lat'])
                    localvar.signlon=np.sign(trafo.at[localvar.row,'lon']-trafo.at[localvar.column,'lon'])
                    localvar.stat_voltN=localvar.signlat*((localvar.grid_EX[localvar.row]+localvar.grid_EX[localvar.column])/2.)*np.cos(math.radians(stat_heading[localvar.row,localvar.column]))*stat_length[localvar.row,localvar.column]
                    localvar.stat_voltE=localvar.signlon*((localvar.grid_EY[localvar.row]+localvar.grid_EY[localvar.column])/2.)*np.sin(math.radians(stat_heading[localvar.row,localvar.column]))*stat_length[localvar.row,localvar.column]
                    localvar.J_north[localvar.row]+=localvar.stat_voltN*trafo_cond[localvar.row,localvar.column]*-1*-1
                    localvar.J_east[localvar.row]+=localvar.stat_voltE*trafo_cond[localvar.row,localvar.column]*-1*-1
        localvar.J_total=localvar.J_north+localvar.J_east
        # Nodal solve for station potentials, then Ohm's law through ground.
        localvar.volt_result=np.linalg.solve(trafo_cond,localvar.J_total)
        localvar.I_GIC=localvar.volt_result*ground_cond
        ##################################### getting max I_GIC and writing results ########################################
        logging.info(f'Thread {q} is writing results to files for timestep {localvar.time}!')
        localvar.maxAMP=1
        logging.debug(localvar.I_GIC)
        # Zero-pad the timestep number to 4 digits for stable file ordering.
        if localvar.time<10:
            localvar.tijd=f'000{localvar.time}'
        elif localvar.time<100 and localvar.time>9:
            localvar.tijd=f'00{localvar.time}'
        elif localvar.time<1000 and localvar.time>99:
            localvar.tijd=f'0{localvar.time}'
        else:
            localvar.tijd=f'{localvar.time}'
        ##### Save files #######
        localvar.GIC=pd.DataFrame(columns=['lon','lat','GIC',f'GIC/{localvar.maxAMP}'])
        GICmatrix=pd.DataFrame()
        for localvar.i in range(len(trafo_connect)):
            localvar.GIC.at[localvar.i,'lon']=station_lon[localvar.i]
            localvar.GIC.at[localvar.i,'lat']=station_lat[localvar.i]
            localvar.GIC.at[localvar.i,'GIC']=localvar.I_GIC[localvar.i]
            localvar.GIC.at[localvar.i,f'GIC/{localvar.maxAMP}']=localvar.I_GIC[localvar.i]/localvar.maxAMP
        # NOTE(review): results are written as TEST{tijd}.csv, but the psxy
        # call below plots GIC_{tijd}.csv — one of the two names looks stale.
        localvar.GIC.to_csv(path_or_buf=f'{path}/GIC/TEST{localvar.tijd}.csv', sep=';', index=False, header=True)
        ################################### Plotting results ###############################################################
        lim1=3.3
        lim2=7.5
        lim3=50.5
        lim4=54
        legendlon=lim1+0.18
        legendlat=lim4-0.1
        textlon=legendlon+0.40
        textlat=legendlat-0.03
        # Lock: legend files in path2 are shared between worker processes.
        with lock:
            legend=open(f'{path2}/legend.txt','w+')
            legend.write(f'{legendlon} {legendlat} 1 1')
            legend.close()
            legendtxt=open(f'{path2}/legendtext.txt', 'w+')
            legendtxt.write(f'{textlon} {textlat} {localvar.maxAMP}A')
            legendtxt.close()
            ##### Use GMT to plot GICs ######
            # Timestep index is minutes since midnight.
            minute=localvar.time%60
            hour=int(localvar.time/60)
            if minute < 10:
                minute=f'0{minute}'
            if hour < 10:
                hour=f'0{hour}'
            # NOTE(review): the title hard-codes 29-10-2003 while `path`
            # points at 31-10-2003 — confirm which date is plotted.
            title=f'GIC at 29-10-2003 - {hour}:{minute}'
            proj='-JM15C -P'
            lims=f'-R{lim1}/{lim2}/{lim3}/{lim4}'
            psfile=f'{path}/GIC/GIC_at_{localvar.tijd}.ps' # adjust as needed
            pngfile=f'{path}/GIC/GIC_at_{localvar.tijd}.png' # adjust as needed
            os.system(f'gmt pscoast {proj} {lims} -W0.5p -K -Ggrey -Slightblue -Df -N1/0.25p> {psfile}' )
            os.system(f'gmt psbasemap {proj} {lims} -Ba1g1 -BWeSn+t"{title}" -O -K>> {psfile}' )
            os.system(f'gmt psxy {path2}/cables.csv {proj} {lims} -W0.5p -Wred -O -K>> {psfile}' )
            os.system(f'gmt psxy {path}/GIC/GIC_{localvar.tijd}.csv {proj} {lims} -Ctopo.cpt -Scc -O -K>> {psfile}')
            os.system(f'gmt psxy {path2}/legend.txt {proj} {lims} -Ctopo.cpt -W -Scc -O -K>> {psfile}')
            os.system(f'gmt pstext {path2}/legendtext.txt {proj} {lims} -F+f25pHelvetica,black -O>> {psfile}')
            os.system(f'convert -density 300 {psfile} {pngfile}')
            os.system(f'rm {psfile}')
        logging.info(f'Thread {q} has fulfilled timestep {localvar.time}!')
    logging.info(f'Thread {q} is finished!')
# -

# Stitch the per-timestep PNG frames into a movie.
import os
path='/usr/people/out/Documents/Magnetic_field/station_results/31-10-2003'
os.system(f'ffmpeg -framerate 24 -pattern_type glob -i "{path}/GIC/GIC_at_????.png" {path}/GIC.mp4')

# +
# plot one station's timeseries
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
path='/usr/people/out/Documents/Magnetic_field/station_results/new_29-10-2003'
os.system(f'ls {path}/GIC/*.csv > {path}/GIC.temp')
f=open(f'{path}/GIC.temp')
# Rows = timesteps (1440 minutes in a day), columns = stations.
for counter,files in enumerate(f): #reading in data
    files=files.strip('\n')
    newdata=pd.read_csv(files, delimiter=';')
    numpydata=newdata.to_numpy()
    if counter==0:
        GIC_matrix=np.zeros((1440,len(numpydata)))
    GIC_matrix[counter,:]=numpydata[:,2]
os.system(f'rm {path}/GIC.temp')
Time=np.linspace(0,24,1440)
fig=plt.figure()
ax=fig.add_subplot(111)
# NOTE(review): station indices 0 and 20 are assumed to be Eemshaven and
# Borssele — confirm against the spreadsheet row order.
ax.set_title('GIC at Eemshaven on 29-10-2003')
ax.set_ylabel('Current (A)')
ax.set_xlabel('Time (hours)')
ax.plot(Time,GIC_matrix[:,0])
plt.plot()
fig=plt.figure()
ax=fig.add_subplot(111)
ax.set_title('GIC at Borssele on 29-10-2003')
ax.set_ylabel('Current (A)')
ax.set_xlabel('Time (hours)')
ax.plot(Time,GIC_matrix[:,20])
plt.plot()
# -

# Build the nodal current sources J_north/J_east for one timestep by walking
# the chain of cable segments between each pair of connected stations and
# summing the per-segment induced voltages from calcE().
# NOTE(review): relies on the module-level globals trafo, trafo_all_connections
# and trafo_cond in addition to its parameters — confirm they are in scope.
def ObtainJ(kabels,EX_matrix,EY_matrix,lat,lon,time,trafo_connect):
#     MEGACOUNTER=0
    localvar.cablecheck=np.zeros(len(kabels))
    localvar.E_kabels=np.zeros((len(kabels),2))
    localvar.E_kabels=calcE(kabels,EX_matrix,EY_matrix,lat,lon,time)
    localvar.stat_voltN=np.zeros((len(trafo),len(trafo)))
    localvar.stat_voltE=np.zeros((len(trafo),len(trafo)))
    localvar.J_north=np.zeros(len(trafo))
    localvar.J_east=np.zeros(len(trafo))
    for localvar.row,localvar.line in enumerate(trafo_connect):
#         NEWCOUNTER=0
        ruleA=999
        for localvar.column,localvar.item in enumerate(localvar.line):
            if localvar.item:
                # Cables where both stations connect form the route between them.
                for localvar.number in range(len(kabels)):
                    localvar.cablecheck[localvar.number]=trafo_all_connections[localvar.row,localvar.number]+trafo_all_connections[localvar.column,localvar.number]
                localvar.A=np.array(np.where(localvar.cablecheck==2)) #find indices that indicate cables connected
                localvar.coord=trafo.at[localvar.row,'lat']
#                 print(localvar.row, localvar.column, localvar.coord, localvar.A, len(localvar.A[0]))
                # Walk segment to segment by matching endpoint latitudes,
                # never revisiting the previous segment (ruleA).
                for localvar.counter2 in range(len(localvar.A[0])): #double loop to check the cable connections
#                     NEWCOUNTER+=1
#                     MEGACOUNTER+=1
                    for localvar.counter in range(len(localvar.A[0])):
                        if abs(localvar.coord-kabels.at[localvar.A[0,localvar.counter],'strtlat'])<0.00001 and abs(ruleA-localvar.A[0,localvar.counter])>0.51: #check coord for which cable is connected and same cable is not allowed!
                            localvar.coord=kabels.at[localvar.A[0,localvar.counter],'eindlat']
                            ruleA=localvar.A[0,localvar.counter] #rule to ensure that the same cable is not picked again
#                             MEGACOUNTER+=1
#                             print(localvar.row,localvar.column, localvar.coord)
                            # first North component
                            if kabels.at[localvar.A[0,localvar.counter],'strtlat'] < kabels.at[localvar.A[0,localvar.counter],'eindlat']:
                                localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0]*-1
                            else:
                                localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0]
                            # now East component
                            if kabels.at[localvar.A[0,localvar.counter],'strtlon'] < kabels.at[localvar.A[0,localvar.counter],'eindlon']:
                                localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1]*-1
                            else:
                                localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1]
                            break
                        # Same segment traversed from its end point: signs flip.
                        elif abs(localvar.coord-kabels.at[localvar.A[0,localvar.counter],'eindlat'])<0.00001 and abs(ruleA-localvar.A[0,localvar.counter])>0.51:
                            ruleA=localvar.A[0,localvar.counter]
#                             MEGACOUNTER+=1
                            localvar.coord=kabels.at[localvar.A[0,localvar.counter],'strtlat']
#                             print(localvar.row, localvar.column, localvar.coord)
                            # first North component
                            if kabels.at[localvar.A[0,localvar.counter],'strtlat'] < kabels.at[localvar.A[0,localvar.counter],'eindlat']:
                                localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0]
                            else:
                                localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0]*-1
                            # now East component
                            if kabels.at[localvar.A[0,localvar.counter],'strtlon'] < kabels.at[localvar.A[0,localvar.counter],'eindlon']:
                                localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1]
                            else:
                                localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1]*-1
                            break
                        else:
                            pass
#         print(localvar.row,NEWCOUNTER)
#     print(f'Amount of cables is {MEGACOUNTER/2}')
#             else:
#                 print(localvar.row, localvar.column, 'error')
    # Convert the accumulated line voltages to nodal current sources.
    for localvar.row,localvar.line in enumerate(trafo_connect):
        for localvar.column,localvar.item in enumerate(localvar.line):
            localvar.J_north[localvar.row]+=localvar.stat_voltN[localvar.row,localvar.column]*trafo_cond[localvar.row,localvar.column]*-1*-1 #extra -1 -1 to get J in opposite direction of E
            localvar.J_east[localvar.row]+=localvar.stat_voltE[localvar.row,localvar.column]*trafo_cond[localvar.row,localvar.column]*-1*-1
    return localvar.J_north, localvar.J_east

# Integrate the interpolated electric field along every cable to get its
# induced voltage (North, East components). The number of integration nodes
# `nr` is increased one by one until the result changes by < 1e-5 in total.
def calcE(kabels,EX_matrix,EY_matrix,lat,lon,time): #E for all cables
    from scipy.interpolate import griddata
    from scipy.integrate import simps
    localvar.heading=np.zeros(len(kabels))
    localvar.old=np.zeros((len(kabels),2))
    nr=40 # amount of nodes
    while True:
        localvar.E_all=np.zeros((len(kabels),2))
        localvar.latrange=np.zeros((len(kabels),nr))
        localvar.lonrange=np.zeros((len(kabels),nr))
        localvar.GridEX=np.zeros((len(kabels),nr))
        localvar.GridEY=np.zeros((len(kabels),nr))
        # nr sample points along each cable from start to end coordinates.
        for number in range(len(kabels)):
            localvar.latrange[number,:]=np.linspace(kabels.at[number,'strtlat'],kabels.at[number,'eindlat'],nr)
            localvar.lonrange[number,:]=np.linspace(kabels.at[number,'strtlon'],kabels.at[number,'eindlon'],nr)
            localvar.heading[number]=kabels.at[number,'heading']
        localvar.GridEX=griddata((lat,lon),EX_matrix[time,:],(localvar.latrange,localvar.lonrange),method='cubic') #interpolate value
        localvar.GridEY=griddata((lat,lon),EY_matrix[time,:],(localvar.latrange,localvar.lonrange),method='cubic')
        # Simpson integration of the projected field along the cable length.
        for number in range(len(kabels)):
            localvar.E_all[number,0]+=abs(np.cos(np.radians(localvar.heading[number])))*simps(localvar.GridEX[number,:],np.linspace(0,kabels.at[number,'length'],nr))
            localvar.E_all[number,1]+=abs(np.sin(np.radians(localvar.heading[number])))*simps(localvar.GridEY[number,:],np.linspace(0,kabels.at[number,'length'],nr))
        print(nr, np.sum(abs(localvar.old-localvar.E_all)))
        if np.sum(abs(localvar.old-localvar.E_all))<10**-5:
            print('Iterations used', nr-1)
            break
        else:
            localvar.old[:,0]=localvar.E_all[:,0]
            localvar.old[:,1]=localvar.E_all[:,1]
            nr+=1
    return localvar.E_all

# Quick manual checks of the two helpers above.
E_kabels=np.zeros((len(kabels),2))
E_kabels=calcE(kabels,EX_matrix,EY_matrix,lat,lon,1300)
print(E_kabels)

J_kabels=np.zeros((len(kabels),2))
J_kabels=ObtainJ(kabels,EX_matrix,EY_matrix,lat,lon,0,trafo_connect)
print(J_kabels)

# +
# Sanity check: compare the cable count per station obtained by the cable
# walker against the reference list ('original' vs 'obtained_cable').
import os
import numpy as np
station=np.zeros(149)
cable=np.zeros(149)
ob_cable=np.zeros(149)
path='/usr/people/out/Documents'
f=open(f'{path}/original')
g=open(f'{path}/obtained_cable')
for counter,item in enumerate(f):
    item=item.split(' ')
    station[counter]=item[0]
    cable[counter]=item[1]
for counter,item in enumerate(g):
    item=item.split(' ')
    ob_cable[counter]=item[1]
for counter in range(149):
    if cable[counter]-ob_cable[counter] != 0:
        print(f'Hey, station {station[counter]} is incorrect, has now {ob_cable[counter]} cables, but should be {cable[counter]}!')
    else:
        print(f'{station[counter]} has no problems')

# +
# obtain real voltages per cable piece and execute total code
# Third script: same pipeline as above, configured for the scaled
# "Halloween storm" dataset and six worker processes.
import os
import pandas as pd
import numpy as np
import math
import logging
from scipy.interpolate import griddata
from threading import local
localvar=local()
from multiprocessing import Process
from multiprocessing import Lock
lock=Lock()
path='/nobackup/users/out/Magnetic_field/Halloweenx10'
plotday='Halloween storm'
logging.basicConfig(filename=f'{path}/GIClogbook.log', level=logging.DEBUG, format='%(asctime)s %(message)s')

#################################### first reading in datasets #####################################################
try:
    os.mkdir(f'{path}/GIC')
except:
    logging.warning("Directory has already been created, data could be destroyed!")
    print("Directory has already been created, data could be destroyed!")
logging.info('Reading in datasets!')
Electric_Xfiles=[]
Electric_Yfiles=[]
os.system(f' ls {path}/electric_field_north/*.csv > {path}/temp.txt')
f=open(f'{path}/temp.txt')
for item in f:
    item=item.strip('\n')
    Electric_Xfiles.append(item)
f.close()
os.system(f' ls {path}/electric_field_east/*.csv > {path}/temp.txt')
f=open(f'{path}/temp.txt')
for item in f:
    item=item.strip('\n')
    Electric_Yfiles.append(item)
f.close()
os.system(f'rm {path}/temp.txt')
logging.debug('Electric files created!')
# First file read only to size the (timesteps x gridpoints) matrices.
for counter,file in enumerate(Electric_Xfiles):
    Xfile=pd.read_csv(file, delimiter=' ', header=None)
    values=Xfile.to_numpy()
    break
EX_matrix=np.zeros((len(Electric_Xfiles),len(values)))
EY_matrix=np.zeros((len(Electric_Xfiles),len(values)))
logging.debug('Electric matrices have been made in memory!')
for counter,file in enumerate(Electric_Xfiles):
    Xfile=pd.read_csv(file, delimiter=' ', header=None)
    values=Xfile.to_numpy()
    EX_matrix[counter,:]=values[:,2]
logging.debug('EX_matrix has been made!')
lat=values[:,1]
lon=values[:,0]
for counter,file in enumerate(Electric_Yfiles):
    Yfile=pd.read_csv(file, delimiter=' ', header=None)
    values=Yfile.to_numpy()
    EY_matrix[counter,:]=values[:,2]
del item, f, Xfile, values, Yfile

######################################### Getting the needed GIC matrices and code #################################
logging.info('Starting with the GIC code!')
path2='/usr/people/out/Documents/380+220kV_extended'
kabels=pd.read_csv(path2+'/spreadsheetcables.csv', delimiter = ';')
trafo=pd.read_csv(path2+'/spreadsheettrafo.csv', delimiter = ';')
trafo_connect=np.zeros((len(trafo),len(trafo))) #connectivity trafo
trafo_all_connections=np.zeros((len(trafo),len(kabels))) #connections possible between trafo and every cable
trafo_cond=np.zeros((len(trafo),len(trafo))) # The conductivity matrix
stat_heading=np.zeros((len(trafo),len(trafo))) #heading stations to another
stat_length=np.zeros((len(trafo),len(trafo))) #length between stations
station_lat=np.zeros(len(trafo)) #latitude stations in degrees
station_lon=np.zeros(len(trafo)) #longitude stations in degrees
ground_cond=np.zeros(len(trafo))
cable_icon=np.zeros(len(kabels)) # icon array for cable and trafo resp.
trafo_icon=np.zeros(len(trafo)) ##### connect trafo and cable number to position in matrix ##### for line in range(len(kabels)): cable_icon[line]=kabels.at[line,'kabelnr'] for line in range(len(trafo)): trafo_icon[line]=trafo.at[line,'trafonr'] ##### make trafo-trafo connectivity matrix ###### for line in range(len(trafo)): temp=trafo.at[line,'verbonden trafo'] #get right column temp=temp.split(",") #split values for item in temp: temp2=int(item) trafo_connect[line,np.where(trafo_icon == temp2)[0]]=True #check for connection other trafo del temp2 del temp ###### make trafo-cable connectivity matrix ###### for line in range(len(trafo)): temp=trafo.at[line,'alle aansluitingen'] temp=temp.split(",") for item in temp: temp2=int(item) trafo_all_connections[line,np.where(cable_icon == temp2)[0]]=True del temp2 del temp ###### make conductivity matrix ###### for row,line in enumerate(trafo_connect): trafo_cond[row,row]=trafo.at[row,'conductivity total'] for column,item in enumerate(line): if item: temp=trafo_all_connections[row,:]+trafo_all_connections[column,:] temp2=0 for counter,value in enumerate(temp): if value == 2: # if 2 then we have found the connecting cables temp2+=1/(float(kabels.at[counter,'conductivity'])*kabels.at[counter,'kab/3']) #because of serieschain we have to add 1/sigma # if temp2==0: # print(row,column) trafo_cond[row,column]=-1/temp2 #add cable resistance to off-diagonal trafo_cond[row,row]+=1/temp2 #add cable resistance to trace del temp, temp2 ###### get heading and length between stations ##### for row,line in enumerate(trafo_connect): for column,item in enumerate(line): if item and column>=row: Alat=np.radians(trafo.at[row,'lat']) Alon=np.radians(trafo.at[row,'lon']) Blat=np.radians(trafo.at[column,'lat']) Blon=np.radians(trafo.at[column,'lon']) temp=math.cos(Blat)*math.sin(Blon-Alon) temp2=math.cos(Alat)*math.sin(Blat)-math.sin(Alat)*math.cos(Blat)*math.cos(Blon-Alon) stat_heading[row,column]=np.abs(math.degrees(math.atan(temp/temp2))) 
temp3=math.sin((Blat-Alat)/2.)**2+math.cos(Alat)*math.cos(Blat)*math.sin((Blon-Alon)/2.)**2 stat_length[row,column]=6371000*2*math.atan(np.sqrt(temp3)/np.sqrt(1-temp3)) stat_heading[column,row]=stat_heading[row,column] stat_length[column,row]=stat_length[row,column] del temp, temp2, temp3, Alat, Alon, Blat, Blon del line, item, row, column, value, counter ######### get necessary arrays ######## for item in range(len(trafo)): station_lat[item]=trafo.at[item,'lat'] station_lon[item]=trafo.at[item,'lon'] ground_cond[item]=trafo.at[item,'conductivity total'] ############################### Run the function with multiple processors ########################################## logging.info('Start multiprocessing!') print("New data is added now!") n=6 nrsteps=int(1440*3/n) threads=list() for index in range(n): q=Process(target=GICfunction, args=(index+1,nrsteps*index,nrsteps*(index+1),trafo,EX_matrix,EY_matrix,lat,lon,station_lat,station_lon,trafo_connect,stat_heading,stat_length,trafo_cond,ground_cond,path,path2,kabels,trafo_all_connections,plotday)) threads.append(q) q.start() for thread in threads: thread.join() logging.info("Script has been completed!") print("Script has been completed!") # + def GICfunction(q,begin,end,trafo,EX_matrix,EY_matrix,lat,lon,station_lat,station_lon,trafo_connect,stat_heading,stat_length,trafo_cond,ground_cond,path,path2,kabels,trafo_all_connections,plotday): ######################################### interpolation ############################################################ localvar.volt_result=np.zeros(len(trafo)) localvar.J_total=np.zeros(len(trafo)) logging.info(f'Thread {q} has started interpolation!') for localvar.time in range(begin,end):#range(len(grid_EX)): localvar.J_north, localvar.J_east=ObtainJ(q,kabels,EX_matrix,EY_matrix,lat,lon,localvar.time,trafo_connect,trafo_all_connections,trafo_cond) localvar.J_total=localvar.J_north+localvar.J_east localvar.volt_result=np.linalg.solve(trafo_cond,localvar.J_total) 
localvar.I_GIC=localvar.volt_result*ground_cond ##################################### getting max I_GIC and writing results ######################################## logging.info(f'Thread {q} is writing results to files for timestep {localvar.time}!') localvar.maxAMP=10 if localvar.time<10: localvar.tijd=f'000{localvar.time}' elif localvar.time<100 and localvar.time>9: localvar.tijd=f'00{localvar.time}' elif localvar.time<1000 and localvar.time>99: localvar.tijd=f'0{localvar.time}' else: localvar.tijd=f'{localvar.time}' ##### Save files ####### localvar.GIC=pd.DataFrame(columns=['lon','lat','GIC',f'GIC/{localvar.maxAMP}']) GICmatrix=pd.DataFrame() localvar.GIC.at[:,'lon']=station_lon localvar.GIC.at[:,'lat']=station_lat localvar.GIC.at[:,'GIC']=localvar.I_GIC localvar.GIC.at[:,f'GIC/{localvar.maxAMP}']=localvar.I_GIC/localvar.maxAMP localvar.GIC.to_csv(path_or_buf=f'{path}/GIC/GIC_{localvar.tijd}.csv', sep=';', index=False, header=True) ################################### Plotting results ############################################################### lim1=3.3 lim2=7.5 lim3=50.5 lim4=54 legendlon=lim1+0.18 legendlat=lim4-0.1 textlon=legendlon+0.40 #0.33 textlat=legendlat-0.01 with lock: legend=open(f'{path2}/legend.txt','w+') legend.write(f'{legendlon} {legendlat} 1 1') legend.close() legendtxt=open(f'{path2}/legendtext.txt', 'w+') legendtxt.write(f'{textlon} {textlat} {localvar.maxAMP}A') legendtxt.close() ##### Use GMT to plot GICs ###### minute=localvar.time%60 hour=int(localvar.time/60)%24 DAY=int(localvar.time/(60*24)) if minute < 10: minute=f'0{minute}' if hour < 10: hour=f'0{hour}' title=f'GIC at {plotday} - {DAY}:{hour}:{minute}' proj='-JM15C -P' lims=f'-R{lim1}/{lim2}/{lim3}/{lim4}' psfile=f'{path}/GIC/GIC_at_{localvar.tijd}.ps' #aanpassen pngfile=f'{path}/GIC/GIC_at_{localvar.tijd}.png' #aanpassen os.system(f'gmt pscoast {proj} {lims} -W0.5p -K -Ggrey -Slightblue -Df -N1/0.25p> {psfile}' ) os.system(f'gmt psbasemap {proj} {lims} -Ba1g1 -BWeSn+t"{title}" -O 
-K>> {psfile}' ) os.system(f'gmt psxy {path2}/cables.csv {proj} {lims} -W0.5p -Wred -O -K>> {psfile}' ) os.system(f'gmt psxy {path}/GIC/GIC_{localvar.tijd}.csv {proj} {lims} -Ctopo.cpt -Scc -O -K>> {psfile}') os.system(f'gmt psxy {path2}/legend.txt {proj} {lims} -Ctopo.cpt -W -Scc -O -K>> {psfile}') os.system(f'gmt pstext {path2}/legendtext.txt {proj} {lims} -F+f30pHelvetica,black -O>> {psfile}') os.system(f'convert -density 300 {psfile} {pngfile}') os.system(f'rm {psfile}') logging.info(f'Thread {q} has fulfilled timestep {localvar.time}!') logging.info(f'Thread {q} is finished!') # - def ObtainJ(q,kabels,EX_matrix,EY_matrix,lat,lon,time,trafo_connect,trafo_all_connections,trafo_cond): localvar.cablecheck=np.zeros(len(kabels)) localvar.E_kabels=np.zeros((len(kabels),2)) logging.info(f'Thread {q} has started integration procedure!') localvar.E_kabels=calcE(kabels,EX_matrix,EY_matrix,lat,lon,time) logging.info(f'Thread {q} has finished integration procedure and is now writing results!') localvar.stat_voltN=np.zeros((len(trafo_connect),len(trafo_connect))) localvar.stat_voltE=np.zeros((len(trafo_connect),len(trafo_connect))) localvar.J_north=np.zeros(len(trafo_connect)) localvar.J_east=np.zeros(len(trafo_connect)) for localvar.row,localvar.line in enumerate(trafo_connect): ruleA=999 for localvar.column,localvar.item in enumerate(localvar.line): if localvar.item: for localvar.number in range(len(kabels)): localvar.cablecheck[localvar.number]=trafo_all_connections[localvar.row,localvar.number]+trafo_all_connections[localvar.column,localvar.number] localvar.A=np.array(np.where(localvar.cablecheck==2)) #find indices that indicate cables connected localvar.coord=trafo.at[localvar.row,'lat'] for localvar.counter2 in range(len(localvar.A[0])): #double loop to check the cable connections for localvar.counter in range(len(localvar.A[0])): if abs(localvar.coord-kabels.at[localvar.A[0,localvar.counter],'strtlat'])<0.00001 and abs(ruleA-localvar.A[0,localvar.counter])>0.51: 
#check coord for which cable is connected and same cable is not allowed! localvar.coord=kabels.at[localvar.A[0,localvar.counter],'eindlat'] ruleA=localvar.A[0,localvar.counter] #rule to ensure that the same cable is not picked again # first North component if kabels.at[localvar.A[0,localvar.counter],'strtlat'] < kabels.at[localvar.A[0,localvar.counter],'eindlat']: localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0]*-1 else: localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0] # now East component if kabels.at[localvar.A[0,localvar.counter],'strtlon'] < kabels.at[localvar.A[0,localvar.counter],'eindlon']: localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1]*-1 else: localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1] break elif abs(localvar.coord-kabels.at[localvar.A[0,localvar.counter],'eindlat'])<0.00001 and abs(ruleA-localvar.A[0,localvar.counter])>0.51: ruleA=localvar.A[0,localvar.counter] localvar.coord=kabels.at[localvar.A[0,localvar.counter],'strtlat'] # first North component if kabels.at[localvar.A[0,localvar.counter],'strtlat'] < kabels.at[localvar.A[0,localvar.counter],'eindlat']: localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0] else: localvar.stat_voltN[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],0]*-1 # now East component if kabels.at[localvar.A[0,localvar.counter],'strtlon'] < kabels.at[localvar.A[0,localvar.counter],'eindlon']: localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1] else: localvar.stat_voltE[localvar.row,localvar.column]+=localvar.E_kabels[localvar.A[0,localvar.counter],1]*-1 break else: pass 
localvar.J_north[localvar.row]+=localvar.stat_voltN[localvar.row,localvar.column]*trafo_cond[localvar.row,localvar.column]*-1*-1 #extra -1 -1 to get J in opposite direction of E localvar.J_east[localvar.row]+=localvar.stat_voltE[localvar.row,localvar.column]*trafo_cond[localvar.row,localvar.column]*-1*-1 return localvar.J_north, localvar.J_east def calcE(kabels,EX_matrix,EY_matrix,lat,lon,time): #E for all cables from scipy.interpolate import griddata from scipy.integrate import simps localvar.heading=np.zeros(len(kabels)) localvar.old=np.zeros((len(kabels),2)) nr=40 # amount of nodes while True: localvar.E_all=np.zeros((len(kabels),2)) localvar.latrange=np.zeros((len(kabels),nr)) localvar.lonrange=np.zeros((len(kabels),nr)) localvar.GridEX=np.zeros((len(kabels),nr)) localvar.GridEY=np.zeros((len(kabels),nr)) for number in range(len(kabels)): localvar.latrange[number,:]=np.linspace(kabels.at[number,'strtlat'],kabels.at[number,'eindlat'],nr) localvar.lonrange[number,:]=np.linspace(kabels.at[number,'strtlon'],kabels.at[number,'eindlon'],nr) localvar.heading[number]=kabels.at[number,'heading'] localvar.GridEX=griddata((lat,lon),EX_matrix[time,:],(localvar.latrange,localvar.lonrange),method='cubic') #interpolate value localvar.GridEY=griddata((lat,lon),EY_matrix[time,:],(localvar.latrange,localvar.lonrange),method='cubic') for number in range(len(kabels)): localvar.E_all[number,0]+=abs(np.cos(np.radians(localvar.heading[number])))*simps(localvar.GridEX[number,:],np.linspace(0,kabels.at[number,'length'],nr)) localvar.E_all[number,1]+=abs(np.sin(np.radians(localvar.heading[number])))*simps(localvar.GridEY[number,:],np.linspace(0,kabels.at[number,'length'],nr)) if np.sum(abs(localvar.old-localvar.E_all))<10**-5: logging.info(f'{nr-39} iterations were used for time={time}') break else: localvar.old[:,0]=localvar.E_all[:,0] localvar.old[:,1]=localvar.E_all[:,1] nr+=1 return localvar.E_all import os path='/nobackup/users/out/Magnetic_field/Halloweenx10' os.system(f'ffmpeg 
-framerate 24 -pattern_type glob -i "{path}/GIC/GIC_at_????.png" {path}/GIC_Halloween.mp4') import os os.system("shutdown now") # + # plot timelapse GIC import matplotlib.pyplot as plt import os import numpy as np import pandas as pd #reading in all GIC files path='/nobackup/users/out/Magnetic_field/Halloweenx10' path2='/usr/people/out/Documents/380+220kV_extended' os.system(f"ls {path}/GIC/GIC_????.csv > {path}/temp.txt") f=open(f"{path}/temp.txt") string=[] A=np.arange(3,21) B=np.arange(46,54,1) stationlist=np.hstack([0,1,A,28,29,32,33,35,43,44,B]) GIC_data=np.zeros((1440*3,149)) for item in f: item=item.rstrip('\n') string.append(item) string=sorted(string) for counter,time in enumerate(string): GIC_file=pd.read_csv(time, delimiter=';') GIC=GIC_file.to_numpy() GIC_data[counter]=GIC[:,2] os.system(f'rm {path}/temp.txt') stationframe=pd.read_csv(f'{path2}/spreadsheettrafo.csv', delimiter=';') plt.rcParams.update({'font.size': 14}) timevector=np.linspace(0,72,1440*3) fig1=plt.figure(figsize=(20,15)) ax1=fig1.add_subplot() ax1.set_title('GIC during Halloween storm (29-31 October 2003)') ax1.set_ylabel('GIC (A)') ax1.set_xlabel('Time (hours)') for station in stationlist: ax1.plot(timevector,GIC_data[:,station],label=stationframe.at[station,'naam']) # plt.subplots_adjust(left=0) lgd=ax1.legend(bbox_to_anchor=(1.01,1)) plt.savefig(f'{path}/GIC_allstationsnew.png', bbox_extra_artists=(lgd,), bbox_inches='tight') # + # get time-frequency plot fs=1/(60.) 
from scipy.signal import spectrogram import matplotlib.pyplot as plt for station in stationlist: f,t,z=spectrogram(GIC_data[:,station],fs) print(len(t),t) plt.pcolormesh(t/3600, f, z) plt.ylabel('Frequency [Hz]') plt.xlabel('Time [hours]') plt.title(stationframe.at[station,'naam']) plt.axis(ymin=0, ymax=10**-3) plt.show() # + import matplotlib.pyplot as plt import os import numpy as np import pandas as pd respath='/nobackup/users/out/Magnetic_field' days='Halloween/model7' day=3 lentrafo=149 netpath='/usr/people/out/Documents/380+220kV_extended' stationlist=[0,20] #reading in all GIC files os.system(f"ls {respath}/{days}/GIC/GIC_????.csv > {respath}/{days}/temp.txt") f=open(f"{respath}/{days}/temp.txt") string=[] GIC_data=np.zeros((1440*day,lentrafo)) for item in f: item=item.rstrip('\n') string.append(item) string=sorted(string) for counter,time in enumerate(string): GIC_file=pd.read_csv(time, delimiter=';') GIC=GIC_file.to_numpy() GIC_data[counter]=GIC[:,2] os.system(f'rm {respath}/{day}/temp.txt') stationframe=pd.read_csv(f'{netpath}/spreadsheettrafo.csv', delimiter=';') plt.rcParams.update({'font.size': 20}) timevector=np.linspace(0,24*day,1440*day) fig1=plt.figure(figsize=(20,15)) ax1=fig1.add_subplot() ax1.set_title(f'GIC during Halloween') ax1.set_ylabel('GIC (A)') ax1.set_xlabel('Time (hours)') for station in stationlist: ax1.plot(timevector,GIC_data[:,station],label=stationframe.at[station,'naam']) # plt.subplots_adjust(left=0) lgd=ax1.legend(bbox_to_anchor=(1.01,1)) plt.savefig(f'{respath}/{days}/GIC_Borssele.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
partial_code/Calculating GICs+plot results-LP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Keras mnist LeNet-5 v2
# **This project tests a modified LeNet-5 with image augmentation and learning-rate scheduling**
# - Currently reaches $0.9952$ accuracy on the test set

# +
# %matplotlib inline
import os
import PIL
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from IPython import display
from functools import partial
from sklearn.preprocessing import normalize
from keras import backend
from keras.utils import np_utils, plot_model
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, MaxPool2D, Input, AveragePooling2D
from keras.layers import Activation, Dropout, Flatten, BatchNormalization
import warnings
warnings.filterwarnings('ignore')
np.random.seed(42)
# -

# ## Prepare the data

# +
file_path = r"I:\Dataset\mnist\all_mnist_data.csv"
mnist_data = pd.read_csv(file_path)
# Shuffle once, then split 60000 train / remainder test.
idx = np.random.permutation(len(mnist_data))
train_data = mnist_data.iloc[idx[: 60000]]
test_data = mnist_data.iloc[idx[60000: ]]
X_train = np.array(train_data.drop('0', axis=1)).reshape(-1, 28, 28, 1).astype("float32")
X_test = np.array(test_data.drop('0', axis=1)).reshape(-1, 28, 28, 1).astype("float32")
y_train = np.array(train_data['0'])
y_test = np.array(test_data['0'])
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# Hold out the first 10000 training images as a validation set.
x_train = X_train[10000:]
t_train = y_train[10000:]
x_val = X_train[:10000]
t_val = y_train[:10000]
print("\nimgs of trainset : ", x_train.shape)
print("labels of trainset : ", t_train.shape)
print("imgs of valset : ", x_val.shape)
print("labels of valset : ", t_val.shape)
print("imgs of testset : ", X_test.shape)
print("labels of testset : ", y_test.shape)
# -

# ## Build the model

def myCNN():
    """Return a small LeNet-5-style CNN for 28x28x1 MNIST digits."""
    model = Sequential()
    model.add(Conv2D(filters=16, kernel_size=(5, 5), padding='same', input_shape=(28, 28, 1), activation='relu', name='conv2d_1'))
    model.add(MaxPool2D(pool_size=(2, 2), name='max_pool2d_1'))
    model.add(Conv2D(filters=36, kernel_size=(5, 5), padding='same', input_shape=(14, 14, 1), activation='relu', name='conv2d_2'))
    model.add(MaxPool2D(pool_size=(2, 2), name='max_pool2d_2'))
    model.add(Dropout(0.25, name='dropout_1'))
    model.add(Flatten(name='flatten_1'))
    model.add(Dense(128, activation='relu', name='dense_1'))
    model.add(Dropout(0.5, name='dropout_2'))
    model.add(Dense(10, activation='softmax', name='dense_2'))
    return model

model = myCNN()
model.summary()

# ### Allocate compute resources
# Limit each process to 30% of GPU memory so other jobs can run concurrently.

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
sess = tf.Session(config=config)
backend.set_session(sess)

# ### Training

# +
"""训练模型并保存模型及训练历史 保存模型单独创建一个子文件夹modeldir, 保存训练历史则为单个文件hisfile"""
# Train the model(s); save each model into `modeldir` and all training
# histories together in the single file `hisfile`.
models_name = "Keras_mnist_LeNet-5_DA_lr"  # common prefix for model names
factor_list = [""]  # list of hyper-parameter variants tuned in this run
model_list = []  # list of model file names
for i in range(len(factor_list)):
    modelname = models_name + factor_list[i] + ".h5"
    model_list.append(modelname)

# Create the model-save subdirectory.
if not os.path.isdir("saved_models"):
    os.mkdir("saved_models")
modeldir = r"saved_models"
# Create the training-history directory.
if not os.path.isdir("train_history"):
    os.mkdir("train_history")
# Path of the training-history file.
hisfile = r"train_history\Keras_mnist_LeNet-5_DA_lr.train_history"

# Each model and its training history form a pair {modelname: train_history};
# train_history is a dict with four keys: train/val loss and acc.
model_train_history = dict()

# Start training.
epochs=100
batch_size = 32
steps_per_epoch=1250
for i in range(len(model_list)):
    model = myCNN()
    modelname = model_list[i]
    modelpath = os.path.join(modeldir, modelname)
    train_his = np.array([]).reshape(-1, 2)
    val_his = np.array([]).reshape(-1, 2)
    datagen = ImageDataGenerator(
        rotation_range=10,
        height_shift_range=0.1,
        width_shift_range=0.1,
        fill_mode="constant", cval=0.0,)
    datagen.fit(x_train)
    model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(), metrics=["accuracy"])
    print("\ntraining model : ", modelname)
    lr = 0.001
    ck_epoch, max_val_acc = 0, 0.0
    for epoch in range(epochs+1):
        # FIX: the per-epoch step counter used to be named `i`, shadowing the
        # outer model-index loop variable; renamed to `step`.
        step = 0
        tr_his = []
        for X, y in datagen.flow(x_train, t_train, batch_size=batch_size):
            his = model.train_on_batch(X, y)
            tr_his.append(his)
            step += 1
            if step >= steps_per_epoch:
                break
        tr = np.mean(tr_his, axis=0)
        val = model.evaluate(x_val, t_val, verbose=0)
        train_his = np.vstack((train_his, tr))
        val_his = np.vstack((val_his, val))
        if epoch%5==0:
            print("%4d epoch: train acc: %8f loss: %8f val acc: %8f loss: %8f"%(epoch, tr[1], tr[0], val[1], val[0]))
        # Save the model when validation accuracy improves.
        if val[1] > max_val_acc:
            model.save(modelpath)
            print("val acc improved from %6f to %6f"%(max_val_acc, val[1]))
            max_val_acc = val[1]
            ck_epoch = epoch
        # Halve the learning rate after 10 epochs without improvement.
        if epoch-ck_epoch>10 and lr > 1e-4:
            lr = lr*0.5
            ck_epoch = epoch
            model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(lr=lr), metrics=["accuracy"])
            print("lr : ", lr)
    model_train_history[modelname] = {"acc": train_his[:, 1], "val_acc": val_his[:, 1], "loss": train_his[:, 0], "val_loss": val_his[:, 0]}

"""保存训练历史"""
# Persist the training histories.
fo = open(hisfile, 'wb')
pickle.dump(model_train_history, fo)
fo.close()
# -

# ### Visualize the training process

def show_train_history(saved_history, his_img_file):
    """Plot accuracy and loss curves for every saved model history and save
    the figure to `his_img_file`."""
    modelnames = sorted(list(saved_history.keys()))
    train = ["acc", "loss"]
    val = ["val_acc", "val_loss"]
    """作loss和acc两个图"""
    # One subplot for accuracy, one for loss.
    fig, ax = plt.subplots(1, 2, figsize=(16, 5))
    ax = ax.flatten()
    color_add = 0.9/len(saved_history)
    for i in range(2):
        c = 0.05
        for j in range(len(saved_history)):
            modelname = modelnames[j]
            train_history = saved_history[modelname]
            ax[i].plot(train_history[train[i]], color=(0, 1-c, 0), linestyle="-", label="train_"+modelname[21:-3])
            ax[i].plot(train_history[val[i]], color=(c, 0, 1-c), linestyle="-", label="val_"+modelname[21:-3])
            c += color_add
        ax[i].set_title('Train History')
        ax[i].set_ylabel(train[i])
        ax[i].set_xlabel('Epoch')
    ax[0].legend(loc="lower right")
    ax[1].legend(loc="upper right")
    ax[0].set_ylim(0.9, 1.0)
    ax[1].set_ylim(0, 0.2)
    plt.suptitle("LeNet-5_DA_lr")
    print("saved img: ", his_img_file)
    plt.savefig(his_img_file)
    plt.show()

# +
"""载入训练历史并可视化, 并且保存图片"""
# Load the saved training history, visualize it and save the image.
if not os.path.isdir("his_img"):
    os.mkdir("his_img")
his_img_file = r"his_img\LeNet-5_DA_lr.png"
fo2 = open(hisfile, "rb")
saved_history1 = pickle.load(fo2)
show_train_history(saved_history1, his_img_file)
# -

# ## Evaluate on the test set

# +
smodel = load_model(modelpath)
print("test model: ", os.path.basename(modelpath))
loss, acc = smodel.evaluate(X_test, y_test)
print("test :acc: %.4f"%(acc))
Keras_mnist_LeNet-5_v2_DA_lr.9952.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Requested radar site, product code and elevation angle.
nexid="krtx"
product=172
elev="0.0"

from awips.dataaccess import DataAccessLayer
from cartopy.feature import ShapelyFeature,NaturalEarthFeature
from awips import ThriftClient, RadarCommon
from dynamicserialize.dstypes.com.raytheon.uf.common.time import TimeRange
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.radar.request import GetRadarDataRecordRequest
from datetime import datetime
import matplotlib.pyplot as plt
from datetime import timedelta
from numpy import ma
from metpy.plots import ctables
#from awips.tables import nexrad
import numpy as np
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER

# Local product table: product code, display unit, name, colortable spec
# [name, start, step], gate resolution (m) and elevation per mnemonic.
nexrad = {
    "N0Q" : {
        'id': 94,
        'unit':'dBZ',
        'name':'0.5 deg Base Reflectivity',
        'ctable': ['NWSStormClearReflectivity',-20., 0.5],
        'res': 1000.,
        'elev': '0.5'
    },
    "N0U" : {
        'id': 99,
        'unit':'kts',
        'name':'0.5 deg Base Velocity',
        'ctable': ['NWS8bitVel',-100.,1.],
        'res': 250.,
        'elev': '0.5'
    },
    "EET" : {
        'id': 135,
        'unit':'kft',
        'name':'Enhanced Echo Tops',
        'ctable': ['NWSEnhancedEchoTops',0,1],
        'res': 1000.,
        'elev': '0.0'
    },
    "DST" : {
        'id': 172,
        'unit':'in',
        'name':'Storm Total Accumulation',
        'ctable': ['NWSStormClearReflectivity',0,1],
        'res': 250.,
        'elev': '0.0'
    }
}

def make_map(bbox, projection=ccrs.PlateCarree()):
    # Build a cartopy map for the given [lon_min, lon_max, lat_min, lat_max]
    # bounding box with coastlines and labelled gridlines.
    fig, ax = plt.subplots(figsize=(14, 10), subplot_kw=dict(projection=projection))
    ax.set_extent(bbox)
    ax.coastlines(resolution='50m')
    gl = ax.gridlines(draw_labels=True)
    gl.xlabels_top = gl.ylabels_right = False
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'size': 6}
    gl.ylabel_style = {'size': 6}
    return fig, ax

# set EDEX server and radar site definitions
request = DataAccessLayer.newDataRequest()
DataAccessLayer.changeEDEXHost('edex-cloud.unidata.ucar.edu')
request.setDatatype('radar')
request.setLocationNames(nexid)
request.setParameters(product)

# Get latest time
datatimes = DataAccessLayer.getAvailableTimes(request)
dateTimeStr = str(datatimes[-1])
dateTime = datetime.strptime(dateTimeStr, '%Y-%m-%d %H:%M:%S')
# Build timerange +/- 60 sec buffer
beginRange = dateTime - timedelta(0, 60)
endRange = dateTime + timedelta(0, 60)
timerange = TimeRange(beginRange, endRange)

# GetRadarDataRecordRequest to query site with timerange
client = ThriftClient.ThriftClient('edex-cloud.unidata.ucar.edu')
request = GetRadarDataRecordRequest()
request.setRadarId(nexid)
request.setTimeRange(timerange)

grids = []
# Look up the table entry whose numeric id matches the requested product.
nexrad_info = [kv for kv in nexrad.items() if kv[1]['id'] == product][0]
code = nexrad_info[0]
request.setProductCode(product)
request.setPrimaryElevationAngle(nexrad_info[1]['elev'])
response = client.sendRequest(request)
# -

if response.getData():
    for record in response.getData():
        # Get hdf5
        idra = record.getHdf5Data()
        rdat,azdat,depVals,threshVals = RadarCommon.get_hdf5_data(idra)
        dim = rdat.getDimension()
        lat,lon = float(record.getLatitude()),float(record.getLongitude())
        radials,rangeGates = rdat.getSizes()
        # Convert raw byte to pixel value
        # (signed bytes are re-interpreted as unsigned 0-255 pixel values)
        rawValue=np.array(rdat.getByteData())
        array = []
        for rec in rawValue:
            if rec<0:
                rec+=256
            array.append(rec)
        #array=rawValue
        if azdat:
            azVals = azdat.getFloatData()
            az = np.array(RadarCommon.encode_radial(azVals))
            dattyp = RadarCommon.get_data_type(azdat)
            az = np.append(az,az[-1])
        header = RadarCommon.get_header(record, format, rangeGates, radials, azdat, 'description')
        rng = np.linspace(0, rangeGates, rangeGates + 1)

        # Convert az/range to a lat/lon
        from pyproj import Geod
        g = Geod(ellps='clrk66')
        center_lat = np.ones([len(az),len(rng)])*lat
        center_lon = np.ones([len(az),len(rng)])*lon
        az2D = np.ones_like(center_lat)*az[:,None]
        rng2D = np.ones_like(center_lat)*np.transpose(rng[:,None])*nexrad[code]['res']
        lons,lats,back=g.fwd(center_lon,center_lat,az2D,rng2D)
        bbox = [lons.min(), lons.max(), lats.min(), lats.max()]

        # Create 2d array
        multiArray = np.reshape(array, (-1, rangeGates))
        data = ma.array(multiArray)

        if product==135:
            # Enhanced Echo Tops: mask values below the valid range.
            data[data<2] = ma.masked
        elif product==172:
            # Storm Total Accumulation: pixel-value -> display-value mapping
            # from the product colormap definition:
            # <entry displayValue='0.0' pixelValue='6' />
            # <entry displayValue='0.1' pixelValue='16' />
            # <entry displayValue='0.3' pixelValue='32' />
            # <entry displayValue='0.6' pixelValue='48' />
            # <entry displayValue='1.0' pixelValue='64' />
            # <entry displayValue='1.5' pixelValue='80' />
            # <entry displayValue='2.0' pixelValue='96' />
            # <entry displayValue='2.5' pixelValue='112' />
            # <entry displayValue='3.0' pixelValue='128' />
            # <entry displayValue='4.0' pixelValue='144' />
            # <entry displayValue='5.0' pixelValue='160' />
            # <entry displayValue='6.0' pixelValue='176' />
            # <entry displayValue='8.0' pixelValue='192' />
            # <entry displayValue='10.0' pixelValue='208' />
            # <entry displayValue='12.0' pixelValue='224' />
            # <entry displayValue='15.0' pixelValue='240' />
            # <entry displayValue='31.0' pixelValue='255' />
            # The Scale and Offset used in the equation (F = (N - OFFSET) / SCALE), where
            # N is the integer data value and F is the resulting floating point value)
            # to relate the integer data values to physical units are ANSI/IEEE
            # Standard 754-1985 floating point values. Halfwords 31 and 32 contain the
            # Scale, and halfwords 33 and 34 contain the Offset.
            data = ma.array((multiArray - float(threshVals[1])/float(threshVals[0])))
            data[data==0] = ma.masked
        else:
            # Generic products: decode via threshold scale/offset (tenths).
            data = ma.array(threshVals[0]/10. + (data)*threshVals[1]/10.)
            if nexrad[code]['unit'] == 'kts':
                data[data<-63] = ma.masked
                data *= 1.94384 # Convert to knots
            else:
                data[data<=((threshVals[0]/10.)+threshVals[1]/10.)] = ma.masked

        # Save our requested grids so we can render them multiple times
        radar_grid = {
            "code": code,
            "bbox": bbox,
            "lats": lats,
            "lons": lons,
            "data": data
        }
        grids.append(radar_grid)

# Diagnostics from the last processed record.
print("data values range from " +str(rawValue.min())+ " to " +str(rawValue.max()))
print("threshVals is " +str(threshVals))
print(timerange)
print(dateTime)
print(data.shape)

# set EDEX server and radar site definitions
request = DataAccessLayer.newDataRequest()
DataAccessLayer.changeEDEXHost('edex-cloud.unidata.ucar.edu')
request.setDatatype('radar')
request.setLocationNames(nexid) # nexid="krtx"

availableParms = DataAccessLayer.getAvailableParameters(request)
availableParms.sort()
list(availableParms)

request.setParameters(product) # product=172
request.setLevels("0.0TILT")
times = DataAccessLayer.getAvailableTimes(request)
response = DataAccessLayer.getGridData(request, [times[-1]])
grid = response[0]
data = grid.getRawData()
lons, lats = grid.getLatLonCoords()
bbox = [lons.min(), lons.max(), lats.min(), lats.max()]
print(bbox)

data = ma.masked_invalid(ma.array(grid.getRawData()))
print(data.min())
print(data.max())
datax= data*(16256*0.01)
print(datax.min())
print(datax.max())

print(threshVals)
print(threshVals.byteswap())
print(depVals)
print(depVals.byteswap())

# Render every saved grid on its own map.
for rec in grids:
    code = rec["code"]
    bbox = rec["bbox"]
    lats = rec["lats"]
    lons = rec["lons"]
    data = rec["data"]

    # Create figure
    # %matplotlib inline
    fig, ax = make_map(bbox=bbox)
    # Colortable filename, beginning value, increment
    ctable = nexrad[code]['ctable'][0]
    ctable = "NWSReflectivityExpanded"
    beg = nexrad[code]['ctable'][1]
    inc = nexrad[code]['ctable'][2]

    norm, cmap = ctables.registry.get_with_steps(ctable, beg, inc)
    cs = ax.pcolormesh(lons, lats, data, norm=norm, cmap=cmap)
    ax.set_aspect('equal', 'datalim')

    cbar = plt.colorbar(cs, shrink=0.85, orientation='horizontal')
    #cbar.ax.set_xticklabels(['0','10','20','30','40','50','60','70kft','topped'])
    cbar.set_label(nexid.upper()+" "+ str(nexrad[code]['res']/1000.) +"km " \
                   +nexrad[code]['name']+" ("+code+") " \
                   +nexrad[code]['unit']+" " \
                   +str(record.getDataTime()))

    political_boundaries = NaturalEarthFeature(category='cultural',
                               name='admin_0_boundary_lines_land',
                               scale='50m', facecolor='none')
    states = NaturalEarthFeature(category='cultural',
                               name='admin_1_states_provinces_lakes',
                               scale='50m', facecolor='none')
    ax.add_feature(political_boundaries, linestyle='-')
    ax.add_feature(states, linestyle='-')

    # Zoom
    ax.set_xlim(lon-1.5, lon+1.5)
    ax.set_ylim(lat-1.5, lat+1.5)

    plt.tight_layout()
failing_notebooks/python-awips/NEXRAD3_Storm_Total_Accumulation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# #range()
#
# In this short lecture we will be discussing the range function. We haven't developed a very deep level of knowledge of functions yet, but we can understand the basics of this simple (but extremely useful!) function.
#
# range() allows us to create a list of numbers ranging from a starting point *up to* an ending point. We can also specify step size. Let's walk through a few examples:

range(0,10)

x =range(0,10)

type(x)

start = 0 #Default
stop = 20

x = range(start,stop)

x

# Great! Notice how it went *up to* 20, but doesn't actually produce 20. Just like in indexing. What about step size? We can specify that as a third argument:

x = range(start,stop,2)

#Show x
x

# Awesome! Well that's it... or is it?

# ### <font color='red'>Python 3 Alert!</font>
#
# You might have been wondering, what happens if I want to use a huge range of numbers? Can my computer store that all in memory?
#
# Great thinking! This is a dilemma that can be solved with the use of a generator. For a simplified explanation: a generator produces its values one at a time as they are requested, without storing every generated value in memory.
#
# This means a generator does not build the full list up front the way range() does, but instead yields the numbers in that range one at a time. Python 2 has a built-in range generator called xrange(). It is recommended to use xrange() for **for** loops in Python 2.
#
# The good news is in Python 3, range() behaves as a generator and you don't need to worry about it. Let's see a quick example with xrange()

for num in range(10):
    print num

for num in xrange(10):
    print num

# So the main takeaway here is for Python 2, if you are using range() in a way that you don't need to save the results in a list, use xrange() instead. For Python 3, use range() in any instance.
#
# You should now have a good understanding of how to use range() in either version of Python.
others/resources/python/intro-to-python-jupyter-notebooks-master/13-Range().ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dependency Parsing

# <div class="alert alert-info">
#
# This tutorial is available as an IPython notebook at [Malaya/example/dependency](https://github.com/huseinzol05/Malaya/tree/master/example/dependency).
#
# </div>

# <div class="alert alert-warning">
#
# This module is only trained on standard language structure, so it is not safe to use it for local language structure.
#
# </div>

# %%time
import malaya

# ### Describe supported dependencies

malaya.dependency.describe()

string = 'Dr Mahathir menasihati mereka supaya berhenti berehat dan tidur sebentar sekiranya mengantuk ketika memandu.'

# ### List available transformer Dependency models

malaya.dependency.available_transformer()

# Make sure you check the accuracy chart from here first before selecting a model, https://malaya.readthedocs.io/en/latest/Accuracy.html#dependency-parsing
#
# **The best model in terms of accuracy is XLNET**.

# ### Load xlnet dependency model

model = malaya.dependency.transformer(model = 'xlnet')

# ### Load Quantized model
#
# To load the 8-bit quantized model, simply pass `quantized = True`; the default is `False`.
#
# We can expect a slight accuracy drop from the quantized model, and it is not necessarily faster than the normal 32-bit float model -- it totally depends on the machine.

quantized_model = malaya.dependency.transformer(model = 'xlnet', quantized = True)

d_object, tagging, indexing = model.predict(string)
d_object.to_graphvis()

d_object, tagging, indexing = quantized_model.predict(string)
d_object.to_graphvis()

# ### Voting stack model

alxlnet = malaya.dependency.transformer(model = 'alxlnet')

tagging, indexing = malaya.stack.voting_stack([model, alxlnet, model], string)
malaya.dependency.dependency_graph(tagging, indexing).to_graphvis()

# ### Dependency graph object
#
# To initiate a dependency graph from dependency models, you need to call `malaya.dependency.dependency_graph`.

graph = malaya.dependency.dependency_graph(tagging, indexing)
graph

# #### generate graphvis

graph.to_graphvis()

# #### Get nodes

graph.nodes

# #### Flatten the graph

list(graph.triples())

# #### Check whether the graph contains cycles

graph.contains_cycle()

# #### Generate networkx
#
# Make sure you have already installed networkx,
#
# ```bash
# pip install networkx
# ```

digraph = graph.to_networkx()
digraph

import networkx as nx
import matplotlib.pyplot as plt

nx.draw_networkx(digraph)
plt.show()

digraph.edges()

digraph.nodes()

# Map each networkx node address back to its surface word for labelling.
labels = {i:graph.get_by_address(i)['word'] for i in digraph.nodes()}
labels

plt.figure(figsize=(15,5))
nx.draw_networkx(digraph,labels=labels)
plt.show()

# ### Vectorize
#
# Let's say you want to visualize the word level in a lower dimension; you can use `model.vectorize`,
#
# ```python
# def vectorize(self, string: str):
#     """
#     vectorize a string.
#
#     Parameters
#     ----------
#     string: List[str]
#
#     Returns
#     -------
#     result: np.array
#     """
# ```

r = quantized_model.vectorize(string)

# Split (word, vector) pairs into parallel lists.
x = [i[0] for i in r]
y = [i[1] for i in r]

# +
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

tsne = TSNE().fit_transform(y)
tsne.shape
# -

plt.figure(figsize = (7, 7))
plt.scatter(tsne[:, 0], tsne[:, 1])
labels = x
for label, x, y in zip(
    labels, tsne[:, 0], tsne[:, 1]
):
    label = (
        '%s, %.3f' % (label[0], label[1])
        if isinstance(label, list)
        else label
    )
    plt.annotate(
        label,
        xy = (x, y),
        xytext = (0, 0),
        textcoords = 'offset points',
    )
docs/load-dependency.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt

# +
# Training inputs: each row is a training example, each column a feature.
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1],
              [1, 1, 1]])  # NOTE(review): last row duplicates the previous one -- confirm intended.

# Target outputs, one per training example.
y = np.array([[0],
              [1],
              [1],
              [0],
              [0]])

# Seed the RNG so every run starts from the same weights (deterministic).
np.random.seed(1)


# +
def sigmoid(x, deriv=False):
    """Logistic sigmoid s(x) = 1 / (1 + e^-x), mapping any value into (0, 1).

    When ``deriv=True``, ``x`` is assumed to ALREADY be a sigmoid output
    s(z); the derivative is then s(z) * (1 - s(z)) = x * (1 - x).
    """
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
# -

# Synapse (weight) matrices, initialised uniformly in [-1, 1) with zero mean.
# BUG FIX: the original used ``2 + np.random.random(...) - 1``, which yields
# weights in [1, 2); the intended zero-mean initialisation is
# ``2 * np.random.random(...) - 1``.
syn0 = 2 * np.random.random((3, 4)) - 1
syn1 = 2 * np.random.random((4, 1)) - 1

# +
# Training loop: forward pass, error, backpropagation, weight update.
for i in range(60000):
    # First layer is the raw input.
    l0 = X
    # Each following layer is the sigmoid of the previous layer times its synapse.
    l1 = sigmoid(np.dot(l0, syn0))
    l2 = sigmoid(np.dot(l1, syn1))

    # Error: how far the prediction is from the real value.
    l2_error = y - l2

    # Print the average error rate at a set interval to make sure it decreases.
    if (i % 10000) == 0:
        print("Error" + str(np.mean(np.abs(l2_error))))

    # Delta = error scaled by the sigmoid slope at the output layer.
    l2_delta = l2_error * sigmoid(l2, deriv=True)

    # Backpropagation: how much l1 contributed to the error in l2.
    l1_error = l2_delta.dot(syn1.T)
    l1_delta = l1_error * sigmoid(l1, deriv=True)

    # Gradient descent: update each synapse with its layer's delta.
    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)
# -

print("The output after the training is")
print(l2)

# Plot the sigmoid activation for reference.
z = np.arange(-10, 10, 0.3)
sigm = 1 / (1 + np.exp(-z))
plt.plot(z, sigm, color='mediumvioletred', linewidth=1.5)
plt.xlabel('Z', size=14, alpha=0.8)
plt.ylabel('σ(z)', size=14, alpha=0.8)
a = plt.title('Sigmoid Function', size=14)
a.set_position([.5, 1.05])


# +
def perceptron(X, Y):
    """Train a plain perceptron on (X, Y) and return the learnt weight vector.

    Runs 20 epochs of the classic update rule with learning rate 1.
    NOTE(review): the ``dot(x, w) * y <= 0`` rule expects labels in {-1, +1};
    with the 0/1 targets used above, examples labelled 0 always enter the
    update branch but contribute a zero update -- confirm intended.
    """
    w = np.zeros(len(X[0]))
    eta = 1
    epochs = 20
    for t in range(epochs):
        for i, x in enumerate(X):
            if (np.dot(X[i], w) * Y[i]) <= 0:
                w = w + eta * X[i] * Y[i]
    return w


w = perceptron(X, y)
print(w)
# -
.ipynb_checkpoints/Neural Network-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # # Learning Bayesian Networks # # # Previous notebooks showed how Bayesian networks economically encode a probability distribution over a set of variables, and how they can be used e.g. to predict variable states, or to generate new samples from the joint distribution. This section will be about obtaining a Bayesian network, given a set of sample data. Learning a Bayesian network can be split into two problems: # # **Parameter learning:** Given a set of data samples and a DAG that captures the dependencies between the variables, estimate the (conditional) probability distributions of the individual variables. # # **Structure learning:** Given a set of data samples, estimate a DAG that captures the dependencies between the variables. # # This notebook aims to illustrate how parameter learning and structure learning can be done with pgmpy. 
# Currently, the library supports: # - Parameter learning for *discrete* nodes: # - Maximum Likelihood Estimation # - Bayesian Estimation # - Structure learning for *discrete*, *fully observed* networks: # - Score-based structure estimation (BIC/BDeu/K2 score; exhaustive search, hill climb/tabu search) # - Constraint-based structure estimation (PC) # - Hybrid structure estimation (MMHC) # # # ## Parameter Learning # # Suppose we have the following data: import pandas as pd data = pd.DataFrame(data={'fruit': ["banana", "apple", "banana", "apple", "banana","apple", "banana", "apple", "apple", "apple", "banana", "banana", "apple", "banana",], 'tasty': ["yes", "no", "yes", "yes", "yes", "yes", "yes", "yes", "yes", "yes", "yes", "no", "no", "no"], 'size': ["large", "large", "large", "small", "large", "large", "large", "small", "large", "large", "large", "large", "small", "small"]}) print(data) # We know that the variables relate as follows: # + from pgmpy.models import BayesianModel model = BayesianModel([('fruit', 'tasty'), ('size', 'tasty')]) # fruit -> tasty <- size # - # Parameter learning is the task to estimate the values of the conditional probability distributions (CPDs), for the variables `fruit`, `size`, and `tasty`. # # #### State counts # To make sense of the given data, we can start by counting how often each state of the variable occurs. If the variable is dependent on parents, the counts are done conditionally on the parents states, i.e. for seperately for each parent configuration: from pgmpy.estimators import ParameterEstimator pe = ParameterEstimator(model, data) print("\n", pe.state_counts('fruit')) # unconditional print("\n", pe.state_counts('tasty')) # conditional on fruit and size # We can see, for example, that as many apples as bananas were observed and that `5` large bananas were tasty, while only `1` was not. 
# # #### Maximum Likelihood Estimation # # A natural estimate for the CPDs is to simply use the *relative frequencies*, with which the variable states have occured. We observed `7 apples` among a total of `14 fruits`, so we might guess that about `50%` of `fruits` are `apples`. # # This approach is *Maximum Likelihood Estimation (MLE)*. According to MLE, we should fill the CPDs in such a way, that $P(\text{data}|\text{model})$ is maximal. This is achieved when using the *relative frequencies*. See [1], section 17.1 for an introduction to ML parameter estimation. pgmpy supports MLE as follows: from pgmpy.estimators import MaximumLikelihoodEstimator mle = MaximumLikelihoodEstimator(model, data) print(mle.estimate_cpd('fruit')) # unconditional print(mle.estimate_cpd('tasty')) # conditional # `mle.estimate_cpd(variable)` computes the state counts and divides each cell by the (conditional) sample size. The `mle.get_parameters()`-method returns a list of CPDs for all variable of the model. # # The built-in `fit()`-method of `BayesianModel` provides more convenient access to parameter estimators: # # Calibrate all CPDs of `model` using MLE: model.fit(data, estimator=MaximumLikelihoodEstimator) # # While very straightforward, the ML estimator has the problem of *overfitting* to the data. In above CPD, the probability of a large banana being tasty is estimated at `0.833`, because `5` out of `6` observed large bananas were tasty. Fine. But note that the probability of a small banana being tasty is estimated at `0.0`, because we observed only one small banana and it happened to be not tasty. But that should hardly make us certain that small bananas aren't tasty! # We simply do not have enough observations to rely on the observed frequencies. If the observed data is not representative for the underlying distribution, ML estimations will be extremly far off. # # When estimating parameters for Bayesian networks, lack of data is a frequent problem. 
Even if the total sample size is very large, the fact that state counts are done conditionally for each parents configuration causes immense fragmentation. If a variable has 3 parents that can each take 10 states, then state counts will be done seperately for `10^3 = 1000` parents configurations. This makes MLE very fragile and unstable for learning Bayesian Network parameters. A way to mitigate MLE's overfitting is *Bayesian Parameter Estimation*. # # #### Bayesian Parameter Estimation # # The Bayesian Parameter Estimator starts with already existing prior CPDs, that express our beliefs about the variables *before* the data was observed. Those "priors" are then updated, using the state counts from the observed data. See [1], Section 17.3 for a general introduction to Bayesian estimators. # # One can think of the priors as consisting in *pseudo state counts*, that are added to the actual counts before normalization. # Unless one wants to encode specific beliefs about the distributions of the variables, one commonly chooses uniform priors, i.e. ones that deem all states equiprobable. # # A very simple prior is the so-called *K2* prior, which simply adds `1` to the count of every single state. # A somewhat more sensible choice of prior is *BDeu* (Bayesian Dirichlet equivalent uniform prior). For BDeu we need to specify an *equivalent sample size* `N` and then the pseudo-counts are the equivalent of having observed `N` uniform samples of each variable (and each parent configuration). In pgmpy: # # # # + from pgmpy.estimators import BayesianEstimator est = BayesianEstimator(model, data) print(est.estimate_cpd('tasty', prior_type='BDeu', equivalent_sample_size=10)) # - # The estimated values in the CPDs are now more conservative. In particular, the estimate for a small banana being not tasty is now around `0.64` rather than `1.0`. 
Setting `equivalent_sample_size` to `10` means that for each parent configuration, we add the equivalent of 10 uniform samples (here: `+5` small bananas that are tasty and `+5` that aren't). # # `BayesianEstimator`, too, can be used via the `fit()`-method. Full example: # + import numpy as np import pandas as pd from pgmpy.models import BayesianModel from pgmpy.estimators import BayesianEstimator # generate data data = pd.DataFrame(np.random.randint(low=0, high=2, size=(5000, 4)), columns=['A', 'B', 'C', 'D']) model = BayesianModel([('A', 'B'), ('A', 'C'), ('D', 'C'), ('B', 'D')]) model.fit(data, estimator=BayesianEstimator, prior_type="BDeu") # default equivalent_sample_size=5 for cpd in model.get_cpds(): print(cpd) # - # ## Structure Learning # # To learn model structure (a DAG) from a data set, there are two broad techniques: # # - score-based structure learning # - constraint-based structure learning # # The combination of both techniques allows further improvement: # - hybrid structure learning # # We briefly discuss all approaches and give examples. # # ### Score-based Structure Learning # # # This approach construes model selection as an optimization task. It has two building blocks: # # - A _scoring function_ $s_D\colon M \to \mathbb R$ that maps models to a numerical score, based on how well they fit to a given data set $D$. # - A _search strategy_ to traverse the search space of possible models $M$ and select a model with optimal score. # # # #### Scoring functions # # Commonly used scores to measure the fit between model and data are _Bayesian Dirichlet scores_ such as *BDeu* or *K2* and the _Bayesian Information Criterion_ (BIC, also called MDL). See [1], Section 18.3 for a detailed introduction on scores. As before, BDeu is dependent on an equivalent sample size. 
# + import pandas as pd import numpy as np from pgmpy.estimators import BDeuScore, K2Score, BicScore from pgmpy.models import BayesianModel # create random data sample with 3 variables, where Z is dependent on X, Y: data = pd.DataFrame(np.random.randint(0, 4, size=(5000, 2)), columns=list('XY')) data['Z'] = data['X'] + data['Y'] bdeu = BDeuScore(data, equivalent_sample_size=5) k2 = K2Score(data) bic = BicScore(data) model1 = BayesianModel([('X', 'Z'), ('Y', 'Z')]) # X -> Z <- Y model2 = BayesianModel([('X', 'Z'), ('X', 'Y')]) # Y <- X -> Z print(bdeu.score(model1)) print(k2.score(model1)) print(bic.score(model1)) print(bdeu.score(model2)) print(k2.score(model2)) print(bic.score(model2)) # - # While the scores vary slightly, we can see that the correct `model1` has a much higher score than `model2`. # Importantly, these scores _decompose_, i.e. they can be computed locally for each of the variables given their potential parents, independent of other parts of the network: print(bdeu.local_score('Z', parents=[])) print(bdeu.local_score('Z', parents=['X'])) print(bdeu.local_score('Z', parents=['X', 'Y'])) # #### Search strategies # The search space of DAGs is super-exponential in the number of variables and the above scoring functions allow for local maxima. The first property makes exhaustive search intractable for all but very small networks, the second prohibits efficient local optimization algorithms to always find the optimal structure. Thus, identifiying the ideal structure is often not tractable. Despite these bad news, heuristic search strategies often yields good results. 
# # If only few nodes are involved (read: less than 5), `ExhaustiveSearch` can be used to compute the score for every DAG and returns the best-scoring one: # + from pgmpy.estimators import ExhaustiveSearch es = ExhaustiveSearch(data, scoring_method=bic) best_model = es.estimate() print(best_model.edges()) print("\nAll DAGs by score:") for score, dag in reversed(es.all_scores()): print(score, dag.edges()) # - # Once more nodes are involved, one needs to switch to heuristic search. `HillClimbSearch` implements a greedy local search that starts from the DAG `start` (default: disconnected DAG) and proceeds by iteratively performing single-edge manipulations that maximally increase the score. The search terminates once a local maximum is found. # # # # + from pgmpy.estimators import HillClimbSearch # create some data with dependencies data = pd.DataFrame(np.random.randint(0, 3, size=(2500, 8)), columns=list('ABCDEFGH')) data['A'] += data['B'] + data['C'] data['H'] = data['G'] - data['A'] hc = HillClimbSearch(data, scoring_method=BicScore(data)) best_model = hc.estimate() print(best_model.edges()) # - # The search correctly identifies e.g. that `B` and `C` do not influnce `H` directly, only through `A` and of course that `D`, `E`, `F` are independent. # # # To enforce a wider exploration of the search space, the search can be enhanced with a tabu list. The list keeps track of the last `n` modfications; those are then not allowed to be reversed, regardless of the score. Additionally a `white_list` or `black_list` can be supplied to restrict the search to a particular subset or to exclude certain edges. The parameter `max_indegree` allows to restrict the maximum number of parents for each node. # # # ### Constraint-based Structure Learning # # A different, but quite straightforward approach to build a DAG from data is this: # # 1. Identify independencies in the data set using hypothesis tests # 2. 
# Construct DAG (pattern) according to identified independencies
#
# #### (Conditional) Independence Tests
#
# Independencies in the data can be identified using chi2 conditional independence tests. To this end, constraint-based estimators in pgmpy have a `test_conditional_independence(X, Y, Zs)`-method, that performs a hypothesis test on the data sample. It allows to check if `X` is independent from `Y` given a set of variables `Zs`:

# +
from pgmpy.estimators import ConstraintBasedEstimator

data = pd.DataFrame(np.random.randint(0, 3, size=(2500, 8)), columns=list('ABCDEFGH'))
data['A'] += data['B'] + data['C']
data['H'] = data['G'] - data['A']
data['E'] *= data['F']

est = ConstraintBasedEstimator(data)

print(est.test_conditional_independence('B', 'H'))          # dependent
print(est.test_conditional_independence('B', 'E'))          # independent
print(est.test_conditional_independence('B', 'H', ['A']))   # independent
print(est.test_conditional_independence('A', 'G'))          # independent
print(est.test_conditional_independence('A', 'G', ['H']))   # dependent
# -

# `test_conditional_independence()` returns a triple `(chi2, p_value, sufficient_data)`, consisting of the computed chi2 test statistic, the `p_value` of the test, and a heuristic flag that indicates if the sample size was sufficient. The `p_value` is the probability of observing the computed chi2 statistic (or an even higher chi2 value), given the null hypothesis that X and Y are independent given Zs.
#
# This can be used to make independence judgements, at a given level of significance:

# +
def is_independent(X, Y, Zs=None, significance_level=0.05):
    """Return True iff X is judged independent of Y given Zs.

    Compares the chi2 test's p-value against ``significance_level``: a high
    p-value means the null hypothesis of independence cannot be rejected.

    BUG FIX: the original returned the raw ``(chi2, p_value, sufficient_data)``
    tuple and ignored ``significance_level`` entirely, so it never produced the
    boolean judgement described above. The mutable ``Zs=[]`` default was also
    replaced by ``None`` (backward-compatible: ``None`` is normalised to ``[]``).
    """
    if Zs is None:
        Zs = []
    chi2, p_value, sufficient_data = est.test_conditional_independence(X, Y, Zs)
    return p_value >= significance_level


print(is_independent('B', 'H'))
print(is_independent('B', 'E'))
print(is_independent('B', 'H', ['A']))
print(is_independent('A', 'G'))
print(is_independent('A', 'G', ['H']))
# -

# #### DAG (pattern) construction
#
# With a method for independence testing at hand, we can construct a DAG from the data set in three steps:
# 1.
Construct an undirected skeleton - `estimate_skeleton()` # 2. Orient compelled edges to obtain partially directed acyclid graph (PDAG; I-equivalence class of DAGs) - `skeleton_to_pdag()` # 3. Extend DAG pattern to a DAG by conservatively orienting the remaining edges in some way - `pdag_to_dag()` # # Step 1.&2. form the so-called PC algorithm, see [2], page 550. PDAGs are `DirectedGraph`s, that may contain both-way edges, to indicate that the orientation for the edge is not determined. # # # # + skel, seperating_sets = est.estimate_skeleton(significance_level=0.01) print("Undirected edges: ", skel.edges()) pdag = est.skeleton_to_pdag(skel, seperating_sets) print("PDAG edges: ", pdag.edges()) model = est.pdag_to_dag(pdag) print("DAG edges: ", model.edges()) # - # The `estimate()`-method provides a shorthand for the three steps above and directly returns a `BayesianModel`: # # print(est.estimate(significance_level=0.01).edges()) # The `estimate_from_independencies()`-method can be used to construct a `BayesianModel` from a provided *set of independencies* (see class documentation for further features & methods): # + from pgmpy.independencies import Independencies ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D']) ind = ind.closure() # required (!) for faithfulness model = ConstraintBasedEstimator.estimate_from_independencies("ABCD", ind) print(model.edges()) # - # PC PDAG construction is only guaranteed to work under the assumption that the identified set of independencies is *faithful*, i.e. there exists a DAG that exactly corresponds to it. Spurious dependencies in the data set can cause the reported independencies to violate faithfulness. It can happen that the estimated PDAG does not have any faithful completions (i.e. edge orientations that do not introduce new v-structures). In that case a warning is issued. # # # ### Hybrid Structure Learning # # The MMHC algorithm [3] combines the constraint-based and score-based method. It has two parts: # # 1. 
Learn undirected graph skeleton using the constraint-based construction procedure MMPC # 2. Orient edges using score-based optimization (BDeu score + modified hill-climbing) # # We can perform the two steps seperately, more or less as follows: # + from pgmpy.estimators import MmhcEstimator from pgmpy.estimators import BDeuScore data = pd.DataFrame(np.random.randint(0, 3, size=(2500, 8)), columns=list('ABCDEFGH')) data['A'] += data['B'] + data['C'] data['H'] = data['G'] - data['A'] data['E'] *= data['F'] mmhc = MmhcEstimator(data) skeleton = mmhc.mmpc() print("Part 1) Skeleton: ", skeleton.edges()) # use hill climb search to orient the edges: hc = HillClimbSearch(data, scoring_method=BDeuScore(data)) model = hc.estimate(tabu_length=10, white_list=skeleton.to_directed().edges()) print("Part 2) Model: ", model.edges()) # - # `MmhcEstimator.estimate()` is a shorthand for both steps and directly estimates a `BayesianModel`. # # ### Conclusion # # This notebook aimed to give an overview of pgmpy's estimators for learning Bayesian network structure and parameters. For more information about the individual functions see their docstring documentation. If you used pgmpy's structure learning features to satisfactorily learn a non-trivial network from real data, feel free to drop us an eMail via the mailing list or just open a Github issue. We'd like to put your network in the examples-section! # # ### References # # [1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009 # # [2] Neapolitan, [Learning Bayesian Networks](http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks&#40;Neapolitan,%20Richard&#41;.pdf), 2003 # # [3] Tsamardinos et al., [The max-min hill-climbing BN structure learning algorithm](http://www.dsl-lab.org/supplements/mmhc_paper/paper_online.pdf), 2005 # #
notebooks/9. Learning Bayesian Networks from Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HigherLower Game # + #Biblioteca random para gerar escolhas randômicas import random #Biblioteca para apagar as informações da tela. from replit import clear # - #Base de dados do jogo data = [ { 'name': 'Instagram', 'follower_count': 346, 'description': 'Social media platform', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 215, 'description': 'Footballer', 'country': 'Portugal' }, { 'name': '<NAME>', 'follower_count': 183, 'description': 'Musician and actress', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 181, 'description': 'Actor and professional wrestler', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 174, 'description': 'Musician and actress', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 172, 'description': 'Reality TV personality and businesswoman and Self-Made Billionaire', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 167, 'description': 'Reality TV personality and businesswoman', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 149, 'description': 'Footballer', 'country': 'Argentina' }, { 'name': 'Beyoncé', 'follower_count': 145, 'description': 'Musician', 'country': 'United States' }, { 'name': 'Neymar', 'follower_count': 138, 'description': 'Footballer', 'country': 'Brasil' }, { 'name': 'National Geographic', 'follower_count': 135, 'description': 'Magazine', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 133, 'description': 'Musician', 'country': 'Canada' }, { 'name': '<NAME>', 'follower_count': 131, 'description': 'Musician', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 127, 'description': 'Reality TV personality and Model', 'country': 'United States' }, { 
'name': '<NAME>', 'follower_count': 119, 'description': 'Musician and actress', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 113, 'description': 'Musician', 'country': 'Trinidad and Tobago' }, { 'name': 'Nike', 'follower_count': 109, 'description': 'Sportswear multinational', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 108, 'description': 'Reality TV personality and businesswoman', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 107, 'description': 'Musician and actress', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 94, 'description': 'Musician', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 90, 'description': 'Reality TV personality', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 89, 'description': 'Comedian and actor', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 87, 'description': 'Comedian', 'country': 'United States' }, { 'name': 'Real Mad<NAME>', 'follower_count': 86, 'description': 'Football club', 'country': 'Spain' }, { 'name': 'FC Barcelona', 'follower_count': 85, 'description': 'Football club', 'country': 'Spain' }, { 'name': 'Rihanna', 'follower_count': 81, 'description': 'Musician and businesswoman', 'country': 'Barbados' }, { 'name': '<NAME>', 'follower_count': 80, 'description': 'Musician and actress', 'country': 'United States' }, { 'name': "<NAME>", 'follower_count': 69, 'description': 'Lingerie brand', 'country': 'United States' }, { 'name': 'Zendaya', 'follower_count': 68, 'description': 'Actress and musician', 'country': 'United States' }, { 'name': 'Shakira', 'follower_count': 66, 'description': 'Musician', 'country': 'Colombia' }, { 'name': 'Drake', 'follower_count': 65, 'description': 'Musician', 'country': 'Canada' }, { 'name': '<NAME>', 'follower_count': 64, 'description': 'Musician', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 63, 'description': 'Basketball 
player', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 62, 'description': 'Actor', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 67, 'description': 'Musician', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 82, 'description': 'Footballer', 'country': 'United Kingdom' }, { 'name': '<NAME>', 'follower_count': 61, 'description': 'Musician', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 59, 'description': 'Musician and actor', 'country': 'United States' }, { 'name': 'UEFA Champions League', 'follower_count': 58, 'description': 'Club football competition', 'country': 'Europe' }, { 'name': 'NASA', 'follower_count': 56, 'description': 'Space agency', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 56, 'description': 'Actress', 'country': 'United Kingdom' }, { 'name': '<NAME>', 'follower_count': 57, 'description': 'Musician', 'country': 'Canada' }, { 'name': '<NAME>', 'follower_count': 55, 'description': 'Cricketer', 'country': 'India' }, { 'name': '<NAME>', 'follower_count': 54, 'description': 'Model', 'country': 'United States' }, { 'name': '<NAME>', 'follower_count': 53, 'description': 'Actress and musician', 'country': 'India' }, { 'name': '9GAG', 'follower_count': 52, 'description': 'Social media platform', 'country': 'China' }, { 'name': 'Ronaldinho', 'follower_count': 51, 'description': 'Footballer', 'country': 'Brasil' }, { 'name': 'Maluma', 'follower_count': 50, 'description': 'Musician', 'country': 'Colombia' }, { 'name': '<NAME>', 'follower_count': 49, 'description': 'Musician', 'country': 'Cuba' }, { 'name': 'NBA', 'follower_count': 47, 'description': 'Club Basketball Competition', 'country': 'United States' } ] #Função para escolher aleatoriamente as pessoas a serem comparadas. def choose(data): '''Esta função irá escolher aleatoriamente, a partir da lista de dicionários colocados como input, uma pessoa. 
Deste indivíduo, irá guardar as informações disponíveis em uma variável e retornar uma lista com os valores des- tas variáveis.''' import random indice = random.randint(0,len(data)-1) name = data[indice]['name'] follower_count = data[indice]['follower_count'] description = data[indice]['description'] country = data[indice]['country'] return [name, follower_count, description, country] # + def playgame(): logo = """ __ ___ __ / / / (_)___ _/ /_ ___ _____ / /_/ / / __ `/ __ \/ _ \/ ___/ / __ / / /_/ / / / / __/ / /_/ ///_/\__, /_/ /_/\___/_/ / / /____/_ _____ _____ / / / __ \ | /| / / _ \/ ___/ / /___/ /_/ / |/ |/ / __/ / /_____/\____/|__/|__/\___/_/ """ vs = """ _ __ | | / /____ | | / / ___/ | |/ (__ ) |___/____(_) """ print(logo) #Escolhendo a pessoa a e guardando na variavel. A pessoa 'A' #ficou de fora do while pois quando a pessoa acertar, a pessoa 'A' será a pessoa com mais seguidores da última rodada. comparea = choose(data) score = 0 rungame = True while rungame == True: print(f'Compare A: {comparea[0]}, a {comparea[2]}, from {comparea[3]}') print(vs) compareb = choose(data) #Função para que não existam comparações entre a mesma pessoa. while comparea[1] == compareb[1]: compareb = choose(data) print(f'Compare B: {compareb[0]}, a {compareb[2]}, from {compareb[3]}') resposta = input("Who has more followers? Type 'A' or 'B': ").lower() #Verificando quem tem mais seguidores, se é a pessoa a ou a pessoa b. if comparea[1] > compareb[1]: answer = "a" else: answer = "b" clear() print(logo) #Comparando a resposta do usuário com a resposta final. if resposta == answer: score += 1 print(f"You're right! Current score: {score}") #Caso a pessoa b da rodada seja a que tem mais seguidores, a variável comparea recebe a compareb. Assim, #na próxima rodada, a pessoa a sempre será a pessoa com mais seguidores da última rodada. if comparea[1] < compareb[1]: comparea = compareb else: print(f"Sorry, that's wrong. Final score {score}") rungame = False playgame()
100 Days of Code The Complete Python Pro Bootcamp for 2022/Day 14 - HigherLower Game.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # * 前言 # * 开始准备用jupyter写推荐,强化学习和大数据相关的项目,打算通过这个方式激励学习实验能力,打发无聊时光。 # * 2019-01-07 # ### shell 交互 # files = !ls print files # directory = !pwd print type(directory), directory[0] # ### 风格变换 # !jt - l # !jt - t chesterish # !jt - r # ### grid表格操作 import numpy as np import pandas as pd dlist = list(range(10)) data = pd.DataFrame({'a': dlist, 'b': dlist}) data.head() import qgrid qgrid_widget = qgrid.show_grid(data, show_toolbar=True) qgrid_widget # ### 制图工具 Altair # + import altair as alt from vega_datasets import data alt.renderers.enable('notebook') iris = data.iris() alt.Chart(iris).mark_point().encode( x='petalLength', y='average(petalWidth)', color='species' ) # - iris.head()
jupyter_learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Pull Azure usage/cost details for one billing period via the
# azure-mgmt-consumption SDK, authenticating with a service principal,
# then aggregate the pre-tax cost with pandas and export to Excel.

from IPython.core.debugger import set_trace
import azure.mgmt.consumption
import adal
from msrestazure.azure_active_directory import AADTokenCredentials
from dotenv import load_dotenv, find_dotenv
import os

# Credentials are read from a .env file located by python-dotenv.
load_dotenv(find_dotenv())

# Parameters needed for the API
subscription = os.getenv("AZURE_SUBSCRIPTION_ID")
tenant = os.getenv("AZURE_TENANT_ID")
client_id = os.getenv("AZURE_CLIENT_ID")
client_secret = os.getenv("AZURE_CLIENT_SECRET")


def authenticate_client_key(tenant, client_id, client_secret):
    """
    Authenticate using service principal w/ key.

    tenant        -- AAD tenant id
    client_id     -- service principal application id
    client_secret -- service principal key
    Returns AADTokenCredentials usable by the Azure management SDK clients.
    """
    # Build the AAD authority URL for this tenant and request a token for
    # the ARM management endpoint.
    authority_host_uri = 'https://login.microsoftonline.com'
    authority_uri = authority_host_uri + '/' + tenant
    resource_uri = 'https://management.core.windows.net/'
    context = adal.AuthenticationContext(authority_uri, api_version=None)
    mgmt_token = context.acquire_token_with_client_credentials(resource_uri, client_id, client_secret)
    credentials = AADTokenCredentials(mgmt_token, client_id)
    return credentials


credentials = authenticate_client_key(tenant, client_id, client_secret)

client = azure.mgmt.consumption.ConsumptionManagementClient(credentials, subscription)

client.operations.list()

for op in client.operations.list():
    print(op.__dict__)

# Collect one dict per usage-detail record for the chosen billing period.
details = []
for detail in client.usage_details.list_by_billing_period(billing_period_name='201808-1', expand='properties/additionalProperties'):
    details.extend([detail.__dict__])

import pandas as pd
import numpy as np

df = pd.DataFrame(details)

df['billing_period_id'][0]

df

# Round to cents for reporting.
df['pretax_cost_rounded'] = df['pretax_cost'].astype(float).round(2)

df.billing_period_id.unique()

df.groupby(by = ['billing_period_id'])[['pretax_cost_rounded']].sum()

df.groupby(by = ['billing_period_id'])[['pretax_cost']].sum()

df

# Only EA subscriptions are supported for this request
sheet = client.price_sheet.get()

df.to_excel('invoice-201808-1.xlsx')
Python azure-mgmt-consumption.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # # Calvin and his magic wand # The solution to the following problem is proposed # # *Calvin has to cross several signals when he walks from his home to school. Each of these signals operate independently. They alternate every 80 seconds between green light and red light. At each signal, there is a counter display that tells him how long it will be before the current signal light changes. Calvin has a magic wand which lets him turn a signal from red to green instantaneously. However, this wand comes with limited battery life, so he can use it only for a specified number of times.* # # # a. *If the total number of signals is 2 and Calvin can use his magic wand only once, then what is the expected waiting time at the signals when Calvin optimally walks from his home to school?* # # # b. *What if the number of signals is 3 and Calvin can use his magic wand only once?* # # ## General assumptions # 1. The waiting time of the traffic light is independent (i.e. there is no sync mechanism for the traffic lights) and there is no knowledge of its probability distribution. Thus, the waiting time for each traffic light is uniform and independent. # 2. Calvin can optimize the waiting time by knowing how much he needs to wait in the traffic light. There is a certain optimum waiting time (L) he can use. If the traffic light is above this waiting time he decides to use the magic wand, if it is below, it is better to keep it for later # # Under a number of paths can appear, all of them have a certain probability (which depends on the parent path) and an associated waiting time. 
MAX_TIME = 80.  # a signal alternates every 80 seconds; max wait at a red light


class TrafficLightPath:
    """Recursive tree of traffic-light outcomes.

    Each node carries the probability ``p`` of reaching it, the expected
    waiting time ``T`` it contributes, the remaining wand uses ``Nw`` and
    the remaining lights ``Nl``.  Children enumerate the possible outcomes
    at the next light (green / wait out the red / use the magic wand).

    FIX (review): the original declared p, T, Nw, Nl, Lvec and
    childrenPaths as *class* attributes with mutable list defaults
    (shared state across instances); they are now plain instance
    attributes set in __init__.
    """

    def __init__(self, p, T, Nw, Nl, Lvec):
        """Create the node and recursively expand all child outcomes.

        p    -- probability of reaching this node
        T    -- expected waiting time contributed by this node
        Nw   -- remaining magic-wand uses
        Nl   -- remaining traffic lights ahead
        Lvec -- per-light thresholds: use the wand only if the displayed
                red-light countdown exceeds Lvec[i]
        """
        self.p = p
        self.T = T
        self.Nw = Nw
        self.Nl = Nl
        self.Lvec = [float(x) for x in Lvec]
        self.childrenPaths = []
        if self.getCurrentPosInPath() == -1:
            # Root of the tree: reached with certainty, no time spent yet.
            self.p = 1
            self.T = 0
        if Nl > 0:
            self.computeChildrenPaths()

    def computeChildrenPaths(self):
        """Expand the possible outcomes at the next traffic light."""
        if self.Nw >= self.Nl:
            # A wand for every remaining light: never wait again.
            self.childrenPaths.append(TrafficLightPath(1, 0, self.Nw - 1, self.Nl - 1, self.Lvec))
        else:
            if self.Nw == 0:
                # No wand left: 50% green (no wait), 50% red with a uniform
                # wait averaging MAX_TIME / 2 seconds.
                self.childrenPaths.append(TrafficLightPath(0.5, 0, self.Nw, self.Nl - 1, self.Lvec))
                self.childrenPaths.append(TrafficLightPath(0.5, MAX_TIME / 2, self.Nw, self.Nl - 1, self.Lvec))
            else:
                # Can decide whether to wait or to spend a wand.
                # Green light: no wait, keep the wand.
                self.childrenPaths.append(TrafficLightPath(0.5, 0, self.Nw, self.Nl - 1, self.Lvec))
                L = self.Lvec[self.getCurrentPosInPath() + 1]
                # Red with countdown <= L: wait it out (mean wait L/2).
                self.childrenPaths.append(TrafficLightPath(L / (2 * MAX_TIME), L / 2, self.Nw, self.Nl - 1, self.Lvec))
                # Red with countdown > L: use the wand, no wait.
                self.childrenPaths.append(TrafficLightPath((MAX_TIME - L) / (2 * MAX_TIME), 0, self.Nw - 1, self.Nl - 1, self.Lvec))

    def getCurrentPosInPath(self):
        """Index of this node's light in Lvec (-1 for the root node)."""
        return len(self.Lvec) - self.Nl - 1

    def printPath(self):
        """Pretty-print this node and its subtree with indentation.

        Uses single-argument print() so it runs under both Python 2
        (the notebook kernel) and Python 3.
        """
        pos = self.getCurrentPosInPath() + 2
        print('-' * 4 * pos + ' pos=' + str(pos - 2))
        print('-' * 4 * pos + ' p=' + str(self.p))
        print('-' * 4 * pos + ' T=' + str(self.T))
        print('-' * 4 * pos + ' Nw=' + str(self.Nw))
        print('-' * 4 * pos + ' Nl=' + str(self.Nl))
        print('-' * 4 * pos + ' L=' + str(self.Lvec[pos - 2]))
        print('-' * 4 * pos)
        for item in self.childrenPaths:
            item.printPath()

    def computeMeanWaitingTimes(self, a_total_time=None, prob_path=1):
        """Collect the p * prob_path * T contribution of every node in the subtree.

        FIX (review): the original used a mutable default (a_total_time=[]),
        so repeated direct calls accumulated results across invocations.
        """
        if a_total_time is None:
            a_total_time = []
        a_total_time.append(self.p * prob_path * self.T)  # this light's share
        new_prob_path = prob_path * self.p  # path probability for children
        for item in self.childrenPaths:
            a_total_time = item.computeMeanWaitingTimes(a_total_time=a_total_time, prob_path=new_prob_path)
        return a_total_time

    def computeTotalMeanWaitingTime(self):
        """Total expected waiting time over the whole path."""
        return sum(self.computeMeanWaitingTimes())


if __name__ == "__main__":
    # Demo: 2 lights, 1 wand, threshold 0 s (always zap the first red).
    path = TrafficLightPath(1, 0, 1, 2, [0, 0])
    path.printPath()
    print('Total waiting time ' + str(path.computeTotalMeanWaitingTime()) + ' seconds')

    # Sweep the threshold L for the 2-light / 1-wand case and plot the
    # expected waiting time to find the optimum.
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt

    TwL = pd.Series(index=np.arange(0, 80, 0.1))
    N_WANDS = 1
    N_LIGHTS = 2
    for i in TwL.index:
        path = TrafficLightPath(1, 0, N_WANDS, N_LIGHTS, [i, np.nan])
        TwL.loc[i] = path.computeTotalMeanWaitingTime()
    TwL.plot()
    plt.hlines(TwL.min(), 0, TwL.index[TwL == TwL.min()][0], linestyles='--')
    plt.vlines(TwL.index[TwL == TwL.min()][0], 0, TwL.min(), linestyles='--')
    plt.title('Mean waiting time')
    plt.xlabel('Time to wait for using the magic wand [s]')
    plt.ylabel('Total mean waiting time')
    plt.show()
    print('Optimal waiting time is ' + str(TwL.min()) + ' seconds.')
# - # One can see that the problem presents a multidimensional optimization problem, being the dimension the number of traffic lights minus 1. Now, let's optimize the case for 3 traffic lights: # + N_WANDS = 1 N_LIGHTS = 3 [L1vec, L2vec] = np.meshgrid(np.arange(0,80, 1), np.arange(0,80, 1)) Tw = np.zeros(L1vec.shape) for i in range(Tw.shape[0]): for j in range(Tw.shape[1]): path = TrafficLightPath(1,0, N_WANDS, N_LIGHTS, [L1vec[i,j], L2vec[i,j], np.nan]) Tw[i,j] = path.computeTotalMeanWaitingTime() min_ind = np.unravel_index(Tw.argmin(), Tw.shape) path = TrafficLightPath(1, 0, N_WANDS, N_LIGHTS,[L1vec[min_ind[0], min_ind[1]], L2vec[min_ind[0], min_ind[1]], np.nan]) if path.computeTotalMeanWaitingTime() == Tw.min(): print 'Min time = ' + str(Tw.min()) print 'L1 = ' + str(L1vec[min_ind[0], min_ind[1]]) print 'L2 = ' + str(L2vec[min_ind[0], min_ind[1]]) plt.imshow(Tw) plt.hlines(min_ind[0], 0, min_ind[1], linestyles='--') plt.vlines(min_ind[1], 0, min_ind[0], linestyles='--') plt.colorbar() plt.show() print 'Optimal waiting time is ' + str(Tw.min()) + ' seconds.' # -
Probability/Calvin.ipynb
def ret_lst(cpt):
    """Load the pickled request/evolution list for scenario ``cpt``.

    Reads ``data/listfile_evol<cpt>.data`` (binary pickle) and returns
    the deserialised object.
    """
    file_name = 'data/listfile_evol' + str(cpt) + '.data'
    with open(file_name, 'rb') as handle:
        contents = pickle.load(handle)
    return contents


def ret_nei(cpt):
    """Load the pickled neighbour table for scenario ``cpt``.

    Reads ``data/nei_tab_pos<cpt>.data`` (binary pickle) and returns
    the deserialised object.
    """
    file_name = 'data/nei_tab_pos' + str(cpt) + '.data'
    with open(file_name, 'rb') as handle:
        contents = pickle.load(handle)
    return contents
"fcnet_hiddens": [64, 64, 64], "fcnet_activation": "sigmoid", "vf_share_layers": False,#True, }, "lr": [1e-2], # try different lrs #"num_workers": 2, # parallelism #"framework": "torch" if args.torch else "tf", } self.save_dir = "~/ray_results" self.stop_criteria = { "training_iteration": stop_iters,#args.stop_iters, "timesteps_total": stop_timesteps,#args.c, "episode_reward_mean": stop_reward#args.stop_reward, } def train(self, algo): """ Train an RLlib IMPALA agent using tune until any of the configured stopping criteria is met. See https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run :return: Return the path to the saved agent (checkpoint) and tune's ExperimentAnalysis object See https://docs.ray.io/en/latest/tune/api_docs/analysis.html#experimentanalysis-tune-experimentanalysis """ if algo == "ppo": analysis = ray.tune.run(ppo.PPOTrainer, config=self.config_train, local_dir=self.save_dir, stop=self.stop_criteria, checkpoint_at_end=True) if algo == "ddpg": analysis = ray.tune.run(ddpg.DDPGTrainer, config=self.config_train, local_dir=self.save_dir, stop=self.stop_criteria, checkpoint_at_end=True) if algo == "a3c": analysis = ray.tune.run(a3c.A3CTrainer, config=self.config_train, local_dir=self.save_dir, stop=self.stop_criteria, checkpoint_at_end=True) if algo == "td3": analysis = ray.tune.run(ddpg.TD3Trainer, config=self.config_train, local_dir=self.save_dir, stop=self.stop_criteria, checkpoint_at_end=True) if algo == "appo": analysis = ray.tune.run(ppo.APPOTrainer, config=self.config_train, local_dir=self.save_dir, stop=self.stop_criteria, checkpoint_at_end=True) lr = analysis.get_best_config(metric='episode_reward_mean', mode="max")["lr"] fc_hid = analysis.get_best_config(metric='episode_reward_mean', mode="max")["model"]["fcnet_hiddens"] fc_act = analysis.get_best_config(metric='episode_reward_mean', mode="max")["model"]["fcnet_activation"] # list of lists: one list per checkpoint; each checkpoint list contains 1st the path, 2nd the metric value 
checkpoints = analysis.get_trial_checkpoints_paths(trial=analysis.get_best_trial('episode_reward_mean', mode = 'max'), metric='episode_reward_mean') # retriev the checkpoint path; we only have a single checkpoint, so take the first one df = analysis.results_df all_dataframes = analysis.trial_dataframes print("all_dataframes ===== : ", all_dataframes) print("type all_dataframes ===== : ", type(all_dataframes)) print("--------------------------------------") trials = analysis.trials print("trials ===== : ", trials) print("trials[0] ===== : ", trials[0]) print("len trials ===== : ", len(trials)) print("type trials ===== : ", type(trials)) #df = analysis.dataframe(metric="episode_reward_mean", mode="max") #print("df ===== : ", type(df)) dfs = analysis.trial_dataframes # Plot by epoch ax = None # This plots everything on the same plot for d in dfs.values(): ax = d.episode_reward_mean.plot(ax=ax, legend=False) plot.show() #dff = pd.DataFrame(df).set_index('Index') #print("dff ===== : ", type(dff)) #print("dff ===== : ", dff["trial_id"]) #print("dff ===== : ", dff["hist_stats/episode_reward"]) checkpoint_path = checkpoints[0][0] print("Checkpoint path:", checkpoint_path) return checkpoint_path, analysis, lr, fc_hid, fc_act def load(self, path): """ Load a trained RLlib agent from the specified path. Call this before testing a trained agent. :param path: Path pointing to the agent's saved checkpoint (only used for RLlib agents) """ self.agent = ppo.PPOTrainer(config=self.config) self.agent.restore(path) def test(self,algo, path, lr, fc_hid, fc_act): """Test trained agent for a single episode. 
Return the episode reward""" # instantiate env class unused_shared = [] unused_own = [] unsatisfied_shared = [] unsatisfied_own = [] episode_reward = 0 self.config_test["num_workers"] = 0 self.config_test["lr"] = lr self.config_test['model']["fcnet_hiddens"] = fc_hid self.config_test['model']["fcnet_activation"] = fc_act if algo == "ppo": self.agent = ppo.PPOTrainer(config=self.config_test) if algo == "ddpg": self.agent = ddpg.DDPGTrainer(config=self.config_test) if algo == "a3c": self.agent = a3c.A3CTrainer(config=self.config_test) if algo == "td3": self.agent = ddpg.TD3Trainer(config=self.config_test) if algo == "appo": self.agent = ppo.APPOTrainer(config=self.config_test) self.agent.restore(path) env = self.agent.workers.local_worker().env obs = env.reset() done = False while not done: action = self.agent.compute_action(obs) obs, reward, done, info = env.step(action) episode_reward += reward unused_shared.append(info["unused_shared"]) unused_own.append(info["unused_own"]) unsatisfied_shared.append(info["unsatisfied_shared"]) unsatisfied_own.append(info["unsatisfied_own"]) return episode_reward, unused_shared, unused_own, unsatisfied_shared, unsatisfied_own if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--stop-iters", type=int, default= 2)#50) parser.add_argument("--stop-timesteps", type=int, default=90000000) parser.add_argument("--stop-reward", type=float, default=0.001) parser.add_argument("--ttl_var", type=float, default=3) parser.add_argument("--cpt", type=float, default=1) parser.add_argument("--algo", type=str, default="ppo") ray.shutdown() ray.init(num_cpus=3)#num_cpus=2, num_gpus=0) args = parser.parse_args() # Class instance exper = customExperimentClass(args.ttl_var, args.cpt, [8,8,8,4], args.stop_iters) # ttl_var, cpt, variable # Train and save for 2 iterations checkpoint_path, results, lr, fc_hid, fc_act = exper.train(args.algo) 
print("------------------------------------------------------------------------------------") print("------------------------------------------------------------------------------------") print("------------------------------------------------------------------------------------") # Load saved #exper.load(checkpoint_path) # Test loaded """ reward, unused_shared ,unused_own, unsatisfied_shared, unsatisfied_own = exper.test(args.algo,checkpoint_path, lr, fc_hid, fc_act) print(" info[unused_shared] = ", unused_shared ) print(" info[unused_own] = ", unused_own ) print(" info[unsatisfied_shared] = ", unsatisfied_shared ) print(" info[unsatisfied_own] = ", unsatisfied_own ) print(" reward = ", reward ) """ """ config=dict( extra_config, **{ "env": "BreakoutNoFrameskip-v4" if args.use_vision_network else "CartPole-v0", # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0. "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")), "callbacks": { "on_train_result": check_has_custom_metric, }, "model": { "custom_model": "keras_q_model" if args.run == "DQN" else "keras_model" }, "framework": "tf", }) """ # - # %debug # !python customclass.py dfs = analysis.trial_dataframe
rllib/examples/custom.ipynb
def plot_corr_matrix(in_corr_matrix):
    """
    Plots the correlation matrix as a heatmap using seaborn

    in_corr_matrix: square pandas DataFrame, e.g. the output of df.corr()
    """
    sns.set(style = 'white')
    f, ax = plt.subplots(figsize = (12, 9))
    # Generate a mask for the upper triangle.
    # FIX (review): np.bool was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin bool is the documented, behavior-identical
    # replacement.
    corr_mask = np.zeros_like(in_corr_matrix, dtype = bool)
    corr_mask[np.triu_indices_from(corr_mask)] = True
    ax.set_title('CORRELATION MATRIX (%s FEATURES)' %len(in_corr_matrix),
                 fontsize = 14, fontweight = 'bold')
    sns.heatmap(in_corr_matrix, mask = corr_mask,
                cmap = sns.diverging_palette(220, 10, as_cmap=True),
                square = True, ax = ax, vmin = -1, vmax = 1)
def plot_feature_reduction_results(indf): """ Plots the feature reduction results to visualize the discarded features """ sns.set(style = 'whitegrid') f, ax = plt.subplots(figsize = (12, 9)) # TODO: Show x labels if they are visible (i.e., not too many) ax = sns.barplot(x = 'index', y = 'corr_with_y', hue = 'Survived', palette = sns.color_palette('Paired'), data = indf) ax.set_xticks([]) ax.set_xticklabels([]) ax.set_title('FEATURE REDUCTION RESULTS', fontsize = 14, fontweight = 'bold') plt.xlabel('Features', fontsize = 14) plt.ylabel('Correlation with Target', fontsize = 14) #plt.legend(fontsize = 14) plt.show() # The `multcolin` function perform feature reduction on the set of (valid) input features. # After performing some initial checks (e.g., empty dataframe) it discards any features that are not numeric. # The list of features that survive the feature reduction steps are returned by this function. def multcolin(indf, y_vals, min_vars_to_keep, corr_tol, condition_ind_tol, verbose, export_csv): """ Performs feature reduction on *numeric* features using: 1. Pairwise correlation analysis, followed by 2. Multi-collinearity analysis min_vars_to_keep: Stop further feature reduction if this threshold is met corr_tol: If the absolute correlation between two variables is higher, one of them will be dropped (the one that has high corr with target) condition_ind_tol: Used to detect high levels of multicollinearity To disable the pairwise correlation step, set corr_tol to 1.0 """ # If empty dataframe, raise an error if indf.shape[0] == 0: raise RuntimeError('The input dataframe is empty!') # Number of input features (original) in_col_ct = len(indf.columns) # Discard categorical vars, if any indf = indf.loc[:, indf.dtypes != object] num_col_ct = len(indf.columns) # If one or less (valid) column, print message and return if num_col_ct <= 1: print ('The input dataframe contains one or less column! 
\ Exiting function without executing.') return # If number of columns <= min_vars_to_keep, raise an error if num_col_ct <= min_vars_to_keep: raise RuntimeError('The number of valid features = min_vars_to_keep. \ Choose a higher value for min_vars_to_keep.') # If y_vals are not numeric, raise warning and return if type(y[0]) == 'object': raise ValueError('The target/output vector is non-numeric.') ## Everything looks good -- let's proceed! (aka the "happy path") # Print the number of discarded non-numeric features (if any) if in_col_ct != num_col_ct and verbose == True: print ("%s non-numeric feature(s) discarded" %(in_col_ct - num_col_ct)) # Correlation matrix for all independent vars corr_matrix = indf.corr() num_features = len(corr_matrix) # Export the initial correlation matrix for all input features if export_csv == True: corr_matrix.to_csv('initial_corr_matrix.csv') if verbose == 1: print (f'# of input vars = {num_features}', '\n') # Plot the initial correlation matrix plot_corr_matrix(corr_matrix) # Correlations with the target/output vector corr_with_y = {} for var in indf.columns: corr_with_y[var] = y_vals.corr(indf[var]) # Save those in a dataframe orig_vars_df = pd.DataFrame.from_dict([corr_with_y]).T orig_vars_df.columns = ['corr_with_y'] # For each column in the corr matrix print ('Running Pairwise Correlation Analysis') for col in corr_matrix: if col in corr_matrix.keys(): this_col, these_vars = [], [] for i in range(len(corr_matrix)): this_var = corr_matrix.keys()[i] this_corr = corr_matrix[col][i] if abs(this_corr) == 1.0 and col != this_var: highly_corr = 0 else: highly_corr = (1 if abs(this_corr) > corr_tol else -1) * abs(corr_with_y[this_var]) this_col.append(highly_corr) these_vars.append(corr_matrix.keys()[i]) # Initialize the mask mask = np.ones(len(this_col), dtype = bool) # To keep track of the number of columns deleted del_col_ct = 0 for n, j in enumerate(this_col): # Delete if (a) a var is correlated with others and do not have # the best 
corr with dep, or (b) completely corr with the 'col' mask[n] = not (j != max(this_col) and j >= 0) if j != max(this_col) and j >= 0: if verbose == 1: print (' Dropping %s {Corr with %s=%.5f}' %(these_vars[n], this_var, corr_matrix[col][n])) # Delete the column from corr matrix corr_matrix.pop('%s' %these_vars[n]) corr_with_y.pop('%s' %these_vars[n]) del_col_ct += 1 # Delete the corresponding row(s) from the corr matrix corr_matrix = corr_matrix[mask] if verbose == 1 and corr_tol != 1: print ('\n# of vars after eliminating high pairwise correlations =', len(corr_matrix), '\n') # Multicollinearity if num_features > min_vars_to_keep: print ('Running Multi-collinearity Analysis') while True: num_features -= 1 # Update the list of columns cols = corr_matrix.keys() # Eigen values and vectors eigen_vals, eigen_vectors = np.linalg.eig(corr_matrix) # Calculate the max of all conditinon indices c_ind = max((max(eigen_vals) / eigen_vals) ** 0.5) # If the condition index <= 30 then multicolin is not an issue if c_ind <= condition_ind_tol or num_features == min_vars_to_keep: break for i, val in enumerate(eigen_vals): if val == min(eigen_vals): # Min value, close to zero # Look into that vector this_eigen_vector = eigen_vectors[:, i] max_w = max(abs(this_eigen_vector)) for j, vec in enumerate(this_eigen_vector): # Var that has the max weight on that vector if abs(vec) == max_w: # Initialize mask = np.ones(len(corr_matrix), dtype = bool) for n, col in enumerate(corr_matrix.keys()): mask[n] = n != j #TODO: Also print the set of features # with which this var is correlated if verbose == 1: print (' Dropping %s {Weight=%.2f}' %(corr_matrix.keys()[j], max_w)) # Delete row corr_matrix = corr_matrix[mask] # Delete column corr_matrix.pop(cols[j]) if verbose == 1: print ('\n # of vars after multicolinearity analyis =', len(corr_matrix), '\n') # Export the final correlation matrix for the survivors if export_csv == True: corr_matrix.to_csv('final_corr_matrix.csv') # Survivors surv_vars = 
{k:v for (k,v) in corr_with_y.items() if k in corr_matrix.keys()} # Create a dataframe (to be exported) surv_vars_df = pd.DataFrame.from_dict([surv_vars]).T surv_vars_df.columns = ['Survived'] orig_vars_df = orig_vars_df.merge(surv_vars_df, left_index = True, right_index = True, how = 'left') orig_vars_df.loc[pd.isnull(orig_vars_df['Survived']) == False, 'Survived'] = 'Y' orig_vars_df['Survived'].fillna('N', inplace = True) orig_vars_df = orig_vars_df.reset_index().sort_values(by = 'corr_with_y', ascending = False) if export_csv == True: orig_vars_df.to_csv('multcolin_results.csv') # Plot the final correlation matrix plot_corr_matrix(corr_matrix) # Plot the feature reduction results plot_feature_reduction_results(orig_vars_df) return surv_vars # # Demonstration using the Boston Housing data # #### Details about the Boston Housing data can be found here: http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html # + from sklearn.datasets import load_boston boston = load_boston() print(boston.data.shape) # - # The `multcolin` function expects the dataset (X values) to be a dataframe with column names # So let's convert this numpy array into a Pandas dataframe X = pd.DataFrame(boston['data']) # And the y values must be a Series # So let's convert the target values into a Pandas Series y = pd.Series(boston['target']) # let's take a quick look at the data X.head() # The `multcolin` function also expects column names. Let's assign column names to this dataframe. X.columns = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B1000', 'LSTAT', 'MEDV'] # Now this dataset doesn't suffer from multi-collinearity (you can check this separately by using a piece of code from the `'multcolin` function that calculates the condition indices for each feature). So let's (try to) introduce some multicollinearity! 
# + # Create a new feature that is correlated with three other features X['EXTRA'] = .01*X['RAD'] + .02*X['ZN'] - .003*X['CRIM'] \ + np.random.uniform() # - # Let's perform feature reduction on this dataset by calling the `multcolin` function. # Please note that I've used condition_ind_tol = 5 just for this demo. # This value is TOO LOW! In practice, you should use a value between 15 and 30. (30 is the most commonly used value.) vars_to_keep = multcolin(X, y, min_vars_to_keep = 5, corr_tol = .95, condition_ind_tol = 5, verbose = 1, export_csv = 1) # Out of 14 input features, `ZN` was dropped because it has a high correlation with `EXTRA`. # Then three more features, `PTRATIO`, `RM`, and `RAD` were dropped because these variables caused multicollinearity in the data (they were correlated with two or more other variables). # The correlation matrix heatmaps demonstrates the reduction in pairwise correlations. While the Feature Reduction Results plot shows which features (with respect to where they are in terms of their correlation with the target) are discarded. This can be useful if you want to ensure that not too many of the important features (on either ends of the spectrum) are discarded.
multcolin.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Explore the original Waymo dataset # # In this notebook, we will perform an EDA (Exploratory Data Analysis) on the original Waymo dataset (downloaded data in the `raw` folder in the data directory `./data/waymo`). The data frames in the original tfrecord files contain the images of all five cameras, range images and lidar point clouds including ground truth annotations. # ## Open data set tree structure of a Waymo tfrecord # # The tree structure of an original Waymo tfrecord file is shown below: # # ``` # open_dataset # |-- LaserName # | |-- UNKNOWN # | |-- TOP # | |-- FRONT # | |-- SIDE_LEFT # | |-- SIDE_RIGHT # | `-- REAR # |-- CameraName # | |-- UNKNOWN # | |-- FRONT # | |-- FRONT_LEFT # | |-- FRONT_RIGHT # | |-- SIDE_LEFT # | `-- SIDE_RIGHT # |-- RollingShutterReadOutDirection # | |-- UNKNOWN # | |-- TOP_TO_BOTTOM # | |-- LEFT_TO_RIGHT # | |-- BOTTOM_TO_TOP # | |-- RIGHT_TO_LEFT # | `-- GLOBAL_SHUTTER # |-- Frame # | |-- images ⇒ list of CameraImage # | | |-- name (CameraName) # | | |-- image # | | |-- pose # | | |-- velocity (v_x, v_y, v_z, w_x, w_y, w_z) # | | |-- pose_timestamp # | | |-- shutter # | | |-- camera_trigger_time # | | `-- camera_readout_done_time # | |-- Context # | | |-- name # | | |-- camera_calibrations ⇒ list of CameraCalibration # | | | |-- name # | | | |-- intrinsic # | | | |-- extrinsic # | | | |-- width # | | | |-- height # | | | `-- rolling_shutter_direction (RollingShutterReadOutDirection) # | | |-- laser_calibrations ⇒ list of LaserCalibration # | | | |-- name # | | | |-- beam_inclinations # | | | |-- beam_inclination_min # | | | |-- beam_inclination_max # | | | `-- extrinsic # | | `-- Stats # | | |-- laser_object_counts # | | |-- camera_object_counts # | | |-- time_of_day # | | |-- location # | | `-- 
weather # | |-- timestamp_micros # | |-- pose # | |-- lasers ⇒ list of Laser # | | |-- name (LaserName) # | | |-- ri_return1 (RangeImage class) # | | | |-- range_image_compressed # | | | |-- camera_projection_compressed # | | | |-- range_image_pose_compressed # | | | `-- range_image # | | `-- ri_return2 (same as ri_return1) # | |-- laser_labels ⇒ list of Label # | |-- projected_lidar_labels (same as camera_labels) # | |-- camera_labels ⇒ list of CameraLabels # | | |-- name (CameraName) # | | `-- labels ⇒ list of Label # | `-- no_label_zones (Refer to the doc) # `-- Label # |-- Box # | |-- center_x # | |-- center_y # | |-- center_z # | |-- length # | |-- width # | |-- height # | `-- heading # |-- Metadata # | |-- speed_x # | |-- speed_y # | |-- accel_x # | `-- accel_y # |-- type # |-- id # |-- detection_difficulty_level # `-- tracking_difficulty_level # ``` # + import os import tensorflow as tf import math import numpy as np import itertools import pandas as pd import seaborn as sns import matplotlib.pyplot as plt #Used for creating the bouding boxes import matplotlib.patches as patches # Enable TensorFlow eager execution mode tf.compat.v1.enable_eager_execution() # Import waymoe open dataset utils from waymo_open_dataset.utils import range_image_utils from waymo_open_dataset.utils import transform_utils from waymo_open_dataset.utils import frame_utils from waymo_open_dataset import dataset_pb2 as open_dataset from utils import get_dataset from utils import get_label_map # - # %matplotlib inline # + # get object category index and object classname index category_idx, classname_idx = get_label_map(label_map_path='./label_map.pbtxt') print("Object category index: {}".format(category_idx)) print("Object classname index: {}".format(classname_idx)) # get number of availabel object classes num_of_object_classes = len(category_idx) print("Number of object classes: {}".format(num_of_object_classes)) # - #class_color_idx = {1: [0, 0, 1], 2: [1, 0, 0], 4: [0, 1, 0]} 
# Fixed plot color per Waymo object class id (1: vehicle, 2: pedestrian, 4: cyclist).
class_color_idx = {1: u'#1f77b4', 2: u'#ff7f0e', 4: u'#2ca02c'}

# create an array with class-based colors
custom_colors = list(class_color_idx.values())

# define custom color palette
custom_color_palette = sns.set_palette(sns.color_palette(custom_colors))

# ## Load Raw Data Set from TFRecord Files

tfrecord_path = "./data/waymo/raw/"
tfrecord_filelist = os.listdir(tfrecord_path)
print("Number of tfrecord files in filelist: {}".format(len(tfrecord_filelist)))


def get_statistics_from_tf_data_set(tfrecord_path, tfrecord_filelist, classname_idx):
    """ Get ground truth label statistics from a dataset for object detection
    in color images contained in a set of tfrecord files.

    One result row is produced per frame, holding the tfrecord/frame indices,
    the frame context (location, time of day, weather) and the per-class
    labeled object counts taken from the frame's camera statistics.

    Args:
        tfrecord_path [str]: filepath to the tfrecord file storage location
        tfrecord_filelist [list]: list of tfrecord files to be evaluated
        classname_idx [dict]: object class id indexed by object class name

    Returns:
        [pandas.DataFrame]: dataframe holding the frame statistics (one row per frame)
    """

    # column names for frame statistics
    frame_stats_columns = [
        'tfrecord_index',
        'tfrecord_id',
        'frame_index',
        'location',
        'time_of_day',
        'weather',
        'num_of_vehicles',
        'num_of_pedestrians',
        'num_of_cyclists',
    ]

    # init list of frame statistics
    frame_stats = []

    # number of tfrecord files to be evaluated
    num_of_files = len(tfrecord_filelist)

    # loop over all tfrecord files in the given directory
    for tfrecord_idx, tfrecord_file in enumerate(tfrecord_filelist):

        # load data set from the current tfrecord file
        dataset = tf.data.TFRecordDataset(os.path.join(tfrecord_path, tfrecord_file), compression_type='')

        # loop over all frames in the current data set and count the number of frames in tfrecord file
        for frame_idx, data in enumerate(dataset):

            # open next data frame
            frame = open_dataset.Frame()

            # convert the byte array to numpy dictionary
            frame.ParseFromString(bytearray(data.numpy()))

            # count the labeled object per frame and per class
            # NOTE(review): assignment (not +=) assumes camera_object_counts has at
            # most one entry per object type — confirm against the Waymo stats proto.
            obj_cnts_vehicles = 0
            obj_cnts_pedestrians = 0
            obj_cnts_cyclists = 0
            for obj_cnts in frame.context.stats.camera_object_counts:
                if obj_cnts.type == classname_idx['vehicle']:
                    obj_cnts_vehicles = obj_cnts.count
                if obj_cnts.type == classname_idx['pedestrian']:
                    obj_cnts_pedestrians = obj_cnts.count
                if obj_cnts.type == classname_idx['cyclist']:
                    obj_cnts_cyclists = obj_cnts.count

            # append evaluation results to list of frame statistics
            frame_stats.append([
                tfrecord_idx,
                frame.context.name,
                frame_idx,
                frame.context.stats.location,
                frame.context.stats.time_of_day,
                frame.context.stats.weather,
                obj_cnts_vehicles,
                obj_cnts_pedestrians,
                obj_cnts_cyclists,
            ])

        # count number of frames per tfrecord
        # NOTE(review): frame_idx is read after the loop — an empty tfrecord would
        # leave it undefined (or stale from the previous file); assumes every
        # tfrecord contains at least one frame.
        num_of_frames = frame_idx + 1

        # print tfrecord file name incl. number of contained frames
        print("tfrecord index: {} of {}".format(tfrecord_idx, num_of_files))
        print("tfrecord file: {}".format(tfrecord_file))
        print("number of frames: {}".format(num_of_frames))

    # create and return data frame holding the frame statistics
    return pd.DataFrame(frame_stats, columns=frame_stats_columns)


# get the frame statistics from tfrecord files
df_frame_stats = get_statistics_from_tf_data_set(tfrecord_path, tfrecord_filelist, classname_idx)
print(df_frame_stats.head())

# total number of frames in the data set
total_num_of_frames = len(df_frame_stats)
print("\nTotal number of frames: {}\n".format(total_num_of_frames))

# ### Object classes and object counts

# get total number of labeled objects in the data set
object_classes = ['vehicle', 'pedestrian', 'cyclist']
object_counts = [
    df_frame_stats.num_of_vehicles.sum(),
    df_frame_stats.num_of_pedestrians.sum(),
    df_frame_stats.num_of_cyclists.sum()
]
object_percentage = np.array(object_counts) / sum(object_counts) * 100

# Print results
print('Total number of labeled objects in the data set:')
print("- Vehicles: {0} ({1:5.2f}%)".format(object_counts[0], object_percentage[0]))
print("- Pedestrians: {0} ({1:5.2f}%)".format(object_counts[1], object_percentage[1]))
print("- Cyclists: {0} ({1:5.2f}%)".format(object_counts[2], object_percentage[2]))
print("- Objects: {0} (100%)".format(sum(object_counts))) f, axs = plt.subplots(1, 2, figsize=(15, 5)) sns.barplot(x=object_classes, y=object_counts, palette=custom_color_palette, ax=axs[0]) axs[0].set( xlabel='object classes', ylabel='object counts', title='Object counts per class over {} images'.format(total_num_of_frames), ); sns.barplot(x=object_classes, y=object_percentage, palette=custom_color_palette, ax=axs[1]) axs[1].set( xlabel='object classes', ylabel='object counts [%]', title='Percentual object counts per class over {} images'.format(total_num_of_frames), ); # + # plot pie graph f, ax = plt.subplots(1, 1, figsize=(7.5, 5)) ax.pie(object_counts, labels=object_classes, colors=custom_colors) # calculate distribution percentage print("Percentage of vehicles = {0:5.2f} %".format(object_percentage[0])) print("Percentage of pedestrian = {0:5.2f} %".format(object_percentage[1])) print("Percentage of cyclists = {0:5.2f} %".format(object_percentage[2])) # - # ### Frame counts per time of day, location and weather conditions # + # Frame counts per time of day, location and weather condition frame_counts_by_time_of_day = df_frame_stats.time_of_day.value_counts() frame_counts_by_location = df_frame_stats.location.value_counts() frame_counts_by_weather = df_frame_stats.weather.value_counts() # Print results print("\nFrame counts by time of day:\n{}".format(frame_counts_by_time_of_day)) print("\nFrame counts by loation:\n{}".format(frame_counts_by_time_of_day)) print("\nFrame counts by weather:\n{}".format(frame_counts_by_time_of_day)) # - # Plot distribution of time of day, location and weather conditions over all frames in the data set f, axs = plt.subplots(3, 2, figsize=(15, 15)) sns.barplot( x=frame_counts_by_time_of_day.index, y=frame_counts_by_time_of_day, palette=custom_color_palette, ax=axs[0, 0] ) axs[0, 0].set( xlabel='time of day', ylabel='frame counts', title='Frame counts per time of day over {} frames'.format(total_num_of_frames), ); sns.barplot( 
x=frame_counts_by_time_of_day.index, y=frame_counts_by_time_of_day/total_num_of_frames*100, palette=custom_color_palette, ax=axs[0, 1] ) axs[0, 1].set( xlabel='time of day', ylabel='frame counts [%]', title='Percentual frame counts per time of day over {} frames'.format(total_num_of_frames), ); sns.barplot( x=frame_counts_by_location.index, y=frame_counts_by_location, palette=custom_color_palette, ax=axs[1, 0] ) axs[1, 0].set( xlabel='location', ylabel='frame counts', title='Frame counts per location over {} frames'.format(total_num_of_frames), ); sns.barplot( x=frame_counts_by_location.index, y=frame_counts_by_location/total_num_of_frames*100, palette=custom_color_palette, ax=axs[1, 1] ) axs[1, 1].set( xlabel='location', ylabel='frame counts [%]', title='Percentual frame counts per location over {} frames'.format(total_num_of_frames), ); sns.barplot( x=frame_counts_by_weather.index, y=frame_counts_by_weather, palette=custom_color_palette, ax=axs[2, 0] ) axs[2, 0].set( xlabel='weather conditions', ylabel='frame counts', title='Frame counts per weather condition over {} frames'.format(total_num_of_frames), ); sns.barplot( x=frame_counts_by_weather.index, y=frame_counts_by_weather/total_num_of_frames*100, palette=custom_color_palette, ax=axs[2, 1] ) axs[2, 1].set( xlabel='weather conditions', ylabel='frame counts [%]', title='Percentual frame counts per weather condition over {} frames'.format(total_num_of_frames), ); # ### Object class frequency per frame # Plot histograms of object counts per frame over all frames in the data set f, axs = plt.subplots(2, 2, figsize=(15, 10)) sns.histplot( data=(df_frame_stats.num_of_vehicles + df_frame_stats.num_of_pedestrians + df_frame_stats.num_of_cyclists), kde=True, color="#03012d", ax=axs[0, 0]) axs[0, 0].grid() axs[0, 0].set( xlabel='number of objects per frame', ylabel='object counts', title='Object counts per frame over {} frames'.format(total_num_of_frames), ); sns.histplot( data=df_frame_stats.num_of_vehicles, kde=True, 
color=custom_colors[0], ax=axs[0, 1] ) axs[0, 1].grid() axs[0, 1].set( xlabel='number of vehicles per frame', ylabel='vehicle counts', title='Vehicle counts per frame over {} frames'.format(total_num_of_frames), ); sns.histplot( data=df_frame_stats.num_of_pedestrians, kde=True, color=custom_colors[1], ax=axs[1, 0] ) axs[1, 0].grid() axs[1, 0].set( xlabel='number of pedestrians per frame', ylabel='pedestrian counts', title='Pedestrian counts per frame over {} frames'.format(total_num_of_frames), ); sns.histplot( data=df_frame_stats.num_of_cyclists, kde=True, color=custom_colors[2],ax=axs[1, 1] ) axs[1, 1].grid() axs[1, 1].set( xlabel='number of cyclists per frame', ylabel='cyclist counts', title='Cyclist counts per frame over {} frames'.format(total_num_of_frames), ); # ## Analyse and Compare Training, Validation and Test Data Sub-Sets # + # get filenames of the training data set train_file_list = 'tfrecord_files_train.txt' with open(train_file_list, 'r') as f: train_filenames = f.read().splitlines() # get filenames of the validation data set val_file_list = 'tfrecord_files_val.txt' with open(val_file_list, 'r') as f: val_filenames = f.read().splitlines() # get filenames of the test data set test_file_list = 'tfrecord_files_test.txt' with open(test_file_list, 'r') as f: test_filenames = f.read().splitlines() # - # extract the tfrecord context ids from the filenames belonging to the training, validation and test data set train_tfrecord_ids = [fn.replace('segment-', '').replace('_with_camera_labels.tfrecord', '') for fn in train_filenames] val_tfrecord_ids = [fn.replace('segment-', '').replace('_with_camera_labels.tfrecord', '') for fn in val_filenames] test_tfrecord_ids = [fn.replace('segment-', '').replace('_with_camera_labels.tfrecord', '') for fn in test_filenames] # create separate data frames for the statistics of the training, validation and test data set df_frame_stats_train = df_frame_stats[df_frame_stats['tfrecord_id'].isin(train_tfrecord_ids)] 
df_frame_stats_val = df_frame_stats[df_frame_stats['tfrecord_id'].isin(val_tfrecord_ids)]
df_frame_stats_test = df_frame_stats[df_frame_stats['tfrecord_id'].isin(test_tfrecord_ids)]

# ### Frame counts per data sub-set

# get the number of frames in the training, validation and test data set (without applying downsampling)
num_of_frames_train = len(df_frame_stats_train)
num_of_frames_val = len(df_frame_stats_val)
num_of_frames_test = len(df_frame_stats_test)

# Print results
print('Total number of frames in the data sets (without applying downsampling):')
print("- Number of frames in the training set: {0}".format(num_of_frames_train))
print("- Number of frames in the validation set: {0}".format(num_of_frames_val))
print("- Number of frames in the test set: {0}".format(num_of_frames_test))
print("- Total number of frames: {0}".format(total_num_of_frames))

# ### Frame counts per time of day

# +
# Frame counts per time of day for training, validation and test data set (without applying downsampling)
# FIX: class labels and counts are now both taken from the same value_counts()
# result so bar labels stay aligned with their heights. Previously the labels
# came from unique() (appearance order) while the heights came from
# value_counts() (frequency order), which can mislabel the bars.
counts_by_time_of_day_train = df_frame_stats_train.time_of_day.value_counts()
time_of_day_classes_train = counts_by_time_of_day_train.index.to_numpy()
frame_counts_by_time_of_day_train = counts_by_time_of_day_train.to_numpy()
counts_by_time_of_day_val = df_frame_stats_val.time_of_day.value_counts()
time_of_day_classes_val = counts_by_time_of_day_val.index.to_numpy()
frame_counts_by_time_of_day_val = counts_by_time_of_day_val.to_numpy()
counts_by_time_of_day_test = df_frame_stats_test.time_of_day.value_counts()
time_of_day_classes_test = counts_by_time_of_day_test.index.to_numpy()
frame_counts_by_time_of_day_test = counts_by_time_of_day_test.to_numpy()

# Print results
print("\nFrame counts by time of day (without applying downsampling):\n")
print("Training data set:\n{}\n{}\n".format(time_of_day_classes_train, frame_counts_by_time_of_day_train))
print("Validation data set:\n{}\n{}\n".format(time_of_day_classes_val, frame_counts_by_time_of_day_val))
print("Test data set:\n{}\n{}\n".format(time_of_day_classes_test, frame_counts_by_time_of_day_test))
# -

# Bar plots: absolute (left column) and percentual (right column) frame counts
# per time of day for the training / validation / test sub-sets.
f, axs = plt.subplots(3, 2, figsize=(15, 15))
sns.barplot(x=time_of_day_classes_train, y=frame_counts_by_time_of_day_train,
            palette=custom_color_palette, ax=axs[0, 0])
axs[0, 0].set(
    xlabel='time of day classes',
    ylabel='time of day frame counts',
    title='Training set: Frame counts per time of day over {} images'.format(
        num_of_frames_train),
);
sns.barplot(x=time_of_day_classes_train, y=frame_counts_by_time_of_day_train/num_of_frames_train*100,
            palette=custom_color_palette, ax=axs[0, 1])
axs[0, 1].set(
    xlabel='time of day classes',
    ylabel='time of day frame counts [%]',
    title='Training set: Percentual frame counts per time of day over {} images'.format(
        num_of_frames_train),
);
sns.barplot(x=time_of_day_classes_val, y=frame_counts_by_time_of_day_val,
            palette=custom_color_palette, ax=axs[1, 0])
axs[1, 0].set(
    xlabel='time of day classes',
    ylabel='time of day frame counts',
    title='Validation set: Frame counts per time of day over {} images'.format(
        num_of_frames_val),
);
sns.barplot(x=time_of_day_classes_val, y=frame_counts_by_time_of_day_val/num_of_frames_val*100,
            palette=custom_color_palette, ax=axs[1, 1])
axs[1, 1].set(
    xlabel='time of day classes',
    ylabel='time of day frame counts [%]',
    title='Validation set: Percentual frame counts per time of day over {} images'.format(
        num_of_frames_val),
);
sns.barplot(x=time_of_day_classes_test, y=frame_counts_by_time_of_day_test,
            palette=custom_color_palette, ax=axs[2, 0])
axs[2, 0].set(
    xlabel='time of day classes',
    ylabel='time of day frame counts',
    title='Test set: Frame counts per time of day over {} images'.format(
        num_of_frames_test),
);
sns.barplot(x=time_of_day_classes_test, y=frame_counts_by_time_of_day_test/num_of_frames_test*100,
            palette=custom_color_palette, ax=axs[2, 1])
axs[2, 1].set(
    xlabel='time of day classes',
    ylabel='time of day frame counts [%]',
    title='Test set: Percentual frame counts per time of day over {} images'.format(
        num_of_frames_test),
);

# ### Object classes and object counts

# get total number of labeled objects in the training data set
object_counts_train = [
    df_frame_stats_train.num_of_vehicles.sum(),
    df_frame_stats_train.num_of_pedestrians.sum(),
    df_frame_stats_train.num_of_cyclists.sum()
]
object_percentage_train = np.array(object_counts_train) / sum(object_counts_train) * 100

# Print results
print('Total number of labeled objects in the training data set:')
print("- Vehicles: {0} ({1:5.2f}%)".format(object_counts_train[0], object_percentage_train[0]))
print("- Pedestrians: {0} ({1:5.2f}%)".format(object_counts_train[1], object_percentage_train[1]))
print("- Cyclists: {0} ({1:5.2f}%)".format(object_counts_train[2], object_percentage_train[2]))
print("- Objects: {0} (100%)".format(sum(object_counts_train)))
print("")

# get total number of labeled objects in the validation data set
object_counts_val = [
    df_frame_stats_val.num_of_vehicles.sum(),
    df_frame_stats_val.num_of_pedestrians.sum(),
    df_frame_stats_val.num_of_cyclists.sum()
]
object_percentage_val = np.array(object_counts_val) / sum(object_counts_val) * 100

# Print results
print('Total number of labeled objects in the validation data set:')
print("- Vehicles: {0} ({1:5.2f}%)".format(object_counts_val[0], object_percentage_val[0]))
print("- Pedestrians: {0} ({1:5.2f}%)".format(object_counts_val[1], object_percentage_val[1]))
print("- Cyclists: {0} ({1:5.2f}%)".format(object_counts_val[2], object_percentage_val[2]))
print("- Objects: {0} (100%)".format(sum(object_counts_val)))
print("")

# get total number of labeled objects in the test data set
object_counts_test = [
    df_frame_stats_test.num_of_vehicles.sum(),
    df_frame_stats_test.num_of_pedestrians.sum(),
    df_frame_stats_test.num_of_cyclists.sum()
]
object_percentage_test = np.array(object_counts_test) / sum(object_counts_test) * 100

# Print results
print('Total number of labeled objects in the test data set:')
print("- Vehicles: {0} ({1:5.2f}%)".format(object_counts_test[0], object_percentage_test[0]))
print("- Pedestrians: {0} ({1:5.2f}%)".format(object_counts_test[1], object_percentage_test[1]))
print("- Cyclists: {0} ({1:5.2f}%)".format(object_counts_test[2], object_percentage_test[2]))
print("- Objects: {0} (100%)".format(sum(object_counts_test)))

# Bar plots: absolute and percentual object counts per class for each sub-set.
f, axs = plt.subplots(3, 2, figsize=(15, 15))
sns.barplot(x=object_classes, y=object_counts_train, palette=custom_color_palette, ax=axs[0, 0])
axs[0, 0].set(
    xlabel='object classes',
    ylabel='object counts',
    title='Training set: Object counts per class over {} images'.format(num_of_frames_train),
);
sns.barplot(x=object_classes, y=object_percentage_train, palette=custom_color_palette, ax=axs[0, 1])
axs[0, 1].set(
    xlabel='object classes',
    ylabel='object counts [%]',
    title='Training set: Percentual object counts per class over {} images'.format(num_of_frames_train),
);
sns.barplot(x=object_classes, y=object_counts_val, palette=custom_color_palette, ax=axs[1, 0])
axs[1, 0].set(
    xlabel='object classes',
    ylabel='object counts',
    title='Validation set: Object counts per class over {} images'.format(num_of_frames_val),
);
sns.barplot(x=object_classes, y=object_percentage_val, palette=custom_color_palette, ax=axs[1, 1])
axs[1, 1].set(
    xlabel='object classes',
    ylabel='object counts [%]',
    title='Validation set: Percentual object counts per class over {} images'.format(num_of_frames_val),
);
sns.barplot(x=object_classes, y=object_counts_test, palette=custom_color_palette, ax=axs[2, 0])
axs[2, 0].set(
    xlabel='object classes',
    ylabel='object counts',
    title='Test set: Object counts per class over {} images'.format(num_of_frames_test),
);
sns.barplot(x=object_classes, y=object_percentage_test, palette=custom_color_palette, ax=axs[2, 1])
axs[2, 1].set(
    xlabel='object classes',
    ylabel='object counts [%]',
    title='Test set: Percentual object counts per class over {} images'.format(num_of_frames_test),
);

# ### Object class frequency per frame

# Plot histograms of object counts per frame over all frames in the data set
f, ax = plt.subplots(1, 1, figsize=(15, 7))
sns.histplot(
    data=(df_frame_stats_train.num_of_vehicles + \
          df_frame_stats_train.num_of_pedestrians + \
          df_frame_stats_train.num_of_cyclists),
    kde=True, color="blue", label="training set", ax=ax)
sns.histplot(
    data=(df_frame_stats_val.num_of_vehicles + \
          df_frame_stats_val.num_of_pedestrians + \
          df_frame_stats_val.num_of_cyclists),
    kde=True, color="green", label="validation set", ax=ax)
sns.histplot(
    data=(df_frame_stats_test.num_of_vehicles + \
          df_frame_stats_test.num_of_pedestrians + \
          df_frame_stats_test.num_of_cyclists),
    kde=True, color="red", label="test set", ax=ax)
ax.set(
    xlabel='number of objects per frame',
    ylabel='object counts',
    title='Object counts per frame over {} frames distributed over training, validation and test set'.format(
        total_num_of_frames),
)
ax.grid()
ax.legend();

# Plot histograms of vehicle counts per frame over all frames in the data set
f, ax = plt.subplots(1, 1, figsize=(15, 7))
sns.histplot(
    data=df_frame_stats_train.num_of_vehicles, kde=True, color="blue", label="training set", ax=ax)
sns.histplot(
    data=df_frame_stats_val.num_of_vehicles, kde=True, color="green", label="validation set", ax=ax)
sns.histplot(
    data=df_frame_stats_test.num_of_vehicles, kde=True, color="red", label="test set", ax=ax)
ax.set(
    xlabel='number of vehicles per frame',
    ylabel='object counts',
    title='Vehicle counts per frame over {} frames distributed over training, validation and test set'.format(
        total_num_of_frames),
)
ax.grid()
ax.legend();

# Plot histograms of pedestrian counts per frame over all frames in the data set
f, ax = plt.subplots(1, 1, figsize=(15, 7))
sns.histplot(
    data=df_frame_stats_train.num_of_pedestrians, kde=True, color="blue", label="training set", ax=ax)
sns.histplot(
    data=df_frame_stats_val.num_of_pedestrians, kde=True, color="green", label="validation set", ax=ax)
sns.histplot(
    data=df_frame_stats_test.num_of_pedestrians, kde=True, color="red", label="test set", ax=ax)
ax.set(
    xlabel='number of pedestrians per frame',
    ylabel='object counts',
    title='Pedestrian counts per frame over {} frames distributed over training, validation and test set'.format(
        total_num_of_frames),
)
ax.grid()
ax.legend();

# Plot histograms of cyclist counts per frame over all frames in the data set
f, ax = plt.subplots(1, 1, figsize=(15, 7))
sns.histplot(
    data=df_frame_stats_train.num_of_cyclists, kde=True, color="blue", label="training set", ax=ax)
sns.histplot(
    data=df_frame_stats_val.num_of_cyclists, kde=True, color="green", label="validation set", ax=ax)
sns.histplot(
    data=df_frame_stats_test.num_of_cyclists, kde=True, color="red", label="test set", ax=ax)
ax.set(
    xlabel='number of cyclists per frame',
    ylabel='object counts',
    title='Cyclist counts per frame over {} frames distributed over training, validation and test set'.format(
        total_num_of_frames),
)
ax.grid()
ax.legend();
# Exploratory_Data_Analysis_of_Original_Waymo_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ARE 106 Summer Session II # # Homework 3 # # This homework will be due on **August 29th at 2pm** # ## Name: # ## SSID: # Please put your name and SSID in the corresponding cells above. # # The homework is worth 13.5 points. # # For each of the following questions, show as much of your steps as you can (without going overboard). If you end up getting the wrong answer, but we spot where you made a mistake in the algebra, partial credit will be more readily given. If you only put the final answer, you will be marked either right or wrong. # # Answer questions in the correct cell. For problems where you have to input math, make sure that you know that it's a markdown cell (It won't have a `In: []` on the left) and make sure you run the cell by either pressing `Ctrl + Enter` or going to `Cell -> Run Cell`. Alternatively, write all your answers and then go to `Cell -> Run All Cells` after you're done. # # Please ignore cells that read `\pagebreak`. These are so your document converts to PDF in a way that will make it possible to grade your homework. Ignore them and only write your answers where it is specified. # # **When you are finished export your homework to a PDF by going to `File -> Download as -> PDF`.** # + active="" # \pagebreak # # - # ## Exercise 1: Single Regression # **Please don't forget to comment your code. Failure to do so will result in a loss of points.** # # Also remember that all code that is required here (unless otherwise stated) can be found in the lecture Jupyter Notebooks or the coding notebooks from class. # Here are three models for the median starting salary of law school graduates in 1985. 
# # # \begin{gather} # log(salary_i) = b_0 + b_1 LSAT_i + e_i \\ # log(salary_i) = b_0 + b_1 LSAT_i + b_2 GPA_i + e_i \\ # log(salary_i) = b_0 + b_1 LSAT_i + b_2 GPA_i + b_3 log(cost_i) + b_4 rank_i + e_i # \end{gather} # # # Each observation $i$ represents a school. # # The variables in the dataset are: # # # | | Variable | Description | # |---------|---------------|---------------| # | 1. | rank |law school ranking | # | 2. | salary |median starting salary| # | 3. | cost |law school cost| # | 4. | LSAT |median LSAT score| # | 5. | GPA |median college GPA| # | 6. | libvol |no. volumes in lib., 1000s| # | 7. | faculty |no. of faculty| # | 8. | age |age of law sch., years| # | 9. | clsize |size of entering class| # | 10. | north |=1 if law sch in north| # | 11. | south |=1 if law sch in south| # | 12. | east |=1 if law sch in east| # | 13. | west |=1 if law sch in west| # | 14. | studfac |student-faculty ratio| # | 15. | top10 |=1 if ranked in top 10| # | 16. | r11_25 |=1 if ranked 11-25| # | 17. | r26_40 |=1 if ranked 26-40| # | 18. | r41_60 |=1 if ranked 41-60| # + active="" # \pagebreak # # - # a. In the code cell below, write the appropriate imports you will need for this question (we will need `pandas`, `numpy` and `statsmodels.formula.api`). You can do an abbreviated import if you wish (but the standard for `pandas` is `pd`, `statsmodels.formula.api` is `smf`, and `numpy` is `np`). Afterwards, load in the data from here: # # https://raw.githubusercontent.com/lordflaron/ARE106data/master/lawsch85.csv # # This can be done using the `read_csv()` function. Name this dataset `raw_df`. After loading in the data, show the first *10* observations in the output. # + ## a. Put your answer in this cell. 
## import libraries
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf

## import data
raw_df = pd.read_csv("https://raw.githubusercontent.com/lordflaron/ARE106data/master/lawsch85.csv")
## show the first 10 observations, as the question asks
raw_df.head(10)

# +
import pandas as pd
raw_df = pd.read_csv("https://raw.githubusercontent.com/lordflaron/ARE106data/master/lawsch85.csv")
raw_df[['salary', 'LSAT']].plot()

# + active=""
# \pagebreak
#
# -

# b. Use the `describe()` method on `raw_df` to show a table of summary statistics for each variable in the dataset. How many observations does $salary_i$ have? Write this in a print statement. (Hint: This is in the "count" row the summary table).

# +
## b. Put your answer in this cell.
## describe the data
raw_df.describe()
print("there are 156 observations in salary")

# + active=""
# \pagebreak
#
# -

# c. Since we'll need a log-transformed version of $salary_i$ for all our models, use `assign()` to create a new variable which is the log of $salary_i$. Name this new variable `log_salary`.
#
# Hints:
#
# Remember that assign is not an *inplace* operation!
#
# Remember to use a lambda function in this case. To log a variable, you can use `np.log()`
#
# Remember the syntax for `assign()`:
#
# ```
# my_df.assign(new_variable = expression)
# ```
#
# After this we now need to also drop any observations that are missing. This isn't actually how econometricians deal with missing data, but this is good enough for us for now.
#
# You can do this by chaining the `dropna()` method after the `assign()` method.
#
# **Warning: Do not do `dropna` BEFORE `assign`**
#
# The end result should look something like this:
#
# ```
# df = raw_df.assign(log_salary= expression).dropna()
# ```

# + active=""
# \pagebreak
#
# -

## c. Put your answer in this cell.
df = raw_df.assign(log_salary = lambda x: np.log(x['salary'])).dropna()

# + active=""
# \pagebreak
#
# -

# d. Before estimating the model, explain how to interpret $b_1$ in Model 1.

# **Please write your answer for d here. If you need to use more than one line, you may do so.**
#
# We interpret $b_1$ here as the effect of one extra unit of LSAT score on the log of median salary.
#

# e. Before estimating the model, explain how to interpret $b_1$ in Model 2.

# **Please write your answer for e here. If you need to use more than one line, you may do so.**
#
# In this case, we interpret this as one extra unit of LSAT score on the log of median salary, *keeping GPA unchanged*.
#

# f. Before estimating the model, do you expect $b_1$ and $b_2$ to be positive or negative in Model 2? Explain.
#
# (Hint: I'm not asking for any rigorous mathematical way to answer this question. Just use your economic intuition and reasoning skills to write an argument).

# **Please write your answer for f here. If you need to use more than one line, you may do so.**
#
# It would make sense that since a higher LSAT score would lead to getting into a better law school and thus a higher median salary, The effect of LSAT should then probably be positive. Since a higher GPA would also lead to higher achievement in learning law, and excelling as a lawyer, we would also expect it to have a positive effect.
#

# + active=""
# \pagebreak
#
# -

# g. Estimate Model 1. Show the regression output.

# +
## g. Put your answer in this cell.
## FIX: Model 1 is the single regression of log(salary) on LSAT only.
## The previous formula ('np.log(salary) ~ LSAT + GPA') estimated Model 2,
## which contradicts the answers for (h) (0.0475) and (i) (R^2 = 0.594).
mod = smf.ols('log_salary ~ LSAT', data=df) ## Like writing down the equation
results = mod.fit() ## Like doing the minimization problem
results.summary() ## Computing the numbers and showing in a table

# + active=""
# \pagebreak
#
# -

# h. What is the effect of a one unit increase in LSAT score on the log of median salary?

# **Please write your answer for h here. If you need to use more than one line, you may do so.**
#
# The effect is 0.0475
#

# i. What does the $R^2$ measure in the regression? What is the $R^2$ in this case? (Not the adjusted $R^2$).

# **Please write your answer for i here. If you need to use more than one line, you may do so.**
#
# The $R^2$ measures how much of the variability in $log_salary$ is explained by the model. The $R^2$ in this case is 0.594
#

# + active=""
# \pagebreak
#
# -

# ## Exercise 2: Multiple Regression
#
# This is a continuation of what we were doing in Exercise 1.
#
# For this exercise, observe the expression for $b_1$ when there are two regressors in the equation:
#
# $$
# \hat{b_1} = \frac{\sum_i^N x_{1i} y_i \sum_i^N x_{2i}^2 - \sum_i^N x_{1i} x_{2i} \sum_i^N x_{2i} y_i}{\sum_i^N x_{1i}^2 \sum_i^N x_{2i}^2 - \left(\sum_i^N x_{1i} x_{2i} \right)^2}
# $$
#
# Hint: Notice that each of these terms in the equation look similar to either covariances or variances (in fact if you multiply the denominator and numerator by $\frac{1}{N^2}$ then they are in fact variances and covariances without changing the value of the coefficient (since $\frac{\frac{1}{N^2}}{\frac{1}{N^2}}$ is 1).
#
# Also notice that the covariance is like an **un-normalized correlation coefficient**. So if you calculate the correlation between two variables, you won't know the covariance between the two, but you'll know the direction and strength of their relationship.

# + active=""
# \pagebreak
#
# -

# a. Estimate Model 2. Show the regression output.

# +
## a. Put your answer in this cell.
mod = smf.ols('log_salary ~ LSAT +GPA ', data=df) ## Like writing down the equation
results = mod.fit() ## Like doing the minimization problem
results.summary() ## Computing the numbers and showing in a table

# + active=""
# \pagebreak
#
# -

# b. Calculate the correlations between $log(salary)_i$, $LSAT_i$ and $GPA_i$.
#
# Use the slicing notation to first make a subset of the data with only log_salary, LSAT and GPA.
#
# Then use the `corr()` method to get the correlation for those variables, i.e. it will look something like this:
#
# `df[['log_salary', 'GPA', 'LSAT']].corr()`
#
# This will give a matrix where you can see correlation between variables. (Note: correlation of a variable with itself is always 1).

# +
## b. Put your answer in this cell.
## Calculating correlation matrix
df[['log_salary', 'GPA', 'LSAT']].corr()

# + active=""
# \pagebreak
#
# -

# c. Using you answer from (b) and the expression for $\hat{b_1}$ above, answer this question:
#
# Why is $b_1$ in Model 2 different from $b_1$ in Model 1?

# **Please write your answer for c here. If you need to use more than one line, you may do so.**
#
# In this case since the correlations between $GPA$ and $LSAT$ are positive and $GPA$ and $log_salary$ is positive, this leads to lowering the coefficient on $LSAT$ to 0.0329.
#

# d. Why is the $R^2$ in Model 2 higher than Model 1? (Not the adjusted $R^2$).

# **Please write your answer for d here. If you need to use more than one line, you may do so.**
#
# Since we added more coefficients to the model, the $R^2$ has gone up to 0.631
#

# + active=""
# \pagebreak
#
# -

# e. Estimate Model 3. Show the regression output.
#
# Hint: One of the extra regressors in Model 3 is log-transformed. Instead of doing another `assign()` call, run this regression by explicitly logging the variable in the `patsy` formula. Use `np.log()` to do this.

# +
## e. Put your answer in this cell.
mod = smf.ols('log_salary ~ LSAT +GPA +np.log(cost) + rank', data=df) ## Like writing down the equation
results = mod.fit() ## Like doing the minimization problem
results.summary() ## Computing the numbers and showing in a table

# + active=""
# \pagebreak
#
# -

# f. Suppose School A and School B have the same values for all the variables on the right hand side in Model 3, except School A is ranked 10 places higher than School B. What is the predicted difference in log median salary between the two schools?
#
# This question can be answered by simply printing out the math you did in a print statement using an `f-string`.

# +
## f. Put your answer in this cell.
print(f"It would be {-10*(-.0034)}") # + active="" # \pagebreak # # - # ## Exercise 3: Multicollinearity # # a. Re-estimate Model 1, except add north, south, east, and west as the additional right hand side variables. # + ## Exercise 3. Put your answer in this cell. mod = smf.ols('log_salary ~ LSAT +north+west+south+east ', data=df) ## Like writing down the equation results = mod.fit() ## Like doing the minimization problem results.summary() ## Computing the numbers and showing in a table # + active="" # \pagebreak # # - # b. What is wrong with this regression? What happens when you estimate it? How could fix this problem? # # Hint: Look at the warnings underneath the regression. # **Please write your answer for b here. If you need to use more than one line, you may do so.** # # The warnings tell us that there is evidence of multicollinearity. This is due to the fact that we have included all four types of direction variables and we know that $north+west+east+south=1$. A way to fix this problem would be to omit one of these variables. # # + active="" # \pagebreak # # - # ## Exercise 4: Auxiliary Regression # Consider the following two regressions: # # \begin{gather*} # LSAT_i = a_0 + a_1 GPA_i + v_i \\ # log(salary_i) = b_0 + b_1 v_i + e_i # \end{gather*} # # a. Estimate $b_1$. This is a two-step process. First, you need to estimate the first regression model and save the errors. Then, you regress $log(salary_i)$ on those errors ($v_i$). Compare your estimate of $b_1$ to the estimate you found from Model 2. Explain the similarity or difference. # # In order to do this, you need to save the errors (also called residuals) after you run the first stage. In order to do this, after fitting the first stage, the `results` variable will have an attribute `resid`. So to call the residuals all you need to do is type this: `results.resid`. # # You can then run the second stage in one of two ways: # # - 1. 
`assign` a new variable to your data, called "residuals" and run a regresion with it like any other variable, or # - 2. Directly call `results.resid` in your second stage's `patsy formula`, i.e, ` 'log_salary ~ results.resid'` # + active="" # \pagebreak # # + ## a. Put your answer in this cell. ## First stage regression mod = smf.ols('LSAT ~ GPA', data=df) results = mod.fit() ## Second stage regression using residuals from first stage. results.summary() mod2 = smf.ols('log_salary ~ results.resid', data=df) mod2.fit().summary() # + active="" # \pagebreak # # - # b. What do you notice from the coefficient on this regression, versus the one in Model 1? # **Please write your answer for b here. If you need to use more than one line, you may do so.** # # The coefficient on this regression is the same as $b_1$ from model 1. # + active="" # \pagebreak # # - # ## Exercise 5: Back to $R^2$ # # Suppose that we have an estimated regression model $y_i = \hat{b}_0 + \hat{b}_1 x_i +e_i$, where $\hat{b}_0,\hat{b}_1$ are estimated OLS coefficients. Let $\hat{y}_i = \hat{b}_0 + \hat{b}_1 x_i$, so that: # # $$ # y_i = \hat{y}_i + e_i # $$ # # Let's look at the next step of solving this problem in order to finally get at solving a mystery we've had during the class. # # If we wanted to solve for the $R^2$, we would use the fact that a way to understand the variability in $y_i$ is to look at its variance. And we already know that: # # $$ # Var(y_i) = Var(\hat{y_i} + e_i) = Var(\hat{y_i}) + Var(e_i) + 2Cov(\hat{y_i}, e_i) # $$ # # Up until now, we've just assumed it to be true that $Cov(\hat{y_i}, e_i)$ was 0 and it allowed us to finish the proof. But all along, we've been implicitly assuming a Gauss-Markov assumption in order to make that claim. # # Which of the Gauss-Markov assumptions do we need in order to say that $Cov(\hat{y_i}, e_i)=0$? 
# # **Hint: Don't forget that you can express the covariance in terms of expectations.** # # **Hint: Try plugging in $\hat{y}_i = \hat{b}_0 + \hat{b}_1 x_i$ into this expression and seeing what you end up with.** # # **Hint: Don't forget that $\sum_i^N e_i=0$** # + active="" # \pagebreak # # - # **Please write your answer for exercise 5 here. If you need to use more than one line, you may do so.** # # We can rewrite the the covariance like so: # # $$ # Cov(\hat{y_i}, e_i) = E\left[(\hat{y_i} - \bar{y})(e_i - \bar{e})\right] # $$ # # Knowing that $\bar{e}=0$, we can see that: # # $$ # Cov(\hat{y_i}, e_i) = E(\hat{y_i}e_i) -E(\bar{y}e_i) # $$ # # Since we know that $\bar{y}E(e_i)=0$, we can plug in $\hat{y_i}$ into the rest and get: # # $$ # Cov(\hat{y_i}, e_i) = E((\hat{b_0} + \hat{b_1} x_i)e_i) # $$ # # Distributing $e_i$ and the expectation operator brings us to: # # $$ # Cov(\hat{y_i}, e_i) = E(\hat{b_0} e_i) + E(\hat{b_1} x_i e_i) # $$ # # Again, we know that $\hat{b_0}E(e_i)=0$, so we are left with: # # $$ # Cov(\hat{y_i}, e_i) = \hat{b_1}E(x_i e_i) # $$ # # From CR5, we know that $E(x_i e_i)=0$, so we are done. # + active="" # \pagebreak # # - # ## Exercise 6: Data Types # Let's say that we have a population model: # # $$ # Y = \beta_0 + \beta_1 X + \epsilon # $$ # # The subscripts for the variables have been purposely omitted. For each part, rewrite the model so that it corresponds to each data type and explain why you wrote it that way. # # - a. Cross-section # - b. Time Series # - c. Panel # + active="" # \pagebreak # # - # **Please write your answer for exercise 6 here. If you need to use more than one line, you may do so.** # # - a. In this case, we have $Y_i = \beta_0 + \beta_1 X_i + \epsilon_i$, where we are following several subjects $i$ in a given time. # - b. Here we have $Y_t = \beta_0 + \beta_1 X_t + \epsilon_t$, so we are following the same subject across time. # - c. 
Now we have $Y_{it} = \beta_0 + \beta_1 X_{it} + \epsilon_{it}$, where we are following several subjects across time. #
Homework/Homework 3/HW3_answers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Comtypes
#
# Comtypes is a library to interact with software that has registered a COM
# (component object model) interface. The COM interface is stored centrally in
# Windows, and can be accessed from any software through a unified API. Many
# programs have COM interfaces. Previously, it was also known as ActiveX.
#
# https://pythonhosted.org/comtypes/

from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np

# Open `comtypes.xlsx`

# +
from comtypes.client import CreateObject

filename = "comtypes.xlsx"
filepath = (Path(".") / filename).absolute()

app = CreateObject("Excel.Application")
app.Visible = True

wb = app.Workbooks.Open(str(filepath))
worksheet = wb.Sheets[1]
# -

# Read some data

# Excel cells are 1-indexed, so iterate rows 1..10 directly.
i = 1
for row in range(1, 11):
    data = worksheet.Cells[row, i].Value()
    print(data)

# Write some data

# Read column `i`, write the squared value into column `j` of the same row.
i = 1
j = 2
for row in range(1, 11):
    data = worksheet.Cells[row, i].Value()
    worksheet.Cells[row, j] = data**2

# Slow version

# +
worksheet = wb.Sheets[2]

I, J = 56, 600
arr = np.empty((I, J))

# One COM round-trip per cell: I*J calls, hence slow.
for i in range(I):
    print(f"Row {i}", end="\r")
    for j in range(J):
        val = worksheet.Cells[i+1, j+1].Value()
        arr[i, j] = val

plt.imshow(arr);
# -

# Faster version

# +
worksheet = wb.Sheets[2]

I, J = 56, 600
arr = np.empty((I, J))

# One COM round-trip per row: fetch the whole row, then drop empty cells.
for i in range(I):
    row = worksheet.Rows[i+1].Value()
    row = [value for value in row[0] if value is not None]
    arr[i, :] = np.array(row)

plt.imshow(arr);
# -

# # ctypes
#
# `ctypes` is a useful library for interfacing with C-code from Python. The
# examples here show how we can use `ctypes` to implement functions available
# from the Windows API, and how we can use it to interface with C functions
# exposed in a DLL.
#
# https://docs.python.org/3.7/library/ctypes.html

# ## Increasing the Windows time resolution
#
# Here we use the Windows multimedia API to increase the Windows time
# resolution (`C:\Windows\System32\winmm.dll`)
#
# https://docs.microsoft.com/en-us/windows/win32/api/timeapi/index

# First we need to import the DLL with ctypes and then map the functions. This
# is essentially telling Python how to interact with the C API.

# +
from ctypes import wintypes
import ctypes

winmm = ctypes.WinDLL('winmm')

ENABLED = False

class TIMECAPS(ctypes.Structure):
    # Mirrors the TIMECAPS struct from timeapi.h (min/max supported period).
    _fields_ = (('wPeriodMin', wintypes.UINT),
                ('wPeriodMax', wintypes.UINT))

milliseconds = 1  # 1 ms is the smallest possible time period on Windows
# -

# Let's time how long it takes to sleep 1 ms...

# %%timeit -n 500 -r 3
import time
time.sleep(0.001)

# Where does this number come from? The Windows scheduler ticks 64 times per second:

print(f"Time period @ 64 ticks/s: {1/64 * 1000} ms")

# So, it takes more than 15 ms for the cpu to sleep just 1 ms...

# However, the Windows multimedia library offers functions to adjust the time
# period when needed. Let's increase the time resolution to 1 ms!

# +
# setup
caps = TIMECAPS()
winmm.timeGetDevCaps(ctypes.byref(caps), ctypes.sizeof(caps))

# check that it is within allowed values
milliseconds = min(max(milliseconds, caps.wPeriodMin), caps.wPeriodMax)

winmm.timeBeginPeriod(milliseconds)
# -

# Try again:

# %%timeit -n 500 -r 3
import time
time.sleep(0.001)

# Much better! Note that this effect is system wide, but we can reset it easily.

# reset
winmm.timeEndPeriod(milliseconds)

# And to make sure it worked...

# %%timeit -n 500 -r 3
import time
time.sleep(0.001)

# Changing the time period is useful when higher precision is required on the
# scheduler, for example when millisecond precision is needed from
# `time.sleep` in time-sensitive programs.

# ## Interfacing with a DLL
#
# A more complex, but real-world example of how to interface a DLL that reads
# data from a camera mounted under a TEM.
# Again, we will use `ctypes` for this.
#
# This example is taken from:
# https://github.com/stefsmeets/instamatic/blob/master/instamatic/camera/camera_gatan.py

import ctypes
from ctypes import c_int, c_long, c_float, c_double, c_bool, c_wchar_p
from ctypes import POINTER, byref, addressof

# The only thing we have is a header file (`CCDCOM.h`) and a dll
# (`CCDCOM2_x64_simulation.dll`), let's see if we can get some data out of
# this thing. Note that this DLL simulates the connection, and makes up some
# data for debugging purposes.

with open("CCDCOM.h", "r") as f:
    for line in f:
        print(line, end="")

# So the symbols are defined in the header, but they are a bit mangled. It
# does not matter, because we can make a mapping.

# You can use dependency walker to get function names from a DLL:
# http://www.dependencywalker.com/

# Map of readable names -> MSVC-mangled export names in the DLL.
symbols = {
    'acquireImageNewFloat':  '?acquireImageNewFloat@@YAHHHHHHN_NPEAPEAMPEAH2@Z',
    'acquireImageNewInt':    '?acquireImageNewInt@@YAHHHHHPEAH00HN_N@Z',
    'cameraCount':           '?cameraCount@@YAHXZ',
    'cameraDimensions':      '?cameraDimensions@@YA_NPEAH0@Z',
    'cameraName':            '?cameraName@@YA_NPEA_WH@Z',
    'CCDCOM2_release':       '?CCDCOM2_release@@YAXPEAM@Z',
    'initCCDCOM':            '?initCCDCOM@@YAHH@Z',
    'isCameraInfoAvailable': '?isCameraInfoAvailable@@YA_NXZ',
    'releaseCCDCOM':         '?releaseCCDCOM@@YAXXZ'
}

# Next, we load the DLL using ctypes

# +
libpath = (Path() / "CCDCOM2_x64_simulation.dll").absolute()

lib = ctypes.cdll.LoadLibrary(str(libpath))
# -

# All the functions are defined in the header file, and we get them as
# attributes from `lib`.

# Simple functions are easily interfaced, such as the following, which checks
# if the camera info is available.

# We define the interface and the return type as a boolean (`c_bool`).

# +
isCameraInfoAvailable = getattr(lib, symbols['isCameraInfoAvailable'])
isCameraInfoAvailable.restype = c_bool

print(f"Camera available: {isCameraInfoAvailable()}")
# -

# Easy enough, what about getting the image dimensions.

# First we define the function `cameraDimensions`. Because we know the
# arguments are pointers to where the height and width are stored as C longs
# (`c_long`), we must define the arguments as such. This function does not
# return anything useful, so we do not have to specify the return type
# (`restype`) as above.

cameraDimensions = getattr(lib, symbols['cameraDimensions'])
cameraDimensions.argtypes = [POINTER(c_long), POINTER(c_long)]

# Next we can define our own interface, and neatly wrap it in a python interface.

# Initialize the width (`pnWidth`) and height (`pnHeight`) as integers, and
# pass them by reference (`byref`)

# +
def get_dimensions():
    """Return the camera sensor size as a ``(width, height)`` tuple of ints.

    Wraps the DLL's `cameraDimensions`, which writes into the two
    pointer arguments passed by reference.
    """
    pnWidth = c_int(0)
    pnHeight = c_int(0)

    cameraDimensions(byref(pnWidth), byref(pnHeight))

    return pnWidth.value, pnHeight.value

print(get_dimensions())
# -

# Ok, that worked, what about a more complex function, such as obtaining the
# image, which is what we are after.

# The trick is the same as above, 1) map the function, 2) specify the
# arguments (`argtypes`), and 3) specify the return type (`restype`).

# +
acquireImageNewFloat = getattr(lib, symbols['acquireImageNewFloat'])
acquireImageNewFloat.argtypes = [
    ## Input
    c_int,     # int area_t
    c_int,     # int area_l
    c_int,     # int area_b
    c_int,     # int area_r
    c_int,     # nBinning -- header comment said "float" but it is passed as an int here; TODO confirm against CCDCOM.h
    c_double,  # double fExposure
    c_bool,    # bool bShowInDM
    ## Output
    POINTER(POINTER(c_float)),  # float** pdata
    POINTER(c_int),             # int* pnImgWidth
    POINTER(c_int)              # int* pnImgHeight
]

# we must also interface the function to free the memory after the image is retrieved
CCDCOM2release = getattr(lib, symbols['CCDCOM2_release'])
CCDCOM2release.argtypes = [POINTER(c_float)]
# -

# Again, we define a function to hide all the messy details to get this to work

def get_image(exposure=0.1):
    """Acquire a full-frame image and return it as a 2D numpy float array.

    Parameters
    ----------
    exposure : float
        Exposure time passed to the DLL (seconds, presumably -- verify
        against the camera documentation).
    """
    xmin, ymin = 0, 0
    xmax, ymax = get_dimensions()

    pdata = POINTER(c_float)()
    pnImgWidth = c_int(0)
    pnImgHeight = c_int(0)

    acquireImageNewFloat(ymin, xmin, ymax, xmax,
                         1, exposure, False,
                         byref(pdata), byref(pnImgWidth), byref(pnImgHeight)
                         )

    # the image dimensions are stored here
    xres = pnImgWidth.value
    yres = pnImgHeight.value

    # Here we view the memory addressed by the `pdata` pointer as an array of
    # `xres`*`yres` C floats and convert that to a numpy array... oof!
    arr = np.ctypeslib.as_array((c_float * xres * yres).from_address(addressof(pdata.contents)))
    # Copy before releasing: the view above still points into DLL-owned memory.
    arr = arr.copy()

    # we must tell the DLL to release the memory
    CCDCOM2release(pdata)

    return arr

# Let's see what we got!

arr = get_image()
plt.imshow(arr);

# # Subprocess
#
# https://docs.python.org/3.7/library/subprocess.html
#
# Subprocesses are convenient to call programs from Python. This can be any
# program that is accessible through the Windows command line.

import subprocess as sp

# For example, we can start notepad

p = sp.run("notepad.exe")

# But we can also call other programs, like `sginfo`:
# http://cci.lbl.gov/sginfo/
#
# Sginfo is a command line program that gives space group information.
# We can specify the space group we are interested in, and capture the output.

p = sp.run("sginfo Pnma -allxyz", capture_output=True)

print(p.stdout.decode())

# We can then write a little function to wrap some of the information we are
# interested in.

# For example, we want to obtain the Laue group for any space group. We will
# raise an error if the space group cannot be found.

# +
class SpaceGroupError(NameError):
    """Raised when `sginfo` does not report a Laue group for the input."""
    pass

def get_laue_group(spgr):
    """Return the Laue group symbol for space group *spgr* via `sginfo`.

    Raises
    ------
    SpaceGroupError
        If no "Laue Group" line appears in the output; carries sginfo's
        stderr text as the message.
    """
    p = sp.run(f"sginfo {spgr}", capture_output=True)
    stdout = p.stdout.decode()
    stderr = p.stderr.decode()
    # The symbol is the third whitespace-separated token on the
    # "Laue Group" line of sginfo's output.
    for line in stdout.splitlines():
        if line.startswith("Laue Group"):
            ret = line.split()[2]
            return ret
    raise SpaceGroupError(stderr)

spgr = "Pnma"
lauegr = get_laue_group(spgr)
print(f"The Laue group for `{spgr}` is `{lauegr}`")
# -

# # WSL

# Windows Subsystem for Linux (WSL) is a feature in Windows 10 that allows you
# to run native Linux executables directly on Windows.

# +
import subprocess as sp

p = sp.run("bash -ic xds", capture_output=True)

print(p.stdout.decode())
print(p.stderr.decode())
# -

# Can we run Linux GUI programs from Python through WSL?
#
# For this to work Xming X server for Windows should be installed:
# https://sourceforge.net/projects/xming/
#
# This makes sure that Windows knows how to render the GUI programs invoked
# from bash.
#
# Also, set the `DISPLAY` environment variable (or add it to `~/.bashrc`):
# `export DISPLAY=:0`

# +
import subprocess as sp

p = sp.run("bash -ic xdsgui", capture_output=True)

print(p.stdout.decode())
print(p.stderr.decode())
# -

# # Sockets
#
# Sockets are used for communicating between different programs, locally or
# over the network. In fact, the python multiprocessing library makes heavy
# use of sockets for interprocess communication to achieve parallel
# processing. Here we will use it to transmit messages between a server and a
# client program.
#
# https://docs.python.org/3/library/socket.html

# First, set some ports. Localhost refers to the local computer, but you can
# put an ip address here to establish a connection over the network. The port
# indicates the port over which communication takes place, and `BUFF` refers
# to the maximum message length.

# +
from socket import *

HOST = 'localhost'
PORT = 8009
BUFF = 1024
# -

# The first example uses Python sockets to establish the connection with the
# server. First run the `glue echo server.ipynb` notebook.

# +
payload = "Hello world!"

with socket(AF_INET, SOCK_STREAM) as s:
    print("Sending message to server...")
    s.connect((HOST, PORT))
    s.send(payload.encode())
    msg = s.recv(BUFF).decode()
    print("Answer ->", msg)
# -

# The cool thing is, we can actually do the same thing through Python by
# calling netcat directly using subprocesses

import subprocess as sp

# +
msg = "Hello through netcat!".encode()

p = sp.run(f"./nc {HOST} {PORT}", input=msg, capture_output=True)

msg = p.stdout.decode()
print(msg)
# -

# The way this works is through pipes. Much like in the command line, software
# pipes can be specified in Python's subprocess using the following syntax.
# `sp.run` is nothing more than a wrapper around `sp.Popen` that simplifies
# this interface.

# +
p1 = sp.Popen(f"./nc {HOST} {PORT}", stdout=sp.PIPE, stdin=sp.PIPE)

data = "Hello through netcat (using sp.Popen)!".encode()

msg = p1.communicate(data)[0].decode()
print(msg)
# -

# You can send any data over the network, but you have to make a bytes
# representation first. `pickle` is a good way to serialize python data.
# (NOTE: only unpickle data from sources you trust.)

# +
import pickle

arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]

payload = pickle.dumps(arr)
payload
# -

# If you now run the second server in the notebook, we can make it calculate
# the sum of the list above.

with socket(AF_INET, SOCK_STREAM) as s:
    print("Sending message to server...")
    s.connect((HOST, PORT))
    s.send(payload)
    msg = pickle.loads(s.recv(BUFF))
    print("Answer ->", msg)

# This is extremely powerful and quite easy to set up!

# # PyAutoGui
#
# If all other options fail, `pyautogui` is a last resort option. It can find
# UI elements on the screen, and automate interactions, such as clicking on
# buttons.
#
# https://pypi.org/project/PyAutoGUI/#files

# +
import pyautogui as pg
import imageio
import matplotlib.pyplot as plt
# -

# First, we define an image to click on.

# +
click_target = "pp.png"

data = imageio.imread(click_target)
plt.imshow(data)
# -

# Then, `pyautogui` can find the button on the screen and click on it :-)

pos = pg.locateCenterOnScreen(click_target)

pg.moveTo(pos, duration=2.0, pause=1.0, tween=pg.easeOutQuad)  # add delay for dramatic effect
pg.click()

pg.hotkey("shift", "f5")
lectures/python-glue/glue.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="Q3ZqvmZ8qVaz"
# # **Teste de Python**

# + [markdown] id="gz3P9YJkqauY"
# ## **1.**

# + id="Be4goJEUnbYm"
# Check whether `numero` is a multiple of `multiplo`.
def verifica_multiplo(numero, multiplo):
    """Return True if *numero* is an exact multiple of *multiplo*."""
    return numero % multiplo == 0

# + id="9OcMr5nvnszW"
# Count how many numbers in 1..5,000,000 are even and multiples of both 49 and 37.
soma = 0
for numero in range(1, 5000001):
    if (verifica_multiplo(numero, 2)
            and verifica_multiplo(numero, 49)
            and verifica_multiplo(numero, 37)):
        soma += 1

# + colab={"base_uri": "https://localhost:8080/"} id="f_jwNSSgoIcV" outputId="b160bcd5-4071-4695-d5db-97a6798a5d49"
# Show the result
print(f'A quantidade de números pares, múltiplos de 49 e de 37 no intervalo de 1 a 5.000.000 é igual a {soma}')

# + [markdown] id="DpD01PbwrZSF"
# ## **2.**

# + id="A9QgEOKYyGYA"
# Libraries used for the math functions.
# `math.factorial` replaces `np.math.factorial`: the `np.math` alias was
# deprecated and removed in recent NumPy releases.
import math

import numpy as np

# + id="La5VJZJpoTxh"
# Vector with 10 positions
x = list(range(10))

# + id="bfuW1U_trinm"
# Apply a different formula depending on whether the position is even or odd.
for i in range(10):
    if i % 2 == 0:
        x[i] = np.power(3, i) + 7 * math.factorial(i)
    else:
        x[i] = np.power(2, i) + 4 * np.log(i)

# + colab={"base_uri": "https://localhost:8080/"} id="tZEBMRQQyTSD" outputId="47f9461d-04c9-4d55-c5c9-599f66e7ff34"
# Position of the largest element
posicao = x.index(max(x))
print(f'A posição do maior elemento é a {posicao}')

# + colab={"base_uri": "https://localhost:8080/"} id="ghZKYYXPyo1B" outputId="eceb9c31-fffd-4a10-b536-1df4eaddad28"
# Mean of the elements in the vector
media = round(np.mean(x), 2)
print(f'A média dos elementos contidos no vetor é igual a {media}')

# + [markdown] id="ifFZqL1HzNUk"
# ## **3.**

# + id="nDFaPkFfzIpM"
# Dictionary that will hold each student's name and their grade.
notas = {}

# + colab={"base_uri": "https://localhost:8080/"} id="Wuu0YHJjveXv" outputId="87f9ecfd-b792-4025-c280-ffab82428a5c"
# Read the five students' grades and add them to the dictionary;
# re-prompt until the grade is within the valid 0..10 range.
for i in range(5):
    nota = float(input(f'Insira a nota do Aluno {i + 1}: '))
    if 0 <= nota <= 10:
        notas[f'Aluno {i + 1}'] = nota
    else:
        print('A nota inserida é inválida. O valor da nota deve estar entre 0 e 10!')
        while nota < 0 or nota > 10:
            nota = float(input(f'Insira a nota do Aluno {i + 1}: '))
        notas[f'Aluno {i + 1}'] = nota

# + colab={"base_uri": "https://localhost:8080/"} id="pfEFew8mvlL3" outputId="0875eee4-226a-4c1c-977a-0647c6006d8b"
# Find and display the student(s) with the highest grade.
# Compute the maximum once instead of on every loop iteration.
print('O(s) aluno(s) que obteveram a maior nota e sua(s) respectiva(s) nota(s): ')
maior_nota = max(notas.values())
for key, value in notas.items():
    if value == maior_nota:
        print(f'{key} - Nota = {value}')

# + id="QCjSYRqCv2mi"
teste-de-python-summer-navi/teste_python_navi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import khmer
# ref = https://github.com/dib-lab/khmer/blob/master/examples/python-api/exact-counting.py
import tqdm
import pandas as pd
import numpy as np
import os, glob, subprocess
import scanpy.api as sc
import anndata as ad
from scipy import sparse
from plotnine import *
import plotnine
from IPython.core.display import HTML


def translate(seq, frame=1):
    """Translate DNA sequence *seq* to protein in one of six reading frames.

    frame 1/2/3: translate starting at offset 0/1/2 of the given strand.
    frame 4/5/6: same offsets after complementing the strand.
    NOTE(review): frames 4-6 complement but do NOT reverse the sequence, so
    this is not a true reverse-complement reading frame -- confirm intent.
    Stop codons map to '' and translation continues past them; a trailing
    partial codon (or an unexpected base) terminates translation.
    """
    table = {
        'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
        'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
        'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
        'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
        'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
        'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
        'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
        'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
        'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
        'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
        'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
        'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
        'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
        'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
        'TAC': 'Y', 'TAT': 'Y', 'TAA': '', 'TAG': '',
        'TGC': 'C', 'TGT': 'C', 'TGA': '', 'TGG': 'W',
    }
    comp_dict = {'C': 'G',
                 'G': 'C',
                 'A': 'T',
                 'T': 'A'
                 }
    protein = ''
    # frames 1/4 -> offset 0, 2/5 -> offset 1, 3/6 -> offset 2
    start_val = (frame - 1) % 3
    if frame > 3:
        seq = ''.join([comp_dict.get(x) for x in seq])
    for i in range(start_val, len(seq), 3):
        codon = seq[i:i + 3]
        try:
            protein += table[codon]
        except KeyError:
            # short trailing slice or unexpected base: stop translating
            break
    return protein


def file_len(fname):
    """Return the number of lines in *fname* (0 for an empty file)."""
    n = 0
    with open(fname) as f:
        for n, _ in enumerate(f, start=1):
            pass
    return n


# filter completed samples
def find_completed(results_dir, dir_path, target_suffix):
    """Return (fa_list, blastOut_list) for samples that already finished.

    A sample counts as completed when `<results_dir><cell_id>/<cell_id>_geneCounts.csv`
    exists; for each one, the corresponding input fasta path and blastp output
    path are collected.
    """
    fa_list = []
    blastOut_list = []
    # check for completed files: *_geneCounts.csv
    for outfile in glob.glob(f'{results_dir}*/*_geneCounts.csv'):
        # the open() only verifies the file is readable; contents are unused
        with open(outfile, 'r') as out_f:
            deduce_cell_id = outfile.split('/')[-2]
            deduce_target = f'{dir_path}{deduce_cell_id}{target_suffix}'
            deduce_out = f'{results_dir}{deduce_cell_id}/{deduce_cell_id}_blastpOut.txt'
            fa_list.append(deduce_target)
            blastOut_list.append(deduce_out)
    return fa_list, blastOut_list


def process_blastp_results(target_output, results_dir, evalue_cutoff):
    """Parse a tabular blastp output file into a (symbol, seq_id) DataFrame.

    Keeps only the best-evalue frame per query, drops hits with
    evalue >= *evalue_cutoff*, and maps uniprot accessions to symbols via
    `<results_dir>scope2field.csv`.
    """
    # ingest table
    results = pd.read_csv(target_output, sep='\t', header=None)
    results.columns = ['qsid', 'ssid', 'evalue']

    # parse frame and ids; query ids look like "<seq_id>__fr<N>",
    # subject ids look like "sp|<uniprot>|<common_name>"
    results['frame'] = [x.split('__')[-1][-1] for x in results['qsid']]
    results['qsid'] = [x.split('__')[0] for x in results['qsid']]
    results['uniprot'] = [x.split('|')[1] for x in results['ssid']]
    results['common_name'] = [x.split('|')[2] for x in results['ssid']]

    # return best-match frame (lowest evalue per query)
    results = results.sort_values('evalue', ascending=True).groupby(['qsid']).head(1)

    # filter results to significant matches
    results = results[results.evalue < evalue_cutoff]

    # adjust df to merge
    results = (results
               .loc[:, ['uniprot', 'qsid']]
               .rename(columns={'qsid': 'seq_id'})
               )

    # ingest uniprot to symbol df for lookup
    scope2field_df = pd.read_csv(f'{results_dir}scope2field.csv', index_col=0)
    results = pd.merge(scope2field_df, results, 'inner', 'uniprot')
    results = results.drop('uniprot', axis=1)
    return results


def run_blastp(input_fn, prot_ref, n_threads, ouput_fn, log_fn):
    """Run `blastp` on *input_fn* against *prot_ref*, then append to *log_fn*.

    Writes tabular output ("6 evalue qseqid sseqid") to *ouput_fn*.
    """
    # run blast
    blast_cmd_list = ['blastp',
                      '-query', input_fn,
                      '-db', prot_ref,
                      '-task', 'blastp-fast',
                      '-matrix', 'BLOSUM45',
                      '-evalue', '1000',
                      '-outfmt', '"6 evalue qseqid sseqid"',
                      '-max_target_seqs', '5',
                      '-num_threads', f'{n_threads}',
                      '-out', ouput_fn,
                      ]
    blast_cmd = ' '.join(blast_cmd_list)
    print(blast_cmd)
    # NOTE(review): shell=True with a joined string -- paths containing
    # spaces or shell metacharacters would break/inject; fine for the
    # controlled paths used here, but prefer a list with shell=False.
    subprocess.call(blast_cmd, shell=True)

    # record that this input has been processed
    with open(log_fn, 'a') as outf:
        outf.write(f'{input_fn}\n')
# -
# Long-read cell-typing pipeline: k-merize reads per cell, embed and cluster the
# k-mer count profiles, sample reads per cluster, six-frame translate and BLAST
# them against a reference proteome, then annotate clusters and re-embed cells
# by cluster-annotation counts.
# NOTE(review): this chunk relies on imports/helpers defined in earlier, unseen
# cells (glob, os, tqdm, khmer, np, pd, sc = scanpy, ad = anndata, sparse,
# plotnine/ggplot, file_len, translate, run_blastp) — confirm upstream.
sc.__version__

# identify input data
input_dir = '/home/ubuntu/data/longread/test_dir/multi_species/'
fa_list = glob.glob(f'{input_dir}*.fa')
entry_len_list = []
for fn in fa_list:
    # each FASTA entry is 2 lines (header + sequence), so entries = lines / 2
    # NOTE(review): assumes strictly 2-line (unwrapped) FASTA records — confirm
    entry_len_list = entry_len_list+[int(file_len(fn)/2)]
# total number of reads across all input files; sizes the arrays below
balanced_entry_len_sum = np.sum(entry_len_list)

# +
# kmerize
ksize = 6
nkmers = 4**ksize  # 4^6 = 4096 possible 6-mers over {A,C,G,T}
cell_df = pd.DataFrame()
master_df = pd.DataFrame()
# kmer_arr: one column per read, one row per possible k-mer
kmer_arr = np.zeros((nkmers, balanced_entry_len_sum))
arr_idx = 0
seq_id_arr = np.zeros((balanced_entry_len_sum), dtype = 'object')
cell_id_arr = np.zeros((balanced_entry_len_sum), dtype = 'object')

# ingest all .fa files from dir
for fn in fa_list:
    # extract fn prefix for cell id
    cell_id = fn.split('/')[-1].split('.fa')[0]
    with open(fn, 'r') as f:
        # parse entry by entry
        for count, line in tqdm.tqdm(enumerate(f, start=0)):
            # extract first line as seq id
            if count % 2 == 0:
                k_name = line[1:-1]  # strip leading '>' and trailing newline
            # extract second line as sequence
            if count % 2 != 0:
                # Initialize countgraph
                # fresh Countgraph per read so counts are per-read, not cumulative
                tablesize = nkmers + 10
                ktable = khmer.Countgraph(ksize, tablesize, 1)
                # count all k-mers in the given string
                ktable.consume(line[:-1])
                # capture full kmer counts
                k_n_list = [ktable.get(i) for i in range(nkmers)]
                # update kmer count arr
                kmer_arr[:,arr_idx] = k_n_list
                # log sequence and cell names for metadata
                seq_id_arr[arr_idx] = k_name
                cell_id_arr[arr_idx] = cell_id
                # update arr pointer
                arr_idx = arr_idx + 1

# create dfs
# reverse_hash on the last ktable maps row index -> k-mer string (same for all tables)
kmers_list = [ktable.reverse_hash(i) for i in range(nkmers)]
master_df = pd.DataFrame(kmer_arr)
master_df['kmers'] = kmers_list
master_df = master_df.set_index('kmers')
master_df.columns = seq_id_arr.tolist()
master_df = master_df.astype(int)
cell_df = pd.DataFrame({'seq_id':seq_id_arr, 'cell_id':cell_id_arr, })
cell_df = cell_df.set_index('seq_id')

# +
# ingest data to adata obj
# pd df to np array
array_adata = master_df.values
# extract obs and var
obs = master_df.columns.tolist()
gene_names = master_df.index.tolist()
# create ad obj
# .T: AnnData convention is obs (reads) x var (k-mers)
adata = ad.AnnData(X=array_adata).T
adata.X = sparse.csr_matrix(adata.X)
adata.var_names = gene_names
adata.obs_names = obs
# append metadata
adata.obs['cell_id'] = cell_df.cell_id.tolist()
# transform and freeze raw state
adata.raw = sc.pp.log1p(adata, copy=True)

# +
# embed and cluster
n_pcs = 200
n_neighbs = 10
min_dist = 0.3
louvain_res = 0.1

all_forms_adata = sc.pp.log1p(adata, copy=True)
sc.pp.scale(all_forms_adata)
# fixed random_state throughout for reproducible embeddings/clusters
sc.tl.pca(all_forms_adata,svd_solver='arpack',use_highly_variable = False,n_comps=n_pcs,random_state=100)
sc.pp.neighbors(all_forms_adata, n_pcs=n_pcs, n_neighbors = n_neighbs, random_state=100)
sc.tl.umap(all_forms_adata, min_dist = min_dist, random_state=100, init_pos='random')
sc.tl.louvain(all_forms_adata, resolution = louvain_res, random_state=100)

umap_df = pd.DataFrame(all_forms_adata.obsm['X_umap'], columns = ['xvar','yvar'])
umap_df['cell_id'] = all_forms_adata.obs.cell_id.tolist()
umap_df['seq_id'] = all_forms_adata.obs.index.tolist()
umap_df['louvain'] = all_forms_adata.obs.louvain.tolist()

# plot UMAP colored by cluster and by cell of origin
plotnine.options.figure_size = (5,5)
for val in ['louvain','cell_id']:
    plot = (ggplot(umap_df)
            + theme_bw()
            + theme(aspect_ratio = 1,
                    panel_grid_major = element_blank(),
                    panel_grid_minor = element_blank()
                    )
            + geom_point(aes('xvar','yvar',color=val), size=0.05)
            )
    print(plot)

# +
# sample up to sample_n reads per Louvain cluster and write their six-frame
# translations to a combined FASTA for BLAST
sample_n = 10
input_dir = '/home/ubuntu/data/longread/test_dir/multi_species/'
out_dir = '/home/ubuntu/data/longread/test_dir/multi_species/outputs/'

clear_files = True
if clear_files == True:
    for x in glob.glob(f'{out_dir}*.fa'):
        os.remove(x)

outfile = f'{out_dir}allClusters.fa'
cluster_count_df = pd.DataFrame()
for cluster_oi in list(set(all_forms_adata.obs.louvain)):
    cluster_slice = all_forms_adata[all_forms_adata.obs.louvain == cluster_oi]
    fn_oi = cluster_slice.obs.cell_id.tolist()[:sample_n]
    entry_oi = cluster_slice.obs.index.tolist()[:sample_n]
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — migrate to
    # pd.concat([...]) when upgrading pandas.
    cluster_count_df = cluster_count_df.append(pd.DataFrame({'cluster':[cluster_oi],'cell_n':len(entry_oi)}))
    for fn, entry in zip([f'{input_dir}{x}.fa' for x in fn_oi], entry_oi ):
        with open(fn, 'r') as f:
            # parse entry by entry
            for count, line in enumerate(f, start=0):
                # extract first line as seq id
                if count % 2 == 0:
                    k_name = line[1:-1].split(' ')[0]
                # extract second line as sequence
                if count % 2 != 0 and k_name == entry.split(' ')[0]:
                    # translate in all six reading frames (3 forward, 3 reverse)
                    for frame in [1,2,3,4,5,6]:
                        seq = translate(line[:-1],frame)
                        with open(outfile, 'a') as outf:
                            # header encodes read id, frame, and cluster for parsing below
                            outf.write(f'>{k_name}_fr_{frame}_cl_{cluster_oi}\n{seq}\n')

# + code_folding=[]
# BLAST the sampled/translated reads against the human reference proteome
prot_ref = '/home/ubuntu/data/longread/proteome/UP000005640_9606.fasta'
n_threads = 6
log_fn = '/home/ubuntu/data/longread/test_dir/multi_species/outputs/blast_results/log.txt'

clear_files = True
if clear_files == True:
    for x in glob.glob('/home/ubuntu/data/longread/test_dir/multi_species/outputs/blast_results/*'):
        os.remove(x)

for blast_targets in glob.glob(f'{out_dir}*.fa'):
    prefix = blast_targets.split('/')[-1].split('.')[0]
    # NOTE(review): 'ouput_fn' is a typo for 'output_fn' (harmless; local only)
    ouput_fn = f'/home/ubuntu/data/longread/test_dir/multi_species/outputs/blast_results/{prefix}_blast.tsv'
    run_blastp(blast_targets, prot_ref, n_threads, ouput_fn, log_fn)
# -

# ingest blast results and score entries
subject_n = 1
# NOTE(review): loop-body extent below was reconstructed; with the single
# combined allClusters .tsv the result is the same either way — confirm
# against the original notebook.
for x in glob.glob('/home/ubuntu/data/longread/test_dir/multi_species/outputs/blast_results/*.tsv'):
    print(x)
    blast_result = pd.read_csv(x, sep = '\t', header=None)
    blast_result.columns = ['evalue','qsid_fr_cl','ssid']
    # machine precision estimate: ref = https://www.biostars.org/p/43484/
    # (replace e-value 0 with smallest positive double so -log10 is finite)
    blast_result = blast_result.replace(0, 2.225074e-308)
    blast_result['qsid'] = [x.split('_fr_')[0] for x in blast_result['qsid_fr_cl']]
    blast_result['cluster'] = [x.split('_cl_')[-1] for x in blast_result['qsid_fr_cl']]
    blast_result['log_evalue'] = -np.log10(blast_result['evalue'])
    # return best match for each subject
    blast_result = (pd.DataFrame(blast_result.groupby(['cluster', 'qsid', 'ssid'])['log_evalue'].max())
                    .reset_index()
                    )
    # sum subject match scores across reads
    blast_result = (pd.DataFrame(blast_result.groupby(['cluster','ssid'])['log_evalue'].sum())
                    .reset_index()
                    )
    # normalize to the number of reads blasted
    blast_result = pd.merge(blast_result, cluster_count_df, 'left', 'cluster')
    blast_result['norm_log_evalue'] = blast_result['log_evalue']/blast_result['cell_n']
    blast_result['neg_norm_log_evalue'] = [-x for x in blast_result['norm_log_evalue']]
    # subset results
    # keep the top subject_n subjects per cluster by normalized score
    blast_result = (blast_result
                    .sort_values('norm_log_evalue', ascending = False)
                    .groupby('cluster')
                    .head(subject_n)
                    .sort_values(['cluster','neg_norm_log_evalue'], ascending = True)
                    .drop('neg_norm_log_evalue', axis = 1)
                    )

# map UniProt subject ids to gene symbols / fields via the scope table
scope_df = pd.read_csv('/home/ubuntu/data/longread/test_dir/scope2field.csv', index_col = 0)
scope_df = scope_df.drop_duplicates()

cluster_annotation_df = blast_result.loc[:, ['cluster','ssid','norm_log_evalue']]
# subject ids look like 'sp|<uniprot>|<entry>' — take the accession field
cluster_annotation_df['uniprot'] = [x.split('|')[1] for x in cluster_annotation_df['ssid']]
cluster_annotation_df = pd.merge(cluster_annotation_df,scope_df,'inner','uniprot')
cluster_annotation_df = cluster_annotation_df.sort_values('norm_log_evalue')

# per-cell counts of reads falling into each annotated (symbol-labeled) cluster
cell_by_cluster = pd.merge(umap_df.rename(columns = {'louvain':'cluster'}),
                           cluster_annotation_df,
                           'inner',
                           'cluster'
                           )
cell_by_cluster = ((pd.DataFrame(cell_by_cluster
                                 .groupby(['cell_id'])['symbol']
                                 .value_counts()
                                 .rename('count')
                                 )
                    .reset_index()
                    .pivot(index='symbol', columns='cell_id', values='count')
                    )
                   .replace(np.nan, 0)
                   .astype(int)
                   )

# +
# ingest data to adata obj
# pd df to np array
array_adata = cell_by_cluster.values
# extract obs and var
obs = cell_by_cluster.columns.tolist()
gene_names = cell_by_cluster.index.tolist()
# create ad obj
cell_adata = ad.AnnData(X=array_adata).T
cell_adata.X = sparse.csr_matrix(cell_adata.X)
cell_adata.var_names = gene_names
cell_adata.obs_names = obs
# transform and freeze raw state
cell_adata.raw = sc.pp.log1p(cell_adata, copy=True)

# +
# embed and cluster
# (cells this time, so far fewer PCs/neighbors than the read-level embedding)
n_pcs = 50
n_neighbs = 4
min_dist = 0.1
louvain_res = 0.3

all_cells_adata = sc.pp.log1p(cell_adata, copy=True)
sc.pp.scale(all_cells_adata)
sc.tl.pca(all_cells_adata,svd_solver='arpack',use_highly_variable = False,n_comps=n_pcs,random_state=100)
sc.pp.neighbors(all_cells_adata, n_pcs=n_pcs, n_neighbors = n_neighbs, random_state=100)
sc.tl.umap(all_cells_adata, min_dist = min_dist, random_state=100, init_pos='random')
sc.tl.louvain(all_cells_adata, resolution = louvain_res, random_state=100)

umap_df = pd.DataFrame(all_cells_adata.obsm['X_umap'], columns = ['xvar','yvar'])
umap_df['cell_id'] = all_cells_adata.obs.index.tolist()
umap_df['louvain'] = all_cells_adata.obs.louvain.tolist()
# cell ids are '<species>_<...>' — presumably; TODO confirm naming convention
umap_df['species'] = [x.split('_')[0] for x in umap_df['cell_id']]

plotnine.options.figure_size = (5,5)
for val in ['louvain','species']:
    plot = (ggplot(umap_df)
            + theme_bw()
            + theme(aspect_ratio = 1,
                    panel_grid_major = element_blank(),
                    panel_grid_minor = element_blank()
                    )
            + geom_point(aes('xvar','yvar',color=val), size=0.05)
            )
    print(plot)
# -

# rank marker "genes" (cluster annotations) per cell-level Louvain group
sc.tl.rank_genes_groups(all_cells_adata, 'louvain', use_raw=True, groups='all', reference='rest', n_genes=100, rankby_abs=False, key_added=None, copy=False, method='wilcoxon', corr_method='benjamini-hochberg')
sc.pl.rank_genes_groups_heatmap(all_cells_adata, groups=None, n_genes=10, groupby='louvain', key=None, show=None, save=None)
DL20191203_longread_cell_typing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Singleton Pattern (单例模式)
# Ensure a class has only one instance, and provide a global point of access to it.

import threading
import time


class Singleton(object):
    """Base class whose subclasses get at most one shared instance.

    The instance is cached on the first class in the MRO that creates one
    (``hasattr`` also sees inherited ``_instance`` attributes, matching the
    original behavior). Creation is guarded by a lock so concurrent first
    calls cannot race and produce two instances.
    """

    # lock protecting first-time instance creation (double-checked locking)
    _creation_lock = threading.Lock()

    def __new__(cls, *args, **kw):
        # Fast path: instance already exists (possibly inherited from a base).
        if not hasattr(cls, '_instance'):
            with Singleton._creation_lock:
                # Re-check inside the lock: another thread may have won the race.
                if not hasattr(cls, '_instance'):
                    # Fix: do NOT forward *args/**kw to object.__new__ —
                    # on Python 3 that raises TypeError when the class does
                    # not override __init__. Extra args still reach __init__.
                    cls._instance = super(Singleton, cls).__new__(cls)
        return cls._instance


# +
class Bus(Singleton):
    """Shared bus: a singleton whose sends are serialized by a re-entrant lock."""

    lock = threading.RLock()

    def sendData(self, data):
        """Send *data*; the lock serializes concurrent senders (3 s per send)."""
        self.lock.acquire()
        time.sleep(3)
        print('Sending single data...', data)
        self.lock.release()


class VisitEntity(threading.Thread):
    """Worker thread that acquires the Bus singleton and sends its name."""

    my_bus = ''
    name = ''

    def getName(self):
        # Fix: the original returned the bare (undefined) global `name`,
        # which raised NameError when called.
        return self.name

    def setName(self, name):
        self.name = name

    def run(self):
        self.my_bus = Bus()
        self.my_bus.sendData(self.name)


# Demo: three entities contend for the single Bus; sends are serialized.
# Guarded so importing this module does not spawn threads / sleep ~9 s.
if __name__ == '__main__':
    for i in range(3):
        print('Entity %d begin to run...' % i)
        my_entity = VisitEntity()
        my_entity.setName('Entity_' + str(i))
        my_entity.start()
# -

# # Advantages
# + Only one instance exists globally, which saves memory.
# + The single access point makes it suitable for coordinating concurrent use.
# + The one instance stays in memory, avoiding repeated instance creation.
#
# # Disadvantages
# + Hard to extend
# + Violates the Single Responsibility Principle
# + Unfriendly to unit testing
DesignPattern/SinglePattern.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Experiment-log monitoring notebook: load SimpleLog experiment logs, show
# per-run statistics, and compare/plot series across experiments.
# Depends on helpers provided by the %run'd common notebook below:
# SimpleLog, getLogSeries, showMeans, plotBoxes1/2, plotStats, lstats, plt.
# NOTE(review): verbose colab cell metadata was trimmed to plain `# + id=...`
# markers for readability; cell boundaries and ids are preserved.

# + id="d8yh75oag2Vz"
import os
# !nvidia-smi

# Path configuration: Google Drive when running on Colab, local paths otherwise.
useColab = True
if useColab:
    from google.colab import drive
    drive.mount('/content/gdrive')
    recordpath='gdrive/My Drive/Colab Notebooks/00data/record/'
    exppath='gdrive/My Drive/Colab Notebooks/00data/record/experiments/'
    storagepath='gdrive/My Drive/Colab Notebooks/00data/storage/'
else:
    recordpath='/home/hdaniel/Downloads/record/'
    exppath='/home/hdaniel/Downloads/record/experiments/'
    storagepath='/home/hdaniel/Downloads/'

# + id="uMbXsXkg2cTE"
# Load shared helpers. The %run magics are jupytext-commented, which left the
# if/else suites empty in the .py representation (a SyntaxError); `pass`
# bodies keep the script importable without changing notebook behavior.
if useColab:
    # %run gdrive/My\ Drive/Colab\ Notebooks/lib/common.ipynb
    pass
else:
    # %run lib/common.ipynb
    pass

# + id="weXn1Sf6e0bq"
#Append logs (disabled; kept as reference)
'''
logFNL1 = exppath + 'experiment.log'
logFNL2 = exppath + 'experiment-channels-100p.log'
log1 = SimpleLog.load(logFNL1)
log2 = SimpleLog.load(logFNL2)
log1.append(log2)
#log1.save(logFNL2)
showLog(log1, time=False)
'''
#log1.save(logFNL2)
#showLog(log1, time=False)

# + id="x2CeCpOyayi5"
#Show Log
#
def showLogTimes(log):
    """Print every entry timestamp of *log* and plot the timestamp series."""
    print('Log entries = ', log.len())
    print('Log: ')
    for i in log.getTime():
        print(i)
    #plt.plot(log.getTime(), range(0, log.len()))
    plt.plot(log.getTime())

def showLog(log, time=True):
    """Print all entries of *log* (optionally with timestamps) and the last entry."""
    print('Log entries = ', log.len())
    print('Log: ')
    for i in log.getAll():
        if time:
            print(i[0], end='')
        print(i[1])
    print('\nLog last entry:')
    print(str(log.getLast()[0]))
    print(log.getLast()[1])

#logFNL = exppath + 'experiment.log'
logFNL = exppath + 'experiment_AP30-1.0.log'
log = SimpleLog.load(logFNL)
#showLogTimes(log)
showLog(log, time=False)
plt.plot(log.getTime())

# + id="La1Fecvxtege"
logFNL = exppath + 'experiment-channels-100p.log'
#logFNL = exppath + 'experiment_AP30-1.0.log'
logseries = getLogSeries(logFNL, True)
#showMeans(logseries, maxAcc=1.0, time=True)
showMeans(logseries, maxAcc=1.0, mode=('max', 200, 30), time=True)
#showMeans(logseries, maxAcc=1.0, mode=('first', 200, 30), time=True)

#Boxplots
logseries = getLogSeries(logFNL, False)
plotBoxes2(logseries, title='Acc(%), 30 runs 200 epochs', width=0.5)
plotBoxes2(logseries, measure='epoch', title='Epochs, 30 runs 200 epochs', width=0.5)

#import itertools
#lseries = dict(itertools.islice(logseries.items(), 4))
#plotBoxes2(lseries, title='Acc(%) 30 runs 200 epochs', width=1, positions=[8, 6, 4, 2])
#lseries = dict(itertools.islice(logseries.items(), 4, 7))
#plotBoxes2(lseries, title='Acc(%) 30 runs 200 epochs', width=0.3, positions=[1, 0.5, 0.25])

# + id="-4MlF6oTu0d9"
# Per-run training curves for the 2000-point experiment.
#logFNL = exppath + 'experiment.log'
logFNL = exppath + 'experiment_AP30-1.0.log'
logseries = getLogSeries(logFNL, True)
e=2000
runs = len(logseries[e]['epoch'])
trainEpochs = logseries[e]['epoch']
trainAccs = logseries[e]['acc']
trainLosses = logseries[e]['loss']
plotStats(runs, trainEpochs, trainAccs, trainLosses, earlyStopACCmin=1.0)

# + id="z6oSvNepiW-R"
# Fix: these notes were bare text (a SyntaxError — the original cell errored);
# they are now comments.
# TODO: compare 2000 pts and 800 pts datasets if equal
# TODO: remake samples with seed 1024 and diff 6512 seeds, for 2000 pts
# TODO: move fix epoch max out of means? Make new class for CTBU log? ModelLog?

# + id="7f-9V9_OVy9V"
logFNL = exppath + 'experiment2000fixed.log'
logseries = getLogSeries(logFNL, True)
showMeans(logseries, maxAcc=1.0, time=True)
for e in logseries:
    print(e,':')
    print(logseries[e]['epoch'])
    print(logseries[e]['acc'])

# + id="_0-N6qcctxJM"
logFNL = exppath + 'experiment2000bouncing.log'
logseries = getLogSeries(logFNL, True)
showMeans(logseries, maxAcc=1.0, time=True)
for e in logseries:
    print(e,':')
    print(logseries[e]['epoch'])
    print(logseries[e]['acc'])

# + id="FV1uCZ3PPT0I"
#Compare experiences
logFNL1 = exppath + 'experiment_AP25.log'
logseries1 = getLogSeries(logFNL1, False)
#logFNL2 = exppath + 'experiment_BP25.log'
logFNL2 = exppath + 'experiment_AP100.log'
logseries2 = getLogSeries(logFNL2, False)

print('Pts runs epochs(mean/stdev) runs epochs(mean/stdev)')
for e1, e2 in zip(logseries1, logseries2):
    mean1 = np.mean(logseries1[e1]['epoch'])
    stdev1 = np.std(logseries1[e1]['epoch'])
    mean2 = np.mean(logseries2[e2]['epoch'])
    stdev2 = np.std(logseries2[e2]['epoch'])
    print('{:4d} {:4d} {:8.2f} {:8.4f} {:4d} {:8.2f} {:8.4f}'.format(
        e1, len(logseries1[e1]['epoch']), mean1, stdev1,
        len(logseries2[e2]['epoch']), mean2, stdev2))

# + id="bzbFlr_30QLH"
#Boxplots
logFNL = exppath + 'experiment_AP25.log'
logseries = getLogSeries(logFNL, False)
plotBoxes1(logseries, title='25 runs')

logFNL = exppath + 'experiment_AP100.log'
logseries = getLogSeries(logFNL, True)
plotBoxes1(logseries, title='100 runs')

# + id="_pxXo2bdotOl"
#Boxplots
logFNL = exppath + 'experiment_AP30-1.0.log'
logseries = getLogSeries(logFNL, False)
showMeans(logseries, maxAcc=1.0, time=False)

import itertools
# split the series into two plots so the widely different x positions stay readable
lseries = dict(itertools.islice(logseries.items(), 9))
plotBoxes2(lseries, title='Acc(%) 30 runs 200 epochs', width=1, positions=[20, 18, 16, 14, 12, 10, 8, 6, 4])
lseries = dict(itertools.islice(logseries.items(), 9, 14))
plotBoxes2(lseries, title='Acc(%) 30 runs 200 epochs', width=0.3, positions=[2, 1.5, 1, 0.5, 0.25])

# + id="zTsqcRM9bVTr"
def plotCompareAcc(logFNL1, logFNL2, description):
    """Tabulate and plot mean accuracy and #(100% runs) for two experiment logs."""
    logseries1 = getLogSeries(logFNL1, False)
    logseries2 = getLogSeries(logFNL2, False)
    exps, runs, means1, mins, maxs, stdevs, nbests1 = lstats(logseries1, measure='acc', best=1.0)
    exps, runs, means2, mins, maxs, stdevs, nbests2 = lstats(logseries2, measure='acc', best=1.0)

    print('Pts 100% Acc 100% Acc(shuffled)')
    for e, m1, b1, m2, b2 in zip(exps, means1, nbests1, means2, nbests2):
        print('{:4d} {:4d} {:10.4f} {:4d} {:10.4f}'.format(e, b1, m1, b2, m2))

    f = plt.figure(figsize=(15,3))
    ax1 = f.add_subplot(121)
    ax2 = f.add_subplot(122)
    ax1.plot(exps, means1, label='unShuffled')
    ax1.plot(exps, means2, label='shuffled')
    ax2.plot(exps, nbests1, label='unShuffled')
    ax2.plot(exps, nbests2, label='shuffled')
    ax1.set_title(description+' val_acc')
    ax1.set_xlabel(description)
    ax1.set_ylabel('val_acc(%)')
    ax1.legend(loc='best')
    ax2.set_title('100% reached')
    ax2.set_xlabel(description)
    ax2.set_ylabel('no. of 100% reaches in {} runs'.format(runs[0]))
    ax2.legend(loc='best')
    f.show()

# + id="wjGq8jKWE1pS"
#Show experiences
logFNL1 = exppath + 'experiment_AS30-1.0.log'
logFNL2 = exppath + 'experiment_BS30-1.0.log'
plotCompareAcc(logFNL1, logFNL2, 'Samples')

# + id="EzgLakMTcOAu"
#Show experiences
logFNL1 = exppath + 'experiment_AP30-1.0.log'
logFNL2 = exppath + 'experiment_BP30-1.0.log'
plotCompareAcc(logFNL1, logFNL2, 'Sample points')

# + id="ZDaYd9y4L2mp"
def boxplotCompareAcc(logFNL1, logFNL2, description):
    """Boxplot accuracy of the 2000-pt series of log1 with log2's values scattered.

    Work-in-progress cell: most alternative plotting attempts are kept
    commented out, as in the original notebook.
    """
    logseries1 = getLogSeries(logFNL1, False)
    logseries2 = getLogSeries(logFNL2, False)
    series1 = logseries1[2000]['acc']
    series2 = logseries2[2000]['acc']
    #fig, ax1 = plt.subplots()
    #plt.title('Epochs to reach acc={}% or best'.format(earlyStopACCmin*100))
    print(series2)
    #plt.boxplot(boxes, positions=[20, 18, 16, 14, 12, 10, 8, 6, 4], widths=1)
    plt.boxplot(series1)#, positions=[2000])
    #ax2.set_ylabel('Acc(%)', color='red')
    plt.ylabel('Acc(%)')
    #ax1. .hold()
    #plt.show()
    #ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    #plt.boxplot(series2, positions=[5000])
    #plt.plot(np.ones(len(series2)), series2, 'x')#, positions=[2000])
    plt.scatter(range(len(series2)), series2)
    #ax2.plot(range(xlen), [x * 100 for x in trainAccs], 'red')
    #ax2.set_ylabel('Acc(%)', color='red')
    #ax2.tick_params(axis='y', labelcolor='red') #set axis tick mark labels to red
    plt.show()
    #exps, runs, means1, mins, maxs, stdevs, nbests1 = lstats(logseries1, measure='acc', best=1.0)
    #exps, runs, means2, mins, maxs, stdevs, nbests2 = lstats(logseries2, measure='acc', best=1.0)
    #print('Pts 100% Acc 100% Acc(shuffled)')
    #for e, m1, b1, m2, b2 in zip(exps, means1, nbests1, means2, nbests2):
    #    print('{:4d} {:4d} {:10.4f} {:4d} {:10.4f}'.format(e, b1, m1, b2, m2))
    #f = plt.figure(figsize=(15,3))
    #ax1 = f.add_subplot(121)
    #ax2 = f.add_subplot(122)
    #ax1.plot(exps, means1, label='unShuffled')
    #ax1.plot(exps, means2, label='shuffled')
    #ax2.plot(exps, nbests1, label='unShuffled')
    #ax2.plot(exps, nbests2, label='shuffled')
    '''
    ax1.set_title(description+' val_acc')
    ax1.set_xlabel(description)
    ax1.set_ylabel('val_acc(%)')
    ax1.legend(loc='best')
    ax2.set_title('100% reached')
    ax2.set_xlabel(description)
    ax2.set_ylabel('no. of 100% reaches in {} runs'.format(runs[0]))
    ax2.legend(loc='best')
    '''
    #fig.show()

#Show experiences
logFNL1 = exppath + 'experiment_AP30-1.0.log'
logFNL2 = exppath + 'experiment_BP30-1.0.log'
boxplotCompareAcc(logFNL1, logFNL2, 'Sample points')

# + id="h_n0cTAKULxX"
import numpy as np
import collections

def plotboxScatter(series, showmeans=True, showScatter=True, disperse=True, showCount=False,
                   positionlabels='', title='', ylabel='', xlabel='',
                   alphaScatter=0.6, formatScatter='rx', meanPrec='{:.4f}', grid=True):
    """Boxplot a list of series with the raw points scattered on top.

    Each series is drawn at x = 1..len(series); duplicate values can be
    annotated with their multiplicity (showCount) and points can be spread
    horizontally (disperse) to reduce overplotting. Returns the figure.
    """
    positions = range(1,len(series)+1)
    f, ax = plt.subplots()
    ax.boxplot(series, positions=positions, showmeans=showmeans)
    for i in range(len(positions)):
        if showScatter:
            if disperse:
                # spread points in a +/-0.1 band around the box position
                x = np.linspace(positions[i]-0.1, positions[i]+0.1, len(series[i]))
            else:
                x = np.ones(len(series[i]))
                x = [e*positions[i] for e in x]
            if showCount:
                counter=collections.Counter(series[i])
                for c in counter:
                    if counter[c] > 1:
                        ax.annotate(str(counter[c]), (positions[i], c), color='red', alpha=alphaScatter)
            ax.plot(x, series[i], formatScatter, alpha=alphaScatter)
        mean = np.mean(series[i])
        ax.annotate(meanPrec.format(mean), (positions[i], mean))
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_title(title)
    ax.set_xticklabels(positionlabels)
    if grid:
        ax.grid(color='k', alpha=0.05)
    else:
        ax.grid()
    return f

logFNL1 = exppath + 'experiment_AP30-1.0.log'
logFNL2 = exppath + 'experiment_BP30-1.0.log'
logseries = getLogSeries(logFNL1, False)
data1 = logseries[2000]['acc']
data1 = [e*100 for e in data1]
logseries = getLogSeries(logFNL2, False)
data2 = logseries[2000]['acc']
data2 = [e*100 for e in data2]
f = plotboxScatter([data1, data2], disperse=False, showCount=True,
                   positionlabels=['seed=1024 800s x 2000p', 'shuffled 800s x 2000p'],
                   ylabel = 'val_acc (%)')
f.show()

# + id="7tHCoecH7fCS"
##Fix log info (disabled; one-off log-migration helper kept as reference)
# NOTE(review): inside the disabled block, the list `addTime` shadows what the
# loop then calls as `addTime(l, i)` while the function is named `addtime` —
# re-check names before ever re-enabling this.
'''
import os

def addtime(log, idx):
#def delmem(log, idx):
    entry = list(log._SimpleLog__log[idx])
    info = list(entry[1])
    #info = info[:-1]
    info = (*info, 0)
    print(info)
    entry[1] = tuple(info)
    log._SimpleLog__log[idx] = tuple(entry)

files = os.listdir(exppath)
addTime= []
remMem = []
print (len(files), files)
for f in files:
    log = SimpleLog.load(exppath+f)
    last = log.getLast()[1]
    if len(last)==5:
        addTime.append(f)
    if len(last)==7:
        remMem.append(f)
        print(len(last), f)
print('->', remMem)
print('->', addTime)

for f in addTime: #remMem:
    l = SimpleLog.load(exppath + f)
    #showLog(l, time=False)
    for i in range(l.len()):
        addTime(l, i)
        #delmem(l, i)
    #l.save(exppath + f)
    showLog(l, time=False)
'''

# + id="GTMnlwUZFnpb"
#Manually Re assemble broken log if needed
#Must be done case by case
#
'''
def getRun(log, entry):
    return log._SimpleLog__log[entry][1][1]

def setRun(log, idx, val):
    entry = list(log._SimpleLog__log[idx])
    info = list(entry[1])
    info[1] = val
    entry[1] = tuple(info)
    log._SimpleLog__log[idx] = tuple(entry)

#setRun(log0, 0, -2)
#print(getRun(log0, 0))

logFN0 = exppath + 'experiment0.log'
logFN1 = exppath + 'experiment1.log'
logFN2 = exppath + 'experiment2.log'
logFNS = exppath + 'experimentX.log'
log0 = SimpleLog.load(logFN0)
log1 = SimpleLog.load(logFN1)
log2 = SimpleLog.load(logFN2)
#log3 = log3.head(25)
#for i in range(log4.len()): setRun(log4, i, i+17)
log0.append(log1)
log0.append(log2)
log0.append(log3)
log0.save(logFNS)

#Reload and check
log = SimpleLog.load(logFNS)
print('Log entries = ', len(log.getInfo()))
print('Log: ')
for i in log.getInfo():
    print(i)
print('\nLog last entrie:')
print(log.getLast()[0])
print(log.getLast()[1])
'''
classifier-15.3.ipynb/monitorlog.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:codon]
#     language: python
#     name: conda-env-codon-py
# ---

# # Baseline classification model
#
# This is the baseline model for the project. It is a random forest model coupled with a basic grid search. The scores for this baseline model are as follows: <br>
# Accuracy: 0.794 <br>
# Recall: 0.794 <br>
# Precision: 0.821 <br>

# +
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, recall_score, precision_score, confusion_matrix

# +
# Import the training dataset; predict Kingdom from the codon-usage features.
train = pd.read_csv('Data/train.csv', low_memory = False)

y_train = train['Kingdom']
# Drop the target plus identifier/bookkeeping columns that are not features.
# (Fix: `axis=1` was redundant alongside the `columns=` keyword.)
X_train = train.drop(columns = ['Kingdom', 'Unnamed: 0', 'SpeciesID', 'Ncodons', 'SpeciesName'])
# -

# Inspect the feature matrix.
X_train

# +
# Initialize the Random Forest model
rf = RandomForestClassifier()

# Define the grid search parameters (2x2 grid; 'verbose' is fixed, not tuned)
params = {'n_estimators': [100, 150], 'max_depth': [5, 7], 'verbose':[0]}

# Initialize the grid search object
grid = GridSearchCV(rf, params)

# Fit the GridSearchCV object (cross-validated fit over the parameter grid)
grid.fit(X_train, y_train)

# +
# Check the best score from the grid search cross-validation
print(f'Best score from the grid search: {grid.best_score_}')

# Check the best parameters from the grid search cross-validation
print(f'The best parameters were: {grid.best_params_}')

# +
# Load the testing data into dataframe and apply the same column selection.
test = pd.read_csv('Data/test.csv')
y_test = test['Kingdom']
X_test = test.drop(columns = ['Kingdom', 'Unnamed: 0', 'SpeciesID', 'Ncodons', 'SpeciesName'])

# Make predictions using the grid search fitted params (refit best estimator)
preds = grid.predict(X_test)

# +
# Check the metric scores (weighted averages across the Kingdom classes)
print(f'Accuracy: {accuracy_score(y_test, preds)}')
print(f'Recall: {recall_score(y_test, preds, average = "weighted")}')
print(f'Precision: {precision_score(y_test, preds, average = "weighted", zero_division = 1)}')
print(f'Confusion Matrix: \n {confusion_matrix(y_test, preds)}')
# -
Baseline_Random_Forest_Model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Frequency of Words # The Quintus Curtius Snodgrass Letters: As a forensic example of applied statistics, there was a famous case where <NAME> was accused of being a Confederate deserter during the Civil War, and the evidence given were ten essays published in the New Orleans Daily Crescent under the name <NAME>. In 1963 <NAME> published an article in the # Journal of the American Statistical Association where he uses word frequencies and a chi-squared test to show that the essays were almost certainly not Twain’s. # # Brinegar’s Abstract: # “<NAME> is widely credited with the authorship of 10 letters published in 1861 in the New Orleans Daily Crescent. The adventures described in these letters, which are signed “<NAME>,” provide the historical basis of a main part of Twain’s presumed role in the Civil War. This study applies an old, though little used statistical test of authorship - a word-length frequency test - to show that Twain almost certainly did not write these 10 letters. The statistical analysis includes a visual comparison of several word-length frequency distributions and applications of the 𝜒2 and two-sample t tests.” # # # # TODO: Show how to count the frequency of three letter words # The following table shows relative frequencies of three-letter-words from the Snodgrass letters, and from samples of Twain’s known works. Rather than run them through complex calculations, let’s make box plots! 
# import pandas as pd snodgrass = [.209,.205,.196,.210,.202,.207,.224,.223,.220,.201] twain = [.225,.262,.217,.240,.230,.229,.235,.217] df = pd.DataFrame([snodgrass,twain]).T df.columns= ["QCS","MT"] df # # Lets check if there is a significant difference in these three letter words # + import plotly.offline as pyo import plotly.graph_objs as go pyo.init_notebook_mode(connected=True) snodgrass = [.209,.205,.196,.210,.202,.207,.224,.223,.220,.201] twain = [.225,.262,.217,.240,.230,.229,.235,.217] data = [ go.Box( y=snodgrass, name='QCS' ), go.Box( y=twain, name='MT' ) ] layout = go.Layout( title = 'Comparison of three-letter-word frequencies<br>\ between <NAME> and <NAME>' ) fig = go.Figure(data=data, layout=layout) pyo.iplot(fig, filename='box3.html') # -
14 Text und Machine Learning/2.9 Frequency of Words as hidden author signature .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # WeatherPy # ---- # # #### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time import openweathermapy as ow # Import API key from api_keys import api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) # - # ## Generate Cities List # + # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) # - # ### Perform API Calls # * Perform a weather check on each city using a series of successive API calls. # * Include a print log of each city as it'sbeing processed (with the city number and city name). 
#

# +
# Accumulators for the per-city fields pulled from each API response.
country = []
date = []
cloudiness = []
lat = []
lng = []
temp = []
wind = []
humidity = []
new_cities = []
country = []  # NOTE(review): duplicate of the assignment above — redundant re-initialisation
set_count = 0
record_count = 0

# Base endpoint; the API key is embedded in this string.
url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + api_key

print("Gathering Data")
print("********************")

for city in cities:
    query_url = url + "&q=" + city
    response = requests.get(query_url).json()
    # Record/set bookkeeping: counts 1..50 within a set, then wraps.
    # NOTE(review): after a wrap the next line prints "record 0 of set n",
    # and the counter advances even when the lookup below fails.
    if record_count < 50:
        record_count += 1
    else:
        set_count += 1
        record_count = 0
    print('Processing record {} of set {} | {}'.format(record_count, set_count, city))
    # NOTE(review): this logs the base URL, which contains the API key —
    # remove (or log query-free text) before sharing notebook output.
    print(url)
    try:
        cloudiness.append(response['clouds']['all'])
        country.append(response['sys']['country'])
        date.append(response['dt'])
        humidity.append(response['main']['humidity'])
        temp.append(response['main']['temp_max'])
        lat.append(response['coord']['lat'])
        lng.append(response['coord']['lon'])
        wind.append(response['wind']['speed'])
        new_cities.append(city)
    # NOTE(review): a bare except also swallows network errors and KeyboardInterrupt;
    # and if a later append raises after earlier ones succeeded, the lists go out of
    # step with each other. `except KeyError:` plus reading all fields before
    # appending any would be safer.
    except:
        print("City not found!")
        pass

print('*************************')
print('Data Retrieval Complete')
print('**************************')
# -

# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame

# Assemble the parallel lists into one DataFrame, one row per successfully fetched city.
weather_dict = {
    "City": new_cities,
    "Cloudiness" : cloudiness,
    "Country" : country,
    "Date" : date,
    "Humidity" : humidity,
    "Temp": temp,
    "Lat" : lat,
    "Lng" : lng,
    "Wind Speed" : wind
}
weather_data = pd.DataFrame(weather_dict)
weather_data.count()

weather_data.head()

# NOTE(review): writes to ..\dataframes\, not the output_data_file path defined earlier;
# Windows-only separators — forward slashes or pathlib would be portable.
export_csv = weather_data.to_csv('..\\dataframes\\weather_data.csv')

# ### Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.

# #### Latitude vs. Temperature Plot

weather_data.plot(kind = 'scatter', x = 'Lat', y = 'Temp', color = 'red')
plt.title('Latitude of City vs Max Temperature')
plt.xlabel('Latitude')
plt.ylabel('Max Temperature (F)')
plt.grid()
# NOTE(review): '\I' and '\l' happen not to be escape sequences so these paths
# survive intact, but raw strings (r'..\Images\...') would be safer.
plt.savefig('..\Images\lat_vs_temp.png')

# #### Latitude vs. Humidity Plot

weather_data.plot(kind = 'scatter', x = 'Lat', y = 'Humidity', color = 'red')
plt.title('Latitude of City vs Humidity')
plt.xlabel('Latitude')
plt.ylabel('Humidity')
plt.grid()
plt.savefig('..\Images\lat_vs_humidity.png')

# #### Latitude vs. Cloudiness Plot

weather_data.plot(kind = 'scatter', x = 'Lat', y = 'Cloudiness', color = 'red')
plt.title('Latitude of City vs Cloudiness')
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
plt.grid()
plt.savefig('..\Images\lat_vs_cloudiness.png')

# #### Latitude vs. Wind Speed Plot

weather_data.plot(kind = 'scatter', x = 'Lat', y = 'Wind Speed', color = 'red')
plt.title('Latitude of City vs Wind Speed')
plt.xlabel('Latitude')
plt.ylabel('Wind Speed (MPH)')
plt.grid()
plt.savefig('..\Images\lat_vs_windspeed.png')
starter_code/WeatherPy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tensorflow
#     language: python
#     name: tensorflow
# ---

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split

# Two well-separated Gaussian blobs -> a linearly separable binary problem.
X, y = make_blobs(n_samples=1000, n_features=2, cluster_std=1.0,
                  centers=[(-2, 0), (2, 0)], random_state=42)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.1, random_state=45)

y_train.shape

# Reshape targets into (m, 1) column vectors so they broadcast cleanly
# against the (m, 1) probabilities produced by prob().
y_train = y_train.reshape(y_train.size, 1)

y_train.shape

y_test = y_test.reshape(y_test.size, 1)


def prob(X, W, b):
    """Return P(y=1 | X) = sigmoid(X @ W + b) as an (m, 1) array.

    X: (m, n) feature matrix; W: (n, 1) weights; b: (1, 1) bias.

    Fix: the naive 1/(1+exp(-Z)) overflows in np.exp for large negative Z.
    Evaluate the algebraically equivalent branch per sign so only
    non-positive values are ever exponentiated.
    """
    Z = np.dot(X, W) + b
    p = np.empty_like(Z, dtype=float)
    neg = Z < 0
    p[~neg] = 1.0 / (1.0 + np.exp(-Z[~neg]))
    ez = np.exp(Z[neg])
    p[neg] = ez / (1.0 + ez)
    return p


def predict(X, W, b):
    """Return hard 0/1 class labels by thresholding prob() at 0.5."""
    p = prob(X, W, b)
    return (p > .5).astype(int)


def loss(X, y, W, b):
    """Mean binary cross-entropy of the model on (X, y).

    Fix: clip probabilities away from exact 0/1 so np.log never returns
    -inf (which would make the reported loss NaN once the model saturates).
    """
    p = prob(X, W, b)
    eps = np.finfo(float).eps
    p = np.clip(p, eps, 1.0 - eps)
    l = -y * np.log(p) - (1 - y) * np.log(1 - p)
    return l.mean()


def grad_loss(X, y, W, b):
    """Return (dL/dW, dL/db) of the mean cross-entropy wrt W and b.

    For the logistic model the gradient simplifies to X^T (p - y) / m.
    """
    m = X.shape[0]
    p = prob(X, W, b)
    dLdw = (1. / m) * np.dot(X.T, (p - y))
    dLdb = (1. / m) * np.sum((p - y), axis=0, keepdims=True)
    return dLdw, dLdb


# Random initial parameters.
# NOTE(review): unseeded global RNG — results vary run to run; seed if
# reproducibility matters.
W = np.random.random((2, 1))
b = np.random.random((1, 1))

W

b

eta = 0.01          # learning rate
num_iterations = 1000

# Plain batch gradient descent; log train/test loss and accuracy every 20 steps.
for i in range(num_iterations):
    dw, db = grad_loss(X_train, y_train, W, b)
    W -= eta * dw
    b -= eta * db
    if (i % 20) == 0:
        y_train_hat = predict(X_train, W, b)
        y_test_hat = predict(X_test, W, b)
        print (i, loss(X_train, y_train, W, b), (y_train == y_train_hat).mean(), \
               loss(X_test, y_test, W, b), (y_test == y_test_hat).mean())
ml/back_propagation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simulating 1D Trench Flow*
#
# The tool simulates the effect of the model parameters on the water table.
#
# ## Scenario and Equation
#
# ```{figure} /contents/modeling/lecture_11/images/M11_f4.png
# ---
# scale: 20%
# align: center
# name: Ditch
# ---
# Conceptual model of a flow between two water bodies separated by an unconfined aquifer
# ```
#
# You can calculate the steady flow in an unconfined aquifer with these equations<sup>[^Fetter2017]</sup>:
#
# $$q' = \frac{1}{2} \cdot K \cdot \frac{H_o^2-H_u^2}{L}$$
#
# $$h(x)=\sqrt{H_o^2 - \frac{H_o^2-H_u^2}{L} \cdot x+\frac{R}{K} \cdot x \cdot(L-x)}$$
#
# with
# $q'$ = flow per unit width $[m^2/s]$,
# $h$ = head at x $[m]$,
# $x$ = distance from the origin $[m]$,
# $H_o$ = head at the origin $[m]$,
# $H_u$ = head at L $[m]$,
# $L$ = distance from the origin at the point $H_u$ is measured $[m]$,
# $K$ = hydraulic conductivity $[m/s]$,
# $R$ = recharge rate $[m/s]$
#
# [^Fetter2017]: <NAME>, <NAME>, <NAME> (2017), _Contaminant Hydrogeology_: Third Edition, Waveland Press
#
# **_Contributed by Ms. <NAME> and <NAME>. The original concept is from Prof. R. Liedl's spreadsheet code._**
#
# ### How to use this tool ###
#
# 1. Go to the Binder by clicking the rocket button (top-right of the page)
#
# 2. Execute the code cell with the libraries, then the code cell with the function
#
# 3. Interact with the sliders.
# # The codes are licensed under CC by 4.0 [(use anyways, but acknowledge the original work)](https://creativecommons.org/licenses/by/4.0/deed.en) # Initialize librarys import matplotlib.pyplot as plt import numpy as np import math from ipywidgets import * # + # Definition of the function def head(Ho, Hu, L, R, K): """ Ho: inflow head in [m] Hu: outflow head in [m] L: Domain length in [m] R: Recharge rate in [mm/d] K: Hydraulic conductivity in [m/s] """ x = np.arange(0, L,L/1000) R=R/1000/365.25/86400 h=(Ho**2-(Ho**2-Hu**2)/L*x+(R/K*x*(L-x)))**0.5 plt.plot(x, h) plt.ylabel('head [m]') plt.ylim(0,1.5*Ho) plt.xlabel('x [m]') plt.xlim(0,L) plt.show() style = {'description_width': 'initial'} interact(head, Ho=widgets.BoundedFloatText(value=10, min=0, max=1000, step=0.1, description='Ho:', disabled=False), Hu=widgets.BoundedFloatText(value=7.5, min=0, max=1000, step=0.1, description='Hu:', disabled=False), L= widgets.BoundedFloatText(value=175,min=0, max=10000,step=1, description='L:' , disabled=False), R=(-500,2500,10), K=widgets.FloatLogSlider(value=0.0002,base=10,min=-6, max=-2, step=0.0001,readout_format='.2e')) # - R=800/1000/365.25/86400 R
_build/html/_sources/contents/modeling/lecture_11/1D_ditchflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] toc="true" # # Table of Contents # <p><div class="lev1"><a href="#Conditional-Probability"><span class="toc-item-num">1&nbsp;&nbsp;</span>Conditional Probability</a></div><div class="lev1"><a href="#Probability-Trees"><span class="toc-item-num">2&nbsp;&nbsp;</span>Probability Trees</a></div><div class="lev1"><a href="#Bayesian-Inference"><span class="toc-item-num">3&nbsp;&nbsp;</span>Bayesian Inference</a></div><div class="lev2"><a href="#before-you-collect-data"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>before you collect data</a></div><div class="lev2"><a href="#data-collection"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>data collection</a></div><div class="lev2"><a href="#posterior"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>posterior</a></div><div class="lev2"><a href="#recap"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>recap</a></div><div class="lev1"><a href="#Examples-of-Bayesian-Inference"><span class="toc-item-num">4&nbsp;&nbsp;</span>Examples of Bayesian Inference</a></div> # - # # Conditional Probability # <img src="images/Screen Shot 2016-06-01 at 10.03.36 PM.png"> # <img src="images/Screen Shot 2016-06-01 at 10.03.41 PM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/fN6Kh/conditional-probability) 2:19* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-01 at 10.07.26 PM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/fN6Kh/conditional-probability) 3:35* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-01 at 10.11.43 PM.png"> # <img src="images/Screen Shot 2016-06-01 at 10.11.49 PM.png"> # # *Screenshot taken from 
[Coursera](https://www.coursera.org/learn/probability-intro/lecture/fN6Kh/conditional-probability) 4:29* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-01 at 10.12.55 PM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/fN6Kh/conditional-probability) 5:07* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-01 at 10.26.59 PM.png"> # <img src="images/Screen Shot 2016-06-05 at 3.51.36 PM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/fN6Kh/conditional-probability) 9:13* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-05 at 3.54.47 PM.png"> # <img src="images/Screen Shot 2016-06-05 at 3.56.12 PM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/fN6Kh/conditional-probability) 9:31* # # <!--TEASER_END--> # # Probability Trees # <img src="images/Screen Shot 2016-06-02 at 8.19.08 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/QE3kX/probability-trees) 4:32* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-02 at 8.26.30 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/QE3kX/probability-trees) 6:00* # # <!--TEASER_END--> # - So, we're told that 25.9% of this country's population is infected with HIV. So the probability of having HIV is 0.259. We also know something about the accuracy of the test, which seems to vary depending on whether the person has HIV or not. This is very common for medical tests. They tend to have different accuracy rates the, different accuracy rates for whether the patient has the disease or does not have the disease. 
# - This statement, for those who carry HIV, the ELISA test is 99.7% accurate, basically means that probability of testing positive, because that's what an accurate result would be if a person has HIV, so probability of positive given HIV is 0.997. # - This statement, for those who do not carry HIV, the test is 92.6% accurate, means probability of testing negative, because that's what accurate would mean in this case given that the patient does not have HIV, is 0.926. # - The question says, if an individual from Swaziland has tested positive, what is the probability that he carries HIV? So what we know is that the person tested positive. We're looking to see what is the probability that they have HIV. What we can see here is that we have a situation where we're asked for a conditional probability, and the condition has been reversed from one of the things that we are given, and we should really think about a tree diagram in this case. Those tend to be the most effective methods for getting to the answer. There are definitely other ways that you can solve this problem, and you can organize the information that's given to you. But a tree diagram tends to be one where you can really efficiently and effectively organize the information that you're given. And you're going to get to the right answer if you do it the right way. # - So, the first branch in the tree is always made up of marginal probabilities, since we're dividing up our population without conditioning on any other attributes. Some people in the population have HIV. That's the top branch. And others don't. That's the bottom branch. So probability of having HIV, as we saw, was 0.259 in Swaziland. # - And the probability of not having HIV is the complement of that, 1 minus 0.259 is going to give us 0.741. So about 74.1% of the population in Swaziland does not have HIV. Note that probabilities on a set of branches always add up to 1. # - Next, we move on to conditional probabilities. 
Let's start with the part of the population who has HIV, so we're going to be working with the top branch here. When these people take the test, they may get a positive or a negative result, because the test isn't 100% accurate. Therefore, we divide up the HIV population into two, those who test positive, and those who test negative. Based on information on the test that we were provided earlier, we know that the probability of testing positive, if someone has HIV is 0.997. # - Then, probability of testing negative if someone has HIV, this would be a false negative, would be the complement of that, 0.003. Similarly, among those who don't have HIV, some still test positive, and some test negative. Probability of accurately testing negative if the patient doesn't have HIV is 0.926. # - And the probability of a false positive, that's testing positive even though the patient does not have HIV is the complement of that, 0.074. Remember, our goal is to find the probability of having HIV, given that the patient has tested positive. Which based on Bayes' theorem should be probability of HIV and positive divided by probability of testing positive. Remember, the numerator is always the joint probability, and the denominator is the marginal probability of what we're conditioning on. So far, we don't have the building blocks we need to calculate the probability that we're interested in. # - To get the joint probabilities, like the one in the numerator, using the probability tree, all we need to do is multiply across the branches. This is why a probability tree is useful. Because it organizes the information for you in a way where you'd no longer have to think, what should I multiply with what. All you need to do is carry along the branches and pick up the building blocks along the way. # - We start with the marginal probability of having HIV and we multiply it by the probability of testing positive, given that the patient has HIV. 
So I'm following the first, the very top branch here, which is going to yield us the joint probability of having HIV and testing positive. So what we get is 0.259 from the first branch times 0.997 from the second branch, which gives up 0.2582. So, there's a 25.82% chance that a randomly drawn person from the Swaziland population has HIV and tests positive. # - Similarly, probability of HIV and negative is going to be the probability of HIV, 0.259, times the probability of negative given HIV, 0.003. That's a really tiny probability, 0.0008. We can keep going and calculate similar probabilities for the lower branch, the no HIV population as well. # - Probability of no HIV and positive comes out to be 5.48% and probability of no HIV and negative comes out to be 68.60, 68.62%. We've done a bunch of calculations so far, but let's go back to the task at hand. We're only interested in those who test positive, because that's what our given are. And among these, we're especially interested in those who actually have the HIV. 
# <img src="images/Screen Shot 2016-06-02 at 8.30.46 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 8.31.00 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 8.32.21 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/QE3kX/probability-trees) 10:00* # # <!--TEASER_END--> # # Bayesian Inference # <img src="images/Screen Shot 2016-06-02 at 8.34.45 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 8.37.19 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qYbZQ/bayesian-inference) 1:05* # # <!--TEASER_END--> # ## before you collect data # <img src="images/Screen Shot 2016-06-02 at 8.42.02 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 8.42.08 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 8.51.05 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qYbZQ/bayesian-inference) 5:25* # # <!--TEASER_END--> # ## data collection # <img src="images/Screen Shot 2016-06-02 at 8.48.08 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 8.56.37 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qYbZQ/bayesian-inference) 7:23* # # <!--TEASER_END--> # ## posterior # <img src="images/Screen Shot 2016-06-02 at 8.58.39 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qYbZQ/bayesian-inference) 12:13* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-02 at 9.00.24 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qYbZQ/bayesian-inference) 13:12* # # <!--TEASER_END--> # ## recap # <img src="images/Screen Shot 2016-06-02 at 9.00.30 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qYbZQ/bayesian-inference) 13:30* # # <!--TEASER_END--> # # Examples of Bayesian Inference # <img 
src="images/Screen Shot 2016-06-02 at 9.13.11 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 9.14.38 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/AWtbY/examples-of-bayesian-inference) 5:33* # # <!--TEASER_END--> # <img src="images/Screen Shot 2016-06-02 at 9.16.23 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 9.17.44 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 9.17.50 AM.png"> # <img src="images/Screen Shot 2016-06-02 at 9.18.33 AM.png"> # # *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/AWtbY/examples-of-bayesian-inference) 7:22* # # <!--TEASER_END-->
statistics_with_R/1_probability_intro/lecture/week3/Conditional Probability.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Ff_Jv8Ptu4lP" # # # INSTALLATION # + colab={"base_uri": "https://localhost:8080/"} id="q5qYRG8zufHw" executionInfo={"status": "ok", "timestamp": 1630383045872, "user_tz": -570, "elapsed": 6940, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="aff76124-9813-4d51-803f-496fd52ff0bf" # !pip install aif360 # !pip install fairlearn # + colab={"base_uri": "https://localhost:8080/"} id="TltW3iPkux0Q" executionInfo={"status": "ok", "timestamp": 1630383046371, "user_tz": -570, "elapsed": 509, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="94fcbe47-78d8-4b14-bdcb-6ca1c8e4f15d" # !apt-get install -jre # !java -version # + colab={"base_uri": "https://localhost:8080/"} id="KssrNl8GvDYU" executionInfo={"status": "ok", "timestamp": 1630383049207, "user_tz": -570, "elapsed": 2839, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="1d4f6c6f-4b34-4773-e7f6-ed3498ca60d2" # !pip install h2o # + colab={"base_uri": "https://localhost:8080/"} id="_NQn2JJ0uw6u" executionInfo={"status": "ok", "timestamp": 1630383053225, "user_tz": -570, "elapsed": 4035, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="64ca081a-3d6a-4c9a-ee9f-4e2d909b87cb" # !pip install xlsxwriter # + colab={"base_uri": "https://localhost:8080/"} id="a0YklbHpAxd8" 
executionInfo={"status": "ok", "timestamp": 1630383056396, "user_tz": -570, "elapsed": 3205, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="6d28cce7-78e6-4d5b-8cce-154bda91778d" # !pip install BlackBoxAuditing # + [markdown] id="-Y_uQ6vdvN4a" # #IMPORTS # + id="rf1aISz6vGfR" executionInfo={"status": "ok", "timestamp": 1630383056415, "user_tz": -570, "elapsed": 86, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} import numpy as np from mlxtend.feature_selection import ExhaustiveFeatureSelector from xgboost import XGBClassifier # import pandas as pd import matplotlib.pyplot as plt import numpy as np import pandas as pd import openpyxl import xlsxwriter from openpyxl import load_workbook import BlackBoxAuditing import shap #suppress setwith copy warning pd.set_option('mode.chained_assignment',None) from sklearn.feature_selection import VarianceThreshold from sklearn.feature_selection import SelectKBest, SelectFwe, SelectPercentile,SelectFdr, SelectFpr, SelectFromModel from sklearn.feature_selection import chi2, mutual_info_classif # from skfeature.function.similarity_based import fisher_score import aif360 import matplotlib.pyplot as plt from aif360.metrics.classification_metric import ClassificationMetric from aif360.metrics import BinaryLabelDatasetMetric from aif360.algorithms.preprocessing import DisparateImpactRemover, Reweighing, LFR,OptimPreproc from aif360.datasets import StandardDataset , BinaryLabelDataset from sklearn.preprocessing import MinMaxScaler MM= MinMaxScaler() import h2o from h2o.automl import H2OAutoML from h2o.estimators.glm import H2OGeneralizedLinearEstimator import sys sys.path.append("../") import os # + id="RcxQeeX7vUXz" colab={"base_uri": "https://localhost:8080/", "height": 386} 
executionInfo={"status": "ok", "timestamp": 1630383056417, "user_tz": -570, "elapsed": 68, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="bf56a1aa-5c91-48b7-d18c-fbb7a097666d" h2o.init() # + [markdown] id="RQVI-ISXvrZm" # #**************************LOADING DATASET******************************* # + id="FEGPULDrvk3g" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630383056419, "user_tz": -570, "elapsed": 52, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="bd6e6a67-16ea-4bc0-af37-c606c4d49f3f" from google.colab import drive drive.mount('/content/gdrive') # + id="rA0wTXKH-csL" executionInfo={"status": "aborted", "timestamp": 1630371515491, "user_tz": -570, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} # advantagedGroup= [{'race':1}] # disadvantagedGroup= [{'race':0}] # tr=pd.read_csv(r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/Violent/Test/Test1.csv') # tr # tester= BinaryLabelDataset(favorable_label=1, # unfavorable_label=0, # df=tr, # label_names=['two_year_recid'], # protected_attribute_names=['race'], # unprivileged_protected_attributes=[[0]], # privileged_protected_attributes=[[1]]) # tester # TR = LFR(unprivileged_groups=disadvantagedGroup, # privileged_groups=advantagedGroup, # k=10, Ax=0.1, Ay=1.0, Az=2.0, # # verbose=1 # ) # TR = TR.fit(tester, maxiter=5000, maxfun=5000) # + id="YiMTx-YfV94X" executionInfo={"status": "aborted", "timestamp": 1630371515493, "user_tz": -570, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} # transformed = TR.transform(tester) # transformed.labels= tester.labels # transformed.protected_attributes= tester.protected_attributes # transformed.feature_names # + [markdown] id="qDh3f5HwHubq" # # GBM LFR # # # # + id="uN9VfZBAvxCj" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630390890631, "user_tz": -570, "elapsed": 7815127, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="1249e055-7aee-448e-e73c-46d83910db4e" for i in range(31,51,1): train_url=r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/FairData/Violent/Train' train_path= os.path.join(train_url ,("Train"+ str(i)+ ".csv")) train= pd.read_csv(train_path) first_column = train.pop('two_year_recid') train.insert(0, 'two_year_recid', first_column) test_url=r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/FairData/Violent/Test' test_path= os.path.join(test_url ,("Test"+ str(i)+ ".csv")) test= pd.read_csv(test_path) first_column = test.pop('two_year_recid') test.insert(0, 'two_year_recid', first_column) #********************************************************binary labels for LFR************************************************************* advantagedGroup= [{'race':1}] disadvantagedGroup= [{'race':0}] # bldTrain= BinaryLabelDataset(favorable_label=1, # unfavorable_label=0, # df=train, # label_names=['two_year_recid'], # protected_attribute_names=['race'], # unprivileged_protected_attributes=[[0]], # privileged_protected_attributes=[[1]]) # bldTest= BinaryLabelDataset(favorable_label=1, # unfavorable_label=0, # df=test, # label_names=['two_year_recid'], # protected_attribute_names=['race'], # unprivileged_protected_attributes=[[0]], # privileged_protected_attributes=[[1]]) # 
#*******************************************************LFR instance************************************************************** # TR = LFR(unprivileged_groups=disadvantagedGroup, # privileged_groups=advantagedGroup) # TR = TR.fit(bldTrain, maxiter=5000, maxfun=5000) # #setting the label and the protected groups of the transformed to the original so that only # #features are transformed. in order for LFR to be used as pre_processing algorithm # #transforming and setting transformed train labels and protected attributes # LFR_Train = TR .transform(bldTrain ) # LFR_Train.labels= bldTrain.labels # # LFR_Train.protected_attributes= bldTrain.protected_attributes # #transforming and setting transformed test labels and protected attributes # LFR_Test = TR .transform(bldTest) # LFR_Test.labels= bldTest.labels # # LFR_Test.protected_attributes= bldTest.protected_attributes # #*****************************************Repaired Train and Test Set NB: home language is at index 3******************************************************* # #using bldTest and bldTrain protected attr vals which is the old protected attributes as we are not using the transformed version of it created by the LFR # train= pd.DataFrame(np.hstack([LFR_Train .labels, LFR_Train .features[:,0:2],bldTrain.protected_attributes,LFR_Train.features[:,3:]]),columns=train.columns) # test= pd.DataFrame(np.hstack([LFR_Test .labels, LFR_Test .features[:,0:2],bldTest.protected_attributes,LFR_Test.features[:,3:]]),columns=test.columns) # # TotalRepairedDF= pd.concat([RepairedTrain ,RepairedTest ]) # # normalization of train and test sets # Fitter= MM.fit(train) # transformed_train=Fitter.transform(train) # train=pd.DataFrame(transformed_train, columns= train.columns) # #test normalization # transformed_test=Fitter.transform(test) # test=pd.DataFrame(transformed_test, columns= test.columns) # *************CHECKING FAIRNESS IN DATASET************************** ## ****************CONVERTING TO BLD 
FORMAT****************************** #Transforming the Train and Test Set to BinaryLabel class Test(StandardDataset): def __init__(self,label_name= 'two_year_recid', favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ): super(Test, self).__init__(df=test , label_name=label_name , favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names , privileged_classes=privileged_classes , ) BLD_Test= Test(protected_attribute_names= ['race'], privileged_classes= [[1]]) ## ********************Checking Bias Repaired Data******************************** DataBias_Checker = BinaryLabelDatasetMetric(BLD_Test , unprivileged_groups= disadvantagedGroup, privileged_groups= advantagedGroup) dsp= DataBias_Checker .statistical_parity_difference() dif= DataBias_Checker.consistency() ddi= DataBias_Checker.disparate_impact() print('The Statistical Parity diference is = {diff}'.format(diff= dsp )) print('Individual Fairness is = {IF}'.format( IF= dif )) print('Disparate Impact is = {IF}'.format( IF= ddi )) # ********************SETTING TO H20 FRAME AND MODEL TRAINING******************************* x = list(train.columns) y = "two_year_recid" x.remove(y) Train=h2o.H2OFrame(train) Test= h2o.H2OFrame(test) Train[y] = Train[y].asfactor() Test[y] = Test[y].asfactor() aml = H2OAutoML(max_models=5, nfolds=5, include_algos=['GBM'] , stopping_metric='AUTO') #verbosity='info',,'GBM', 'DRF' aml.train(x=x, y=y, training_frame=Train) best_model= aml.leader # a.model_performance() #**********************REPLACE LABELS OF DUPLICATED TEST SET WITH PREDICTIONS**************************** #predicted labels gbm_Predictions= best_model.predict(Test) gbm_Predictions= gbm_Predictions.as_data_frame() predicted_df= test.copy() predicted_df['two_year_recid']= gbm_Predictions.predict.to_numpy() # ********************COMPUTE DISCRIMINATION***************************** advantagedGroup= [{'race':1}] disadvantagedGroup= [{'race':0}] class 
PredTest(StandardDataset): def __init__(self,label_name= 'two_year_recid', favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ): super(PredTest, self).__init__(df=predicted_df , label_name=label_name , favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names , privileged_classes=privileged_classes , ) BLD_PredTest= PredTest(protected_attribute_names= ['race'], privileged_classes= [[1]]) # # Workbook= pd.ExcelFile(r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/BaseLines/GBM/gbm_Results.xlsx') # excelBook= load_workbook('/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/BaseLines/GBM/gbm_Results.xlsx') # OldDF= excelBook.get_sheet_by_name("Violent")#pd.read_excel(Workbook,sheet_name='Violent') #load workbook excelBook= load_workbook('/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/NewLFR_Results/gbm_Results.xlsx') Violent= excelBook['Violent'] data= Violent.values # Get columns columns = next(data)[0:] 10# Create a DataFrame based on the second and subsequent lines of data OldDF = pd.DataFrame(data, columns=columns) ClassifierBias = ClassificationMetric( BLD_Test,BLD_PredTest , unprivileged_groups= disadvantagedGroup, privileged_groups= advantagedGroup) Accuracy= ClassifierBias.accuracy() TPR= ClassifierBias.true_positive_rate() TNR= ClassifierBias.true_negative_rate() NPV= ClassifierBias.negative_predictive_value() PPV= ClassifierBias.positive_predictive_value() SP=ClassifierBias .statistical_parity_difference() IF=ClassifierBias.consistency() DI=ClassifierBias.disparate_impact() EOP=ClassifierBias.true_positive_rate_difference() EO=ClassifierBias.average_odds_difference() FDR= ClassifierBias.false_discovery_rate(privileged=False)- ClassifierBias.false_discovery_rate(privileged=True) NPV_diff=ClassifierBias.negative_predictive_value(privileged=False)-ClassifierBias.negative_predictive_value(privileged=True) 
FOR=ClassifierBias.false_omission_rate(privileged=False)-ClassifierBias.false_omission_rate(privileged=True) PPV_diff=ClassifierBias.positive_predictive_value(privileged=False) -ClassifierBias.positive_predictive_value(privileged=True) BGE = ClassifierBias.between_group_generalized_entropy_index() WGE = ClassifierBias.generalized_entropy_index()-ClassifierBias.between_group_generalized_entropy_index() BGTI = ClassifierBias.between_group_theil_index() WGTI = ClassifierBias.theil_index() -ClassifierBias.between_group_theil_index() EDF= ClassifierBias.differential_fairness_bias_amplification() newdf= pd.DataFrame(index = [0], data= { 'ACCURACY': Accuracy,'TPR': TPR, 'PPV':PPV, 'TNR':TNR,'NPV':NPV,'SP':SP,'CONSISTENCY':IF,'DI':DI,'EOP':EOP,'EO':EO,'FDR':FDR,'NPV_diff':NPV_diff, 'FOR':FOR,'PPV_diff':PPV_diff,'BGEI':BGE,'WGEI':WGE,'BGTI':BGTI,'WGTI':WGTI,'EDF':EDF, 'DATA_SP':dsp,'DATA_CONS':dif,'DATA_DI':ddi}) newdf=pd.concat([OldDF,newdf]) pathway= r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/NewLFR_Results/gbm_Results.xlsx' with pd.ExcelWriter(pathway, engine='openpyxl') as writer: #load workbook base as for writer writer.book= excelBook writer.sheets=dict((ws.title, ws) for ws in excelBook.worksheets) newdf.to_excel(writer, sheet_name='Violent', index=False) # newdf.to_excel(writer, sheet_name='Adult', index=False) print('Accuracy', Accuracy) # + [markdown] id="Hoc62jY7Olbt" # #LOGISTIC REGRESSION LFR # # + id="RZmY0q8iVY3O" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630391700457, "user_tz": -570, "elapsed": 809442, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}} outputId="94372104-080e-402a-f928-ba7758614eff" for i in range(1,51,1): train_url=r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/FairData/Violent/Train' train_path= os.path.join(train_url ,("Train"+ str(i)+ ".csv")) 
    # --- Load the i-th LFR-repaired train/test split; move the label column first ---
    train = pd.read_csv(train_path)
    first_column = train.pop('two_year_recid')
    train.insert(0, 'two_year_recid', first_column)
    test_url = r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/FairData/Violent/Test'
    test_path = os.path.join(test_url, ("Test" + str(i) + ".csv"))
    test = pd.read_csv(test_path)
    first_column = test.pop('two_year_recid')
    test.insert(0, 'two_year_recid', first_column)

    # ----- binary labels for LFR (disabled: the CSVs above are already LFR-repaired) -----
    # bldTrain= BinaryLabelDataset(favorable_label=1,
    #                              unfavorable_label=0,
    #                              df=train,
    #                              label_names=['two_year_recid'],
    #                              protected_attribute_names=['race'],
    #                              unprivileged_protected_attributes=[[0]],
    #                              privileged_protected_attributes=[[1]])
    # bldTest= BinaryLabelDataset(favorable_label=1,
    #                             unfavorable_label=0,
    #                             df=test,
    #                             label_names=['two_year_recid'],
    #                             protected_attribute_names=['race'],
    #                             unprivileged_protected_attributes=[[0]],
    #                             privileged_protected_attributes=[[1]])
    # ----- LFR instance (disabled) -----
    # TR = LFR(unprivileged_groups=disadvantagedGroup,
    #          privileged_groups=advantagedGroup)
    # TR = TR.fit(bldTrain, maxiter=5000, maxfun=5000)
    # setting the label and the protected groups of the transformed to the original so that only
    # features are transformed, in order for LFR to be used as pre_processing algorithm
    # transforming and setting transformed train labels and protected attributes
    # LFR_Train = TR .transform(bldTrain )
    # LFR_Train.labels= bldTrain.labels
    # # LFR_Train.protected_attributes= bldTrain.protected_attributes
    # transforming and setting transformed test labels and protected attributes
    # LFR_Test = TR .transform(bldTest)
    # LFR_Test.labels= bldTest.labels
    # # LFR_Test.protected_attributes= bldTest.protected_attributes
    # ----- Repaired Train and Test Set (disabled) NB: home language is at index 3 -----
    # using bldTest and bldTrain protected attr vals which is the old protected attributes as we
    # are not using the transformed version of it created by the LFR
    # train= pd.DataFrame(np.hstack([LFR_Train .labels, LFR_Train .features[:,0:2],bldTrain.protected_attributes,LFR_Train.features[:,3:]]),columns=train.columns)
    # test= pd.DataFrame(np.hstack([LFR_Test .labels, LFR_Test .features[:,0:2],bldTest.protected_attributes,LFR_Test.features[:,3:]]),columns=test.columns)

    # Min-max normalisation of train and test sets: the scaler is fit on train and
    # applied to both (avoids test leakage into scaling).
    # NOTE(review): `MM` is defined outside this view — presumably a MinMaxScaler; confirm.
    Fitter = MM.fit(train)
    transformed_train = Fitter.transform(train)
    train = pd.DataFrame(transformed_train, columns=train.columns)
    # test normalization
    transformed_test = Fitter.transform(test)
    test = pd.DataFrame(transformed_test, columns=test.columns)

    # ----- CHECKING FAIRNESS IN DATASET: convert to aif360 BinaryLabel format -----
    # Fairness groups: race==1 is privileged, race==0 unprivileged.
    advantagedGroup = [{'race': 1}]
    disadvantagedGroup = [{'race': 0}]
    # class Train(StandardDataset):
    #     def __init__(self,label_name= 'two_year_recid',
    #                  favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
    #         super(Train, self).__init__(df=train , label_name=label_name ,
    #             favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
    #             privileged_classes=privileged_classes ,
    #             )
    # BLD_Train= Train(protected_attribute_names= ['race'],
    #                  privileged_classes= [[1]])

    # Wraps the (already repaired) test dataframe as an aif360 StandardDataset with
    # a binary favourable label of 1 on 'two_year_recid'.
    class Test(StandardDataset):
        def __init__(self, label_name='two_year_recid',
                     favorable_classes=[1], protected_attribute_names=['race'], privileged_classes=[[1]], ):
            super(Test, self).__init__(df=test, label_name=label_name,
                                       favorable_classes=favorable_classes,
                                       protected_attribute_names=protected_attribute_names,
                                       privileged_classes=privileged_classes,
                                       )
    BLD_Test = Test(protected_attribute_names=['race'], privileged_classes=[[1]])

    # ----- Checking Bias in Data (dataset-level fairness metrics) -----
    DataBias_Checker = BinaryLabelDatasetMetric(BLD_Test, unprivileged_groups=disadvantagedGroup, privileged_groups=advantagedGroup)
    dsp = DataBias_Checker.statistical_parity_difference()
    dif = DataBias_Checker.consistency()
    ddi = DataBias_Checker.disparate_impact()
    print('The Statistical Parity diference is = {diff}'.format(diff=dsp))
    print('Individual Fairness is = {IF}'.format(IF=dif))
    print('Disparate Impact is = {IF}'.format(IF=ddi))

    # ----- SETTING TO H2O FRAME AND MODEL TRAINING -----
    x = list(train.columns)
    y = "two_year_recid"
    x.remove(y)
    # NOTE(review): `Test` here rebinds (clobbers) the class defined above — works only
    # because BLD_Test was already constructed; consider renaming these frames.
    Train = h2o.H2OFrame(train)
    Test = h2o.H2OFrame(test)
    Train[y] = Train[y].asfactor()   # classification target, not regression
    Test[y] = Test[y].asfactor()
    # Plain binomial GLM (logistic regression), no regularisation (lambda_=0).
    LogReg = H2OGeneralizedLinearEstimator(family="binomial", lambda_=0)
    LogReg.train(x=x, y=y, training_frame=Train)
    LogReg_Predictions = LogReg.predict(Test)
    LogReg_Predictions = LogReg_Predictions.as_data_frame()

    # ----- REPLACE LABELS OF DUPLICATED TEST SET WITH PREDICTIONS -----
    predicted_df = test.copy()
    predicted_df['two_year_recid'] = LogReg_Predictions.predict.to_numpy()

    # ----- COMPUTE DISCRIMINATION (classifier-level fairness metrics) -----
    advantagedGroup = [{'race': 1}]
    disadvantagedGroup = [{'race': 0}]

    # Same StandardDataset wrapper, but over the prediction-labelled copy of the test set.
    class PredTest(StandardDataset):
        def __init__(self, label_name='two_year_recid',
                     favorable_classes=[1], protected_attribute_names=['race'], privileged_classes=[[1]], ):
            super(PredTest, self).__init__(df=predicted_df, label_name=label_name,
                                           favorable_classes=favorable_classes,
                                           protected_attribute_names=protected_attribute_names,
                                           privileged_classes=privileged_classes,
                                           )
    BLD_PredTest = PredTest(protected_attribute_names=['race'], privileged_classes=[[1]])

    # Append this iteration's metrics to the running results sheet.
    excelBook = load_workbook(r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/NewLFR_Results/LR_Results.xlsx')
    Violent = excelBook['Violent']
    data = Violent.values
    # Get columns (first row of the sheet is the header)
    columns = next(data)[0:]
    OldDF = pd.DataFrame(data, columns=columns)

    # ClassificationMetric compares ground truth (BLD_Test) vs predictions (BLD_PredTest).
    ClassifierBias = ClassificationMetric(BLD_Test, BLD_PredTest, unprivileged_groups=disadvantagedGroup, privileged_groups=advantagedGroup)
    Accuracy = ClassifierBias.accuracy()
    TPR = ClassifierBias.true_positive_rate()
    TNR = ClassifierBias.true_negative_rate()
    NPV = ClassifierBias.negative_predictive_value()
    PPV = ClassifierBias.positive_predictive_value()
    SP = ClassifierBias.statistical_parity_difference()
    IF = ClassifierBias.consistency()
    DI = ClassifierBias.disparate_impact()
    EOP = ClassifierBias.true_positive_rate_difference()   # equal opportunity
    EO = ClassifierBias.average_odds_difference()          # equalised odds
    # Group differences below follow the convention unprivileged - privileged.
    FDR = ClassifierBias.false_discovery_rate(privileged=False) - ClassifierBias.false_discovery_rate(privileged=True)
    NPV_diff = ClassifierBias.negative_predictive_value(privileged=False) - ClassifierBias.negative_predictive_value(privileged=True)
    FOR = ClassifierBias.false_omission_rate(privileged=False) - ClassifierBias.false_omission_rate(privileged=True)
    PPV_diff = ClassifierBias.positive_predictive_value(privileged=False) - ClassifierBias.positive_predictive_value(privileged=True)
    # Entropy-based individual/group fairness decompositions.
    BGE = ClassifierBias.between_group_generalized_entropy_index()
    WGE = ClassifierBias.generalized_entropy_index() - ClassifierBias.between_group_generalized_entropy_index()
    BGTI = ClassifierBias.between_group_theil_index()
    WGTI = ClassifierBias.theil_index() - ClassifierBias.between_group_theil_index()
    EDF = ClassifierBias.differential_fairness_bias_amplification()

    newdf = pd.DataFrame(index=[0], data={'ACCURACY': Accuracy, 'TPR': TPR, 'PPV': PPV, 'TNR': TNR, 'NPV': NPV, 'SP': SP, 'CONSISTENCY': IF, 'DI': DI, 'EOP': EOP, 'EO': EO, 'FDR': FDR, 'NPV_diff': NPV_diff,
                                          'FOR': FOR, 'PPV_diff': PPV_diff, 'BGEI': BGE, 'WGEI': WGE, 'BGTI': BGTI, 'WGTI': WGTI, 'EDF': EDF, 'DATA_SP': dsp, 'DATA_CONS': dif, 'DATA_DI': ddi})
    newdf = pd.concat([OldDF, newdf])
    pathway = r'/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/NewLFR_Results/LR_Results.xlsx'
    with pd.ExcelWriter(pathway, engine='openpyxl') as writer:
        # load workbook base as for writer, so untouched sheets are preserved
        writer.book = excelBook
        writer.sheets = dict((ws.title, ws) for ws in excelBook.worksheets)
        newdf.to_excel(writer, sheet_name='Violent', index=False)
        # newdf.to_excel(writer, sheet_name='Adult', index=False)
    print('Accuracy', Accuracy)

# +
Fairness_Survey/ALGORITHMS/LFR/Violent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ntDqE_ZSFsG2" # ## Kaggle Dataset # + id="vcHBljgmEMdp" import numpy as np import pandas as pd import matplotlib.pyplot as plt from keras.layers import Conv2D,Dense,MaxPool2D,Dropout,Flatten from keras.models import Sequential from skimage import io from PIL import Image, ImageDraw, ImageFont import cv2 import os from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from keras.utils import to_categorical # + id="RaZFoUOaFtLg" outputId="f2ccdcfe-b909-4709-ed8e-3b90107d4c9a" colab={"base_uri": "https://localhost:8080/", "height": 35} from google.colab import drive drive.mount('/content/drive') # + id="OTmue16aF72R" path = '/content/drive/My Drive/OpenCV/' # + id="FWNYR_CNKC41" outputId="f453da7c-5572-4765-ce0c-ecf3185d0327" colab={"base_uri": "https://localhost:8080/", "height": 126} os.listdir(path) # + [markdown] id="T8MCdb8azIdW" # Un-comment it for the first time # + id="eYmeJpbeKFGc" # #!unzip '/content/drive/My Drive/OpenCV/ocr.zip' -d '/tmp/' # + id="LvsqwuvpKfai" # #!unzip '/tmp/train.zip' -d '/train' # + id="WQ_SdnjCKsyK" # #!unzip '/tmp/test.zip' -d '/test' # + id="kh1BLvRGKaCl" outputId="da8d300f-eaab-48bb-9d71-22d96a816e44" colab={"base_uri": "https://localhost:8080/", "height": 290} os.listdir('/tmp/') # + id="pqviScRsKmK6" os.listdir('/train/') train_path = '/train/train/' # + id="4_hvpxURKzgA" os.listdir('/test/') test_path = '/test/test/' # + [markdown] id="nGn0rM65zWl2" # ### EDA # + id="6VQBeSOHK4gB" outputId="4117ec4f-ac45-41e6-f819-de22ae988724" colab={"base_uri": "https://localhost:8080/", "height": 287} from matplotlib.image import imread img=imread(f'{train_path}/1.Bmp') plt.imshow(img) # + id="3TMWHuJJK8P8" outputId="5dd8898d-b586-4f4e-82ae-03c9c660f9aa" colab={"base_uri": 
# +
# Load all 6282 training bitmaps as grayscale arrays (indices 1..6282 inclusive).
train=[io.imread(f'{train_path}'+str(i)+'.Bmp',as_gray=True) for i in range(1,6283)]
#plt.imshow(train[1])
train[1]

# +
# Resize each image to 28x28 and flatten to a 784-vector for the KNN baseline.
final_train=[cv2.resize(image,(28,28)).flatten() for image in train]

# +
labels=pd.read_csv('/tmp/trainLabels.csv')
labels.head()

# +
finaly=np.array(final_train)
finaly.shape

# + [markdown]
# ## PREPROCESSING

# +
# Class-index -> character map: 0-9 digits, then A-Z, then a-z (62 classes total).
# NOTE(review): for the digit entries the VALUE stored is the int `count` (0..9),
# not the character '0'..'9' — predictions for digits therefore map to ints; confirm intended.
mapp={}
a='abcdefghijklmnopqrstuvwxyz'
count=0
for x in range(10):
    mapp[x]=count
    count+=1
for y in a:
    mapp[count]=y.upper()
    count+=1
for y in a:
    mapp[count]=y
    count+=1

# +
# NOTE(review): labels['Class'].iloc[:-1] drops the last label row — presumably to
# match the 6282 loaded images; verify the CSV has 6283 data rows.
trainx,testx,trainy,testy=train_test_split(finaly,labels['Class'].iloc[:-1],test_size=0.10)

# + [markdown]
# ## KNN

# +
# 1-nearest-neighbour baseline over the flattened pixels.
model=KNeighborsClassifier(n_neighbors=1,n_jobs=-1)
model.fit(trainx,trainy)

# +
model.score(testx,testy)

# +
# Keep the 2-D 28x28 images (plus a channel axis) for the convolutional models.
features_for_conv=np.array([cv2.resize(image,(28,28)) for image in train])
features_for_conv=features_for_conv[:,:,:,np.newaxis]

# + [markdown]
# ## CNN MODEL 1

# +
# NOTE(review): `model` is rebound here, discarding the fitted KNN above.
model=Sequential()
model.add(Conv2D(32,(3,3),activation='relu',input_shape=(28,28,1),data_format='channels_last'))
model.add(MaxPool2D((2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPool2D((2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(MaxPool2D((2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
#model.add(Dense(512,activation='relu'))
#model.add(Dense(256,activation='relu'))
model.add(Dense(128,activation='relu'))
# 62 output classes: 10 digits + 26 upper + 26 lower.
model.add(Dense(62,activation='softmax'))
model.summary()

# +
# One-hot targets for the CNN.
Labels=pd.get_dummies(labels['Class'])

# +
Trainx,valx,Trainy,valy=train_test_split(features_for_conv,Labels.iloc[:-1],test_size=0.2)

# +
Trainx.shape

# +
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# +
#model.fit(Trainx,Trainy,epochs=25,validation_data=(valx,valy))

# +
#model.fit(features_for_conv,Labels.iloc[:-1],epochs=30)

# + [markdown]
# ### TESTNIG M1

# +
# Load the unlabeled test bitmaps; `lab` keeps each file's basename (no extension).
lab=[]
test=[]
import os  # redundant: already imported at top of file
for i in os.listdir(f'{test_path}'):
    test.append(io.imread(f'{test_path}'+i,as_gray=True))
    lab.append(i.split('.')[0])

# +
test_img=np.array([cv2.resize(image,(28,28)) for image in test])
test_img=test_img[:,:,:,np.newaxis]
test_img.shape

# +
predictions=model.predict(test_img)
predictions=np.argmax(predictions,axis=1)

# +
# Map predicted class indices back to characters (ints for digit classes — see mapp note).
lit=[]
for x in predictions:
    lit.append(mapp.get(x))
lit

# + [markdown]
# ## CNN MODEL 2

# +
os.listdir(f"{path}")

# +
print(path)

# + [markdown]
# ### Un-comment it when running it for the first time
# +
# #!unzip '/content/drive/My Drive/OpenCV/data.zip' -d "/data"

# +
os.listdir('/data/processed/breta/')
words_path = '/data/processed/breta/words_gaplines'
archive_path = '/data/processed/breta/archive_gaplines'
os.listdir(archive_path)

# + [markdown]
# ### EDA

# +
# Each word image is named "<label>_<timestamp>.png"; show one example.
from matplotlib.image import imread
img=imread('/data/processed/breta/words_gaplines/A_1513181559.8292634.png')
plt.imshow(img)

# +
ww = ["A_1513181559.8292634.png", "A_1513181559.8292636.png"]
print(ww[1].split('_'))

# +
#for i in os.listdir(words_path):
#    train2 = [io.imread()]
# Collect every .png filename under words_path.
training = []
filelist=os.listdir(words_path)
for fichier in filelist[:]:  # filelist[:] makes a copy of filelist.
    if (fichier.endswith(".png")):
        training.append(fichier)
print(training[:5])

# +
# Ground-truth word label is the filename prefix before the first underscore.
gt = []
tag = []  # NOTE(review): `tag` is never populated or used in the visible code
for i in training:
    gg = i.split('_')
    gt.append(gg[0])
print(gt[:2])

# +
# !pip install ipython-autotime

# +
# %load_ext autotime

# + [markdown]
# ### PREPROCESSING

# +
df = pd.DataFrame()

# +
# First half of the filenames/labels into df ...
df['img'] = training[:int(0.5 * len(training))]

# +
df['label'] = gt[:int(0.5 * len(gt))]

# +
df.head()

# +
ydf = pd.DataFrame()

# +
print(len(gt))

# +
# ... and the second half (capped at index 5068) into ydf.
# NOTE(review): the hard-coded 5068 cap silently drops any remaining files; confirm intended.
ydf['img'] = training[int(0.5 * len(training)):5068]

# +
ydf['label'] = gt[int(0.5 * len(gt)):5068]

# +
ydf.head()
# +
#ydf.drop([2534])

# +
print(len(df),len(ydf))

# +
# df1/df2 are (filename, label) string pairs, NOT pixel data.
df1 = np.array(df)
df1.shape

# +
df2 = np.array(ydf)
df2.shape

# +
xtrain,xtest,ytrain,ytest = train_test_split(df1, df2, test_size=0.50)

# +
print(len(xtrain), "X", xtrain[:2])

# +
print(len(ytrain), "L", len(gt), len(training))

# +
from keras.preprocessing import image

# +
filelist=os.listdir(words_path)
print(filelist[:5])

# +
print(xtrain.shape)

# +
print(ytest.shape)

# +
# Return the .png entries of the global `filelist` slice [i:j].
def ss(i, j):
    cp = []
    for f in filelist[i:j]:
        if f.endswith(".png"):
            cp.append(f)
    # for u in range(0, batch_S):
    #print(len(cp), " XX ", i, " X ", j)
    return cp
# print(len(xtrain))

# +
# Collect file batches over doubling index windows [0,64), [64,128), [128,256), ...
# and print every collected filename.
def Loader(batch_size):
    sd = []
    bb = [0, 64]
    sd.append(ss(bb[0], bb[1]))
    for i in range(len(xtrain)):
        if 2 * bb[1] < len(xtrain) - 1:
            bb[0] = bb[1]
            bb[1] = 2 * bb[0]
            # NOTE(review): nesting reconstructed from a collapsed source — the append
            # is assumed to sit inside the `if`; confirm against the original notebook.
            sd.append(ss(bb[0], bb[1]))
    for l in sd:
        for x in l:
            print(x)
    #print("L: ", len(cp), len(xtrain), len(sd))
    #for c in range(len(cp[1])):
    #    print(cp[c])
Loader(64)

# +
from google.colab.patches import cv2_imshow
import cv2

# +
# Count .png files not yet present in `kk`.
# NOTE(review): `kk` is defined in a LATER cell — this cell only runs after that one
# has been executed (out-of-order notebook state).
co = 0
for i in filelist:
    if i.endswith(".png"):
        if i not in kk:
            co += 1
            # print(i)
print(co, len(kk))

# +
if 'Child_1514313714.461586.png' not in kk:
    print("L")

# +
# Count kk entries whose split-list is not in gt.
# NOTE(review): `cou` is a LIST (result of split), compared against a list of strings —
# this condition is always True; probably meant `cou[0] not in gt`.
tt = 0
for i in kk:
    cou = i.split("_")
    if cou not in gt:
        tt += 1
tt

# +
filelist=os.listdir(words_path)

# +
# LoadBatch (defined in a later cell) returns a generator; printing it shows the object only.
trt = LoadBatch(xtrain, ytrain, 100)
print(trt)

# +
print(words_path)

# +
os.chdir(words_path)
os.listdir(words_path)
# +
# !find 'hungry_1514294635.6984315.png'

# + [markdown]
# ### LOADER

# +
# Read every image referenced by the `sd` batches produced by Loader/ss.
# ko holds the pixel data, kk the filenames (kk is consumed by earlier cells).
ko = []
kk = []
for i in sd:
    for j in i:
        #print(len(i), j)
        kk.append(j)
        ko.append(cv2.imread(f"{words_path}/" + j))
#cv2_imshow(ko[1])
print(len(ko))

# +
# Dead stub: builds empty locals and returns None. Kept for notebook history.
def ImgLoad(batch_size):
    xx = []
    yy = []
    filelist=os.listdir(words_path)

# +
# Infinite generator yielding (images, labels) batches for Keras fit_generator-style training.
# NOTE(review): nesting reconstructed from a collapsed source; the lim/count logic looks
# buggy (lim is always count % batch_size on the first branch, and `count` is rebound by
# the for loop) — verify against the original notebook before relying on it.
def LoadBatch(xdata, ydata, batch_size):
    xd = []
    yd = []
    img_l = []
    count = 1
    filelist=os.listdir(words_path)
    if (count % batch_size) < (batch_size):
        lim = (count % batch_size)
    else:
        lim = (len(filelist) - 1)
    l = len(xtrain)
    if lim == 0:
        lim = 1
    while True:
        for count in range(batch_size + 1):
            for fichier in filelist[:(count % batch_size)]:
                if (fichier.endswith(".png")):
                    # images are loaded relative to words_path (os.chdir'd earlier)
                    img_l.append(image.load_img(fichier, color_mode = 'grayscale', target_size=[28, 28]))
                    count += 1
            #img = image.load_img([i for i in training[:Pointer % l]], color_mode = 'grayscale', target_size = [28, 28])
            #enhancer = ImageEnhance.Contrast(img)
            #img = ImageOps.grayscale(img)
            #factor = 0.5 #increase contrast
            #im_output = enhancer.enhance(factor)
            for img in img_l:
                # scale pixels to [0, 1]
                img = image.img_to_array(img) / 255.0
                xd.append(img)
            yd.append([i for i in gt[count:lim]])
            #Pointer = Pointer + 1
            if len(xd) == batch_size:
                yield np.array(xd), np.array(yd)
                xd = []
                yd = []

# + [markdown]
# ## MODEL

# +
# CNN model 2: three conv/pool/dropout stages, then two dense layers into a
# 16-way softmax head.
cmodel=Sequential()
cmodel.add(Conv2D(16,(3, 3),activation='relu',input_shape=(28,28,1)))
cmodel.add(MaxPool2D((2, 2)))
cmodel.add(Dropout(0.2))
cmodel.add(Conv2D(32,(3,3),activation='relu'))
cmodel.add(MaxPool2D((2, 2)))
cmodel.add(Dropout(0.2))
cmodel.add(Conv2D(64,(3, 3),activation='relu'))
cmodel.add(MaxPool2D((2, 2)))
cmodel.add(Dropout(0.2))
#cmodel.add(Conv2D(128,(3, 3),activation='relu'))
#cmodel.add(MaxPool2D((2, 2)))
#cmodel.add(Dropout(0.2))
#cmodel.add(Conv2D(512,(1, 1),activation='relu'))
#cmodel.add(MaxPool2D((1, 1)))
#cmodel.add(Dropout(0.2))
cmodel.add(Flatten())
#cmodel.add(Dense(256,activation='relu'))
cmodel.add(Dense(64, activation='relu'))
cmodel.add(Dense(32, activation='relu'))
cmodel.add(Dense(16, activation='softmax'))
cmodel.summary()

# +
cmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# +
batch_size = 100
train_data_generator = LoadBatch(xtrain, ytrain, batch_size)
valid_data_generator = LoadBatch(xtest, ytest, batch_size)

# +
# FIX: the original cell contained the incomplete statement `tdg = ` (a SyntaxError
# in the exported .py form). Commented out; it assigned nothing and nothing reads `tdg`.
# tdg =

# +
import keras

# +
xtrain.shape

# +
#X_train = pd.DataFrame()
#X_train = xtrain / 255.0
#X_train.head()

# + [markdown]
# #### CALLBACK

# +
# Checkpoint best-by-val_accuracy weights into Drive after each epoch.
filepath_02="/content/drive/My Drive/OpenCV/Weights/z-1-ADweights-{val_accuracy:.4f}.h5"
checkpointing = keras.callbacks.ModelCheckpoint(filepath_02, monitor='val_accuracy', verbose=1, save_best_only = True, save_weights_only=True, mode='auto', period=1)
callbacks_l = [checkpointing]
id="ZCbyeGNjogAP" outputId="f84b60dc-6112-4413-c892-3155410485b0" colab={"base_uri": "https://localhost:8080/", "height": 758} #cmodel.fit(xtrain, ytrain, epochs=5, validation_data= (xtest, ytest), callbacks = callbacks_l) cmodel.fit(xtrain, ytrain, epochs = 5, verbose = 1 ,validation_data = (xtest, ytest), callbacks=callbacks_l) # + id="rjyw2_Fydc3s" outputId="929bac13-52b7-4b77-9e5e-1e74a7ed35e3" colab={"base_uri": "https://localhost:8080/", "height": 35} traint=[io.imread(f'{words_path}/' + i, as_gray=True) for i in training] #plt.imshow(np.array(traint[:1])) #print(np.array(traint[1])) # + id="t0l9jVGVmuhj" outputId="c4c6634f-99ab-421b-8ddf-aba7057161fa" colab={"base_uri": "https://localhost:8080/", "height": 35} #a = np.expand_dims(traint[1], axis=0) #plt.imshow(a) # + id="UcYPagd5JYz7" outputId="19aa42e0-dcda-40b7-829d-4122682302d2" colab={"base_uri": "https://localhost:8080/", "height": 35} #te_mg = np.array([cv2.resize(image,(28,28)) for image in training]) #te_mg = te_mg[:,:,:,np.newaxis] #te_mg.shape # + [markdown] id="XUv1G7WqOjlP" # ## CNN MODEL 3 # + id="3t4S-9qNMfx3" outputId="c1929db7-5c18-4beb-fe0a-2293f76d1487" colab={"base_uri": "https://localhost:8080/", "height": 635} cmodel=Sequential() cmodel.add(Conv2D(32,(3, 3),activation='relu',input_shape=(28,28,1))) cmodel.add(MaxPool2D((2, 2))) cmodel.add(Dropout(0.2)) cmodel.add(Conv2D(64,(3,3),activation='relu')) cmodel.add(MaxPool2D((2, 2))) cmodel.add(Dropout(0.2)) cmodel.add(Conv2D(128,(3, 3),activation='relu')) cmodel.add(MaxPool2D((2, 2))) cmodel.add(Dropout(0.2)) #cmodel.add(Conv2D(128,(3, 3),activation='relu')) #cmodel.add(MaxPool2D((2, 2))) #cmodel.add(Dropout(0.2)) #cmodel.add(Conv2D(512,(1, 1),activation='relu')) #cmodel.add(MaxPool2D((1, 1))) #cmodel.add(Dropout(0.2)) cmodel.add(Flatten()) #cmodel.add(Dense(256,activation='relu')) cmodel.add(Dense(128,activation='relu')) cmodel.add(Dense(64,activation='relu')) cmodel.add(Dense(62,activation='softmax')) cmodel.summary() # + id="rQEJaOm3Owwu" 
# +
cmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# +
cmodel.fit(Trainx,Trainy,epochs=25,validation_data=(valx,valy))

# +
cmodel.fit(Trainx,Trainy,epochs=25,validation_data=(valx,valy),callbacks=callbacks_l)

# +
cmodel.fit(Trainx,Trainy,epochs=25,validation_data=(valx,valy))

# + [markdown]
# ### TESTING

# +
lab1=[]
test1=[]
#import os
for i in os.listdir(f'{test_path}'):
    test1.append(io.imread(f'{test_path}'+i,as_gray=True))
    lab1.append(i.split('.')[0])

# +
# NOTE(review): builds the tensor from `test` (loaded in the FIRST notebook's cells),
# not from the `test1` list just collected above — likely a bug; confirm.
test_img1=np.array([cv2.resize(image,(28,28)) for image in test])
test_img1=test_img1[:,:,:,np.newaxis]
test_img1.shape

# +
predictions1=cmodel.predict(test_img1)
predictions1=np.argmax(predictions1,axis=1)

# +
lit1=[]
for x in predictions1:
    lit1.append(mapp.get(x))
lit1

# +
cmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# +
cmodel.fit(Trainx,Trainy,epochs=30,validation_data=(valx,valy))

# +
cmodel.fit(Trainx,Trainy,epochs=30,validation_data=(valx,valy))

# + [markdown]
# #### CALLBACK

# +
# Local (non-Drive) checkpoint path for this training stretch.
filepath_02="/content/z-1-weights-{val_accuracy:.4f}.h5"
checkpointing = keras.callbacks.ModelCheckpoint(filepath_02, monitor='val_accuracy', verbose=1, save_best_only = True, save_weights_only=True, mode='auto', period=1)
callbacks_l = [checkpointing]

# + [markdown]
# #### FITTING

# The cells below are repeated manual re-runs of the same training call (each Keras
# `fit` continues from the current weights); kept verbatim from the notebook history.
# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=5, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=5, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=5, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# +
cmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l)

# + [markdown]
# ### TESTING

# +
lab2=[]
test2=[]
#import os
for i in os.listdir(f'{test_path}'):
    test2.append(io.imread(f"{test_path}"+i,as_gray=True))
    lab2.append(i.split('.')[0])

# +
test_img2=np.array([cv2.resize(image,(28,28)) for image in test2])
test_img2=test_img2[:,:,:,np.newaxis]
test_img2.shape

# +
predictions2=cmodel.predict(test_img2)
predictions2=np.argmax(predictions2,axis=1)

# +
# NOTE(review): prints/maps the stale `predictions` from CNN model 1, not `predictions2`.
print(predictions[:5])

# +
lit2=[]
for x in predictions[:5]:
    print(mapp.get(x), x)
    #lit.append(mapp.get(x))
#lit2

# +
# Spot-check five specific test bitmaps (ids 10000..10004).
lab3=[]
test3=[]
#import os
for i in range(10000, 10005):
    test3.append(io.imread(f"{test_path}"+str(i)+".Bmp",as_gray=True))
    lab3.append(str(i).split('.')[0])

# +
test_img3=np.array([cv2.resize(image,(28,28)) for image in test3])
test_img3=test_img3[:,:,:,np.newaxis]
test_img3.shape

# +
predictions3=cmodel.predict(test_img3)
predictions3=np.argmax(predictions3,axis=1)
+ id="-0WrhBEKWIqr" outputId="21eacd5d-a137-483f-e2a7-6224d59768f3" colab={"base_uri": "https://localhost:8080/", "height": 35} print(predictions3[:5]) # + id="ALVXPGsmWKnl" outputId="420fdfff-883a-412d-8b54-1a1aa32f4621" colab={"base_uri": "https://localhost:8080/", "height": 35} lit3=[] for x in predictions3: lit3.append(mapp.get(x)) lit3 # + id="a8L0P2dUWUly" model.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l) # + id="VrLoaOEOY2XV" model.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l) # + id="WZmAJ17IZ5hI" model.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l) # + id="UQWZXQivepqg" model.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_l) # + [markdown] id="lj05xtCM0Wh5" # ## CNN MODEL 4 # + id="8MPxqVUygKqE" outputId="e6a98806-f7a1-41e0-b350-6d5d4a4154c7" colab={"base_uri": "https://localhost:8080/", "height": 707} # MODEL 4 kmodel=Sequential() kmodel.add(Conv2D(32,(2,2),activation='relu',input_shape=(28,28,1))) kmodel.add(MaxPool2D((1,1))) kmodel.add(Dropout(0.2)) kmodel.add(Conv2D(64,(2,2),activation='relu')) kmodel.add(MaxPool2D((1,1))) kmodel.add(Dropout(0.2)) kmodel.add(Conv2D(128,(2,2),activation='relu')) kmodel.add(MaxPool2D((1,1))) kmodel.add(Dropout(0.2)) kmodel.add(Conv2D(128,(2,2),activation='relu')) kmodel.add(MaxPool2D((1,1))) kmodel.add(Dropout(0.2)) kmodel.add(Flatten()) #model.add(Dense(512,activation='relu')) #model.add(Dense(256,activation='relu')) #kmodel.add(Dense(256,activation='relu')) kmodel.add(Dense(128,activation='relu')) kmodel.add(Dense(62,activation='softmax')) kmodel.summary() kmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) # + [markdown] id="1-w2X0hq0ZDN" # #### CALLBACK # + id="WBb3O5MAkIvw" outputId="5c0c29be-76ab-4f85-cc2d-db9c14f774d4" colab={"base_uri": "https://localhost:8080/", "height": 55} 
filepath_02="/content/z-1-weightsM3-{val_accuracy:.4f}.h5" checkpointing = keras.callbacks.ModelCheckpoint(filepath_02, monitor='val_accuracy', verbose=1, save_best_only = True, save_weights_only=True, mode='auto', period=1) callbacks_m = [checkpointing] # + [markdown] id="qwuFRSiP0bQ-" # #### FITTING # + id="9I5ihbe-lg9P" kmodel.fit(Trainx, Trainy, epochs=30, validation_data=(valx, valy), callbacks=callbacks_m) # + id="dxJMtQgjtQM1" kmodel.fit(Trainx, Trainy, epochs=40, validation_data=(valx, valy), callbacks=callbacks_m) # + id="XTLKKE0YuJgD"
Notebooks/OCR_Kaggle_DS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Spark Streaming hands-on
# > Practice Python structured streaming by reading files
#
# ## Goals
# 1. Read JSON files in a given directory as a stream
# 1. Practice streaming aggregation functions
# 1. Practice streaming transformation functions
# 1. Practice joining multiple streams
# 1. Practice event-time tumbling-window processing
# 1. Use a watermark to build and query a table holding the number of users connected per minute
# 1. Use a UDF together with watermark and window functions to build and query a table of top cumulative-revenue users
# 1. Practice the same functionality with the DataFrame API

# +
from pyspark.sql import SparkSession
from pyspark.sql.functions import *

spark = SparkSession.builder.appName("foo-bar") \
    .config("spark.sql.session.timeZone", "Asia/Seoul") \
    .config("spark.jars.packages", "io.delta:delta-core_2.12:0.7.0") \
    .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
    .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
    .getOrCreate()
# -

# ### Helper functions frequently used in Spark streaming work

from random import uniform
from time import sleep
from IPython.display import display, clear_output


def displayStream(name, sql, iterations, sleep_secs):
    """Poll a memory-sink stream table: run `sql` `iterations` times,
    sleeping `sleep_secs` between runs and clearing the cell output each time.

    The random "Score" is only there to make it obvious the output refreshed.
    """
    for i in range(1, iterations + 1):
        clear_output(wait=True)
        display(f'[{name}] Iteration: {i}, Score: {uniform(0, 1)}')
        spark.sql(sql).show(truncate=False)
        sleep(sleep_secs)


from IPython.display import JSON


def checkLastProgress(query):
    """Return the last progress report of a streaming query as a rich JSON object.

    BUG FIX: the original created `JSON(query.lastProgress)` and discarded it,
    so nothing was ever rendered; returning it lets Jupyter display the report
    when this call is the last expression of a cell.
    """
    return JSON(query.lastProgress)


def stopQueryIfStarted(query):
    """Stop an already-started streaming query before re-running it.

    Safe to call with None or a never-started query: any error
    (AttributeError, StreamingQueryException, ...) is deliberately ignored
    because this is best-effort cleanup.
    """
    try:
        query.stop()
    except Exception:
        pass


def showTables():
    """Print the list of tables in the current catalog."""
    spark.sql("show tables").show()


# ## 1. Build a streaming pipeline by reading JSON files from a given directory

static = spark.read.json("data/activity-data")
dataSchema = static.schema
static.printSchema()
static.show(2, truncate=False)

streaming = spark.readStream.schema(dataSchema) \
    .option("maxFilesPerTrigger", 1).json("data/activity-data")

# In local mode too many partitions actually hurt performance, so reduce the shuffle count
spark.conf.set("spark.sql.shuffle.partitions", 5)

# ## 2. Practice streaming aggregation functions

# +
activityCounts = streaming.groupBy("gt").count()

# A stream is not printed directly; first we only write the logic that sends results to a streaming sink.
# Below we create `activityQuery`, which ships the "complete" result set on every trigger
# to an arbitrary "memory" table named "activity_counts".
activityQuery = activityCounts.writeStream \
    .queryName("activity_counts") \
    .format("memory") \
    .outputMode("complete") \
    .start()

# Keeping the streaming query in its own variable is convenient for checking its state later.
activityQuery.status
# -

# After start() as above, the query keeps running in the background; the calls below are for managing it.
DEBUG = False
if DEBUG:
    activityQuery.id  # get the unique identifier of the running query that persists across restarts from checkpoint data
    activityQuery.runId  # get the unique id of this run of the query, which will be generated at every start/restart
    activityQuery.name  # get the name of the auto-generated or user-specified name
    activityQuery.explain()  # print detailed explanations of the query
    activityQuery.stop()  # stop the query
    activityQuery.awaitTermination()  # block until query is terminated, with stop() or with error
    activityQuery.exception  # the exception if the query has been terminated with error
    activityQuery.recentProgress  # an array of the most recent progress updates for this query
    activityQuery.lastProgress  # the most recent progress update of this streaming query

# #### Inspect and query the currently active streaming queries

# +
spark.streams.active  # get the list of currently active streaming queries
# spark.streams.get(id)  # get a query object by its unique id

# A live application must keep running in the background, so it should execute a command
# that blocks until the application terminates, such as:
# spark.streams.awaitAnyTermination()  # block until any one of them terminates
# -

displayStream("activity_counts", "select * from activity_counts", 3, 1)

displayStream("Counts of Activities", "select * from activity_counts", 3, 1)

# ## 3. Practice streaming transformation functions

simpleTransform = streaming.withColumn("stairs", expr("gt like '%stairs%'")) \
    .where("stairs") \
    .where("gt is not null") \
    .select("gt", "model", "arrival_time", "creation_time") \
    .writeStream \
    .queryName("simple_transform") \
    .format("memory") \
    .outputMode("append") \
    .start()

displayStream("Counts of Activities", "select * from simple_transform", 3, 1)

checkLastProgress(activityQuery)

showTables()

# ## 4. Practice operations that join multiple streams

# Reading the data source up front into a static DataFrame makes testing easier:
# the stream re-reads the files from the beginning each time.
deviceModelStats = None
stopQueryIfStarted(deviceModelStats)

historicalAgg = static.groupBy("gt", "model").avg()
deviceModelStats = streaming.drop("Arrival_Time", "Creation_Time", "Index") \
    .cube("gt", "model").avg() \
    .join(historicalAgg, ["gt", "model"]) \
    .writeStream \
    .queryName("device_counts") \
    .format("memory") \
    .outputMode("complete") \
    .start()

displayStream("device_counts", "select * from device_counts", 5, 1)

# ## 5. Practice event-time tumbling-window processing

withEventTime = streaming.selectExpr("*", "cast(cast(Creation_Time as double)/1000000000 as timestamp) as event_time")

eventPerWindow = None
stopQueryIfStarted(eventPerWindow)

eventPerWindow = withEventTime.groupBy(window(col("event_time"), "10 minutes")).count() \
    .writeStream \
    .queryName("pyevents_per_window") \
    .format("memory") \
    .outputMode("complete") \
    .start()

displayStream("pyevents_per_window", "select * from pyevents_per_window", 5, 3)

eventPerUserWindow = None
stopQueryIfStarted(eventPerUserWindow)

eventPerUserWindow = withEventTime.groupBy(window(col("event_time"), "10 minutes"), "User").count() \
    .writeStream \
    .queryName("pyevents_per_user_window") \
    .format("memory") \
    .outputMode("complete") \
    .start()

displayStream("pyevents_per_user_window", "select * from pyevents_per_user_window order by count desc limit 5", 5, 1)

static.show(10, truncate=False)

static.groupBy("Device", "User").count().sort(desc("count")).limit(5).show()

# +
userRank = None
stopQueryIfStarted(userRank)

# Per-user event counts over 10-minute event-time windows, with a 10-minute watermark.
# NOTE(review): the original comment described ranking by X, Y, Z in descending order,
# which does not match this code — the query computes a windowed count; confirm intent.
userRank = withEventTime \
    .withWatermark("event_time", "10 minutes") \
    .groupBy("User", window(col("event_time"), "10 minutes")).count() \
    .writeStream \
    .queryName("user_rank") \
    .format("memory") \
    .outputMode("complete") \
    .start()
# -

displayStream("user_rank", "select * from user_rank order by window desc, User, count desc", 1, 1)

# ## 6. Use a watermark to build and query a table holding the number of users connected per minute

df = spark.read.option("header", "true").csv("data/tbl_user.csv")
df.printSchema()
df.show()

df.rdd.zipWithIndex().collect()

df.withColumn("id", monotonically_increasing_id() + 1).show()

df = spark.sql("select 'a,b' as col1")
df2 = df.withColumn("codes", explode(split("col1", ","))).drop("col1")
df2.show()

# +
from pyspark.sql.types import StructType, StructField, StringType, IntegerType

data = [("James", "", "Smith", "36636", "M", 3000),
        ("Michael", "Rose", "", "40288", "M", 4000),
        ("Robert", "", "Williams", "42114", "M", 4000),
        ("Maria", "Anne", "Jones", "39192", "F", 4000),
        ("Jen", "Mary", "Brown", "", "F", -1)
        ]

schema = StructType([
    StructField("firstname", StringType(), True),
    StructField("middlename", StringType(), True),
    StructField("lastname", StringType(), True),
    StructField("id", StringType(), True),
    StructField("gender", StringType(), True),
    StructField("salary", IntegerType(), True)
])

df = spark.createDataFrame(data=data, schema=schema)
df.printSchema()
df.show(truncate=False)
# -

# +
structureData = [
    (("James", "", "Smith"), "36636", "M", 3100),
    (("Michael", "Rose", ""), "40288", "M", 4300),
    (("Robert", "", "Williams"), "42114", "M", 1400),
    (("Maria", "Anne", "Jones"), "39192", "F", 5500),
    (("Jen", "Mary", "Brown"), "", "F", -1)
]
structureSchema = StructType([
    StructField('name', StructType([
        StructField('firstname', StringType(), True),
        StructField('middlename', StringType(), True),
        StructField('lastname', StringType(), True)
    ])),
    StructField('id', StringType(), True),
    StructField('gender', StringType(), True),
    StructField('salary', IntegerType(), True)
])

df2 = spark.createDataFrame(data=structureData, schema=structureSchema)
df2.printSchema()
df2.show(truncate=False)
# -

df2.selectExpr("name.firstname as first", "name.middlename", "name.lastname").show()
spark/notebooks/pyspark-stream-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Python入门(中)</h1> # # 1. [简介](#简介) # # 2. [列表](#列表)<br> # [1. 列表的定义](#1.-列表的定义)<br> # [2. 列表的创建](#2.-列表的创建)<br> # [3. 向列表中添加元素](#3.-向列表中添加元素)<br> # [4. 删除列表中的元素](#4.-删除列表中的元素)<br> # [5. 获取列表中的元素](#5.-获取列表中的元素)<br> # [6. 列表的常用操作符](#6.-列表的常用操作符)<br> # [7. 列表的其它方法](#7.-列表的其它方法)<br> # # 3. [元组](#元组)<br> # [1. 创建和访问一个元组](#1.-创建和访问一个元组)<br> # [2. 更新和删除一个元组](#2.-更新和删除一个元组)<br> # [3. 元组相关的操作符](#3.-元组相关的操作符)<br> # [4. 内置方法](#4.-内置方法)<br> # [5. 解压元组](#5.-解压元组)<br> # # 4. [字符串](#字符串)<br> # [1. 字符串的定义](#1.-字符串的定义)<br> # [2. 字符串的切片与拼接](#2.-字符串的切片与拼接)<br> # [3. 字符串的常用内置方法](#3.-字符串的常用内置方法)<br> # [4. 字符串格式化](#4.-字符串格式化)<br> # # 5. [字典](#字典)<br> # [1. 可变类型与不可变类型](#1.-可变类型与不可变类型)<br> # [2. 字典的定义](#2.-字典的定义)<br> # [3. 创建和访问字典](#3.-创建和访问字典)<br> # [4. 字典的内置方法](#4.-字典的内置方法)<br> # # 6. [集合](#集合)<br> # [1. 集合的创建](#1.-集合的创建)<br> # [2. 访问集合中的值](#2.-访问集合中的值)<br> # [3. 集合的内置方法](#3.-集合的内置方法)<br> # [4. 集合的转换](#4.-集合的转换)<br> # [5. 不可变集合](#5.-不可变集合)<br> # # 7. [序列](#序列)<br> # [1. 针对序列的内置函数](#1.-针对序列的内置函数)<br> # - # # 简介 # Python 是一种通用编程语言,其在科学计算和机器学习领域具有广泛的应用。如果我们打算利用 Python 来执行机器学习,那么对 Python 有一些基本的了解就是至关重要的。本 Python 入门系列体验就是为这样的初学者精心准备的。 # **本实验包括以下内容**: # 1. 列表 # - 列表的定义 # - 列表的创建 # - 向列表中添加元素 # - 删除列表中的元素 # - 获取列表中的元素 # - 列表的常用操作符 # - 列表的其他方法 # 2. 元组 # - 创建和访问一个元组 # - 更新和删除一个元组 # - 元组相关的操作符 # - 内置方法 # - 解压元组 # 3. 字符串 # - 字符串的定义 # - 字符串的切片与拼接 # - 字符串的常用内置方法 # - 字符串格式化 # 4. 字典 # - 可变类型与不可变类型 # - 字典的定义 # - 创建和访问字典 # - 字典的内置方法 # 5. 集合 # - 集合的创建 # - 访问集合中的值 # - 集合的内置方法 # - 集合的转换 # - 不可变集合 # 6. 序列 # - 针对序列的内置函数 # # # 列表 # # 简单数据类型 # - 整型`<class 'int'>` # - 浮点型`<class 'float'>` # - 布尔型`<class 'bool'>` # # 容器数据类型 # - 列表`<class 'list'>` # - 元组`<class 'tuple'>` # - 字典`<class 'dict'>` # - 集合`<class 'set'>` # - 字符串`<class 'str'>` # ## 1. 
列表的定义 # # 列表是有序集合,没有固定大小,能够保存任意数量任意类型的 Python 对象,语法为 `[元素1, 元素2, ..., 元素n]`。 # # - 关键点是「中括号 []」和「逗号 ,」 # - 中括号 把所有元素绑在一起 # - 逗号 将每个元素一一分开 # # # # ## 2. 列表的创建 # # - 创建一个普通列表 # # 【例子】 # # + x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(x, type(x)) # ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] <class 'list'> x = [2, 3, 4, 5, 6, 7] print(x, type(x)) # [2, 3, 4, 5, 6, 7] <class 'list'> # - # # - 利用`range()`创建列表 # # 【例子】 # + x = list(range(10)) print(x, type(x)) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] <class 'list'> x = list(range(1, 11, 2)) print(x, type(x)) # [1, 3, 5, 7, 9] <class 'list'> x = list(range(10, 1, -2)) print(x, type(x)) # [10, 8, 6, 4, 2] <class 'list'> # - # # - 利用推导式创建列表 # # 【例子】 # + x = [0] * 5 print(x, type(x)) # [0, 0, 0, 0, 0] <class 'list'> x = [0 for i in range(5)] print(x, type(x)) # [0, 0, 0, 0, 0] <class 'list'> x = [i for i in range(10)] print(x, type(x)) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] <class 'list'> x = [i for i in range(1, 10, 2)] print(x, type(x)) # [1, 3, 5, 7, 9] <class 'list'> x = [i for i in range(10, 1, -2)] print(x, type(x)) # [10, 8, 6, 4, 2] <class 'list'> x = [i ** 2 for i in range(1, 10)] print(x, type(x)) # [1, 4, 9, 16, 25, 36, 49, 64, 81] <class 'list'> x = [i for i in range(100) if (i % 2) != 0 and (i % 3) == 0] print(x, type(x)) # [3, 9, 15, 21, 27, 33, 39, # - # # 注意: # # 由于list的元素可以是任何对象,因此列表中所保存的是对象的指针。即使保存一个简单的`[1,2,3]`,也有3个指针和3个整数对象。 # # `x = [a] * 4`操作中,只是创建4个指向list的引用,所以一旦`a`改变,`x`中4个`a`也会随之改变。 # # 【例子】 # # + x = [[0] * 3] * 4 print(x, type(x)) # [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]] <class 'list'> x[0][0] = 1 print(x, type(x)) # [[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]] <class 'list'> a = [0] * 3 x = [a] * 4 print(x, type(x)) # [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]] <class 'list'> x[0][0] = 1 print(x, type(x)) # [[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]] <class 'list'> # - # # # # - 创建一个混合列表 # # 【例子】 mix = [1, 'lsgo', 3.14, [1, 2, 3]] print(mix, type(mix)) # [1, 'lsgo', 
3.14, [1, 2, 3]] <class 'list'> # # # - 创建一个空列表 # # 【例子】 empty = [] print(empty, type(empty)) # [] <class 'list'> # # 列表不像元组,列表内容可更改 (mutable),因此附加 (`append`, `extend`)、插入 (`insert`)、删除 (`remove`, `pop`) 这些操作都可以用在它身上。 # # # ## 3. 向列表中添加元素 # # - `list.append(obj)` 在列表末尾添加新的对象,只接受一个参数,参数可以是任何数据类型,被追加的元素在 list 中保持着原结构类型。 # # 【例子】 # # + x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] x.append('Thursday') print(x) # ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Thursday'] print(len(x)) # 6 # - # # 此元素如果是一个 list,那么这个 list 将作为一个整体进行追加,注意`append()`和`extend()`的区别。 # # 【例子】 # + x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] x.append(['Thursday', 'Sunday']) print(x) # ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', ['Thursday', 'Sunday']] print(len(x)) # 6 # - # # - `list.extend(seq)` 在列表末尾一次性追加另一个序列中的多个值(用新列表扩展原来的列表) # # 【例子】 # # + x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] x.extend(['Thursday', 'Sunday']) print(x) # ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Thursday', 'Sunday'] print(len(x)) # 7 # - # # 严格来说 `append` 是追加,把一个东西整体添加在列表后,而 `extend` 是扩展,把一个东西里的所有元素添加在列表后。 # # - `list.insert(index, obj)` 在编号 `index` 位置插入 `obj`。 # # 【例子】 # + x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] x.insert(2, 'Sunday') print(x) # ['Monday', 'Tuesday', 'Sunday', 'Wednesday', 'Thursday', 'Friday'] print(len(x)) # 6 # - # # # # ## 4. 
删除列表中的元素 # - `list.remove(obj)` 移除列表中某个值的第一个匹配项 # # 【例子】 # x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] x.remove('Monday') print(x) # ['Tuesday', 'Wednesday', 'Thursday', 'Friday'] # # # # - `list.pop([index=-1])` 移除列表中的一个元素(默认最后一个元素),并且返回该元素的值 # # 【例子】 # # + x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] y = x.pop() print(y) # Friday y = x.pop(0) print(y) # Monday y = x.pop(-2) print(y) # Wednesday print(x) # ['Tuesday', 'Thursday'] # - # `remove` 和 `pop` 都可以删除元素,前者是指定具体要删除的元素,后者是指定一个索引。 # # - `del var1[, var2 ……]` 删除单个或多个对象。 # # 【例子】 # # 如果知道要删除的元素在列表中的位置,可使用`del`语句。 # # # x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] del x[0:2] print(x) # ['Wednesday', 'Thursday', 'Friday'] # # 如果你要从列表中删除一个元素,且不再以任何方式使用它,就使用`del`语句;如果你要在删除元素后还能继续使用它,就使用方法`pop()`。 # # # ## 5. 获取列表中的元素 # # - 通过元素的索引值,从列表获取单个元素,注意,列表索引值是从0开始的。 # - 通过将索引指定为-1,可让Python返回最后一个列表元素,索引 -2 返回倒数第二个列表元素,以此类推。 # # 【例子】 # x = ['Monday', 'Tuesday', 'Wednesday', ['Thursday', 'Friday']] print(x[0], type(x[0])) # Monday <class 'str'> print(x[-1], type(x[-1])) # ['Thursday', 'Friday'] <class 'list'> print(x[-2], type(x[-2])) # Wednesday <class 'str'> # # # 切片的通用写法是 `start : stop : step` # # - 情况 1 - "start :" # - 以 `step` 为 1 (默认) 从编号 `start` 往列表尾部切片。 # # 【例子】 # x = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(x[3:]) # ['Thursday', 'Friday'] print(x[-3:]) # ['Wednesday', 'Thursday', 'Friday'] # - 情况 2 - ": stop" # - 以 `step` 为 1 (默认) 从列表头部往编号 `stop` 切片。 # # 【例子】 # week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(week[:3]) # ['Monday', 'Tuesday', 'Wednesday'] print(week[:-3]) # ['Monday', 'Tuesday'] # # - 情况 3 - "start : stop" # - 以 `step` 为 1 (默认) 从编号 `start` 往编号 `stop` 切片。 # # 【例子】 # week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(week[1:3]) # ['Tuesday', 'Wednesday'] print(week[-3:-1]) # ['Wednesday', 'Thursday'] # # - 情况 4 - "start : stop : step" # - 以具体的 `step` 从编号 `start` 往编号 `stop` 切片。注意最后把 `step` 设为 
-1,相当于将列表反向排列。 # # 【例子】 # week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(week[1:4:2]) # ['Tuesday', 'Thursday'] print(week[:4:2]) # ['Monday', 'Wednesday'] print(week[1::2]) # ['Tuesday', 'Thursday'] print(week[::-1]) # ['Friday', 'Thursday', 'Wednesday', 'Tuesday', 'Monday'] # - 情况 5 - " : " # - 复制列表中的所有元素(浅拷贝)。 # # 【例子】 # eek = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(week[:]) # ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] # # 【例子】浅拷贝与深拷贝 # + list1 = [123, 456, 789, 213] list2 = list1 list3 = list1[:] print(list2) # [123, 456, 789, 213] print(list3) # [123, 456, 789, 213] list1.sort() print(list2) # [123, 213, 456, 789] print(list3) # [123, 456, 789, 213] list1 = [[123, 456], [789, 213]] list2 = list1 list3 = list1[:] print(list2) # [[123, 456], [789, 213]] print(list3) # [[123, 456], [789, 213]] list1[0][0] = 111 print(list2) # [[111, 456], [789, 213]] print(list3) # [[111, 456], [789, 213]] # - # ## 6. 列表的常用操作符 # # - 等号操作符:`==` # - 连接操作符 `+` # - 重复操作符 `*` # - 成员关系操作符 `in`、`not in` # # 「等号 ==」,只有成员、成员位置都相同时才返回True。 # # 列表拼接有两种方式,用「加号 +」和「乘号 *」,前者首尾拼接,后者复制拼接。 # # # 【例子】 # + list1 = [123, 456] list2 = [456, 123] list3 = [123, 456] print(list1 == list2) # False print(list1 == list3) # True list4 = list1 + list2 # extend() print(list4) # [123, 456, 456, 123] list5 = list3 * 3 print(list5) # [123, 456, 123, 456, 123, 456] list3 *= 3 print(list3) # [123, 456, 123, 456, 123, 456] print(123 in list3) # True print(456 not in list3) # False # - # # 前面三种方法(`append`, `extend`, `insert`)可对列表增加元素,它们没有返回值,是直接修改了原数据对象。 # 而将两个list相加,需要创建新的 list 对象,从而需要消耗额外的内存,特别是当 list 较大时,尽量不要使用 “+” 来添加list。 # # ## 7. 
列表的其它方法 # # `list.count(obj)` 统计某个元素在列表中出现的次数 # # 【例子】 # list1 = [123, 456] * 3 print(list1) # [123, 456, 123, 456, 123, 456] num = list1.count(123) print(num) # 3 # # `list.index(x[, start[, end]])` 从列表中找出某个值第一个匹配项的索引位置 # # 【例子】 # list1 = [123, 456] * 5 print(list1.index(123)) # 0 print(list1.index(123, 1)) # 2 print(list1.index(123, 3, 7)) # 4 # # `list.reverse()` 反向列表中元素 # # 【例子】 x = [123, 456, 789] x.reverse() print(x) # [789, 456, 123] # # `list.sort(key=None, reverse=False)` 对原列表进行排序。 # # - `key` -- 主要是用来进行比较的元素,只有一个参数,具体的函数的参数就是取自于可迭代对象中,指定可迭代对象中的一个元素来进行排序。 # - `reverse` -- 排序规则,`reverse = True` 降序, `reverse = False` 升序(默认)。 # - 该方法没有返回值,但是会对列表的对象进行排序。 # # 【例子】 # # + x = [123, 456, 789, 213] x.sort() print(x) # [123, 213, 456, 789] x.sort(reverse=True) print(x) # [789, 456, 213, 123] # 获取列表的第二个元素 def takeSecond(elem): return elem[1] x = [(2, 2), (3, 4), (4, 1), (1, 3)] x.sort(key=takeSecond) print(x) # [(4, 1), (2, 2), (1, 3), (3, 4)] x.sort(key=lambda a: a[0]) print(x) # [(1, 3), (2, 2), (3, 4), (4, 1)] # - # # # # 元组 # # 「元组」定义语法为:`(元素1, 元素2, ..., 元素n)` # - 小括号把所有元素绑在一起 # - 逗号将每个元素一一分开 # # ## 1. 
创建和访问一个元组 # # - Python 的元组与列表类似,不同之处在于tuple被创建后就不能对其进行修改,类似字符串。 # - 元组使用小括号,列表使用方括号。 # - 元组与列表类似,也用整数来对它进行索引 (indexing) 和切片 (slicing)。 # # 【例子】 # # + t1 = (1, 10.31, 'python') t2 = 1, 10.31, 'python' print(t1, type(t1)) # (1, 10.31, 'python') <class 'tuple'> print(t2, type(t2)) # (1, 10.31, 'python') <class 'tuple'> tuple1 = (1, 2, 3, 4, 5, 6, 7, 8) print(tuple1[1]) # 2 print(tuple1[5:]) # (6, 7, 8) print(tuple1[:5]) # (1, 2, 3, 4, 5) tuple2 = tuple1[:] print(tuple2) # (1, 2, 3, 4, 5, 6, 7, 8) # - # # - 创建元组可以用小括号 (),也可以什么都不用,为了可读性,建议还是用 ()。 # - 元组中只包含一个元素时,需要在元素后面添加逗号,否则括号会被当作运算符使用。 # # 【例子】 # x = (1) print(type(x)) # <class 'int'> x = 2, 3, 4, 5 print(type(x)) # <class 'tuple'> x = [] print(type(x)) # <class 'list'> x = () print(type(x)) # <class 'tuple'> x = (1,) print(type(x)) # <class 'tuple'> # # 【例子】 # print(8 * (8)) # 64 print(8 * (8,)) # (8, 8, 8, 8, 8, 8, 8, 8) # # 【例子】创建二维元组。 # # + x = (1, 10.31, 'python'), ('data', 11) print(x) # ((1, 10.31, 'python'), ('data', 11)) print(x[0]) # (1, 10.31, 'python') print(x[0][0], x[0][1], x[0][2]) # 1 10.31 python print(x[0][0:2]) # (1, 10.31) # - # # # # ## 2. 更新和删除一个元组 # # 【例子】 # week = ('Monday', 'Tuesday', 'Thursday', 'Friday') week = week[:2] + ('Wednesday',) + week[2:] print(week) # ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday') # # 【例子】元组有不可更改 (immutable) 的性质,因此不能直接给元组的元素赋值,但是只要元组中的元素可更改 (mutable),那么我们可以直接更改其元素,注意这跟赋值其元素不同。 # + t1 = (1, 2, 3, [4, 5, 6]) print(t1) # (1, 2, 3, [4, 5, 6]) t1[3][0] = 9 print(t1) # (1, 2, 3, [9, 5, 6]) # - # # # # ## 3. 
元组相关的操作符 # # - 等号操作符:`==` # - 连接操作符 `+` # - 重复操作符 `*` # - 成员关系操作符 `in`、`not in` # # 「等号 ==」,只有成员、成员位置都相同时才返回True。 # # 元组拼接有两种方式,用「加号 +」和「乘号 *」,前者首尾拼接,后者复制拼接。 # # 【例子】 # # + t1 = (123, 456) t2 = (456, 123) t3 = (123, 456) print(t1 == t2) # False print(t1 == t3) # True t4 = t1 + t2 print(t4) # (123, 456, 456, 123) t5 = t3 * 3 print(t5) # (123, 456, 123, 456, 123, 456) t3 *= 3 print(t3) # (123, 456, 123, 456, 123, 456) print(123 in t3) # True print(456 not in t3) # False # - # # ## 4. 内置方法 # # 元组大小和内容都不可更改,因此只有 `count` 和 `index` 两种方法。 # # 【例子】 t = (1, 10.31, 'python') print(t.count('python')) # 1 print(t.index(10.31)) # 1 # - `count('python')` 是记录在元组 `t` 中该元素出现几次,显然是 1 次 # - `index(10.31)` 是找到该元素在元组 `t` 的索引,显然是 1 # # ## 5. 解压元组 # # 【例子】解压(unpack)一维元组(有几个元素左边括号定义几个变量) # # t = (1, 10.31, 'python') (a, b, c) = t print(a, b, c) # 1 10.31 python # 【例子】解压二维元组(按照元组里的元组结构来定义变量) t = (1, 10.31, ('OK', 'python')) (a, b, (c, d)) = t print(a, b, c, d) # 1 10.31 OK python # # 【例子】如果你只想要元组其中几个元素,用通配符「*」,英文叫 wildcard,在计算机语言中代表一个或多个元素。下例就是把多个元素丢给了 `rest` 变量。 # t = 1, 2, 3, 4, 5 a, b, *rest, c = t print(a, b, c) # 1 2 5 print(rest) # [3, 4] # 【例子】如果你根本不在乎 rest 变量,那么就用通配符「*」加上下划线「_」。 t = 1, 2, 3, 4, 5 a, b, *_ = t print(a, b) # 1 2 # # # 字符串 # # ## 1. 字符串的定义 # - Python 中字符串被定义为引号之间的字符集合。 # - Python 支持使用成对的 单引号 或 双引号。 # # 【例子】 # # + t1 = 'i love Python!' print(t1, type(t1)) # i love Python! <class 'str'> t2 = "I love Python!" print(t2, type(t2)) # I love Python! 
<class 'str'> print(5 + 8) # 13 print('5' + '8') # 58 # - # # # - Python 的常用转义字符 # # # 转义字符 | 描述 # :---:|--- # `\\` | 反斜杠符号 # `\'` | 单引号 # `\"` | 双引号 # `\n` | 换行 # `\t` | 横向制表符(TAB) # `\r` | 回车 # # # 【例子】如果字符串中需要出现单引号或双引号,可以使用转义符号`\`对字符串中的符号进行转义。 # print('let\'s go') # let's go print("let's go") # let's go print('C:\\now') # C:\now print("C:\\Program Files\\Intel\\Wifi\\Help") # C:\Program Files\Intel\Wifi\Help # # # # # # 【例子】原始字符串只需要在字符串前边加一个英文字母 r 即可。 # print(r'C:\Program Files\Intel\Wifi\Help') # C:\Program Files\Intel\Wifi\Help # # # 【例子】三引号允许一个字符串跨多行,字符串中可以包含换行符、制表符以及其他特殊字符。 # # + para_str = """这是一个多行字符串的实例 多行字符串可以使用制表符 TAB ( \t )。 也可以使用换行符 [ \n ]。 """ print(para_str) # 这是一个多行字符串的实例 # 多行字符串可以使用制表符 # TAB ( )。 # 也可以使用换行符 [ # ]。 para_str = '''这是一个多行字符串的实例 多行字符串可以使用制表符 TAB ( \t )。 也可以使用换行符 [ \n ]。 ''' print(para_str) # 这是一个多行字符串的实例 # 多行字符串可以使用制表符 # TAB ( )。 # 也可以使用换行符 [ # ]。 # - # # # ## 2. 字符串的切片与拼接 # # - 类似于元组具有不可修改性 # - 从 0 开始 (和 Java 一样) # - 切片通常写成 `start:end` 这种形式,包括「`start` 索引」对应的元素,不包括「`end`索引」对应的元素。 # - 索引值可正可负,正索引从 0 开始,从左往右;负索引从 -1 开始,从右往左。使用负数索引时,会从最后一个元素开始计数。最后一个元素的位置编号是 -1。 # # 【例子】 # # + str1 = 'I Love LsgoGroup' print(str1[:6]) # I Love print(str1[5]) # e print(str1[:6] + " 插入的字符串 " + str1[6:]) # I Love 插入的字符串 LsgoGroup s = 'Python' print(s) # Python print(s[2:4]) # th print(s[-5:-2]) # yth print(s[2]) # t print(s[-1]) # n # - # # # ## 3. 
字符串的常用内置方法 # # # # - `capitalize()` 将字符串的第一个字符转换为大写。 # # 【例子】 # str2 = 'xiaoxie' print(str2.capitalize()) # Xiaoxie # # - `lower()` 转换字符串中所有大写字符为小写。 # - `upper()` 转换字符串中的小写字母为大写。 # - `swapcase()` 将字符串中大写转换为小写,小写转换为大写。 # # 【例子】 # str2 = "DAXIExiaoxie" print(str2.lower()) # daxiexiaoxie print(str2.upper()) # DAXIEXIAOXIE print(str2.swapcase()) # daxieXIAOXIE # # - `count(str, beg= 0,end=len(string))` 返回`str`在 string 里面出现的次数,如果`beg`或者`end`指定则返回指定范围内`str`出现的次数。 # # 【例子】 str2 = "DAXIExiaoxie" print(str2.count('xi')) # 2 # # - `endswith(suffix, beg=0, end=len(string))` 检查字符串是否以指定子字符串 `suffix` 结束,如果是,返回 True,否则返回 False。如果 `beg` 和 `end` 指定值,则在指定范围内检查。 # - `startswith(substr, beg=0,end=len(string))` 检查字符串是否以指定子字符串 `substr` 开头,如果是,返回 True,否则返回 False。如果 `beg` 和 `end` 指定值,则在指定范围内检查。 # # 【例子】 # str2 = "DAXIExiaoxie" print(str2.endswith('ie')) # True print(str2.endswith('xi')) # False print(str2.startswith('Da')) # False print(str2.startswith('DA')) # True # # - `find(str, beg=0, end=len(string))` 检测 `str` 是否包含在字符串中,如果指定范围 `beg` 和 `end`,则检查是否包含在指定范围内,如果包含,返回开始的索引值,否则返回 -1。 # - `rfind(str, beg=0,end=len(string))` 类似于 `find()` 函数,不过是从右边开始查找。 # # 【例子】 # str2 = "DAXIExiaoxie" print(str2.find('xi')) # 5 print(str2.find('ix')) # -1 print(str2.rfind('xi')) # 9 # # - `isnumeric()` 如果字符串中只包含数字字符,则返回 True,否则返回 False。 # # 【例子】 str3 = '12345' print(str3.isnumeric()) # True str3 += 'a' print(str3.isnumeric()) # False # # - `ljust(width[, fillchar])`返回一个原字符串左对齐,并使用`fillchar`(默认空格)填充至长度`width`的新字符串。 # - `rjust(width[, fillchar])`返回一个原字符串右对齐,并使用`fillchar`(默认空格)填充至长度`width`的新字符串。 # # 【例子】 str4 = '1101' print(str4.ljust(8, '0')) # 11010000 print(str4.rjust(8, '0')) # 00001101 # # - `lstrip([chars])` 截掉字符串左边的空格或指定字符。 # - `rstrip([chars])` 删除字符串末尾的空格或指定字符。 # - `strip([chars])` 在字符串上执行`lstrip()`和`rstrip()`。 # # 【例子】 str5 = ' I Love LsgoGroup ' print(str5.lstrip()) # 'I Love LsgoGroup ' print(str5.lstrip().strip('I')) # ' Love LsgoGroup ' print(str5.rstrip()) # ' I Love LsgoGroup' print(str5.strip()) 
# 'I Love LsgoGroup' print(str5.strip().strip('p')) # 'I Love LsgoGrou' # # # - `partition(sub)` 找到子字符串sub,把字符串分为一个三元组`(pre_sub,sub,fol_sub)`,如果字符串中不包含sub则返回`('原字符串','','')`。 # - `rpartition(sub)`类似于`partition()`方法,不过是从右边开始查找。 # # 【例子】 str5 = ' I Love LsgoGroup ' print(str5.strip().partition('o')) # ('I L', 'o', 've LsgoGroup') print(str5.strip().partition('m')) # ('I Love LsgoGroup', '', '') print(str5.strip().rpartition('o')) # ('I Love LsgoGr', 'o', 'up') # # - `replace(old, new [, max])` 把 将字符串中的`old`替换成`new`,如果`max`指定,则替换不超过`max`次。 # # 【例子】 str5 = ' I Love LsgoGroup ' print(str5.strip().replace('I', 'We')) # We Love LsgoGroup # # - `split(str="", num)` 不带参数默认是以空格为分隔符切片字符串,如果`num`参数有设置,则仅分隔`num`个子字符串,返回切片后的子字符串拼接的列表。 # # 【例子】 str5 = ' I Love LsgoGroup ' print(str5.strip().split()) # ['I', 'Love', 'LsgoGroup'] print(str5.strip().split('o')) # ['I L', 've Lsg', 'Gr', 'up'] # # # 【例子】 # + u = "www.baidu.com.cn" # 使用默认分隔符 print(u.split()) # ['www.baidu.com.cn'] # 以"."为分隔符 print((u.split('.'))) # ['www', 'baidu', 'com', 'cn'] # 分割0次 print((u.split(".", 0))) # ['www.baidu.com.cn'] # 分割一次 print((u.split(".", 1))) # ['www', 'baidu.com.cn'] # 分割两次 print(u.split(".", 2)) # ['www', 'baidu', 'com.cn'] # 分割两次,并取序列为1的项 print((u.split(".", 2)[1])) # baidu # 分割两次,并把分割后的三个部分保存到三个变量 u1, u2, u3 = u.split(".", 2) print(u1) # www print(u2) # baidu print(u3) # com.cn # - # 【例子】去掉换行符 # + c = '''say hello baby''' print(c) # say # hello # baby print(c.split('\n')) # ['say', 'hello', 'baby'] # - # # 【例子】 # string = "hello boy<[www.baidu.com]>byebye" print(string.split('[')[1].split(']')[0]) # www.baidu.com print(string.split('[')[1].split(']')[0].split('.')) # ['www', 'baidu', 'com'] # # # - `splitlines([keepends])` 按照行('\r', '\r\n', \n')分隔,返回一个包含各行作为元素的列表,如果参数`keepends`为 False,不包含换行符,如果为 True,则保留换行符。 # # 【例子】 str6 = 'I \n Love \n LsgoGroup' print(str6.splitlines()) # ['I ', ' Love ', ' LsgoGroup'] print(str6.splitlines(True)) # ['I \n', ' Love \n', ' LsgoGroup'] # # # - 
`maketrans(intab, outtab)` 创建字符映射的转换表,第一个参数是字符串,表示需要转换的字符,第二个参数也是字符串表示转换的目标。 # - `translate(table, deletechars="")` 根据参数`table`给出的表,转换字符串的字符,要过滤掉的字符放到`deletechars`参数中。 # # 【例子】 str7 = 'this is string example....wow!!!' intab = 'aeiou' outtab = '12345' trantab = str7.maketrans(intab, outtab) print(trantab) # {97: 49, 111: 52, 117: 53, 101: 50, 105: 51} print(str7.translate(trantab)) # th3s 3s str3ng 2x1mpl2....w4w!!! # # # # # ## 4. 字符串格式化 # # - `format` 格式化函数 # # 【例子】 # + str8 = "{0} Love {1}".format('I', 'Lsgogroup') # 位置参数 print(str8) # I Love Lsgogroup str8 = "{a} Love {b}".format(a='I', b='Lsgogroup') # 关键字参数 print(str8) # I Love Lsgogroup str8 = "{0} Love {b}".format('I', b='Lsgogroup') # 位置参数要在关键字参数之前 print(str8) # I Love Lsgogroup str8 = '{0:.2f}{1}'.format(27.658, 'GB') # 保留小数点后两位 print(str8) # 27.66GB # - # # # - Python 字符串格式化符号 # # # 符 号 | 描述 # :---:|:--- # # %c | 格式化字符及其ASCII码 # # %s | 格式化字符串,用str()方法处理对象 # # %r | 格式化字符串,用rper()方法处理对象 # # %d | 格式化整数 # # %o | 格式化无符号八进制数 # # %x | 格式化无符号十六进制数 # # %X | 格式化无符号十六进制数(大写) # # %f | 格式化浮点数字,可指定小数点后的精度 # # %e | 用科学计数法格式化浮点数 # # %E | 作用同%e,用科学计数法格式化浮点数 # # %g | 根据值的大小决定使用%f或%e # # %G | 作用同%g,根据值的大小决定使用%f或%E # # # 【例子】 print('%c' % 97) # a print('%c %c %c' % (97, 98, 99)) # a b c print('%d + %d = %d' % (4, 5, 9)) # 4 + 5 = 9 print("我叫 %s 今年 %d 岁!" % ('小明', 10)) # 我叫 小明 今年 10 岁! print('%o' % 10) # 12 print('%x' % 10) # a print('%X' % 10) # A print('%f' % 27.658) # 27.658000 print('%e' % 27.658) # 2.765800e+01 print('%E' % 27.658) # 2.765800E+01 print('%g' % 27.658) # 27.658 text = "I am %d years old." % 22 print("I said: %s." % text) # I said: I am 22 years old.. print("I said: %r." % text) # I said: 'I am 22 years old.' 
# # # # - 格式化操作符辅助指令 # # 符号 | 功能 # :---:|:--- # `m.n` | m 是显示的最小总宽度,n 是小数点后的位数(如果可用的话) # `-` | 用作左对齐 # `+` | 在正数前面显示加号( + ) # `#` | 在八进制数前面显示零('0'),在十六进制前面显示'0x'或者'0X'(取决于用的是'x'还是'X') # `0` | 显示的数字前面填充'0'而不是默认的空格 # # 【例子】 print('%5.1f' % 27.658) # ' 27.7' print('%.2e' % 27.658) # 2.77e+01 print('%10d' % 10) # ' 10' print('%-10d' % 10) # '10 ' print('%+d' % 10) # +10 print('%#o' % 10) # 0o12 print('%#x' % 108) # 0x6c print('%010d' % 5) # 0000000005 # # # 字典 # # ## 1. 可变类型与不可变类型 # # - 序列是以连续的整数为索引,与此不同的是,字典以"关键字"为索引,关键字可以是任意不可变类型,通常用字符串或数值。 # - 字典是 Python 唯一的一个 <u>映射类型</u>,字符串、元组、列表属于<u>序列类型</u>。 # # 那么如何快速判断一个数据类型 `X` 是不是可变类型的呢?两种方法: # - 麻烦方法:用 `id(X)` 函数,对 X 进行某种操作,比较操作前后的 `id`,如果不一样,则 `X` 不可变,如果一样,则 `X` 可变。 # - 便捷方法:用 `hash(X)`,只要不报错,证明 `X` 可被哈希,即不可变,反过来不可被哈希,即可变。 # # 【例子】 # + i = 1 print(id(i)) # 140732167000896 i = i + 2 print(id(i)) # 140732167000960 l = [1, 2] print(id(l)) # 4300825160 l.append('Python') print(id(l)) # 4300825160 # - # - 整数 `i` 在加 1 之后的 `id` 和之前不一样,因此加完之后的这个 `i` (虽然名字没变),但不是加之前的那个 `i` 了,因此整数是不可变类型。 # - 列表 `l` 在附加 `'Python'` 之后的 `id` 和之前一样,因此列表是可变类型。 # # # 【例子】 # # + print(hash('Name')) # 7047218704141848153 print(hash((1, 2, 'Python'))) # 1704535747474881831 print(hash([1, 2, 'Python'])) # TypeError: unhashable type: 'list' # - print(hash({1, 2, 3})) # TypeError: unhashable type: 'set' # - 数值、字符和元组 都能被哈希,因此它们是不可变类型。 # - 列表、集合、字典不能被哈希,因此它是可变类型。 # # # # # ## 2. 字典的定义 # # 字典 是无序的 键:值(`key:value`)对集合,键必须是互不相同的(在同一个字典之内)。 # # - `dict` 内部存放的顺序和 `key` 放入的顺序是没有关系的。 # - `dict` 查找和插入的速度极快,不会随着 `key` 的增加而增加,但是需要占用大量的内存。 # # # 字典 定义语法为 `{元素1, 元素2, ..., 元素n}` # # - 其中每一个元素是一个「键值对」-- 键:值 (`key:value`) # - 关键点是「大括号 {}」,「逗号 ,」和「冒号 :」 # - 大括号 -- 把所有元素绑在一起 # - 逗号 -- 将每个键值对分开 # - 冒号 -- 将键和值分开 # # # ## 3. 
创建和访问字典 # # 【例子】 # + brand = ['李宁', '耐克', '阿迪达斯'] slogan = ['一切皆有可能', 'Just do it', 'Impossible is nothing'] print('耐克的口号是:', slogan[brand.index('耐克')]) # 耐克的口号是: Just do it dic = {'李宁': '一切皆有可能', '耐克': 'Just do it', '阿迪达斯': 'Impossible is nothing'} print('耐克的口号是:', dic['耐克']) # 耐克的口号是: Just do it # - # # # # # 【例子】通过字符串或数值作为`key`来创建字典。 # dic1 = {1: 'one', 2: 'two', 3: 'three'} print(dic1) # {1: 'one', 2: 'two', 3: 'three'} print(dic1[1]) # one print(dic1[4]) # KeyError: 4 dic2 = {'rice': 35, 'wheat': 101, 'corn': 67} print(dic2) # {'wheat': 101, 'corn': 67, 'rice': 35} print(dic2['rice']) # 35 # # 注意:如果我们取的键在字典中不存在,会直接报错`KeyError`。 # # 【例子】通过元组作为`key`来创建字典,但一般不这样使用。 # dic = {(1, 2, 3): "Tom", "Age": 12, 3: [3, 5, 7]} print(dic) # {(1, 2, 3): 'Tom', 'Age': 12, 3: [3, 5, 7]} print(type(dic)) # <class 'dict'> # # 通过构造函数`dict`来创建字典。 # # - `dict()` 创建一个空的字典。 # # 【例子】通过`key`直接把数据放入字典中,但一个`key`只能对应一个`value`,多次对一个`key`放入 `value`,后面的值会把前面的值冲掉。 # + dic = dict() dic['a'] = 1 dic['b'] = 2 dic['c'] = 3 print(dic) # {'a': 1, 'b': 2, 'c': 3} dic['a'] = 11 print(dic) # {'a': 11, 'b': 2, 'c': 3} dic['d'] = 4 print(dic) # {'a': 11, 'b': 2, 'c': 3, 'd': 4} # - # # - `dict(mapping)` new dictionary initialized from a mapping object's (key, value) pairs # # 【例子】 # + dic1 = dict([('apple', 4139), ('peach', 4127), ('cherry', 4098)]) print(dic1) # {'cherry': 4098, 'apple': 4139, 'peach': 4127} dic2 = dict((('apple', 4139), ('peach', 4127), ('cherry', 4098))) print(dic2) # {'peach': 4127, 'cherry': 4098, 'apple': 4139} # - # # - `dict(**kwargs)` -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2) # # 【例子】这种情况下,键只能为字符串类型,并且创建的时候字符串不能加引号,加上就会直接报语法错误。 dic = dict(name='Tom', age=10) print(dic) # {'name': 'Tom', 'age': 10} print(type(dic)) # <class 'dict'> # # ## 4. 
字典的内置方法 # # - `dict.fromkeys(seq[, value])` 用于创建一个新字典,以序列 `seq` 中元素做字典的键,`value` 为字典所有键对应的初始值。 # # 【例子】 # + seq = ('name', 'age', 'sex') dic1 = dict.fromkeys(seq) print(dic1) # {'name': None, 'age': None, 'sex': None} dic2 = dict.fromkeys(seq, 10) print(dic2) # {'name': 10, 'age': 10, 'sex': 10} dic3 = dict.fromkeys(seq, ('小马', '8', '男')) print(dic3) # {'name': ('小马', '8', '男'), 'age': ('小马', '8', '男'), 'sex': ('小马', '8', '男')} # - # # - `dict.keys()`返回一个可迭代对象,可以使用 `list()` 来转换为列表,列表为字典中的所有键。 # # 【例子】 dic = {'Name': 'lsgogroup', 'Age': 7} print(dic.keys()) # dict_keys(['Name', 'Age']) lst = list(dic.keys()) # 转换为列表 print(lst) # ['Name', 'Age'] # # - `dict.values()`返回一个迭代器,可以使用 `list()` 来转换为列表,列表为字典中的所有值。 # # 【例子】 # + dic = {'Sex': 'female', 'Age': 7, 'Name': 'Zara'} print(dic.values()) # dict_values(['female', 7, 'Zara']) print(list(dic.values())) # [7, 'female', 'Zara'] # - # # - `dict.items()`以列表返回可遍历的 (键, 值) 元组数组。 # # 【例子】 # + dic = {'Name': 'Lsgogroup', 'Age': 7} print(dic.items()) # dict_items([('Name', 'Lsgogroup'), ('Age', 7)]) print(tuple(dic.items())) # (('Name', 'Lsgogroup'), ('Age', 7)) print(list(dic.items())) # [('Name', 'Lsgogroup'), ('Age', 7)] # - # # - `dict.get(key, default=None)` 返回指定键的值,如果值不在字典中返回默认值。 # # 【例子】 dic = {'Name': 'Lsgogroup', 'Age': 27} print("Age 值为 : %s" % dic.get('Age')) # Age 值为 : 27 print("Sex 值为 : %s" % dic.get('Sex', "NA")) # Sex 值为 : NA print(dic) # {'Name': 'Lsgogroup', 'Age': 27} # # - `dict.setdefault(key, default=None)`和`get()`方法 类似, 如果键不存在于字典中,将会添加键并将值设为默认值。 # # 【例子】 dic = {'Name': 'Lsgogroup', 'Age': 7} print("Age 键的值为 : %s" % dic.setdefault('Age', None)) # Age 键的值为 : 7 print("Sex 键的值为 : %s" % dic.setdefault('Sex', None)) # Sex 键的值为 : None print(dic) # {'Age': 7, 'Name': 'Lsgogroup', 'Sex': None} # # - `key in dict` `in` 操作符用于判断键是否存在于字典中,如果键在字典 dict 里返回`true`,否则返回`false`。而`not in`操作符刚好相反,如果键在字典 dict 里返回`false`,否则返回`true`。 # # 【例子】 # + dic = {'Name': 'Lsgogroup', 'Age': 7} # in 检测键 Age 是否存在 if 'Age' in dic: print("键 Age 
存在") else: print("键 Age 不存在") # 检测键 Sex 是否存在 if 'Sex' in dic: print("键 Sex 存在") else: print("键 Sex 不存在") # not in 检测键 Age 是否存在 if 'Age' not in dic: print("键 Age 不存在") else: print("键 Age 存在") # 键 Age 存在 # 键 Sex 不存在 # 键 Age 存在 # - # # - `dict.pop(key[,default])`删除字典给定键 `key` 所对应的值,返回值为被删除的值。`key` 值必须给出。若`key`不存在,则返回 `default` 值。 # - `del dict[key]` 删除字典给定键 `key` 所对应的值。 # # 【例子】 # + dic1 = {1: "a", 2: [1, 2]} print(dic1.pop(1), dic1) # a {2: [1, 2]} # 设置默认值,必须添加,否则报错 print(dic1.pop(3, "nokey"), dic1) # nokey {2: [1, 2]} del dic1[2] print(dic1) # {} # - # # - `dict.popitem()`随机返回并删除字典中的一对键和值,如果字典已经为空,却调用了此方法,就报出KeyError异常。 # # # 【例子】 dic1 = {1: "a", 2: [1, 2]} print(dic1.popitem()) # {2: [1, 2]} print(dic1) # (1, 'a') # # # - `dict.clear()`用于删除字典内所有元素。 # # # 【例子】 dic = {'Name': 'Zara', 'Age': 7} print("字典长度 : %d" % len(dic)) # 字典长度 : 2 dic.clear() print("字典删除后长度 : %d" % len(dic)) # 字典删除后长度 : 0 # # - `dict.copy()`返回一个字典的浅复制。 # # 【例子】 dic1 = {'Name': 'Lsgogroup', 'Age': 7, 'Class': 'First'} dic2 = dic1.copy() print("dic2") # {'Age': 7, 'Name': 'Lsgogroup', 'Class': 'First'} # 【例子】直接赋值和 copy 的区别 # # + dic1 = {'user': 'lsgogroup', 'num': [1, 2, 3]} # 引用对象 dic2 = dic1 # 浅拷贝父对象(一级目录),子对象(二级目录)不拷贝,还是引用 dic3 = dic1.copy() print(id(dic1)) # 148635574728 print(id(dic2)) # 148635574728 print(id(dic3)) # 148635574344 # 修改 data 数据 dic1['user'] = 'root' dic1['num'].remove(1) # 输出结果 print(dic1) # {'user': 'root', 'num': [2, 3]} print(dic2) # {'user': 'root', 'num': [2, 3]} print(dic3) # {'user': 'runoob', 'num': [2, 3]} # - # # - `dict.update(dict2)`把字典参数 `dict2` 的 `key:value`对 更新到字典 `dict` 里。 # # # 【例子】 dic = {'Name': 'Lsgogroup', 'Age': 7} dic2 = {'Sex': 'female', 'Age': 8} dic.update(dic2) print(dic) # {'Sex': 'female', 'Age': 8, 'Name': 'Lsgogroup'} # # # 集合 # # Python 中`set`与`dict`类似,也是一组`key`的集合,但不存储`value`。由于`key`不能重复,所以,在`set`中,没有重复的`key`。 # # 注意,`key`为不可变类型,即可哈希的值。 # # 【例子】 num = {} print(type(num)) # <class 'dict'> num = {1, 2, 3, 4} print(type(num)) # <class 'set'> # # ## 
1. 集合的创建 # # - 先创建对象再加入元素。 # - 在创建空集合的时候只能使用`s = set()`,因为`s = {}`创建的是空字典。 # # 【例子】 basket = set() basket.add('apple') basket.add('banana') print(basket) # {'banana', 'apple'} # # - 直接把一堆元素用花括号括起来`{元素1, 元素2, ..., 元素n}`。 # - 重复元素在`set`中会被自动被过滤。 # # 【例子】 basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'} print(basket) # {'banana', 'apple', 'pear', 'orange'} # # - 使用`set(value)`工厂函数,把列表或元组转换成集合。 # # 【例子】 # + a = set('abracadabra') print(a) # {'r', 'b', 'd', 'c', 'a'} b = set(("Google", "Lsgogroup", "Taobao", "Taobao")) print(b) # {'Taobao', 'Lsgogroup', 'Google'} c = set(["Google", "Lsgogroup", "Taobao", "Google"]) print(c) # {'Taobao', 'Lsgogroup', 'Google'} # - # # 【例子】去掉列表中重复的元素 # + lst = [0, 1, 2, 3, 4, 5, 5, 3, 1] temp = [] for item in lst: if item not in temp: temp.append(item) print(temp) # [0, 1, 2, 3, 4, 5] a = set(lst) print(list(a)) # [0, 1, 2, 3, 4, 5] # - # # 从结果发现集合的两个特点:无序 (unordered) 和唯一 (unique)。 # # 由于 `set` 存储的是无序集合,所以我们不可以为集合创建索引或执行切片(slice)操作,也没有键(keys)可用来获取集合中元素的值,但是可以判断一个元素是否在集合中。 # # # # # ## 2. 访问集合中的值 # # - 可以使用`len()`內建函数得到集合的大小。 # # 【例子】 s = set(['Google', 'Baidu', 'Taobao']) print(len(s)) # 3 # # - 可以使用`for`把集合中的数据一个个读取出来。 # # 【例子】 # + s = set(['Google', 'Baidu', 'Taobao']) for item in s: print(item) # Baidu # Google # Taobao # - # # - 可以通过`in`或`not in`判断一个元素是否在集合中已经存在 # # 【例子】 s = set(['Google', 'Baidu', 'Taobao']) print('Taobao' in s) # True print('Facebook' not in s) # True # # ## 3. 
集合的内置方法 # # - `set.add(elmnt)`用于给集合添加元素,如果添加的元素在集合中已存在,则不执行任何操作。 # # 【例子】 # + fruits = {"apple", "banana", "cherry"} fruits.add("orange") print(fruits) # {'orange', 'cherry', 'banana', 'apple'} fruits.add("apple") print(fruits) # {'orange', 'cherry', 'banana', 'apple'} # - # # - `set.update(set)`用于修改当前集合,可以添加新的元素或集合到当前集合中,如果添加的元素在集合中已存在,则该元素只会出现一次,重复的会忽略。 # # 【例子】 # + x = {"apple", "banana", "cherry"} y = {"google", "baidu", "apple"} x.update(y) print(x) # {'cherry', 'banana', 'apple', 'google', 'baidu'} y.update(["lsgo", "dreamtech"]) print(y) # {'lsgo', 'baidu', 'dreamtech', 'apple', 'google'} # - # # # - `set.remove(item)` 用于移除集合中的指定元素。如果元素不存在,则会发生错误。 # # 【例子】 fruits = {"apple", "banana", "cherry"} fruits.remove("banana") print(fruits) # {'apple', 'cherry'} # # - `set.discard(value)` 用于移除指定的集合元素。`remove()` 方法在移除一个不存在的元素时会发生错误,而 `discard()` 方法不会。 # # 【例子】 fruits = {"apple", "banana", "cherry"} fruits.discard("banana") print(fruits) # {'apple', 'cherry'} # # - `set.pop()` 用于随机移除一个元素。 # # 【例子】 fruits = {"apple", "banana", "cherry"} x = fruits.pop() print(fruits) # {'cherry', 'apple'} print(x) # banana # 由于 set 是无序和无重复元素的集合,所以两个或多个 set 可以做数学意义上的集合操作。 # - `set.intersection(set1, set2)` 返回两个集合的交集。 # - `set1 & set2` 返回两个集合的交集。 # - `set.intersection_update(set1, set2)` 交集,在原始的集合上移除不重叠的元素。 # # 【例子】 # + a = set('abracadabra') b = set('alacazam') print(a) # {'r', 'a', 'c', 'b', 'd'} print(b) # {'c', 'a', 'l', 'm', 'z'} c = a.intersection(b) print(c) # {'a', 'c'} print(a & b) # {'c', 'a'} print(a) # {'a', 'r', 'c', 'b', 'd'} a.intersection_update(b) print(a) # {'a', 'c'} # - # - `set.union(set1, set2)` 返回两个集合的并集。 # - `set1 | set2` 返回两个集合的并集。 # # 【例子】 # + a = set('abracadabra') b = set('alacazam') print(a) # {'r', 'a', 'c', 'b', 'd'} print(b) # {'c', 'a', 'l', 'm', 'z'} print(a | b) # {'l', 'd', 'm', 'b', 'a', 'r', 'z', 'c'} c = a.union(b) print(c) # {'c', 'a', 'd', 'm', 'r', 'b', 'z', 'l'} # - # # - `set.difference(set)` 返回集合的差集。 # - `set1 - set2` 返回集合的差集。 # - 
`set.difference_update(set)` 集合的差集,直接在原来的集合中移除元素,没有返回值。 # # 【例子】 # + a = set('abracadabra') b = set('alacazam') print(a) # {'r', 'a', 'c', 'b', 'd'} print(b) # {'c', 'a', 'l', 'm', 'z'} c = a.difference(b) print(c) # {'b', 'd', 'r'} print(a - b) # {'d', 'b', 'r'} print(a) # {'r', 'd', 'c', 'a', 'b'} a.difference_update(b) print(a) # {'d', 'r', 'b'} # - # # # - `set.symmetric_difference(set)`返回集合的异或。 # - `set1 ^ set2` 返回集合的异或。 # - `set.symmetric_difference_update(set)`移除当前集合中在另外一个指定集合相同的元素,并将另外一个指定集合中不同的元素插入到当前集合中。 # # 【例子】 # + a = set('abracadabra') b = set('alacazam') print(a) # {'r', 'a', 'c', 'b', 'd'} print(b) # {'c', 'a', 'l', 'm', 'z'} c = a.symmetric_difference(b) print(c) # {'m', 'r', 'l', 'b', 'z', 'd'} print(a ^ b) # {'m', 'r', 'l', 'b', 'z', 'd'} print(a) # {'r', 'd', 'c', 'a', 'b'} a.symmetric_difference_update(b) print(a) # {'r', 'b', 'm', 'l', 'z', 'd'} # - # # - `set.issubset(set)`判断集合是不是被其他集合包含,如果是则返回 True,否则返回 False。 # - `set1 <= set2` 判断集合是不是被其他集合包含,如果是则返回 True,否则返回 False。 # # 【例子】 # + x = {"a", "b", "c"} y = {"f", "e", "d", "c", "b", "a"} z = x.issubset(y) print(z) # True print(x <= y) # True x = {"a", "b", "c"} y = {"f", "e", "d", "c", "b"} z = x.issubset(y) print(z) # False print(x <= y) # False # - # # - `set.issuperset(set)`用于判断集合是不是包含其他集合,如果是则返回 True,否则返回 False。 # - `set1 >= set2` 判断集合是不是包含其他集合,如果是则返回 True,否则返回 False。 # # 【例子】 # + x = {"f", "e", "d", "c", "b", "a"} y = {"a", "b", "c"} z = x.issuperset(y) print(z) # True print(x >= y) # True x = {"f", "e", "d", "c", "b"} y = {"a", "b", "c"} z = x.issuperset(y) print(z) # False print(x >= y) # False # - # - `set.isdisjoint(set)` 用于判断两个集合是不是不相交,如果是返回 True,否则返回 False。 # # 【例子】 # + x = {"f", "e", "d", "c", "b"} y = {"a", "b", "c"} z = x.isdisjoint(y) print(z) # False x = {"f", "e", "d", "m", "g"} y = {"a", "b", "c"} z = x.isdisjoint(y) print(z) # True # - # ## 4. 
集合的转换 # # 【例子】 # + se = set(range(4)) li = list(se) tu = tuple(se) print(se, type(se)) # {0, 1, 2, 3} <class 'set'> print(li, type(li)) # [0, 1, 2, 3] <class 'list'> print(tu, type(tu)) # (0, 1, 2, 3) <class 'tuple'> # - # ## 5. 不可变集合 # # Python 提供了不能改变元素的集合的实现版本,即不能增加或删除元素,类型名叫`frozenset`。需要注意的是`frozenset`仍然可以进行集合操作,只是不能用带有`update`的方法。 # # - `frozenset([iterable])` 返回一个冻结的集合,冻结后集合不能再添加或删除任何元素。 # # 【例子】 # + a = frozenset(range(10)) # 生成一个新的不可变集合 print(a) # frozenset({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) b = frozenset('lsgogroup') print(b) # frozenset({'g', 's', 'p', 'r', 'u', 'o', 'l'}) # - # # 序列 # # 在 Python 中,序列类型包括字符串、列表、元组、集合和字典,这些序列支持一些通用的操作,但比较特殊的是,集合和字典不支持索引、切片、相加和相乘操作。 # # ## 1. 针对序列的内置函数 # # - `list(sub)` 把一个可迭代对象转换为列表。 # # 【例子】 # + a = list() print(a) # [] b = 'I Love LsgoGroup' b = list(b) print(b) # ['I', ' ', 'L', 'o', 'v', 'e', ' ', 'L', 's', 'g', 'o', 'G', 'r', 'o', 'u', 'p'] c = (1, 1, 2, 3, 5, 8) c = list(c) print(c) # [1, 1, 2, 3, 5, 8] # - # # # - `tuple(sub)` 把一个可迭代对象转换为元组。 # # 【例子】 # + a = tuple() print(a) # () b = 'I Love LsgoGroup' b = tuple(b) print(b) # ('I', ' ', 'L', 'o', 'v', 'e', ' ', 'L', 's', 'g', 'o', 'G', 'r', 'o', 'u', 'p') c = [1, 1, 2, 3, 5, 8] c = tuple(c) print(c) # (1, 1, 2, 3, 5, 8) # - # # - `str(obj)` 把obj对象转换为字符串 # # 【例子】 a = 123 a = str(a) print(a) # 123 # # - `len(s)` 返回对象(字符、列表、元组等)长度或元素个数。 # - `s` -- 对象。 # # 【例子】 # + a = list() print(len(a)) # 0 b = ('I', ' ', 'L', 'o', 'v', 'e', ' ', 'L', 's', 'g', 'o', 'G', 'r', 'o', 'u', 'p') print(len(b)) # 16 c = 'I Love LsgoGroup' print(len(c)) # 16 # - # # - `max(sub)`返回序列或者参数集合中的最大值 # # 【例子】 print(max(1, 2, 3, 4, 5)) # 5 print(max([-8, 99, 3, 7, 83])) # 99 print(max('IloveLsgoGroup')) # v # # # - `min(sub)`返回序列或参数集合中的最小值 # # 【例子】 print(min(1, 2, 3, 4, 5)) # 1 print(min([-8, 99, 3, 7, 83])) # -8 print(min('IloveLsgoGroup')) # G # # - `sum(iterable[, start=0])` 返回序列`iterable`与可选参数`start`的总和。 # # 【例子】 print(sum([1, 3, 5, 7, 9])) # 25 print(sum([1, 3, 5, 7, 9], 10)) # 35 print(sum((1, 
3, 5, 7, 9))) # 25 print(sum((1, 3, 5, 7, 9), 20)) # 45 # # # - `sorted(iterable, key=None, reverse=False) ` 对所有可迭代的对象进行排序操作。 # - `iterable` -- 可迭代对象。 # - `key` -- 主要是用来进行比较的元素,只有一个参数,具体的函数的参数就是取自于可迭代对象中,指定可迭代对象中的一个元素来进行排序。 # - `reverse` -- 排序规则,`reverse = True` 降序 , `reverse = False` 升序(默认)。 # - 返回重新排序的列表。 # # 【例子】 # + x = [-8, 99, 3, 7, 83] print(sorted(x)) # [-8, 3, 7, 83, 99] print(sorted(x, reverse=True)) # [99, 83, 7, 3, -8] t = ({"age": 20, "name": "a"}, {"age": 25, "name": "b"}, {"age": 10, "name": "c"}) x = sorted(t, key=lambda a: a["age"]) print(x) # [{'age': 10, 'name': 'c'}, {'age': 20, 'name': 'a'}, {'age': 25, 'name': 'b'}] # - # # # - `reversed(seq)` 函数返回一个反转的迭代器。 # - `seq` -- 要转换的序列,可以是 tuple, string, list 或 range。 # # 【例子】 # + s = 'lsgogroup' x = reversed(s) print(type(x)) # <class 'reversed'> print(x) # <reversed object at 0x000002507E8EC2C8> print(list(x)) # ['p', 'u', 'o', 'r', 'g', 'o', 'g', 's', 'l'] t = ('l', 's', 'g', 'o', 'g', 'r', 'o', 'u', 'p') print(list(reversed(t))) # ['p', 'u', 'o', 'r', 'g', 'o', 'g', 's', 'l'] r = range(5, 9) print(list(reversed(r))) # [8, 7, 6, 5] x = [-8, 99, 3, 7, 83] print(list(reversed(x))) # [83, 7, 3, 99, -8] # - # # # - `enumerate(sequence, [start=0])` # # # # 【例子】用于将一个可遍历的数据对象(如列表、元组或字符串)组合为一个索引序列,同时列出数据和数据下标,一般用在 for 循环当中。 # + seasons = ['Spring', 'Summer', 'Fall', 'Winter'] a = list(enumerate(seasons)) print(a) # [(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')] b = list(enumerate(seasons, 1)) print(b) # [(1, 'Spring'), (2, 'Summer'), (3, 'Fall'), (4, 'Winter')] for i, element in a: print('{0},{1}'.format(i, element)) # 0,Spring # 1,Summer # 2,Fall # 3,Winter # - # # - `zip(iter1 [,iter2 [...]])` # - 用于将可迭代的对象作为参数,将对象中对应的元素打包成一个个元组,然后返回由这些元组组成的对象,这样做的好处是节约了不少的内存。 # - 我们可以使用 `list()` 转换来输出列表。 # - 如果各个迭代器的元素个数不一致,则返回列表长度与最短的对象相同,利用 `*` 号操作符,可以将元组解压为列表。 # # 【例子】 # + a = [1, 2, 3] b = [4, 5, 6] c = [4, 5, 6, 7, 8] zipped = zip(a, b) print(zipped) # <zip object at 0x000000C5D89EDD88> 
print(list(zipped)) # [(1, 4), (2, 5), (3, 6)] zipped = zip(a, c) print(list(zipped)) # [(1, 4), (2, 5), (3, 6)] a1, a2 = zip(*zip(a, b)) print(list(a1)) # [1, 2, 3] print(list(a2)) # [4, 5, 6]
1_python/python_review_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt # # Reflect Tables into SQLAlchemy ORM # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func ,extract engine = create_engine("sqlite:///hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # We can view all of the classes that automap found Base.classes.keys() # Save references to each table # so you dont have to write base.classes all the time Measurement = Base.classes.measurement Station = Base.classes.station dir(Measurement) # + dir(Station) # - # Create our session (link) from Python to the DB session = Session(engine) results = session.query(Measurement.date,Measurement.prcp).all() for result in results: print(result) # # Exploratory Climate Analysis # + june_10start = dt.date(2010,6,1) june_10end = dt.date(2010,6,30) alljune_10 =(june_10end-june_10start) alljune_10 # - # use extract inside filter to results = [] # Perform a query to retrieve the data and precipitation scores results = session.query(Measurement.date, Measurement.tobs ,Station.station).\ filter(extract('month',Measurement.date) == 6).all() #print(results) # Save the query results as a Pandas DataFrame and set the index to the date column june_df = pd.DataFrame(results, columns=['date','temp','station']) #inplace means mod the df where it is dont make a new one (which would be =false) #june_2010_df.set_index(df['date'], inplace=True) # Sort the dataframe by date #june_2010_df = 
df.sort_index() june_df.describe() # + #june_df.plot() # + # December temp averages results = [] # Perform a query to retrieve the data and precipitation scores results = session.query(Measurement.date, Measurement.tobs ,Station.station).\ filter(extract('month',Measurement.date) == 12).all() #print(results) # Save the query results as a Pandas DataFrame and set the index to the date column Dec_df = pd.DataFrame(results, columns=['date','temp','station']) #inplace means mod the df where it is dont make a new one (which would be =false) #Dec_df_df.set_index(df['date'], inplace=True) # Sort the dataframe by date #june_2010_df = df.sort_index() Dec_df.describe() # - Dec_df.plot() # + # Design a query to retrieve the last 12 months of precipitation data and plot the results. #Starting from the last data point in the database. june_10start = dt.date(2010,6,1) june_10end = dt.date(2010,6,30) june_11start = dt.date(2011,6,1) june_11end = dt.date(2011,6,30) # Calculate the date one year from the last date in data set. 
# Perform a query to retrieve the June 2010 dates and precipitation scores.
# (Bug fix: the first filter previously read `Measurement.date = june_10start`,
# which is keyword-argument syntax and a SyntaxError — a `>=` comparison was
# intended. Dead commented-out filter variants for 2012-2017 were removed.)
results = session.query(Measurement.date, Measurement.prcp).\
    filter(Measurement.date >= june_10start).\
    filter(Measurement.date <= june_10end).all()
#print(results)

# Save the query results as a Pandas DataFrame and set the index to the date column
june_2010_df = pd.DataFrame(results, columns=['date', 'precipitation'])
#inplace means mod the df where it is dont make a new one (which would be =false)
#june_2010_df.set_index(df['date'], inplace=True)
# Sort the dataframe by date
#june_2010_df = df.sort_index()
june_2010_df.describe()

# Use Pandas Plotting with Matplotlib to plot the data
june_2010_df.plot()
# -

# Same query for June 2011.
june_11start = dt.date(2011, 6, 1)
june_11end = dt.date(2011, 6, 30)
results = session.query(Measurement.date, Measurement.prcp).\
    filter(Measurement.date >= june_11start).\
    filter(Measurement.date <= june_11end).all()
#filter(Measurement.station == 'USC00519281').all()
print(results)

june_2011_df = pd.DataFrame(results, columns=['date', 'precipitation'])
june_2011_df.describe()

june_2011_df.plot()

# 30 days before 2017-07-01 (scratch calculation; not used below).
july = dt.date(2017, 7, 1) - dt.timedelta(days=30)
july

# +
# Design a query to retrieve the last 12 months of precipitation data and plot the results.
#Starting from the last data point in the database.
# Calculate the date one year before the last date in the data set (2017-08-23).
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)

# Perform a query to retrieve the last 12 months of dates and precipitation scores.
results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()
print(results)

# Save the query results as a Pandas DataFrame indexed on the date column and
# sorted chronologically. (Fix: the frame was previously constructed and
# set_index-ed twice; the duplicate construction has been removed.)
df = pd.DataFrame(results, columns=['date', 'precipitation'])
#inplace means mod the df where it is dont make a new one (which would be =false)
df.set_index(df['date'], inplace=True)
# Sort the dataframe by date
df = df.sort_index()
print(df.to_string(index=False))
# -

# this is per day with inches on the y and dates on the x ,
#table and columns would be good
# Use Pandas Plotting with Matplotlib to plot the data
df.plot()

# Use Pandas to calculate the summary statistics for the precipitation data
df.describe()

# How many stations are available in this dataset?
session.query(func.count(Station.station)).all()

# What are the most active stations?
# List the stations and the counts in descending order.
session.query(Measurement.station, func.count(Measurement.station)).\
    group_by(Measurement.station).\
    order_by(func.count(Measurement.station).desc()).all()

# Using the station id from the previous query, calculate the lowest, highest,
# and average temperature recorded at the most active station.
session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
    filter(Measurement.station == 'USC00519281').all()

# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station
# and plot the results as a histogram.
results = session.query(Measurement.tobs).\
    filter(Measurement.station == 'USC00519281').\
    filter(Measurement.date >= prev_year).all()
print(results)

# +
df = pd.DataFrame(results, columns=['tobs'])
df
# -

# +
# usually want somewhere between 5 and 20 bins, gives different resolutions
df.plot.hist(bins=20)
plt.tight_layout()
# -

# Write a function called `calc_temps` that will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates

# # Challenge

# Design a query to retrieve June 2017 temperature observations for the most
# active station and plot the results.
# (Bug fix: the query previously had only a lower bound of 2017-05-31, so it
# actually returned everything from late May onward — July and August included.
# An explicit June start/end range is used instead.)
june_start = dt.date(2017, 6, 1)
june_end = dt.date(2017, 6, 30)
results = session.query(Measurement.date, Measurement.tobs).\
    filter(Measurement.date >= june_start).\
    filter(Measurement.date <= june_end).\
    filter(Measurement.station == 'USC00519281').all()
print(results)

# Save the query results as a Pandas DataFrame indexed on the date column.
df = pd.DataFrame(results, columns=['date', 'tobs'])
#inplace means mod the df where it is dont make a new one (which would be =false)
df.set_index(df['date'], inplace=True)
df

print(len(results))

# Use Pandas Plotting with Matplotlib to plot the data as a histogram.
# +
df.plot.hist(bins=20)
plt.tight_layout()
# -
climate_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from theano.sandbox import cuda cuda.use('gpu2') # %matplotlib inline import utils; reload(utils) from utils import * from __future__ import division, print_function from keras.layers import TimeDistributed, Activation from numpy.random import choice # ## Setup # We haven't really looked into the detail of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week. path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt") text = open(path).read().lower() print('corpus length:', len(text)) # !tail {path} -n25 # + #path = 'data/wiki/' #text = open(path+'small.txt').read().lower() #print('corpus length:', len(text)) #text = text[0:1000000] # - chars = sorted(list(set(text))) vocab_size = len(chars)+1 print('total chars:', vocab_size) chars.insert(0, "\0") ''.join(chars[1:-6]) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) idx = [char_indices[c] for c in text] idx[:10] ''.join(indices_char[i] for i in idx[:70]) # ## Preprocess and create model maxlen = 40 sentences = [] next_chars = [] for i in range(0, len(idx) - maxlen+1): sentences.append(idx[i: i + maxlen]) next_chars.append(idx[i+1: i+maxlen+1]) print('nb sequences:', len(sentences)) sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]]) next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]]) sentences.shape, next_chars.shape n_fac = 24 model=Sequential([ Embedding(vocab_size, n_fac, input_length=maxlen), LSTM(512, input_dim=n_fac,return_sequences=True, dropout_U=0.2, dropout_W=0.2, consume_less='gpu'), Dropout(0.2), LSTM(512, return_sequences=True, dropout_U=0.2, dropout_W=0.2, consume_less='gpu'), 
Dropout(0.2), TimeDistributed(Dense(vocab_size)), Activation('softmax') ]) model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam()) # ## Train def print_example(): seed_string="ethics is a basic foundation of all that" for i in range(320): x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:] preds = model.predict(x, verbose=0)[0][-1] preds = preds/np.sum(preds) next_char = choice(chars, p=preds) seed_string = seed_string + next_char print(seed_string) model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1) print_example() model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1) print_example() model.optimizer.lr=0.001 model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1) print_example() model.optimizer.lr=0.0001 model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1) print_example() model.save_weights('data/char_rnn.h5') model.optimizer.lr=0.00001 model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1) print_example() model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1) print_example() print_example() model.save_weights('data/char_rnn.h5')
deeplearning1/nbs/char-rnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.7 64-bit (''nlp'': conda)' # language: python # name: python3 # --- # # TODO # - Embedding for all the lines of the document # <!-- - Embeddings for all concepts --> # <!-- - Each concept has a list of neighboring concepts based on similarity (e.g. cosine similarity) --> # <!-- - The searched term will be embedded and compared to all concepts --> # - The searched term will be embedded and compared to all lines of the corpus (with hashing to accelerate) # <!-- - Return patients having the neighboring concepts of the searched term --> # - Return patients that have big similarity import os # path = %pwd if path.split(os.sep)[-1] == 'notebooks': # %cd .. # + # # %pip install -U sentence-transformers -q # - # ### Importing # + # ----------------------------------- tech ----------------------------------- # import os import glob import pickle # ---------------------------- Display and friends --------------------------- # from tqdm import tqdm from matplotlib import pyplot as plt # ------------------------- Transformers and freinds ------------------------- # from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModel from sentence_transformers import SentenceTransformer, util import torch import torch.nn.functional as F import numpy as np # ------------------------ Classification and friends ------------------------ # from scipy.cluster.hierarchy import dendrogram from sklearn.cluster import AgglomerativeClustering, KMeans from sklearn.manifold import TSNE from annoy import AnnoyIndex # ----------------------------------- local ---------------------------------- # from data_preprocessing import Get_and_process_data from utils.parse_data import parse_concept # - # ### Configurations lines_per_tokenization = 5 clusters = 10 trees = 100 filename_split_key = "__at__" # 
Load model from HuggingFace Hub device = "cuda" model_checkpoint = "sentence-transformers/multi-qa-MiniLM-L6-cos-v1" # model_checkpoint = "gsarti/scibert-nli" # model_checkpoint = "logs/scibert_20_epochs_64_batch_99_train_split" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) model = AutoModel.from_pretrained(model_checkpoint) data_path = "../data/train/txt" embeddings_path = data_path + os.sep + "embeddings" similarity = torch.nn.CosineSimilarity() if not os.path.exists(embeddings_path): os.makedirs(embeddings_path) # ### utils # + #Mean Pooling - Take average of all tokens def mean_pooling(model_output, attention_mask): token_embeddings = model_output.last_hidden_state #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) #Encode text def encode(texts, tokenizer = tokenizer, model= model): # Tokenize sentences encoded_input = tokenizer(texts, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input, return_dict=True) # Perform pooling embeddings = mean_pooling(model_output, encoded_input['attention_mask']) # Normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) return embeddings def find_cluster(query_emb, clustered_data, similarity=similarity): best_cluster = None best_score = -1 for i in clustered_data.keys(): center = clustered_data[i]["center"] score = similarity(query_emb, center) if score >= best_score: best_cluster = i best_score = score return best_cluster def text_splitter(text, file_path): con_file_path = os.path.dirname(os.path.dirname(file_path)) + os.sep + "concept" + os.sep + os.path.basename(file_path).split(".")[0] + ".con" concepts_lines = list(set(parse_concept(con_file_path)["start_line"])) concepts_lines.sort() texts = 
text.split("\n") concepts = [] for line in concepts_lines: concepts.append(texts[line-1]) return concepts def semantic_search_base(query_emb, doc_emb, docs): #Compute dot score between query and all document embeddings scores = torch.mm(query_emb, doc_emb.transpose(0, 1))[0].cpu().tolist() #Combine docs & scores doc_score_pairs = list(zip(docs, scores)) #Sort by decreasing score doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True) print(doc_score_pairs) #Output passages & scores for doc, score in doc_score_pairs: print("==> ",score) print(doc) def forward(texts, tokenizer= tokenizer, model= model): # Tokenize sentences encoded_input = tokenizer(texts, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings model_output = model(**encoded_input, return_dict=True) # Perform pooling embeddings = mean_pooling(model_output, encoded_input['attention_mask']) # Normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) return embeddings def forward_doc(text, file_path, tokenizer= tokenizer, model= model, no_grad= False): texts = text_splitter(text, file_path) if len(texts) == 0: return [] # Tokenize sentences encoded_input = tokenizer(texts, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings if no_grad: with torch.no_grad(): model_output = model(**encoded_input, return_dict=True) else : model_output = model(**encoded_input, return_dict=True) # Perform pooling embeddings = mean_pooling(model_output, encoded_input['attention_mask']) # NOTE: This is an easy approach # another mean pooling over the lines of the document # embeddings = torch.mean(embeddings_lines, 0).unsqueeze(0) # Normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) return embeddings # - # ### Testing Inference from checkpoint model =model.eval() # + # Sentences we want sentence embeddings for query = "How many people live in London?" 
docs = ["Around 9 Million people live in London", "London is known for its financial district"] #Encode query and docs query_emb = encode(query) doc_emb = encode(docs) semantic_search_base(query_emb, doc_emb, docs) # - # 0.915637195110321 Around 9 Million people live in London # # # 0.49475765228271484 London is known for its financial district # ### Testing training encoded_input = tokenizer(query, padding=True, truncation=True, return_tensors='pt') model_output = model(**encoded_input, return_dict=True) # model_output encoded_input["input_ids"].shape model_output.last_hidden_state.shape model_output.pooler_output.shape # + # model.train() query = "How many people live in London?" answer = "Around 9 Million people live in London" loss_fn = torch.nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=1e-6) q = forward(query) print("q shape :", q.shape) a = forward(answer) print("a shape :", a.shape) loss = loss_fn(a,q) optimizer.zero_grad() # loss.backward() # optimizer.step() # - # ### Getting data # + doc = "" file_path = "../data/train/txt/018636330_DH.txt" with open(file_path) as f: doc = f.read() doc_emb = forward_doc(doc, file_path, no_grad= True) doc_emb.shape # - c_emb= encode("hyperreflexia in feet") semantic_search_base(c_emb, doc_emb, text_splitter(doc, file_path)) # ### Saving embeddings # what are the elements in the folder ../data/train/txt/ all_docs = {} text_files = glob.glob(data_path + os.sep + "*.txt") for file_path in tqdm(text_files, "Encoding documents", ascii=True): with open(file_path) as f: doc = f.read() file_name = os.path.basename(file_path).split(".")[0] embeddings = forward_doc(doc, file_path, no_grad=True) for i,emb in enumerate(embeddings): all_docs[file_name+filename_split_key+str(i)] = emb.unsqueeze(0) with open(embeddings_path + os.sep + "all_docs_concepts.pkl", "wb") as f: pickle.dump(all_docs, f) # + # with open(embeddings_path + os.sep + "all_docs_concepts.pkl", "rb") as f: # all_docs = pickle.load(f) # - 
len(all_docs) # ### Classify the embeddings # We can use hierachical clustering to classify the embeddings for a very search efficient task. But for simplicity, we will only perform K-means clustering. sample_names_list = list(map(lambda x: x[0], all_docs.items()))[:] sample_values_list = list(map(lambda x: x[1], all_docs.items()))[:] sample = np.array(list(map(lambda x: x.numpy().reshape(-1), sample_values_list))) # array of 1 dim vectors sample.shape # #### Test hierachical clustering clustering = AgglomerativeClustering(distance_threshold=0.7, n_clusters=None).fit(sample) # + def plot_dendrogram(model, **kwargs): # Create linkage matrix and then plot the dendrogram # create the counts of samples under each node counts = np.zeros(model.children_.shape[0]) n_samples = len(model.labels_) for i, merge in enumerate(model.children_): current_count = 0 for child_idx in merge: if child_idx < n_samples: current_count += 1 # leaf node else: current_count += counts[child_idx - n_samples] counts[i] = current_count linkage_matrix = np.column_stack( [model.children_, model.distances_, counts] ).astype(float) # Plot the corresponding dendrogram dendrogram(linkage_matrix, **kwargs) plt.title("Hierarchical Clustering Dendrogram") # plot the top three levels of the dendrogram plot_dendrogram(clustering, truncate_mode="level", p=5) plt.xlabel("Number of points in node (or index of point if no parenthesis).") plt.show() # - # #### Test K-means clustering clustering = KMeans(n_clusters = clusters).fit(sample) # Scatter plot using TSNE def plot_clutering(sample): new_sample = TSNE(n_components=2).fit_transform(sample) plt.scatter(new_sample[:, 0], new_sample[:, 1], c=clustering.labels_) plt.figure(figsize=(20, 10)) plt.show() # # plot in 3D # new_sample_3D = TSNE(n_components=3).fit_transform(sample) # fig = plt.figure() # ax = fig.add_subplot(111, projection='3d') # ax.scatter(new_sample_3D[:, 0], new_sample_3D[:, 1], new_sample_3D[:, 2], c=clustering.labels_) # plt.show() 
plot_clutering(sample)

# Per-cluster membership counts.  Use the `clusters` config constant defined
# at the top of the notebook instead of a hard-coded 10, so changing the
# K-means configuration keeps this report correct.
for i in range(clusters):
    print("cluster", i, "->", list(clustering.labels_).count(i), "element")

# +
# Re-organise the embeddings by K-means cluster: each entry keeps the cluster
# centre (as a 1 x dim torch tensor) plus a name -> embedding map of members.
clustered_data = {}
for i, center in enumerate(clustering.cluster_centers_):
    clustered_data[i] = {"center": torch.tensor(center.reshape(1, -1)), "elements": {}}
for i, cluster in enumerate(clustering.labels_):
    clustered_data[cluster]["elements"][sample_names_list[i]] = all_docs[sample_names_list[i]]
# -

with open(embeddings_path + os.sep + "clustered_data_concepts.pkl", "wb") as f:
    pickle.dump(clustered_data, f)

# ### Annoy

# +
# Build an approximate-nearest-neighbour index over the same sample vectors,
# using dot product as the similarity (vectors are L2-normalised upstream).
search_index = AnnoyIndex(sample.shape[1], 'dot')

for i in range(len(sample)):
    search_index.add_item(i, sample[i])

search_index.build(trees)
# -

embeddings_path + os.sep + "annoy_index_concepts.ann"

search_index.save(embeddings_path + os.sep + "annoy_index_concepts.ann")

with open(embeddings_path + os.sep + "index_to_name.pkl", "wb") as f:
    pickle.dump(sample_names_list, f)

# ### Search

# #### For Kmeans clustering

with open(embeddings_path + os.sep + "clustered_data_concepts.pkl", "rb") as f:
    clustered_data = pickle.load(f)

center = clustered_data[0]["center"]


# +
class Buffer_best_k:
    """Fixed-size buffer that keeps the k highest scores (descending) with payloads."""

    def __init__(self, k, initia_value=-float("inf")):
        # NOTE(review): the (misspelled) keyword name `initia_value` is kept
        # for backward compatibility with any existing callers.
        self.k = k
        self.values = [initia_value] * self.k
        self.data = [None] * self.k

    def new_val(self, value, data=None):
        """Insert (value, data) if value beats a kept score; return True when inserted."""
        for i in range(self.k):
            if self.values[i] < value:
                # Shift lower-ranked entries down one slot; the last one falls off.
                self.values[i+1:] = self.values[i:-1]
                self.data[i+1:] = self.data[i:-1]
                self.values[i] = value
                self.data[i] = data
                return True
        return False

    def get_data(self):
        return self.data

    def get_values(self):
        return self.values


# +
# query = "DIGOXIN and AMIODARONE HCL"
query = "positive to abdominal carcinomatosis"

# Embed the query, pick the closest K-means cluster, then rank only that
# cluster's members by cosine similarity and keep the 10 best.
query_emb = encode(query)
cluster = find_cluster(query_emb, clustered_data)

buffer = Buffer_best_k(k=10)
for name, doc_emb in clustered_data[cluster]["elements"].items():
    score = similarity(query_emb, doc_emb)
    # print(name, "\t{:.2f}".format(float(score)))
    buffer.new_val(score, name)

print(buffer.get_values())
print(buffer.get_data())
# -

# #### For Annoy

# Ask the Annoy index for the nearest neighbours of the raw query vector and
# report each matching document name with its score.
neighbour_ids, neighbour_scores = search_index.get_nns_by_vector(
    query_emb.numpy().reshape(-1), clusters, include_distances=True
)
for doc_id, doc_score in zip(neighbour_ids, neighbour_scores):
    print(sample_names_list[doc_id], "\t{:.2f}".format(doc_score))
src/notebooks/clustering_concepts_V2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import datetime
import glob
import sys
import zipfile

import numpy as np
import pandas as pd

# File-name timestamp format, e.g. "26-11-2021_2-53-13_PM".  Using %I/%p lets
# strptime apply the AM/PM marker itself, which also handles the edge cases
# the previous manual "+12 hours when PM" adjustment got wrong:
# 12 AM must become 00:xx and 12 PM must stay 12:xx.
TIMESTAMP_FORMAT = "%d-%m-%Y_%I-%M-%S_%p"


def parse_timestamp(stamp):
    """Parse a file-name timestamp such as '26-11-2021_2-53-13_PM'.

    Returns a naive ``datetime.datetime`` on a 24-hour clock.
    Sanity check: '26-11-2021_2-53-13_PM' -> 2021-11-26 14:53:13.
    """
    return datetime.datetime.strptime(stamp, TIMESTAMP_FORMAT)


def main():
    """Load every .txt file in data.zip into per-series time series and plot them."""
    # Plotting / project-specific imports are only needed when actually run.
    import matplotlib.pyplot as plt

    z = zipfile.ZipFile('../../data.zip')
    common_dict = {}
    for file in z.namelist():
        if '.txt' not in file:
            continue
        # The third path component identifies which series the file belongs to.
        key = file.split("/")[2]
        if key not in common_dict:
            common_dict[key] = []
        content = pd.read_csv(z.open(file), sep=' ', header=None)
        stamp = file.split("/")[-1].split('.')[0]
        _df = content.iloc[:, :1].T
        _df['dt'] = [parse_timestamp(stamp)]
        common_dict[key].append(_df)

    for key in common_dict:
        common_dict[key] = pd.concat(common_dict[key]).set_index('dt')
        common_dict[key].plot(marker='o')
        plt.figure()
        common_dict[key].mean(1).plot(marker='o')
        plt.title(key)
        print(key, common_dict[key].index[-1] - common_dict[key].index[0])

    common_dict['7'].plot()

    # Local checkouts of the anomaly-detection project (machine-specific paths).
    sys.path.insert(1, 'C:/Users/kompn/YandexDisk/papers/!Turbo/DL-anomaly-detection/')
    sys.path.insert(1, 'C:/Users/V.Kozitisn/Disk/papers/!Turbo/DL-anomaly-detection/')
    from tsad.src.useful import split_by_repeated  # noqa: F401  (kept for interactive use)

    common_dict['7'].sort_index().to_csv('8.csv', sep=';')
    print(len(common_dict['7']))
    common_dict['7'].sort_index().plot()


if __name__ == "__main__":
    main()
macro_micro_analytics/get_time_series_from_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import json import pickle import random from collections import defaultdict, Counter from indra.literature.adeft_tools import universal_extract_text from indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id from adeft.discover import AdeftMiner from adeft.gui import ground_with_gui from adeft.modeling.label import AdeftLabeler from adeft.modeling.classify import AdeftClassifier from adeft.disambiguate import AdeftDisambiguator, load_disambiguator from adeft_indra.ground.ground import AdeftGrounder from adeft_indra.model_building.s3 import model_to_s3 from adeft_indra.model_building.escape import escape_filename from adeft_indra.db.content import get_pmids_for_agent_text, get_pmids_for_entity, \ get_plaintexts_for_pmids # - adeft_grounder = AdeftGrounder() shortforms = ['PI'] model_name = ':'.join(sorted(escape_filename(shortform) for shortform in shortforms)) results_path = os.path.abspath(os.path.join('../..', 'results', model_name)) # + miners = dict() all_texts = {} for shortform in shortforms: pmids = get_pmids_for_agent_text(shortform) text_dict = get_plaintexts_for_pmids(pmids, contains=shortforms) text_dict = {pmid: text for pmid, text in text_dict.items() if len(text) > 5} miners[shortform] = AdeftMiner(shortform) miners[shortform].process_texts(text_dict.values()) all_texts.update(text_dict) longform_dict = {} for shortform in shortforms: longforms = miners[shortform].get_longforms() longforms = [(longform, count, score) for longform, count, score in longforms if count*score > 2] longform_dict[shortform] = longforms combined_longforms = Counter() for longform_rows in longform_dict.values(): combined_longforms.update({longform: count for longform, count, score in longform_rows}) grounding_map = {} names = {} for 
longform in combined_longforms: groundings = adeft_grounder.ground(longform) if groundings: grounding = groundings[0]['grounding'] grounding_map[longform] = grounding names[grounding] = groundings[0]['name'] longforms, counts = zip(*combined_longforms.most_common()) pos_labels = [] # - list(zip(longforms, counts)) try: disamb = load_disambiguator(shortforms[0]) for shortform, gm in disamb.grounding_dict.items(): for longform, grounding in gm.items(): grounding_map[longform] = grounding for grounding, name in disamb.names.items(): names[grounding] = name pos_labels = disamb.pos_labels except Exception: pass names grounding_map, names, pos_labels = ground_with_gui(longforms, counts, grounding_map=grounding_map, names=names, pos_labels=pos_labels, no_browser=True, port=8891) result = [grounding_map, names, pos_labels] result grounding_map, names, pos_labels = [{'pain intensity': 'ungrounded', 'pain interference': 'ungrounded', 'pallidal index': 'ungrounded', 'pam interaction': 'ungrounded', 'pancreatic insufficiency': 'MESH:D010188', 'parainflammation': 'ungrounded', 'parallel imaging': 'ungrounded', 'parametrial involved': 'ungrounded', 'parent incarceration': 'ungrounded', 'pars intercerebralis': 'ungrounded', 'pars intermedia': 'ungrounded', 'paternity index': 'ungrounded', 'path integrity': 'ungrounded', 'pathogen inactivation': 'ungrounded', 'pattern identification': 'ungrounded', 'pausing index': 'ungrounded', 'peak intensity': 'ungrounded', 'peak interval': 'ungrounded', 'pelvic incidence': 'ungrounded', 'pentamidine isethionate': 'CHEBI:CHEBI:7977', 'performed improvement': 'ungrounded', 'performed index': 'ungrounded', 'performed indicated': 'ungrounded', 'perfusion index': 'MESH:D000081282', 'period': 'ungrounded', 'peripheral iridectomy': 'MESH:D032801', 'peritoneal implant': 'ungrounded', 'permanent implant': 'ungrounded', 'permeability index': 'ungrounded', 'peroxide intermediate': 'ungrounded', 'peroxidizability index': 'ungrounded', 'persistently 
index': 'ungrounded', 'persistently infection': 'ungrounded', 'phagocytic index': 'ungrounded', 'phatidylinositol': 'CHEBI:CHEBI:28874', 'phospha tidylinositol': 'CHEBI:CHEBI:28874', 'phosphatidyl inositol': 'CHEBI:CHEBI:28874', 'phosphatidyl myo inositol': 'CHEBI:CHEBI:28874', 'phosphatidylinositide': 'CHEBI:CHEBI:28874', 'phosphatidylinositol': 'CHEBI:CHEBI:28874', 'phospho ibuprofen': 'MESH:C559491', 'phospho inositide': 'CHEBI:CHEBI:28874', 'phosphoinositide': 'CHEBI:CHEBI:28874', 'phosphoinositide hydrolysis': 'CHEBI:CHEBI:28874', 'phosphoinositol': 'CHEBI:CHEBI:28874', 'phospholipids': 'MESH:D010743', 'phosphotidylinositol': 'CHEBI:CHEBI:28874', 'phosphotyrosine interaction': 'ungrounded', 'photoinactivation': 'ungrounded', 'photoinitiator': 'ungrounded', 'physical inactivity': 'ungrounded', 'phytochemical index': 'ungrounded', 'phytosanitary irradiation': 'ungrounded', 'pi for 4 h': 'ungrounded', 'pistillata': 'ungrounded', 'pkr inhibitor': 'ungrounded', 'place invariants': 'ungrounded', 'placental insufficiency': 'MESH:D010927', 'plant introduction': 'ungrounded', 'plantar incision': 'ungrounded', 'plaque index': 'ungrounded', 'plasma insulin': 'HGNC:6081', 'plasmin inhibitor': 'ungrounded', 'pma and calcium ionophore': 'ungrounded', 'pma and ionomycin': 'ungrounded', 'pma ionomycin': 'ungrounded', 'pneumatosis intestinalis': 'ungrounded', 'point': 'ungrounded', 'polar index': 'ungrounded', 'polydispersity index': 'ungrounded', 'polyfunctionality index': 'ungrounded', 'polyimide': 'ungrounded', 'polyinosinic acid': 'CHEBI:CHEBI:76777', 'polyisoprene': 'CHEBI:CHEBI:53405', 'polyprenyl immunostimulant': 'ungrounded', 'ponderal index': 'ungrounded', 'positive index': 'ungrounded', 'positive intervention': 'ungrounded', 'post immune': 'ungrounded', 'post infection': 'ungrounded', 'post injection': 'ungrounded', 'post injury': 'ungrounded', 'post inoculation': 'ungrounded', 'post inspiratory': 'ungrounded', 'post institutionalized': 'ungrounded', 'post 
irradiation': 'ungrounded', 'posterior insula': 'ungrounded', 'postinfection': 'ungrounded', 'postinoculation': 'ungrounded', 'povidone iodine': 'ungrounded', 'pr inhibitor': 'ungrounded', 'preference index': 'ungrounded', 'prescriptive information': 'ungrounded', 'pressure injury': 'ungrounded', 'primary index': 'ungrounded', 'primary insomnia': 'ungrounded', 'principal investigated': 'ungrounded', 'pristinamycin i': 'ungrounded', 'proactive interference': 'ungrounded', 'probability interval': 'ungrounded', 'progestin independent': 'ungrounded', 'prognostic index': 'ungrounded', 'proinflammatory': 'ungrounded', 'proinsulin': 'MESH:D011384', 'proliferation index': 'ungrounded', 'proliferative index': 'ungrounded', 'prolonged infusion': 'ungrounded', 'propidium iodide': 'CHEBI:CHEBI:51240', 'propidium iodine': 'CHEBI:CHEBI:51240', 'propionate acid': 'ungrounded', 'proportional integrity': 'ungrounded', 'prostate index': 'ungrounded', 'prostate inflammation': 'ungrounded', 'prostate interstitial brachytherapy': 'ungrounded', 'protease inhibitor': 'MESH:D011480', 'protease inhibitor cocktail': 'MESH:D011480', 'proteasome inhibition': 'CHEBI:CHEBI:52726', 'proteasome inhibitor': 'CHEBI:CHEBI:52726', 'protective index': 'ungrounded', 'protein inclusions': 'ungrounded', 'proteinase inhibitor': 'MESH:D011480', 'proximal intestine': 'ungrounded', 'psychological intervention': 'ungrounded', 'psychophysiological insomnia': 'ungrounded', 'pulmonary insufficiency': 'ungrounded', 'pulsatility index': 'ungrounded', 'pulsatility indicated': 'ungrounded', 'pulse interval': 'ungrounded', 'pulse inversion': 'ungrounded', 'punishment insensitivity': 'ungrounded', 'pyrrole imidazole': 'ungrounded'}, {'MESH:D010188': 'Exocrine Pancreatic Insufficiency', 'CHEBI:CHEBI:7977': 'pentamidine isethionate', 'MESH:D000081282': 'Perfusion Index', 'MESH:D032801': 'Iridectomy', 'CHEBI:CHEBI:28874': 'phosphatidylinositol', 'MESH:C559491': '2-(4-isobutylphenyl)propionic acid 
4-(diethoxyphosphoryloxy)butyl ester', 'MESH:D010743': 'Phospholipids', 'MESH:D010927': 'Placental Insufficiency', 'HGNC:6081': 'INS', 'CHEBI:CHEBI:76777': 'poly(inosinic acid)', 'CHEBI:CHEBI:53405': 'poly(isoprene) macromolecule', 'MESH:D011384': 'Proinsulin', 'CHEBI:CHEBI:51240': 'propidium iodide', 'MESH:D011480': 'Protease Inhibitors', 'CHEBI:CHEBI:52726': 'proteasome inhibitor'}, ['CHEBI:CHEBI:28874', 'CHEBI:CHEBI:51240', 'MESH:D011480']] excluded_longforms = ['pi for 4h', 'pkr inhibitor', 'pr inhibitor'] # + grounding_dict = {shortform: {longform: grounding_map[longform] for longform, _, _ in longforms if longform in grounding_map and longform not in excluded_longforms} for shortform, longforms in longform_dict.items()} result = [grounding_dict, names, pos_labels] if not os.path.exists(results_path): os.mkdir(results_path) with open(os.path.join(results_path, f'{model_name}_preliminary_grounding_info.json'), 'w') as f: json.dump(result, f) # - additional_entities = {} unambiguous_agent_texts = {} # + labeler = AdeftLabeler(grounding_dict) corpus = labeler.build_from_texts((text, pmid) for pmid, text in all_texts.items()) agent_text_pmid_map = defaultdict(list) for text, label, id_ in corpus: agent_text_pmid_map[label].append(id_) entity_pmid_map = {entity: set(get_pmids_for_entity(*entity.split(':', maxsplit=1), major_topic=True))for entity in additional_entities} # - intersection1 = [] for entity1, pmids1 in entity_pmid_map.items(): for entity2, pmids2 in entity_pmid_map.items(): intersection1.append((entity1, entity2, len(pmids1 & pmids2))) intersection2 = [] for entity1, pmids1 in agent_text_pmid_map.items(): for entity2, pmids2 in entity_pmid_map.items(): intersection2.append((entity1, entity2, len(set(pmids1) & pmids2))) intersection1 intersection2 # + all_used_pmids = set() for entity, agent_texts in unambiguous_agent_texts.items(): used_pmids = set() for agent_text in agent_texts[1]: pmids = set(get_pmids_for_agent_text(agent_text)) new_pmids = 
list(pmids - all_texts.keys() - used_pmids) text_dict = get_plaintexts_for_pmids(new_pmids, contains=agent_texts) corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items() if len(text) >= 5]) used_pmids.update(new_pmids) all_used_pmids.update(used_pmids) for entity, pmids in entity_pmid_map.items(): new_pmids = list(set(pmids) - all_texts.keys() - all_used_pmids) if len(new_pmids) > 10000: new_pmids = random.choices(new_pmids, k=10000) _, contains = additional_entities[entity] text_dict = get_plaintexts_for_pmids(new_pmids, contains=contains) corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items() if len(text) >= 5]) # - names.update({key: value[0] for key, value in additional_entities.items()}) names.update({key: value[0] for key, value in unambiguous_agent_texts.items()}) pos_labels = list(set(pos_labels) | additional_entities.keys() | unambiguous_agent_texts.keys()) # + # %%capture classifier = AdeftClassifier(shortforms, pos_labels=pos_labels, random_state=1729) param_grid = {'C': [100.0], 'max_features': [10000]} texts, labels, pmids = zip(*corpus) classifier.cv(texts, labels, param_grid, cv=5, n_jobs=5) # - classifier.stats disamb = AdeftDisambiguator(classifier, grounding_dict, names) disamb.dump(model_name, results_path) print(disamb.info()) model_to_s3(disamb) preds = [disamb.disambiguate(text) for text in all_texts.values()] texts = [text for pred, text in zip(preds, all_texts.values()) if pred[0] == 'HGNC:10967'] texts[3]
model_notebooks/PI/model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt import numpy as np import sklearn # + data_train = pd.read_csv('train.csv').set_index('Id') data_test = pd.read_csv('test.csv').set_index('Id') data = pd.concat([data_train.iloc[:,:-1], data_test]) data # - list(zip(data_train.columns,data_train.dtypes, data.dtypes)) # #### Descriptions of each column in 'data_description.txt'. #examine data_description file = open('data_description.txt', 'r') text = file.read() print(text) # #### Next we compare the training and test datasets. The two sets are not imbalanced in any way. display(data_train.describe()) # + display(data_test.describe()) # compute a relative differences of means. Nothing abnormaly. (data_train.describe().loc['mean']-data_test.describe().loc['mean'])/(data_train.describe().loc['mean']+data_test.describe().loc['mean']) # - # #### One more sanity check: compare the distributions of house type in the training and test sets. # + # visualize the distribution of type of houses "MSSubClass" plt.style.use('seaborn') plt.figure(figsize=(10,6)) #bins=len(set(data_train.MSSubClass.values)) bins=[20,30,40,45,50,60,70,75,80,85,90,120,150,160,180,190] plt.hist([data_train['MSSubClass'],data_test.MSSubClass], bins=bins, label=['Train','Test']) #plt.xticks(np.array(bins),bins) plt.ylabel('Frequency') plt.xlabel('House type code (see "data_description.txt")') plt.title('House type distribution') plt.legend() plt.show() #conclusion: training and test sets have similar house type distributions. # - # #### Next, see how many 'NaN'/'None' values in each columns. # Combine the following counts with the column description in 'data_description.txt', we conclude that most of the 'NaN'/'None' values are indicating 'Not applicable'. 
# + n_na=[] for col in data.columns: n_na.append((sum(data_train[col].isna()), sum(data_test[col].isna()))) n_Na = list(zip(data_train.columns, n_na)) n_Na # + corrs = [(col, abs(data_train.SalePrice.corr(data_train[col]))) for col in data_train.columns[:-1] if data_train[col].dtype in ['int64','float64','bool']] corrs.sort(key= lambda x:x[1], reverse=True) corrs = dict(corrs) corrs # - # 'MSSubClass' should be categorical rather than numeric or ordinal, # change its dtype to 'str' and later replaced with dummy variables. # data_train['MSSubClass'] = data_train['MSSubClass'].apply(str) data_test['MSSubClass'] = data_test['MSSubClass'].apply(str) data_train['YrSold'] = data_train['YrSold'].astype('str') data_test['YrSold'] = data_test['YrSold'].astype('str') # + data_train.shape, data_test.shape # + # exam the 1 missing value for 'electrical' data_train.loc[data_train.Electrical.isna(), 'Electrical'] = data # 'street'=Pave, 'YrSold'=2008, it's unlikely that this house does not have an electrical system. # Must be missing data. Fill in mean value. # - data_train[data_train.BsmtExposure.isna()| data_train.BsmtFinType2.isna()][['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinSF1','BsmtFinType2','BsmtFinSF2','BsmtUnfSF','TotalBsmtSF']] #delete the two missing unit rows data_train=data_train.drop([332,948]) data_train[data_train.BsmtExposure.isna()| data_train.BsmtFinType2.isna()][['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinSF1','BsmtFinType2','BsmtFinSF2','BsmtUnfSF','TotalBsmtSF']]; # #### Same inspection for test set # + n_na=[] for col in data_test.columns: n_na.append(sum(data_test[col].isna())) list(zip(data_test.columns, n_na)) # - plt.scatter(data_train.YrSold, data_train.SalePrice, alpha=0.1) data_train.groupby('YrSold')[['SalePrice']].mean() # 'YrSold' should be categorical, change dtype into 'str' to get dummy variables later. 
# + fig,ax=plt.subplots(10,2,figsize=(5,30)) for i in range(10): ax[i][0].scatter(data_train[corrs[i][0]], data_train.SalePrice) ax[i][0].set_xlabel(corrs[i][0]) ax[i][1].scatter(data_train[corrs[i][0]], data_train.SalePrice) ax[i][1].set_xlabel(corrs[i][0]) ax[i][1].set_yscale('log') plt.show() # - data_train[(data_train.GrLivArea>4000) & (data_train.SalePrice<200000)][['SalePrice', 'OverallQual','YrSold','SaleType','SaleCondition','1stFlrSF']] data_train = data_train.drop([524,1299]) data_train[data_train.GarageCars >3][['GarageArea','GarageCars']] # + data_train[data_train.SaleCondition == 'Partial'] data_train.SaleCondition.value_counts() # - # ### Prepare training and test datasets data_train.shape, data_test.shape x_all = pd.concat([data_train.iloc[:,:-1], data_test]) y_train = data_train['SalePrice'].apply(np.log) exp_y_train = data_train['SalePrice'] x_all.shape, y_train.dtype # + cat_vars = [col for col in x_all.columns if x_all[col].dtype not in ['int64','float64','bool']] #cat_vars_test = [col for col in data_test.columns if data_test[col].dtype not in ['int64','float64','bool']] # + x_all_dummies = pd.get_dummies(x_all, prefix=cat_vars, dummy_na=True) x_train_dummies = x_all_dummies[:len(data_train)] x_test_dummies = x_all_dummies[len(data_train):] x_train_dummies.shape, x_test_dummies.shape, y_train.shape # - from sklearn.model_selection import train_test_split # + x_train, x_cv, y_train, y_cv, exp_y_train, exp_y_cv = train_test_split(x_train_dummies, y_train, exp_y_train, test_size=0.35,random_state =57) #x_train, x_cv, y_train, y_cv = train_test_split(x_train, y_train, test_size=0.33, random_state=57) #x_train.shape, len(y_train), x_cv.shape, y_test.shape # + from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() x_train_scaled = scaler.fit_transform(x_train) x_cv_scaled = scaler.fit_transform(x_cv) x_test_scaled = scaler.fit_transform(x_test_dummies) # + from sklearn.decomposition import PCA pca = PCA(n_components=100) # - import 
xgboost as xgb from xgboost import XGBRegressor from sklearn.metrics import mean_squared_error as mse DM_train = xgb.DMatrix(x_train.values, label=y_train) DM_cv = xgb.DMatrix(x_cv, label=y_cv) #DM_test = xgb.DMatrix(x_test, label=y_test) model_xgb = XGBRegressor(learning_rate=.1, n_estimators = 20000, max_depth = 2) exp_model_xgb = XGBRegressor(learning_rate=.1, n_estimators = 20000, max_depth = 2) model_xgb.fit(x_train, y_train) pred_cv=model_xgb.predict(x_cv) pred_train = model_xgb.predict(x_train) pred_test = model_xgb.predict(x_test_dummies) exp_model_xgb.fit(x_train, exp_y_train) exp_pred_cv=exp_model_xgb.predict(x_cv) exp_pred_train = exp_model_xgb.predict(x_train) exp_pred_test = exp_model_xgb.predict(x_test_dummies) # + rmse_train = np.sqrt(mse(pred_train, y_train)) rmse_cv = np.sqrt(mse(pred_cv, y_cv)) exp_rmse_train = np.sqrt(mse(np.exp(pred_train), np.exp(y_train))) exp_rmse_cv = np.sqrt(mse(np.exp(pred_cv), np.exp(y_cv))) rmse_train, rmse_cv, exp_rmse_train, exp_rmse_cv, np.sqrt(mse(exp_pred_train, exp_y_train)), np.sqrt(mse(exp_pred_cv, exp_y_cv)) # - list(zip(pred_cv, y_cv)) results = pd.DataFrame({'Id':x_test_dummies.index, 'SalePrice':np.exp(pred_test)}).set_index('Id', drop=True) results results.to_csv('result1.csv') results2 = pd.DataFrame({'Id':x_test_dummies.index, 'SalePrice':exp_pred_test}).set_index('Id', drop=True) results2 results2.to_csv('result2.csv') # + from sklearn.model_selection import GridSearchCV # -
notebook 1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Hopping and Tumbling Windows
#
# In this demonstration we'll see how to create Tables with windowing enabled.
#
# ### Tumbling Windows
#
# Let's create a tumbling clickevents table, where the window size is 30 seconds.
#
# ```
# CREATE STREAM clickevents_tumbling AS
#   SELECT * FROM clickevents
#   WINDOW TUMBLING (SIZE 30 SECONDS);
# ```
#
# ### Hopping Windows
#
# Now we can create a Table with a hopping window of 30 seconds with 5 second increments.
#
# ```
# CREATE TABLE clickevents_hopping AS
#   SELECT uri FROM clickevents
#   WINDOW HOPPING (SIZE 30 SECONDS, ADVANCE BY 5 SECONDS)
#   WHERE uri LIKE 'http://www.b%'
#   GROUP BY uri;
# ```
#
# The above window is 30 seconds long and advances by 5 seconds. If you query the table you will see
# the associated window times!
#
# ### Session Windows
#
# Finally, let's see how session windows work. We're going to define the session as 5 minutes in
# order to group many events into the same window.
#
# ```
# CREATE TABLE clickevents_session AS
#   SELECT uri FROM clickevents
#   WINDOW SESSION (5 MINUTES)
#   WHERE uri LIKE 'http://www.b%'
#   GROUP BY uri;
# ```
#
2. Data Ingestion with Kafka & Kafka Streaming/Lesson7/Exercise7.4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Now You Code 4: Guess A Number
#
# Write a program to play the classic "Guess a number" game.
#
# In this game the computer selects a random number between 1 and 10.
# It's your job to guess the number. Whenever you guess, the computer will
# give you a hint of higher or lower. This process repeats until you guess
# the number, after which the computer reports the number of guesses it took you.
#
# For Example:
#
#     I'm thinking of a number between 1 and 10...
#     Your guess: 5
#     Too low. Guess higher.
#     Your guess: 7
#     Too high. Guess lower.
#     Your guess: 6
#     You guessed it in 3 tries.
#
# Your loop should continue until your input guess equals the
# computer generated random number.
#
# ### How do you make Python generate a random number?

# Sample code which demonstrates how to generate a number between 1 and 10
import random
number = random.randint(1, 10)
print(number)

# Run the cell above a couple of times. Notice how each time you execute the code,
# it comes up with a different number.
#
# Here's a breakdown of the code
#
# ```
# line 1 imports the random module
# line 2 randomly selects an integer between 1 and 10
# line 3 prints the number
# ```
#
# Now that you understand how to generate a random number, try to design then write
# code for the program. The first step in your program should be to generate the
# random number.

# ## Step 1: Problem Analysis
#
# Inputs:
#
#     repeated number guesses from the user
#
# Outputs:
#
#     "too high" / "too low" hints after each wrong guess
#     the number of tries once the right number is guessed
#
# Algorithm (Steps in Program):
#
#     1. pick a random number between 1 and 10
#     2. loop: read a guess, count it, and compare it to the number
#     3. print a hint and keep looping until the guess is correct
#     4. report how many tries it took


def check_guess(guess, secret):
    """Compare a guess to the secret number.

    Returns 'correct' when they match, 'high' when the guess is too high,
    and 'low' when the guess is too low.
    """
    if guess == secret:
        return 'correct'
    if guess > secret:
        return 'high'
    return 'low'


def main(low=1, high=10):
    """Play guess-a-number between `low` and `high` (inclusive).

    Loops until the user guesses the random secret number, then reports the
    number of tries.  The previous version read the input once before the
    loop and broke out on every branch, so only a single guess was ever
    allowed (and its final `else` branch was unreachable).
    """
    secret = random.randint(low, high)
    total = 0
    while True:
        guess = int(input("Enter a number from %d to %d. " % (low, high)))
        total = total + 1
        result = check_guess(guess, secret)
        if result == 'correct':
            print("You got the right number. ")
            print(total)
            break
        elif result == 'high':
            print("Too high, guess lower. ")
        else:
            print("Too low, guess higher. ")


if __name__ == "__main__":
    main()

# ## Step 3: Questions
#
# 1. Which loop did you use to solve the problem? What is a definite or indefinite
#    loop? I used a while loop, which is indefinite because you don't know how long
#    it will take the number to be guessed.
# 2. Modify this program to allow you to guess a number between 1 and 100. How much
#    of your code did you need to change to make this work? Just change the `high`
#    bound (e.g. call `main(1, 100)`).
# 3. This program is a good example of a difficult problem to conceptualize which
#    has a simple solution when you look at actual lines of code. I assume you did
#    not write this in a single try, so explain where you got stuck and describe
#    your approach to overcoming it. I could not get the total amount of guesses
#    to work so I looked at other codes for help.
#
# ## Reminder of Evaluation Criteria
#
# 1. What the problem attempted (analysis, code, and answered questions) ?
# 2. What the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting,
#    handles errors)
#
content/lessons/05/Now-You-Code/NYC4-Guess-A-Number.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" # We'll start by an introduction to Keras. # # Some of the examples are from [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). # + [markdown] colab_type="text" # ## Keras and MNIST # Let us first build an introductory model for the MNIST dataset. # More on MNIST can be found here: https://www.kaggle.com/hojjatk/mnist-dataset?msclkid=a59b1a61bffd11ec953ecf3f2a143919 # More on Layers API can be found here: https://keras.io/api/layers/?msclkid=0799043ac07911ec832b34d2444c3574 # On layers types, regularizers, initializers (beyond the Keras Layerd Documentation): https://www.tutorialspoint.com/keras/keras_layers.htm # + [markdown] colab_type="text" # **Loading the MNIST dataset in Keras** # + colab_type="code" from tensorflow.keras.datasets import mnist (train_images, train_labels), (test_images, test_labels) = mnist.load_data() # + colab_type="code" train_images.shape # + colab_type="code" len(train_labels) len(train_images) # + colab_type="code" train_labels train_images # + colab_type="code" test_images.shape # + colab_type="code" len(test_labels) # + colab_type="code" test_labels # + [markdown] colab_type="text" # **Building the network** # - # So network architecture for MNIST. 
# For example: https://github.com/JHP4911/htMultiple-MLP-Architectures-on-MNIST-database-using-Keras?msclkid=b0b3576ec08a11ecb731533187ed8284 # Or in the future even: https://keras.io/examples/vision/mnist_convnet/?msclkid=b0b3b888c08a11eca260b4525969c7f7 # + colab_type="code" from tensorflow import keras from tensorflow.keras import layers model = keras.Sequential([ layers.Dense(512, activation="relu"), layers.Dense(10, activation="softmax") ]) # + colab_type="code" model.compile(optimizer="adamax", loss="sparse_categorical_crossentropy", metrics=["accuracy"]) # + [markdown] colab_type="text" # **Preparing the image data** # + colab_type="code" train_images = train_images.reshape((60000, 28 * 28)) train_images = train_images.astype("float32") / 255 test_images = test_images.reshape((10000, 28 * 28)) test_images = test_images.astype("float32") / 255 # - import matplotlib.pyplot as plt digit = test_images[0].reshape(28, 28) plt.imshow(digit, cmap=plt.cm.binary) plt.show() # + [markdown] colab_type="text" # **"Fitting" the model** # + colab_type="code" model.fit(train_images, train_labels, epochs=5, batch_size=128) # + [markdown] colab_type="text" # **Using the model to make predictions** # + colab_type="code" test_digits = test_images[0:10] predictions = model.predict(test_digits) predictions[0] # + colab_type="code" predictions[0].argmax() # + colab_type="code" predictions[0][7] # + colab_type="code" test_labels[0] # + [markdown] colab_type="text" # **Evaluating the model on new data** # + colab_type="code" test_loss, test_acc = model.evaluate(test_images, test_labels) print(f"test_acc: {test_acc}") # + [markdown] colab_type="text" # ### Reimplementing our first example from scratch in TensorFlow # - # ![Keras vs. 
Tensorflow](../img/keras-tensorflow.png) # + [markdown] colab_type="text" # #### A simple Dense class # + colab_type="code" import tensorflow as tf class NaiveDense: def __init__(self, input_size, output_size, activation): self.activation = activation w_shape = (input_size, output_size) w_initial_value = tf.random.uniform(w_shape, minval=0, maxval=1e-1) self.W = tf.Variable(w_initial_value) b_shape = (output_size,) b_initial_value = tf.zeros(b_shape) self.b = tf.Variable(b_initial_value) def __call__(self, inputs): return self.activation(tf.matmul(inputs, self.W) + self.b) @property def weights(self): return [self.W, self.b] # + [markdown] colab_type="text" # #### A simple Sequential class # + colab_type="code" class NaiveSequential: def __init__(self, layers): self.layers = layers def __call__(self, inputs): x = inputs for layer in self.layers: x = layer(x) return x @property def weights(self): weights = [] for layer in self.layers: weights += layer.weights return weights # + colab_type="code" model = NaiveSequential([ NaiveDense(input_size=28 * 28, output_size=512, activation=tf.nn.relu), NaiveDense(input_size=512, output_size=10, activation=tf.nn.softmax) ]) assert len(model.weights) == 4 # + [markdown] colab_type="text" # #### A batch generator # + colab_type="code" import math class BatchGenerator: def __init__(self, images, labels, batch_size=128): assert len(images) == len(labels) self.index = 0 self.images = images self.labels = labels self.batch_size = batch_size self.num_batches = math.ceil(len(images) / batch_size) def next(self): images = self.images[self.index : self.index + self.batch_size] labels = self.labels[self.index : self.index + self.batch_size] self.index += self.batch_size return images, labels # + [markdown] colab_type="text" # ### Running one training step # + colab_type="code" def one_training_step(model, images_batch, labels_batch): with tf.GradientTape() as tape: predictions = model(images_batch) per_sample_losses = 
tf.keras.losses.sparse_categorical_crossentropy( labels_batch, predictions) average_loss = tf.reduce_mean(per_sample_losses) gradients = tape.gradient(average_loss, model.weights) update_weights(gradients, model.weights) return average_loss # + colab_type="code" learning_rate = 1e-3 def update_weights(gradients, weights): for g, w in zip(gradients, weights): w.assign_sub(g * learning_rate) # + colab_type="code" from tensorflow.keras import optimizers optimizer = optimizers.SGD(learning_rate=1e-3) def update_weights(gradients, weights): optimizer.apply_gradients(zip(gradients, weights)) # + [markdown] colab_type="text" # ### The full training loop # + colab_type="code" def fit(model, images, labels, epochs, batch_size=128): for epoch_counter in range(epochs): print(f"Epoch {epoch_counter}") batch_generator = BatchGenerator(images, labels) for batch_counter in range(batch_generator.num_batches): images_batch, labels_batch = batch_generator.next() loss = one_training_step(model, images_batch, labels_batch) if batch_counter % 100 == 0: print(f"loss at batch {batch_counter}: {loss:.2f}") # + colab_type="code" from tensorflow.keras.datasets import mnist (train_images, train_labels), (test_images, test_labels) = mnist.load_data() train_images = train_images.reshape((60000, 28 * 28)) train_images = train_images.astype("float32") / 255 test_images = test_images.reshape((10000, 28 * 28)) test_images = test_images.astype("float32") / 255 fit(model, train_images, train_labels, epochs=10, batch_size=128) # + [markdown] colab_type="text" # ### Evaluating the model # + colab_type="code" import numpy as np predictions = model(test_images) predictions = predictions.numpy() predicted_labels = np.argmax(predictions, axis=1) matches = predicted_labels == test_labels print(f"accuracy: {matches.mean():.2f}")
Classes/Class-04.20/code/keras-intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + id="zEtX9cQa3qgY" colab_type="code" colab={} import torch # + [markdown] id="sBX43jfy3qg2" colab_type="text" # ### torch.nn Module # + id="ahu2gndg3qg3" colab_type="code" colab={} import torch.nn as nn import torch.nn.functional as F # + id="ogPKP3ps3qg6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="53a4fc30-3f03-49a6-f1bd-5edca99c0a2f" executionInfo={"status": "ok", "timestamp": 1564091131514, "user_tz": 240, "elapsed": 4473, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} # create simple Neural Network class NeuralNetwork(nn.Module): def __init__(self, input_size, output_size, hidden_size): super(NeuralNetwork, self).__init__() # create fully connected layers self.dense_layer_1 = nn.Linear(input_size, hidden_size) self.dense_layer_2 = nn.Linear(hidden_size, hidden_size) # create output self.output = nn.Linear(hidden_size, output_size) def forward(self, x): # use forward to construct the forward pass of the computational graph x = F.relu(self.dense_layer_1(x)) x = F.relu(self.dense_layer_2(x)) return self.output(x) my_neural_network = NeuralNetwork(input_size=5, output_size=1, hidden_size=32) print(my_neural_network) # + [markdown] id="LrshCi5l3qgd" colab_type="text" # ### Gradients with Autograd # + id="EoobcSJy3qge" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="041b29cc-266a-4d6b-e1c6-17c752ab9b43" executionInfo={"status": "ok", "timestamp": 1564091131655, "user_tz": 240, "elapsed": 4505, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} # create a tensor of ones a = torch.ones(3, 3) print(a) # + id="9LA5F5Pz3qgk" colab_type="code" colab={"base_uri": "https://localhost:8080/", 
"height": 68} outputId="3a03e17b-7004-4654-d249-c8c86a47ead4" executionInfo={"status": "ok", "timestamp": 1564091131656, "user_tz": 240, "elapsed": 3477, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} # enable gradient computaton a.requires_grad = True print(a) # + id="hytFPg0N3qgo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="9dd3a2e5-d59c-49cb-96a1-7ffff60ac107" executionInfo={"status": "ok", "timestamp": 1564091131657, "user_tz": 240, "elapsed": 2919, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} # perform operation on tensor a b = a * 5 + 2 print(b) # + id="CyL__dTR3qgs" colab_type="code" colab={} # perform operation on b c = b.sum() # + id="mfa-CPgW3qgv" colab_type="code" colab={} # compute gradients with .backward() c.backward() # + id="ZaO0jIzG3qgy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="884e3370-017f-4751-cb42-a9fd4292e845" executionInfo={"status": "ok", "timestamp": 1564091131666, "user_tz": 240, "elapsed": 604, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} # print gradients of tensor a print(a.grad) # + [markdown] id="ddtlwiNJ3qg9" colab_type="text" # ### Loss and Optimizer # + id="NizLGx2H3qg-" colab_type="code" colab={} # import optimizer import torch.optim as optim # + id="LE_J-wFa3qhB" colab_type="code" colab={} # create neural network my_neural_network = NeuralNetwork(input_size=5, output_size=1, hidden_size=32) #create mean squared error loss function loss_fn = nn.MSELoss() # create Adam optimizer optimizer = optim.Adam(my_neural_network.parameters()) # create data input_data = torch.randn(32,5) target_data = input_data.sum(dim=1) * 10 - 2 target_data = target_data.view(-1,1) # + id="0RYAWhHT3qhE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="6d1c20a2-87ec-43c0-9757-7b41dc47ae19" 
executionInfo={"status": "ok", "timestamp": 1564091156668, "user_tz": 240, "elapsed": 1363, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} # train network 1000 episodes for i in range(1000): # feed input data into network and get neural network output neural_network_output = my_neural_network(input_data) # calculate loss on neural network output and target data loss = loss_fn(neural_network_output, target_data) # clear the accumulated gradients optimizer.zero_grad() # backpropagate the gradients loss.backward() # optimize the neural network parameters in the direction of the gradients optimizer.step() if i % 50 == 0: print("Episode {} Loss is {}".format(i,loss)) # + id="81Uzl-WC3qhI" colab_type="code" colab={}
Section 1/1.4 Introduction to PyTorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/andreidore/aicrowd_blitz_may_2020/blob/master/minileaves.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="V74G8tLT-DIn" colab_type="code" outputId="659e6c5c-e352-44ba-95a0-f81048c418a3" colab={"base_uri": "https://localhost:8080/", "height": 442} # !pip install wandb==0.8.33 # + id="F1dxWAWK-aNk" colab_type="code" outputId="96e5d371-7118-4a1c-c78e-c4e38dca52c3" colab={"base_uri": "https://localhost:8080/", "height": 765} # !wget https://s3.eu-central-1.wasabisys.com/aicrowd-practice-challenges/public/minileaves/v0.1/train-images.npy # !wget https://s3.eu-central-1.wasabisys.com/aicrowd-practice-challenges/public/minileaves/v0.1/train-labels.npy # !wget https://s3.eu-central-1.wasabisys.com/aicrowd-practice-challenges/public/minileaves/v0.1/test-images.npy # !wget https://s3.eu-central-1.wasabisys.com/aicrowd-practice-challenges/public/minileaves/v0.1/all_classes.txt # + id="VtU2eJn__Aoe" colab_type="code" colab={} from collections import Counter import numpy as np from sklearn.model_selection import train_test_split from sklearn.utils.class_weight import compute_class_weight import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, Dropout, Activation, BatchNormalization from tensorflow.keras.optimizers import Adam from tensorflow.keras.utils import to_categorical from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau from tensorflow.keras import regularizers, optimizers import matplotlib.pyplot as plt # %matplotlib 
inline import wandb from wandb.keras import WandbCallback # + id="3YgmzwUsLtwD" colab_type="code" outputId="7df46c44-a9f9-4a30-8819-e51ac2f65159" colab={"base_uri": "https://localhost:8080/", "height": 119} wandb.init(name='CNN-5', project="minileaves",config={"hyper": "parameter"}) # + [markdown] id="StNu8qfd_QxG" colab_type="text" # ## Import data # + id="Uuqe05Dy_Lq_" colab_type="code" colab={} train_images_path = "train-images.npy" #path where data is stored train_labels_path = "train-labels.npy" train_images = np.load(train_images_path) train_labels = np.load(train_labels_path) # Load Class mapping class_names = [x.strip() for x in open("all_classes.txt").readlines()] # + id="EafWm94z_UxX" colab_type="code" outputId="34259858-15e2-4ea9-c5fb-a7cac90dcb5c" colab={"base_uri": "https://localhost:8080/", "height": 105} print(train_images.shape) print(train_labels.shape) print(train_labels[0:10]) print(class_names) # + [markdown] id="771Imft_A70K" colab_type="text" # ## Visualize date # + id="Kf2hw7DkA-IY" colab_type="code" outputId="99dc87ca-2a11-437c-b9b1-6c512c9fe0a9" colab={"base_uri": "https://localhost:8080/", "height": 1000} def random_data_sample(): """ Returns a random data sample from the training set """ random_index = np.random.randint(0, train_images.shape[0]) # Render the image image_array = train_images[random_index] class_label = train_labels[random_index] # an integral number in [0-38) class_name = class_names[class_label] # a human readable class name return image_array, class_label, class_name fig=plt.figure(figsize=(20, 20)) w=10 h=10 columns = 4 rows = 5 for i in range(1, columns*rows +1): img = np.random.randint(10, size=(h,w)) fig.add_subplot(rows, columns, i) image,class_label,class_name=random_data_sample() plt.imshow(image) plt.title(class_name) # + [markdown] id="U8Dol1hwEWy2" colab_type="text" # ## Distribution # + id="h_hc0IFHEYzd" colab_type="code" outputId="ab5410ff-0b35-4c1d-80d8-9ddcec3c630c" colab={"base_uri": 
"https://localhost:8080/", "height": 265} #data_counter = Counter(train_labels) #unique_class_indices = data_counter.keys() #for _class_index in unique_class_indices: # print("Class Index : ", _class_index) # print("Class Name : ", class_names[_class_index]) # print("Number of images in the dataset : ", data_counter[_class_index]) # print("="*100) plt.hist(train_labels, bins = len(np.unique(train_labels))) plt.show() # + id="FTZLU77mFy7F" colab_type="code" colab={} X_train, X_val= train_test_split(train_images, test_size=0.2, random_state=42) y_train, y_val= train_test_split(train_labels, test_size=0.2, random_state=42) # + id="DTaWEkVXKx4N" colab_type="code" outputId="74ed49ac-d2d4-49c6-8c0f-fc30da7388d9" colab={"base_uri": "https://localhost:8080/", "height": 139} #X_train = X_train.astype('float32')/255 #X_val = X_val.astype('float32')/255 print(np.unique(y_train)) class_weights = compute_class_weight('balanced',np.unique(y_train),y_train) class_weights = dict(enumerate(class_weights)) print(class_weights) mean = np.mean(X_train) std = np.std(X_train) X_train = (X_train-mean)/(std+1e-7) X_val = (X_val-mean)/(std+1e-7) # one-hot encode the labels num_classes = len(np.unique(y_train)) y_train = to_categorical(y_train, num_classes) y_val = to_categorical(y_val, num_classes) # print shape of training set print('x_train shape:', X_train.shape) # print number of training, validation, and test images print(X_train.shape[0], 'train samples') print(X_val.shape[0], 'validation samples') # + id="ISqrIY-BOREE" colab_type="code" outputId="03515e47-25a4-460b-dd0d-fa3e6b94e11a" colab={"base_uri": "https://localhost:8080/", "height": 1000} # number of hidden units variable # we are declaring this variable here and use it in our CONV layers to make it easier to update from one place base_hidden_units = 32 # l2 regularization hyperparameter weight_decay = 1e-4 # instantiate an empty sequential model model = Sequential() # CONV1 # notice that we defined the input_shape here 
because this is the first CONV layer. # we don’t need to do that for the remaining layers model.add(Conv2D(base_hidden_units, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=X_train.shape[1:])) model.add(Activation('relu')) model.add(BatchNormalization()) # CONV2 model.add(Conv2D(base_hidden_units, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.2)) # CONV3 model.add(Conv2D(2*base_hidden_units, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) # CONV4 model.add(Conv2D(2*base_hidden_units, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.3)) # CONV5 model.add(Conv2D(4*base_hidden_units, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) # CONV6 model.add(Conv2D(4*base_hidden_units, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay))) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(1024, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Dense(1024, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) # FC7 #model.add(Flatten()) model.add(Dense(num_classes, activation='softmax')) # print model summary model.summary() # + id="spxsksbLdfAw" colab_type="code" colab={} datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=15, width_shift_range=0.1, 
height_shift_range=0.1, horizontal_flip=True, vertical_flip=False ) # compute the data augmentation on the training set datagen.fit(X_train) # + id="H0_H3kZ_oa6o" colab_type="code" colab={} optimizer = Adam(lr=0.001,decay=1e-6) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # + id="sg9XdiT6Ou9o" colab_type="code" outputId="6448c8c8-c941-47d8-a0ed-3e9f5a2a3981" colab={"base_uri": "https://localhost:8080/", "height": 1000} # compile the model lrr= ReduceLROnPlateau( monitor='val_loss', #Metric to be measured factor=.5, #Factor by which learning rate will be reduced patience=5, #No. of epochs after which if there is no improvement in the val_acc, the learning rate is reduced min_lr=1e-6) #The minimum learning rate model_checkpoint=ModelCheckpoint(filepath="best_model.hdf5",verbose=1,save_best_only=True) wand_callback=WandbCallback() batch_size=128 history=model.fit(datagen.flow(X_train, y_train, batch_size=batch_size),steps_per_epoch=X_train.shape[0] // batch_size,epochs=100,verbose=2, callbacks=[model_checkpoint,wand_callback,lrr],validation_data=(X_val,y_val),shuffle=True) # + id="UaaklaaJRfVC" colab_type="code" colab={} model.load_weights('best_model.hdf5') # + id="kJ5TNkp1YDEh" colab_type="code" outputId="7aa4776f-1bd6-45d6-83d2-988a415dc8a7" colab={"base_uri": "https://localhost:8080/", "height": 68} scores = model.evaluate(X_val, y_val, batch_size=128, verbose=1) print('\nTest result: %.3f loss: %.3f' % (scores[1]*100,scores[0])) # + [markdown] id="cLKGtDmlZBrR" colab_type="text" # # + id="HR1hdu_IZCxC" colab_type="code" colab={} test_file_path = "test-images.npy" test_images = np.load(test_file_path) # + id="dk48NlY7ZSN-" colab_type="code" colab={} #test_images=test_images.astype('float32')/255 test_images = (test_images-mean)/(std+1e-7) # + id="WtUaF3AXZNJv" colab_type="code" colab={} submission=model.predict(test_images) # + id="wBcAzq4zZnuF" colab_type="code" outputId="34d05bb8-166f-4af8-86d0-c37112db14b6" 
colab={"base_uri": "https://localhost:8080/", "height": 34} submission=np.argmax(submission,axis=1) print(submission.shape) # + [markdown] id="K_-hNgK9Jx7p" colab_type="text" # ## Save the prediction to csv # + id="UQrGefmRJ2lD" colab_type="code" colab={} import pandas as pd submission = pd.DataFrame(submission) submission.to_csv('submission.csv',header=['class_index'],index=False) # + [markdown] id="Smf9n3qoJ52k" colab_type="text" # ## To download the generated csv in colab run the below command # + id="pcTNc8-8KC6d" colab_type="code" colab={} from google.colab import files files.download('submission.csv')
minileaves.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:genpen]
#     language: python
#     name: conda-env-genpen-py
# ---

# + Collapsed="false"
import itertools
import numpy as np
import os
import seaborn as sns
from tqdm import tqdm
from dataclasses import asdict, dataclass, field
import vsketch
import shapely.geometry as sg
from shapely.geometry import box, MultiLineString, Point, MultiPoint, Polygon, MultiPolygon, LineString
import shapely.affinity as sa
import shapely.ops as so
import matplotlib.pyplot as plt
import pandas as pd
import vpype_cli
from typing import List, Generic
from genpen import genpen as gp, utils as utils
from scipy import stats as ss
import geopandas
from shapely.errors import TopologicalError
import functools
# %load_ext autoreload
# %autoreload 2
import vpype
from skimage import io
from pathlib import Path
from sklearn.preprocessing import minmax_scale
from skimage import feature
from genpen.utils import Paper


# + Collapsed="false"
def rule30(left, center, right):
    """Rule 30 update: the new cell is ON iff `left` differs from (`center` OR `right`)."""
    return left != (center | right)


def run_automata(vector, n_iterations, padtype='constant'):
    """Iterate Rule 30 over `vector` for `n_iterations` steps.

    Returns a uint8 array of shape (n_iterations, len(vector)) where ON cells
    are 0 and OFF cells are 255, i.e. the history rendered black-on-white.
    `padtype` picks the boundary condition: 'constant' pads the row ends with
    zeros, 'wrap' treats the row as circular.
    """
    history = []
    for _ in range(n_iterations):
        history.append(vector)
        if padtype == 'constant':
            padded = np.pad(vector, pad_width=1, mode='constant', constant_values=0).astype(bool)
        elif padtype == 'wrap':
            padded = np.pad(vector, pad_width=1, mode='wrap').astype(bool)
        successor = np.zeros_like(vector)
        for cell in range(1, len(padded) - 1):
            successor[cell - 1] = rule30(padded[cell - 1], padded[cell], padded[cell + 1])
        vector = successor
    return (1 - np.stack(history).astype('uint8')) * 255


# + Collapsed="false"
scale = 0.35

# + Collapsed="false"
print((11 * scale, 17 * scale))

# + Collapsed="false"
n_cols = 15
n_iterations = 3
init_vector = np.zeros(n_cols)
init_vector[np.floor(n_cols / 2).astype(int)] = 1

# + Collapsed="false"
output = run_automata(init_vector, n_iterations, padtype='constant')
plt.matshow(output)

# + Collapsed="false"
# NOTE(fix): this inspection cell originally appeared *before* `output` was
# computed, which raises NameError in a top-to-bottom run; it now follows the
# automaton run it inspects.
np.nonzero(255 - output)

# + Collapsed="false"
io.imsave('/mnt/c/code/side/plotter_images/bounded_rule30.jpeg', output, quality=100)

# + Collapsed="false"
# Second, larger run with an off-center seed near the right edge.
n_cols = 38
n_iterations = 64
init_vector = np.zeros(n_cols)
# init_vector[20] = 1
init_vector[35] = 1

# + Collapsed="false"
output = run_automata(init_vector, n_iterations, padtype='constant')
plt.matshow(output)

# + Collapsed="false"
io.imsave('/mnt/c/code/side/plotter_images/bounded_rule30_2.jpeg', output, quality=100)

# + Collapsed="false"

# + Collapsed="false"
from skimage import filters
from skimage.color import rgb2gray
from skimage.transform import rescale, resize, downscale_local_mean
from skimage.morphology import disk
from sklearn.preprocessing import minmax_scale
from skimage import feature
from skimage import exposure


# + Collapsed="false"
def local_angle(dx, dy):
    """Calculate the angles between horizontal and vertical operators."""
    return np.mod(np.arctan2(dy, dx), np.pi)


# + Collapsed="false"
# Parameters for turning the saved automaton image into hatch spacings.
image_path = '/mnt/c/code/side/plotter_images/bounded_rule30.jpeg'
image_rescale_factor = 1
hist_clip_limit = 0.1
hist_nbins = 32
hatch_spacing_min = 0.3
hatch_spacing_max = 1
farid_disk_size = 0

# + Collapsed="false"
img = rgb2gray(io.imread(Path(image_path)))
img_rescale = rescale(img, image_rescale_factor)
# NOTE(fix): this assignment had been commented out while `img_contrast_adj`
# was still used on the next line, raising NameError in a fresh kernel; the
# contrast adjustment is restored so the pipeline runs end-to-end.
img_contrast_adj = exposure.equalize_adapthist(img_rescale, clip_limit=hist_clip_limit, nbins=hist_nbins)
# Map intensities into the [hatch_spacing_min, hatch_spacing_max - 0.01] range.
img_renorm = img_contrast_adj * (hatch_spacing_max - 0.01 - hatch_spacing_min) + hatch_spacing_min

# calc dominant angle
selem = disk(farid_disk_size)
filt_img = filters.rank.mean(img_renorm, selem)
angle_farid = local_angle(filters.farid_h(filt_img), filters.farid_v(filt_img))
plt.matshow(filt_img.T)

# + Collapsed="false"

# + Collapsed="false"
scratch/012_cellular_automata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="moYb7ZnxNBmD" import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn import metrics # + id="u4xDMJXMNTL3" car_dataset = pd.read_csv('/content/car.csv') # + colab={"base_uri": "https://localhost:8080/"} id="QMSoAowMNUpS" outputId="7d3b56cc-b839-4098-88b4-de39da07d0a1" car_dataset.isnull().sum() # + colab={"base_uri": "https://localhost:8080/"} id="NGhOC4xWNZjl" outputId="bf54cce7-74ed-40aa-c665-8bac3833fa8a" print(car_dataset.Fuel_Type.value_counts()) print(car_dataset.Seller_Type.value_counts()) print(car_dataset.Transmission.value_counts()) # + id="QWqABER8Nem7" # encoding "Fuel_Type" Column car_dataset.replace({'Fuel_Type':{'Petrol':0,'Diesel':1,'CNG':2}},inplace=True) # encoding "Seller_Type" Column car_dataset.replace({'Seller_Type':{'Dealer':0,'Individual':1}},inplace=True) # encoding "Transmission" Column car_dataset.replace({'Transmission':{'Manual':0,'Automatic':1}},inplace=True) # + id="YQvJFFPUNgMk" X = car_dataset.drop(['Car_Name','Selling_Price'],axis=1) Y = car_dataset['Selling_Price'] # + id="amVqJA1eNj6E" X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, random_state=2) # + id="QqvlpeZENmKw" lin_reg_model = LinearRegression() # + colab={"base_uri": "https://localhost:8080/"} id="NUQip-7mNn6A" outputId="fd6eab01-a994-4711-b053-1763f4cc1c46" lin_reg_model.fit(X_train,Y_train) # + id="I-_SIiqKNp2K" test_data_prediction = lin_reg_model.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="k1W1kPRFOEuR" outputId="924d9565-27c5-444a-f0bc-26a1a5b97801" error_score = metrics.r2_score(Y_test, test_data_prediction) print("R squared Error : ", 
error_score) # + id="kiNTQ1nvOGUi" lass_reg_model = Lasso() # + colab={"base_uri": "https://localhost:8080/"} id="aFSQYKpdOUrC" outputId="d8e64356-bbb6-44b2-ea43-943fdff0c62c" lass_reg_model.fit(X_train,Y_train) # + id="3qXvug-EOWIG" test_data_prediction = lass_reg_model.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="O2L3aeWaOYRP" outputId="099d8b91-bbee-4df0-c883-5808b8c7e1d3" error_score = metrics.r2_score(Y_test, test_data_prediction) print("R squared Error : ", error_score) # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="KNKA3LtBOdID" outputId="ec04465a-3389-46ea-894a-dbabb5e26c73" plt.scatter(Y_test, test_data_prediction) plt.xlabel("Actual Price") plt.ylabel("Predicted Price") plt.title(" Actual Prices vs Predicted Prices") plt.show()
Car price prediction/CarPrediction.ipynb
# # Convolutional Neural Network Custom Estimator for MNIST, built with TensorFlow layers # # In this example, we'll look at how to build a Custom Estimator -- a CNN model -- using tf.layers to define the model. # # First, do some imports and define some variables. # **If you're running this notebook on colab**, download the `dataset.py` file from the repo: # + language="bash" # wget https://raw.githubusercontent.com/amygdala/tensorflow-workshop/master/workshop_sections/high_level_APIs/mnist_cnn_custom_estimator/dataset.py # ls -l dataset.py # + """Convolutional Neural Network Custom Estimator for MNIST, built with tf.layers.""" from __future__ import absolute_import, division, print_function import argparse import os import numpy as np import time import dataset import tensorflow as tf BATCH_SIZE = 100 MODEL_DIR = os.path.join("/tmp/tfmodels/mnist_cnn_estimator", "tf_" + str(int(time.time()))) # This is too short for proper training (especially with 'Fashion-MNIST'), # but we'll use it here to make the notebook quicker to run. NUM_STEPS = 5000 print("using model dir: %s" % MODEL_DIR) # Tensorflow version should be >=1.4. print(tf.__version__) # - # ### Download Fashion-MNIST # # Next, download Fashion-MNIST if you haven't already done so. # If you have, skip the next two cells and just edit `DATA_DIR` to point to the correct directory. # + language="bash" # mkdir -p fashion_mnist # cd fashion_mnist # wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz # wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz # wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz # wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz # gunzip * # cd .. # + language="bash" # ls -l fashion_mnist # - # Edit the following value as appropriate. 
DATA_DIR = 'fashion_mnist' # DATA_DIR = '/tmp/MNIST_data' # Define the model function that will be used in constructing the Estimator. def cnn_model_fn(features, labels, mode): """Model function for CNN.""" # Input Layer # Reshape X to 4-D tensor: [batch_size, width, height, channels] # MNIST images are 28x28 pixels, and have one color channel input_layer = tf.reshape(features["pixels"], [-1, 28, 28, 1]) # Convolutional Layer #1 # Computes 32 features using a 5x5 filter with ReLU activation. # Padding is added to preserve width and height. # Input Tensor Shape: [batch_size, 28, 28, 1] # Output Tensor Shape: [batch_size, 28, 28, 32] conv1 = tf.layers.conv2d( inputs=input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) # Pooling Layer #1 # First max pooling layer with a 2x2 filter and stride of 2 # Input Tensor Shape: [batch_size, 28, 28, 32] # Output Tensor Shape: [batch_size, 14, 14, 32] pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # Convolutional Layer #2 # Computes 64 features using a 5x5 filter. # Padding is added to preserve width and height. 
# Input Tensor Shape: [batch_size, 14, 14, 32] # Output Tensor Shape: [batch_size, 14, 14, 64] conv2 = tf.layers.conv2d( inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) # Pooling Layer #2 # Second max pooling layer with a 2x2 filter and stride of 2 # Input Tensor Shape: [batch_size, 14, 14, 64] # Output Tensor Shape: [batch_size, 7, 7, 64] pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # Flatten tensor into a batch of vectors # Input Tensor Shape: [batch_size, 7, 7, 64] # Output Tensor Shape: [batch_size, 7 * 7 * 64] pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) # Dense Layer # Densely connected layer with 1024 neurons # Input Tensor Shape: [batch_size, 7 * 7 * 64] # Output Tensor Shape: [batch_size, 1024] dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu, name="dense1") # Add dropout operation; 0.6 probability that element will be kept dropout = tf.layers.dropout( inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN) # Logits layer # Input Tensor Shape: [batch_size, 1024] # Output Tensor Shape: [batch_size, 10] logits = tf.layers.dense(inputs=dropout, units=10) predictions = { # Generate predictions (for PREDICT and EVAL mode) "classes": tf.argmax(input=logits, axis=1), # Add `softmax_tensor` to the graph. It is used for PREDICT and by the # `logging_hook`. 
"probabilities": tf.nn.softmax(logits, name="softmax_tensor") } prediction_output = tf.estimator.export.PredictOutput({"classes": tf.argmax(input=logits, axis=1), "probabilities": tf.nn.softmax(logits, name="softmax_tensor")}) if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs={tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_output}) # Calculate Loss (for both TRAIN and EVAL modes) onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10) loss = tf.losses.softmax_cross_entropy( onehot_labels=onehot_labels, logits=logits) # Generate some summary info tf.summary.scalar('loss', loss) tf.summary.histogram('conv1', conv1) tf.summary.histogram('dense', dense) # Configure the Training Op (for TRAIN mode) if mode == tf.estimator.ModeKeys.TRAIN: optimizer = tf.train.AdamOptimizer(learning_rate=1e-4) train_op = optimizer.minimize( loss=loss, global_step=tf.train.get_global_step()) return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) # Add evaluation metrics (for EVAL mode) eval_metric_ops = { "accuracy": tf.metrics.accuracy( labels=labels, predictions=predictions["classes"])} return tf.estimator.EstimatorSpec( mode=mode, loss=loss, eval_metric_ops=eval_metric_ops) # Define input functions for reading in data. # + def train_input_fn(data_dir, batch_size=100): """Prepare data for training.""" # When choosing shuffle buffer sizes, larger sizes result in better # randomness, while smaller sizes use less memory. MNIST is a small # enough dataset that we can easily shuffle the full epoch. ds = dataset.train(data_dir) ds = ds.cache().shuffle(buffer_size=50000).batch(batch_size=batch_size) # Iterate through the dataset a set number of times # during each training session. 
ds = ds.repeat(40) features = ds.make_one_shot_iterator().get_next() return {'pixels': features[0]}, features[1] def eval_input_fn(data_dir, batch_size=100): features = dataset.test(data_dir).batch( batch_size=batch_size).make_one_shot_iterator().get_next() return {'pixels': features[0]}, features[1] # - # Create the Estimator object. # Create the Estimator mnist_classifier = tf.estimator.Estimator( model_fn=cnn_model_fn, model_dir=MODEL_DIR) # Now we'll define the `TrainSpec` and `EvalSpec` to pass to `tf.estimator.train_and_evaluate()`. As part of the `EvalSpec`, we define an Exporter. # + # Train and evaluate the model train_input = lambda: train_input_fn( DATA_DIR, batch_size=BATCH_SIZE ) eval_input = lambda: eval_input_fn( DATA_DIR, batch_size=BATCH_SIZE ) # Set up logging for predictions # Log the values in the "Softmax" tensor with label "probabilities" tensors_to_log = {"probabilities": "softmax_tensor"} logging_hook = tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=2000) train_spec = tf.estimator.TrainSpec(train_input, max_steps=NUM_STEPS, hooks=[logging_hook] ) def serving_input_receiver_fn(): feature_tensor = tf.placeholder(tf.float32, [None, 784]) return tf.estimator.export.ServingInputReceiver( {'pixels': feature_tensor}, {'pixels': feature_tensor}) exporter = tf.estimator.FinalExporter('cnn_mnist', serving_input_receiver_fn) # While not shown here, we can also add a model 'exporter' to the EvalSpec. eval_spec = tf.estimator.EvalSpec(eval_input, steps=NUM_STEPS, exporters=[exporter], name='cnn_mnist_tf' ) # - # Now train and evaluate the model. tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec) # We can look at the characteristics of the exported model. 
# %env MODEL_DIR=$MODEL_DIR # + language="bash" # exported_model_dir=$(ls ${MODEL_DIR}/export/cnn_mnist) # saved_model_cli show --dir ${MODEL_DIR}/export/cnn_mnist/${exported_model_dir} --tag serve --all # - # Now let's look at info about our training run in TensorBoard. # # **If you're running this notebook on colab**, you'll need to skip this step. # # Start up TensorBoard as follows in a new terminal window, pointing it to the MODEL_DIR. (If you get a 'not found' error, make sure you've activated your virtual environment in that new window): # ``` # $ tensorboard --logdir=<model_dir> # ``` # # Try the following to compare across runs: # # ``` # $ tensorboard --logdir=/tmp/tfmodels # ``` # # Or run the following (select Kernel --> Interrupt from the menu when you're done): # !tensorboard --logdir=/tmp/tfmodels # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
workshop_sections/high_level_APIs/mnist_cnn_custom_estimator/cnn_mnist_tf.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd

# + [markdown] heading_collapsed=true
# # Draft Elips

# + hidden=true
# Grid layout (m rows, n columns) with boundary values on each side:
#      t1 t2 .. tn
#   l1 xx xx xx r1
#   l2 xx xx xx r2
#   ..
#   lm xx xx xx rm
#      b1 b2 .. bn
# row => m
# col => n

# + hidden=true
m = 1
n = 2

# Uniform boundary temperatures (use these when all values on a side are equal).
t = np.full(n, 5)
r = np.full(m, 55)
b = np.full(n, 25)
l = np.full(m, 30)

# + hidden=true
# Interior grid padded with one boundary cell on every side.
matrix = np.zeros((m + 2, n + 2))
matrix
# -

# # Parabola

# +
# HELPER FUNCTIONS | use when needed
def getAlpha(k, rho, C):
    """Thermal diffusivity: alpha = k / (rho * C)."""
    return k / (rho * C)


def getLambda(alpha, dt, dx):
    """Mesh Fourier number: lambda = alpha * dt / dx**2."""
    return alpha * dt / (dx ** 2)


def getJ(t, dt):
    """Number of time steps needed to reach time t with step size dt."""
    return int(round(t / dt))


# +
# Example case (Chapra, p. 882):
#   100 ? ? ? ? 50   (initial interior temperature is 0)
#   env = ambient / initial temperature = 0
#   l   = left boundary temperature    = 100
#   r   = right boundary temperature   = 50
#   I   = number of interior nodes     = 4
#   J   = last time step = 10 (dt = 1 s -> 10 s total -> up to j10)
#   q   = lambda = 0.20875
# -

# ## Explicit method
#
# Computes the future from the past (forward-time, centered-space).

# +
def IterateExplicitParabol(Tj, j, I, J, q):
    """Advance the temperature row Tj from step j through step J (at least one step)."""
    row = Tj
    step = j
    while True:
        # New interior values are computed entirely from the previous row;
        # the boundary entries (0 and -1) are carried over by the copy.
        new_row = row.copy()
        for node in range(1, I + 1):
            new_row[node] = row[node] + q * (row[node - 1]
                                             - 2 * row[node]
                                             + row[node + 1])
        if step >= J:
            return new_row
        row = new_row
        step += 1


def ExplicitParabol(env, l, r, I, J, q):
    """Solve the 1-D heat equation explicitly; returns the final row incl. boundaries."""
    row0 = np.concatenate((l, np.full(I, env, dtype="float32"), r), axis=None)
    return IterateExplicitParabol(row0, 1, I, J, q)


ExplicitParabol(env=0, l=100, r=50, I=4, J=10, q=0.20875)
# -

# ## Implicit method
#
# Computes the past from the future (backward Euler, one linear solve per step).

# +
def CreateMatrixImplicitParabol(T, q):
    """Build the tridiagonal system [A, B] for one implicit time step."""
    size = T.size
    A = np.zeros((size, size))
    B = np.zeros(size)
    for i in range(1, size - 1):
        B[i] += T[i]
        if i == 1:
            # Known left-boundary value moves to the right-hand side.
            B[i] += q * T[i - 1]
        else:
            A[i, i - 1] = -q
        A[i, i] = 1 + 2 * q
        if i + 2 == size:
            # Known right-boundary value moves to the right-hand side.
            B[i] += q * T[i + 1]
        else:
            A[i, i + 1] = -q
    return [A[1:size - 1, 1:size - 1], B[1:size - 1]]


def IterateImplicitParabol(Tj, j, J, q):
    """Advance Tj from step j through step J, solving a linear system each step."""
    row = Tj
    step = j
    while True:
        A, B = CreateMatrixImplicitParabol(row, q)
        interior = np.linalg.solve(A, B)
        row = np.concatenate((row[0], interior, row[-1]), axis=None)
        if step >= J:
            return row
        step += 1


def ImplicitParabol(env, l, r, I, J, q):
    """Solve the 1-D heat equation with the implicit (backward Euler) scheme."""
    row0 = np.concatenate((l, np.full(I, env, dtype="float32"), r), axis=None)
    return IterateImplicitParabol(row0, 1, J, q)


ImplicitParabol(env=0, l=100, r=50, I=4, J=10, q=0.20875)
# -

# ## Crank-Nicolson method
#
# A blend of the explicit and implicit schemes.

# +
def CreateMatrixCrankNicolson(T, q):
    """Build the tridiagonal system [A, B] for one Crank-Nicolson time step."""
    size = T.size
    A = np.zeros((size, size))
    B = np.zeros(size)
    for i in range(1, size - 1):
        # Explicit half of the scheme contributes to the right-hand side.
        B[i] += q * T[i - 1] + 2 * (1 - q) * T[i] + q * T[i + 1]
        if i == 1:
            # Known left-boundary value of the implicit half also moves right.
            B[i] += q * T[i - 1]
        else:
            A[i, i - 1] = -q
        A[i, i] = 2 + 2 * q
        if i + 2 == size:
            B[i] += q * T[i + 1]
        else:
            A[i, i + 1] = -q
    return [A[1:size - 1, 1:size - 1], B[1:size - 1]]


def IterateCrankNicolson(Tj, j, J, q):
    """Advance Tj from step j through step J with the Crank-Nicolson scheme."""
    row = Tj
    step = j
    while True:
        A, B = CreateMatrixCrankNicolson(row, q)
        interior = np.linalg.solve(A, B)
        row = np.concatenate((row[0], interior, row[-1]), axis=None)
        if step >= J:
            return row
        step += 1


def CrankNicolson(env, l, r, I, J, q):
    """Solve the 1-D heat equation with the Crank-Nicolson scheme."""
    row0 = np.concatenate((l, np.full(I, env, dtype="float32"), r), axis=None)
    return IterateCrankNicolson(row0, 1, J, q)


CrankNicolson(env=0, l=100, r=50, I=4, J=10, q=0.20875)
07 - Partial Differential Equation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from pathlib import Path
import pandas as pd
from functools import partial
import json
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
import rasterio
from rasterio.plot import show
from rasterio.mask import mask
from tqdm.notebook import tqdm, trange
import fiona
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import datasets, models, transforms
import torchvision.transforms as T
from sklearn.metrics import confusion_matrix
import seaborn as sns
from time import time

# ## Preprocessing steps
#
# The goal of the preprocessing steps is to find the best statistics to balance
# the dataset and the model. In order to standardize the data, we need the
# average over the whole dataset.

# +
# Main setting
train_path = Path('../data/processed/train')
test_path = Path('../data/processed/test')
print(train_path)
print(test_path)

# +
# Look for information in the datafile
csv_res = train_path / 'Building_Solutions.csv'


def generate_csv(csv_res):
    """Concatenate every per-tile CSV under train/info into one solutions file."""
    csv_path = train_path / 'info'
    frames = [pd.read_csv(csv_file) for csv_file in csv_path.iterdir()]
    combined = pd.concat(frames)
    combined.to_csv(csv_res, index=None)
    return combined


# Use function to load csv
# df = generate_csv(csv_res)
df = pd.read_csv(csv_res)
df

# +
cities = ["Paris", "Shanghai", "Khartoum", "Vegas"]
for city in cities:
    print(city, ':',
          df[df['ImageId'].str.contains(city)]['ImageId'].unique().size,
          '/', df['ImageId'].unique().size)


# +
def count_files_per_city(fp):
    """Count, per city, how many files under `fp` mention that city in their name."""
    counts = dict.fromkeys(cities, 0)
    for entry in tqdm(fp.iterdir(), desc=f"Folder peeling"):
        for city in cities:
            if city in entry.stem:
                counts[city] += 1
    return counts


# GeoJSONs
print("GeoJSON")
print(count_files_per_city((train_path / "buildings")))
# Img
print("Images:")
# print("MUL", count_files_per_city((train_path / "data" / "MUL" / "MUL")))
print("MUL-PanSharpen", count_files_per_city((train_path / "data" / "MUL-PanSharpen" / "MUL-PanSharpen")))
# print("PAN",count_files_per_city((train_path / "data" / "PAN" / "PAN")))
# print("RGB-Pan",count_files_per_city((train_path / "data" / "RGB-PanSharpen" / "RGB-PanSharpen")))

# +
# Which Shanghai tiles listed in the CSV are missing from the image folder?
list_sh = set(df[df['ImageId'].str.contains('Shanghai')]['ImageId'].unique().tolist())
folder_sh = set(
    img.stem.replace("MUL-PanSharpen_", "")
    for img in (train_path / "data" / "MUL-PanSharpen" / "MUL-PanSharpen").iterdir()
    if 'Shanghai' in img.stem)
list_sh.difference(folder_sh)

# +
img_folder = Path('../data/processed/train/data/MUL-PanSharpen')
subfolder = list(img_folder.iterdir())[0]
list(subfolder.iterdir())[0]

# +
from rasterio.mask import mask
# mask?


# +
def compute_mean_std(filepath, n_ch):
    """Two-pass per-channel mean/std over every GeoTIFF directly under `filepath`.

    Returns {'mean': [...], 'std': [...]} with one entry per channel.
    """
    sum_channels = np.zeros(n_ch)  # 8, 3 or 1 channels
    std_channels = np.zeros(n_ch)
    total_pixel = 0
    # Pass 1: per-channel sums -> means.
    for img in tqdm((filepath).iterdir(), desc="10593"):
        with rasterio.open(img, 'r') as ds:
            try:
                arr = ds.read()
            except:
                print(f"Uh oh, {img.stem} seems to be corrupted...")
            else:
                arr = arr.reshape(arr.shape[0], -1)
                sum_channels += arr.sum(axis=-1)
                total_pixel += arr[0].size
    mean_channels = sum_channels / total_pixel
    # Pass 2: sums of squared deviations -> standard deviations.
    for img in tqdm((filepath).iterdir()):
        with rasterio.open(img, 'r') as ds:
            try:
                arr = ds.read()
            except:
                print(f"Uh oh, {img.stem} seems to be corrupted...")
            else:
                arr = arr.reshape(arr.shape[0], -1)
                std_channels += np.sum((arr - mean_channels.reshape(n_ch, 1)) ** 2, axis=-1)
    std_channels = np.sqrt(std_channels / total_pixel)
    return {'mean': mean_channels.tolist(), 'std': std_channels.tolist()}


# folder_path = train_path / "data" / "MUL-PanSharpen" / "MUL-PanSharpen"
# stats = compute_mean_std(folder_path,8)
# with open(train_path / 'stats_mul_pan.json', 'w') as file:
#     json.dump(stats, file)


# +
def load_stats(filepath):
    """Load the per-channel mean/std arrays saved by compute_mean_std."""
    with open(filepath, 'r') as file:
        n_params = json.load(file)
    return np.array(n_params['mean']), np.array(n_params['std'])


mean_channels, std_channels = load_stats(train_path / 'stats_mul_pan.json')
print(mean_channels)
print(std_channels)

# +
import pathlib


def norm_img(img, mean_arr, std_arr):
    """Channel-wise standardization of a (C, H, W) image."""
    hwc = (np.transpose(img, (1, 2, 0)) - mean_arr) / std_arr
    return np.transpose(hwc, (2, 0, 1))


def load_tif(fn, df, mean_vec, std_vec):
    """Load a MUL-PanSharpen tile and rasterize its building polygons as a mask.

    Returns (image, mask): image as float32 (C, H, W), mask as int64 (H, W)
    with 1 on building pixels.
    """
    img_id = "_".join(pathlib.Path(fn).stem.split("_")[1:])  # get img id
    train_path = pathlib.Path(fn).parents[3]                 # get train path from img
    no_building = df[df['BuildingId'] == -1]['ImageId'].unique().tolist()
    geojson_path = train_path / "buildings" / f"buildings_{img_id}.geojson"
    # Extract the file as a (8 x 650 x 650) cube
    with rasterio.open(fn) as tif:
        arr = tif.read()
        info = tif.meta
    info['count'] = 1
    # Extract geofeatures if the image has buildings
    if img_id in no_building:
        X = np.zeros((info['height'], info['width']), dtype='uint16')
        features = []
    else:
        with fiona.open(geojson_path, "r") as geojson:
            features = [feature["geometry"] for feature in geojson]
        X = np.ones((info['height'], info['width']), dtype='uint16')
    # Write polygons as a tif whose dimensions are the same as the opened tif
    with rasterio.open('temp.tif', 'w', **info) as new_ds:
        new_ds.write(X, 1)
    # Extract mask if necessary
    with rasterio.open('temp.tif') as tif:
        if features:
            mask_img, _ = rasterio.mask.mask(tif, features)
        else:
            mask_img = tif.read()
    # arr = norm_img(arr, mean_vec, std_vec)
    arr, mask_img = arr.astype('float32'), mask_img.squeeze().astype('int64')
    pathlib.Path('temp.tif').unlink()
    return arr, mask_img


# Need to standardize by avg / std and show as tensor
load_img = partial(
    load_tif,
    df=df,  # df directly
    mean_vec=mean_channels,
    std_vec=std_channels,
)

# +
img_path = train_path / 'data' / 'MUL-PanSharpen'
# Define dataset here
ds = datasets.DatasetFolder(root=img_path, loader=load_img, extensions=('.tif',))
print("N° of images:", len(ds))
print("Type of img:", ds.classes[0])


# +
def split_dataset(ds, train_size=0.8, random_seed=0):
    """Deterministically split `ds`; `train_size` is a fraction or an absolute count."""
    if type(train_size) is float:
        train_size = int(len(ds) * train_size)
    parts = random_split(ds, (train_size, len(ds) - train_size),
                         generator=torch.Generator().manual_seed(random_seed))
    return parts[0], parts[1]


train_ds, val_ds = split_dataset(ds, train_size=0.8, random_seed=123)
print(len(train_ds))
print(len(val_ds))

# +
batch_size = 16  # 16
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=0)
val_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=True, num_workers=0)
print("N° of iterations per batch (train):", len(train_dl))
print("N° of iterations per batch (val):", len(val_dl))
# -

# ## Definition of the model
#
# The model used is an AE (autoencoder) trained specially for the pan-sharpened
# part of the multichannel dataset, thus
# for `8x650x650` images.
#
# Some modifications might be done in order to exploit this model for other
# datasets, like changing the number of channels.

# +
# Train on whole dataset
class AutoencoderBuildingMulPs(nn.Module):
    """Conv autoencoder emitting a 2-class (background/building) map from 8-band tiles."""

    def __init__(self):
        super().__init__()
        self.conv_init = nn.Conv2d(8, 16, 3, padding=1)
        self.conv1 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
        self.conv3 = nn.Conv2d(16, 16, 3, padding=1)
        self.conv4 = nn.Conv2d(16, 8, 3, padding=1)
        self.conv5 = nn.Conv2d(8, 16, 3, padding=1)
        self.conv6 = nn.Conv2d(16, 16, 3, padding=1)
        self.conv7 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv8 = nn.Conv2d(32, 16, 3, padding=1)
        self.conv_last = nn.Conv2d(16, 2, 3, padding=1)

    def forward(self, x):
        # Encoder
        x = F.relu(self.conv_init(x))
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv4(x))
        x = F.max_pool2d(x, 2)
        # Decoder
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
        x = F.relu(self.conv5(x))
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
        # Last upsample targets (dim + 1) * 2 to recover the (odd) input size
        # that the three pooling stages rounded down.
        n_size = tuple((dim + 1) * 2 for dim in x.shape[2:])
        x = F.relu(self.conv6(x))
        x = F.relu(self.conv7(x))
        x = F.interpolate(x, mode='bilinear', align_corners=False, size=n_size)
        x = F.relu(self.conv8(x))
        return self.conv_last(x)


# +
ae_model = AutoencoderBuildingMulPs()
print(ae_model)

# Send to GPU
device = torch.device(("cuda" if torch.cuda.is_available() else "cpu"))
print(device)
ae_model = ae_model.to(device)
# -

epochs = 100
# Class weights compensate the background/building pixel imbalance.
criterion = nn.CrossEntropyLoss(weight=torch.tensor([.11, .89]))
criterion = criterion.to(device)
optimizer = optim.Adam(ae_model.parameters(), lr=1e-3)


# +
# Defining a class where we register every parameter necessary to train the model
class ModelParameters:
    """Bundle of everything a training run needs (model, data, loss, optimizer)."""

    def __init__(self, model, device, epochs, criterion, optimizer,
                 train_dl, val_dl, sim_bs=None):
        self.model = model.to(device)
        self.device = device
        self.epochs = epochs
        self.criterion = criterion.to(device)
        self.optimizer = optimizer
        self.train_dl = train_dl
        self.val_dl = val_dl
        if sim_bs:
            # Simulated batch size expressed as a number of real batches.
            self.sim_bs = sim_bs // self.train_dl.batch_size

    def __str__(self):
        # NOTE(review): reads the module-level `epochs`, not self.epochs —
        # kept as-is to preserve behavior; confirm intent.
        return f"Model: {self.model}\nOn: {self.device}\nN°epochs: {epochs}"


mp = ModelParameters(ae_model, device, epochs, criterion, optimizer, train_dl, val_dl)
print(mp)


# +
# Eval cell
def n_correct_pred(output, label):
    """Count of correctly classified pixels in the batch, scaled by h*w (see note)."""
    bs, ch, h, w = output.shape
    pred = output.argmax(dim=1)
    # NOTE(review): precedence makes this (sum / h) * w, not sum / (h * w) —
    # kept as-is to preserve behavior; confirm intent.
    return (pred == label).sum() / h*w


def batch_cm(output, label):
    """Sum of per-image 2x2 confusion matrices over the batch."""
    to_numpy = lambda tens: tens.cpu().detach().reshape(-1).numpy()
    cm = np.zeros((2, 2))
    pred = output.argmax(dim=1)
    bs, ch, h, w = output.shape
    # use zip instead
    for i in range(bs):
        cm += confusion_matrix(to_numpy(label[i]), to_numpy(pred[i]), labels=[0, 1])
    return cm


@torch.no_grad()
def evaluate_model(mp):
    """Full pass over the validation loader; returns (loss, avg correct preds, CM list)."""
    mp.model.eval()
    output_loss, n_corr_preds, total_cm = 0, 0, []
    for batch_n, ((img, mask), _) in tqdm(enumerate(mp.val_dl), desc="Model evaluation",
                                          unit="batch", total=len(mp.val_dl)):
        img, mask = img.to(mp.device), mask.to(mp.device)
        pred = mp.model(img)
        loss = mp.criterion(pred, mask)
        output_loss += loss.detach().item()
        corr_pred = n_correct_pred(pred, mask)
        n_corr_preds += corr_pred.detach().item()
        total_cm.append(batch_cm(pred, mask))
    return output_loss, n_corr_preds / len(mp.val_dl), total_cm


@torch.no_grad()
def eval_model_limit(mp, lim):
    """Quick sanity evaluation on the first `lim` training batches."""
    mp.model.eval()
    total_size, output_loss, n_corr_preds = 0, 0, 0
    tot_bs = mp.train_dl.batch_size
    for batch_n, ((img, mask), _) in tqdm(enumerate(mp.train_dl), desc="Val training",
                                          total=lim, unit="batch"):
        img, mask = img.to(mp.device), mask.to(mp.device)
        pred = mp.model(img)
        total_size += len(pred.view(-1))
        loss = mp.criterion(pred, mask)
        output_loss += loss.detach().item()
        corr_pred = n_correct_pred(pred, mask)
        n_corr_preds += corr_pred.detach().item()
        if batch_n == lim:
            # NOTE(review): `cm` is only bound when the loop reaches batch `lim`;
            # a shorter loader would raise NameError below — confirm intent.
            cm = batch_cm(pred, mask)
            break
    return output_loss, n_corr_preds / lim, 100 * cm / (cm.sum() * tot_bs)


def train_model(mp):
    """Train mp.model for mp.epochs epochs, checkpointing after every epoch."""
    (train_path / 'save_states').mkdir(exist_ok=True)
    total_results = {'train': {'loss': []},
                     'val': {'loss': [], 'correct_pred': [], 'cm': []}}
    mp.model.zero_grad()
    for epoch in trange(mp.epochs, desc="Train", unit="epoch"):
        train_epoch = 0
        for bn, ((img, mask), _) in tqdm(enumerate(mp.train_dl), desc=f"Batch training",
                                         total=len(mp.train_dl), unit="batch"):
            img, mask = img.to(mp.device), mask.to(mp.device)
            mp.model.train()
            pred = mp.model(img)
            loss = mp.criterion(pred, mask)  # Avg loss on whole batch
            loss.backward()
            train_epoch += loss.detach().item()
            # test_batch, pred_batch, cm_batch = eval_model_limit(mp, 5)
            # print(f"Epoch {epoch}, batch {bn}, average loss: {np.array(test_batch).mean()}")
            # iou = cm_batch[-1, -1] / (cm_batch.sum() - cm_batch[0, 0])
            # print("IoU:", iou, '%')
            mp.model.train()
            mp.optimizer.step()
            mp.optimizer.zero_grad()
            # torch.save({'epoch': epoch,
            #             'model_state_dict': mp.model.state_dict(),
            #             'optimizer_state_dict': mp.optimizer.state_dict()},
            #            train_path /'save_states'/ f'model_chkpt_ep_{epoch:03d}')
        total_results['train']['loss'].append(train_epoch)
        print(f"Train loss: {train_epoch}")
        # Evaluation step
        output_loss, n_corr_preds, total_cm = evaluate_model(mp)
        last_cm = total_cm[-1]
        print(f"Val loss: {output_loss}")
        print(f"N of correct preds: {n_corr_preds}")
        print(f"Last CM:\n{total_cm[-1]}")
        iou = last_cm[-1, -1] / (last_cm.sum() - last_cm[0, 0])
        print(f"Last IoU: {iou}")
        total_results['val']['loss'].append(output_loss)
        total_results['val']['correct_pred'].append(n_corr_preds)
        total_results['val']['cm'].append(total_cm)
        torch.save({'epoch': epoch,
                    'model_state_dict': mp.model.state_dict(),
                    'optimizer_state_dict': mp.optimizer.state_dict(),
                    'total_results': total_results},
                   train_path / 'save_states' / f'model_bs_{mp.train_dl.batch_size}_chkpt_ep_{epoch:03d}')
        torch.save(mp.model.state_dict(), train_path / f'ae_building_mask')
    return total_results


# +
import json

total_results = train_model(mp)
with open('total_result_dict.json', 'w') as fp:
    json.dump(total_results, fp)

# +
serial_results = total_results.copy()
with open('total_result_dict.json', 'w') as fp:
    # Confusion matrices are numpy arrays; convert them so json can serialize.
    serial_results['val']['cm'] = [[cm.tolist() for cm in ep] for ep in total_results['val']['cm']]
    json.dump(serial_results, fp)

# +
import matplotlib.pyplot as plt

plt.plot(total_results["train"]["loss"])
plt.plot(total_results["val"]["loss"])
plt.title("Loss evolution")
plt.xlabel("Epoch")
plt.ylabel("Loss value")
plt.savefig("loss_value.png")
# -

plt.plot(total_results["val"]["correct_pred"])
plt.title("N of correct predictions")
plt.xlabel("Epoch")
plt.ylabel("N correct prediction")
plt.savefig("corr_preds.png")

# +
total_cm = total_results['val']['cm']
cm_prds = [sum([np.array(cm) for cm in ep]) for ep in total_cm]
fig, axs = plt.subplots(2, 3, figsize=(18, 12))
for i, axrow in enumerate(axs):
    for j, ax in enumerate(axrow):
        idx = i * 3 + j
        # One heatmap every 5 epochs, normalized by its own total.
        sns.heatmap(cm_prds[idx * 5] / (cm_prds[idx * 5].sum()), ax=ax, annot=True)
        # sns.heatmap(cm_prds[idx*5] / (cm_prds[idx*5].sum() * mp.val_dl.batch_size*len(mp.val_dl)), ax=ax, annot=True)
# -

iou = [cm[-1, -1] / (cm.sum() - cm[0, 0]) for cm in cm_prds]
iou

plt.plot(iou)
plt.title("IoU")
plt.ylabel("IoU")
plt.xlabel("Epoch")
plt.savefig("iou.png")

# Guard: stop a "Run all" here before the resume-training section below.
assert False

# ## Resume training

# +
chpt_list = [fn for fn in (train_path / 'save_states').iterdir()
             if fn.name.startswith("model_bs_16")]
last_chpt = sorted(chpt_list, key=lambda x: int(x.name.split("_")[-1]))[-1]
last_chpt


# -
def resume_training(mp, last_chpt):
    """Reload the latest checkpoint and continue training for mp.epochs more epochs."""
    checkpoint = torch.load(last_chpt)
    mp.model.load_state_dict(checkpoint['model_state_dict'])
    mp.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch']
    total_results = checkpoint["total_results"]
    for epoch in trange(start_epoch + 1, mp.epochs + start_epoch, desc="Train", unit="epoch"):
        train_epoch = 0
        for bn, ((img, mask), _) in tqdm(enumerate(mp.train_dl), desc=f"Batch training",
                                         total=len(mp.train_dl), unit="batch"):
            img, mask = img.to(mp.device), mask.to(mp.device)
            mp.model.train()
            pred = mp.model(img)
            loss = mp.criterion(pred, mask)  # Avg loss on whole batch
            loss.backward()
            train_epoch += loss.detach().item()
            mp.model.train()
            mp.optimizer.step()
            mp.optimizer.zero_grad()
        total_results['train']['loss'].append(train_epoch)
        print(f"Train loss: {train_epoch}")
        # Evaluation step
        output_loss, n_corr_preds, total_cm = evaluate_model(mp)
        last_cm = total_cm[-1]
        print(f"Val loss: {output_loss}")
        print(f"N of correct preds: {n_corr_preds}")
        print(f"Last CM:\n{total_cm[-1]}")
        iou = last_cm[-1, -1] / (last_cm.sum() - last_cm[0, 0])
        print(f"Last IoU: {iou}")
        total_results['val']['loss'].append(output_loss)
        total_results['val']['correct_pred'].append(n_corr_preds)
        total_results['val']['cm'].append(total_cm)
        torch.save({'epoch': epoch,
                    'model_state_dict': mp.model.state_dict(),
                    'optimizer_state_dict': mp.optimizer.state_dict(),
                    'total_results': total_results},
                   train_path / 'save_states' / f'model_bs_{mp.train_dl.batch_size}_chkpt_ep_{epoch:03d}')
        torch.save(mp.model.state_dict(), train_path / f'ae_building_mask')
    return total_results


# +
import json

total_results = train_model(mp)
with open('total_result_dict.json', 'w') as fp:
    json.dump(total_results, fp)
# -
notebooks/spacenet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# The PyQt5 painting system is able to render vector graphics, images and
# outline font-based text. Painting is needed in applications when we want to
# change or enhance an existing widget, or when we are creating a custom widget
# from scratch. For the drawing we use the painting API provided by the PyQt5
# toolkit.
# -

# ## QPainter

# +
# QPainter performs low-level painting on widgets and other paint devices.
# It can draw everything from simple lines to complex shapes.
# -

# ## The paintEvent method

# +
# The painting is done within the paintEvent method. The painting code is
# placed between the begin and end methods of the QPainter object, which
# performs the low-level painting on widgets and other paint devices.
# -

# ## PyQt5 draw text

# +
# In this example we draw some Unicode text on the client area of the window.
# -

import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QFont
from PyQt5.QtCore import Qt


# +
class Example(QWidget):
    """Window that paints a centred line of text on its client area."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Store the text to draw, size/position the window and show it."""
        self.text = "Witam wszystkich serdecznie"
        self.setGeometry(300, 300, 350, 300)
        self.setWindowTitle('Drawing text')
        self.show()

    def paintEvent(self, event):
        """All painting happens between QPainter.begin() and QPainter.end()."""
        qp = QPainter()
        qp.begin(self)
        self.drawText(event, qp)
        qp.end()

    def drawText(self, event, qp):
        """Draw self.text centred in the rectangle that needs repainting."""
        qp.setPen(QColor(168, 34, 3))
        qp.setFont(QFont('Decorative', 10))
        qp.drawText(event.rect(), Qt.AlignCenter, self.text)


def main():
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()

# In this example we draw some text in Polish. The text is vertically and
# horizontally aligned. Drawing is done within the paint event
# (paintEvent(self, event)).
# The QPainter class is responsible for all the low-level painting.
# All the painting methods go between the begin and end methods.
# The actual painting is delegated to the drawText method, which draws the text
# on the window. The rect method of the paint event returns the rectangle that
# needs to be updated. With Qt.AlignCenter we align the text in both dimensions.
# -

# ## PyQt5 draw points

# +
# A point is the most simple graphics object that can be drawn.
# It is a small spot on the window.
# -

import sys, random
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter
from PyQt5.QtCore import Qt


# +
class Example2(QWidget):
    """Window that scatters 1000 random red points over its client area."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.setGeometry(300, 300, 300, 190)
        self.setWindowTitle('Points')
        self.show()

    def paintEvent(self, e):
        qp = QPainter()
        qp.begin(self)
        self.drawPoints(qp)
        qp.end()

    def drawPoints(self, qp):
        """Draw 1000 random red points inside the current client area."""
        qp.setPen(Qt.red)
        size = self.size()
        # FIX: the original tested size.height() twice and never the width, so
        # a window collapsed horizontally would make randint(1, width - 1)
        # raise ValueError. Guard both axes before drawing.
        if size.width() <= 1 or size.height() <= 1:
            return
        for i in range(1000):
            x = random.randint(1, size.width() - 1)
            y = random.randint(1, size.height() - 1)
            qp.drawPoint(x, y)


def main():
    app = QApplication(sys.argv)
    ex = Example2()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()

# In this example we draw 1000 random red points on the client area of the
# window. We set the pen to red colour (qp.setPen(Qt.red)) using the predefined
# Qt.red colour constant. Each time we resize the window, a paint event is
# generated. We get the current size of the window with the size() method and
# use it to distribute the points all over the client area. Each point is drawn
# with the drawPoint() method.
# -

# ## PyQt5 colours

# +
# A colour is an object representing a combination of Red, Green and Blue (RGB)
# intensity values. Valid RGB values are in the range from 0 to 255.
# We can define a colour in various ways.
# The most common are RGB decimal values or hexadecimal values.
# We can also use an RGBA value, which stands for Red, Green, Blue and Alpha.
# Here we add some extra information regarding transparency: an alpha value of
# 255 defines full opacity, while 0 is full transparency, i.e. the colour is
# invisible.
# -

import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QBrush


# +
class Example3(QWidget):
    """Window that paints three rectangles with different fill colours/alpha."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.setGeometry(300, 300, 350, 100)
        self.setWindowTitle('Colours')
        self.show()

    def paintEvent(self, e):
        qp = QPainter()
        qp.begin(self)
        self.drawRectangles(qp)
        qp.end()

    def drawRectangles(self, qp):
        """Outline in light grey, then fill three rectangles (two with alpha)."""
        col = QColor(0, 0, 0)
        col.setNamedColor('#d4d4d4')
        qp.setPen(col)

        qp.setBrush(QColor(100, 0, 0))
        qp.drawRect(10, 15, 90, 60)

        qp.setBrush(QColor(155, 80, 0, 160))
        qp.drawRect(130, 15, 90, 60)

        qp.setBrush(QColor(25, 0, 90, 200))
        qp.drawRect(250, 15, 90, 60)


def main():
    app = QApplication(sys.argv)
    ex = Example3()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()

# In this example we draw three coloured rectangles.
# A colour can be defined with hexadecimal notation (col.setNamedColor('#d4d4d4')).
# A brush is an elementary graphics object used to draw the background of a
# shape; we set one and draw each rectangle with drawRect(). Its four
# parameters are the x and y position followed by the width and height of the
# rectangle; the method draws using the current pen and brush.
# -

# ## PyQt5 QPen

# +
# The QPen is an elementary graphics object. It is used to draw lines, curves and outlines of
# - import sys from PyQt5.QtWidgets import QWidget, QApplication from PyQt5.QtCore import Qt from PyQt5.QtGui import QPainter, QPen # + class Example4(QWidget): def __init__(self): super().__init__() self.initUI() def initUI(self): self.setGeometry(300, 300, 280, 270) self.setWindowTitle('Pen styles') self.show() def paintEvent(self, e): qp = QPainter() qp.begin(self) self.drawLines(qp) qp.end() def drawLines(self, qp): pen = QPen(Qt.black, 2, Qt.SolidLine) qp.setPen(pen) qp.drawLine(20, 40, 250, 40) pen.setStyle(Qt.DashLine) qp.setPen(pen) qp.drawLine(20, 80, 250, 80) pen.setStyle(Qt.DashDotLine) qp.setPen(pen) qp.drawLine(20, 120, 250, 120) pen.setStyle(Qt.DotLine) qp.setPen(pen) qp.drawLine(20, 160, 250, 160) pen.setStyle(Qt.DashDotDotLine) qp.setPen(pen) qp.drawLine(20, 200, 250, 200) pen.setStyle(Qt.CustomDashLine) pen.setDashPattern([1, 4, 5, 4]) qp.setPen(pen) qp.drawLine(20, 240, 250, 240) def main(): app = QApplication(sys.argv) ex = Example4() sys.exit(app.exec_()) if __name__ == '__main__': main() # In this example we draw six lines. The lines are drawn in six different pen styles. # There are five predefined pen styles. # We can create also custom pen styles. The last line is drawn using a custom pen style. # We create QPen object. The colour is black. # Qt.SolidLine is one of the predefined pen styles. # We define a custom pen style (pen.setStyle(...)). # We set a Qt.CustomDashLine pen style and call the setDashPattern method. # The list of numbers defines a style. There must be and even number of numbers. # Odd numbers define a dash, even numbers space. # The greater the number, the greater the space or the even numbers space. 
# -

# ## PyQt5 QBrush

import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter, QBrush


# +
class Example5(QWidget):
    """Window showing a 3x3 grid of rectangles, one per predefined brush pattern."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.setGeometry(300, 300, 355, 280)
        self.setWindowTitle('Brushes')
        self.show()

    def paintEvent(self, e):
        painter = QPainter()
        painter.begin(self)
        self.drawBrushes(painter)
        painter.end()

    def drawBrushes(self, qp):
        # The nine fill patterns, in display order: left to right, top to bottom.
        patterns = (
            Qt.SolidPattern, Qt.Dense1Pattern, Qt.Dense2Pattern,
            Qt.DiagCrossPattern, Qt.Dense5Pattern, Qt.Dense6Pattern,
            Qt.HorPattern, Qt.VerPattern, Qt.BDiagPattern,
        )

        brush = QBrush(Qt.SolidPattern)
        for index, pattern in enumerate(patterns):
            brush.setStyle(pattern)
            qp.setBrush(brush)
            # Three columns spaced 120 px apart, three rows spaced 90 px apart.
            left = 10 + (index % 3) * 120
            top = 15 + (index // 3) * 90
            qp.drawRect(left, top, 90, 60)


def main():
    app = QApplication(sys.argv)
    ex = Example5()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()

# In this example we draw nine different rectangles.
# We define a brush object by brush = QBrush(...).
# We set it to the painter object and draw the rectangle by calling the drawRect method.
# -

# ## Bezier curve

# +
# Bezier curve is a cubic line. Bezier curve in PyQt5 can be created with QPainterPath.
# A painter path is an object composed of a number of graphical blocks such as rectangles,
# ellipses, lines and curves.
# - import sys from PyQt5.QtWidgets import QWidget, QApplication from PyQt5.QtGui import QPainter, QPainterPath # + class Example6(QWidget): def __init__(self): super().__init__() self.initUI() def initUI(self): self.setGeometry(300, 300, 380, 250) self.setWindowTitle('Bezier curve') self.show() def paintEvent(self, e): qp = QPainter() qp.begin(self) qp.setRenderHint(QPainter.Antialiasing) self.drawBezierCurve(qp) qp.end() def drawBezierCurve(self, qp): path = QPainterPath() path.moveTo(30, 30) path.cubicTo(30, 30, 200, 350, 350, 30) qp.drawPath(path) def main(): app = QApplication(sys.argv) ex = Example6() sys.exit(app.exec_()) if __name__ == '__main__': main() # In this example we draw a Bezier curve. # We create a Bezier curve with QPainterPath path. # The curve is created with cubicTo() method, which takes three points: starting point, # control point and ending point. # The final path is drawn with drawPath() method.
10 Painting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/eda-ricercatore/gulyas-scripts/blob/master/sandbox/python/google-colab/importing_code.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="AWPy7OpEw0eI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 534} outputId="4f137472-0348-4ce8-b138-3436986d0120" from google.colab import drive drive.mount('/content/drive') # %ls -al /content/drive/My\ Drive/Colab\ Notebooks # %cd /content/drive/My\ Drive/Colab\ Notebooks # %ls -al
google-colab/importing_code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Handwritten Digits Classifier

# +
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms

# Defining transform to normalize the data.
# BUG FIX: MNIST images have a single channel (they are flattened to
# 784 = 1*28*28 below), so Normalize needs one-element mean/std tuples;
# the original RGB-style three-element tuples fail on 1-channel tensors.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,)),
                                ])

# Downloading and loading the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

# +
# Building a feed-forward network: 784 -> 128 -> 64 -> 10 with log-softmax output.
model = nn.Sequential(nn.Linear(784, 128),
                      nn.ReLU(),
                      nn.Linear(128, 64),
                      nn.ReLU(),
                      nn.Linear(64, 10),
                      nn.LogSoftmax(dim=1))

# Negative log-likelihood loss pairs with the LogSoftmax output above.
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
# Flatten each 1x28x28 image into a 784-element vector.
images = images.view(images.shape[0], -1)

logps = model(images)
loss = criterion(logps, labels)

# +
# Gradients are None until backward() is called.
print('Before backward pass: \n', model[0].weight.grad)

loss.backward()

print('After backward pass: \n', model[0].weight.grad)
# -

# ## Training the network!
#

# +
from torch import optim

optimizer = optim.SGD(model.parameters(), lr=0.01)

# +
print('Initial weights - ', model[0].weight)

images, labels = next(iter(trainloader))
# Flatten the batch. Using view() with the actual batch size keeps this
# correct even for a final batch smaller than 64 (the original
# hard-coded images.resize_(64, 784)).
images = images.view(images.shape[0], -1)

# Clearing the gradients,
optimizer.zero_grad()

# Forward pass, then backward pass, then update weights.
# Call the model directly instead of model.forward() so module hooks run.
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# -

# update step
optimizer.step()
print('Updated weights - ', model[0].weight)

# ### Training for real
#

# +
model = nn.Sequential(nn.Linear(784, 128),
                      nn.ReLU(),
                      nn.Linear(128, 64),
                      nn.ReLU(),
                      nn.Linear(64, 10),
                      nn.LogSoftmax(dim=1))

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)

# training for 5 epochs
epochs = 5
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        images = images.view(images.shape[0], -1)

        optimizer.zero_grad()

        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
    else:
        # for/else: runs once the inner loop finishes an epoch.
        print(f"Training loss: {running_loss/len(trainloader)}")
# -

# Now checking out its predictions.

# +
# %matplotlib inline
import helper

images, labels = next(iter(trainloader))

img = images[0].view(1, 784)
# No gradient tracking needed for inference.
with torch.no_grad():
    logps = model(img)

# The network outputs log-probabilities; exponentiate to get probabilities.
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
# -

# Accurate Prediction
#
deep_learning_cit/MNIST-Handwritten-Digit-Recognition-master/MNIST Handwritten digits Classifier Using Deep learning.ipynb
# Exploring a candidate data set
# ==============================
#
# *Fraida Fund*

# Introduction
# ------------
#
# In this notebook, we will consider several machine learning tasks
# (satirical headline classification, chest X-ray classification, and
# others) and candidate data sets for them. We will explore the following
# questions:
#
# -   Do these data sets seem appropriate for the task?
# -   Are there any important limitations of the datasets, or problems
#     that need addressing before we use them to train a machine learning
#     model?
#
# In fact, each of these datasets has a significant problem that - if not
# detected early on - would be a “Garbage In, Garbage Out” situation. See
# if you can identify the problem with each dataset!
#
# To get you started, I included some code to show you how to read in the
# data. You can add additional code and text cells to explore the data. If
# you find something interesting, share it on Piazza!

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Taxi tip prediction
# -------------------

# ### Scenario
#
# You are developing an app for NYC taxi drivers that will predict what
# the typical tip would be for a given fare. You consider using data
# collected by the NYC Taxi and Limousine Commission on taxi trips. The
# links are for 2019 data, but previous years are also available. [Data
# link for yellow (Manhattan) taxi
# trips](https://data.cityofnewyork.us/Transportation/2019-Yellow-Taxi-Trip-Data/2upf-qytp)
# and [data link for green (non-Manhattan) taxi
# trips](https://data.cityofnewyork.us/Transportation/2019-Green-Taxi-Trip-Data/q5mz-t52e)

# ### Read in data
#
# We’ll start by reading in the 2019 Green Taxi trip data. It’s a large
# file and takes a long time to download, so we may interrupt the download
# in the middle (using the Runtime menu in Colab) and just work with the
# partial data.
#
# In the next couple of cells, `wget` and `wc` are not Python code -
# they’re Linux commands.
We can run some basic Linux commands inside our # Colab runtime, and it’s often helpful to do so. For example, we may use # Linux commands to install extra software libraries that are not # pre-installed in our runtime, clone a source code repository from # Github, or download data from the Internet. # !wget "https://data.cityofnewyork.us/api/views/q5mz-t52e/rows.csv?accessType=DOWNLOAD" -O 2019-Green-Taxi-Trip-Data.csv # Is the cell above taking a long time to run? That’s because this data # set is very large, and the server from which it is retrieved is not very # fast. Since we don’t need to explore the whole dataset, necessarily, we # can interrupt the partial download by clicking on the square icon to the # left of the cell that is running. # # Then, we can read in just 10,000 rows of data. df_taxi = pd.read_csv('2019-Green-Taxi-Trip-Data.csv', nrows=10000) df_taxi.head() # Highway traffic prediction # -------------------------- # ### Scenario # # You are working for the state of New York to develop a traffic # prediction model for the NYS Thruway. The following Thruway data is # available: Number and types of vehicles that entered from each entry # point on the Thruway, along with their exit points, at 15 minute # intervals. The link points to the most recent week’s worth of available # data, but this data is available through 2014. [Link to NYS Thruway # data](https://data.ny.gov/Transportation/NYS-Thruway-Origin-and-Destination-Points-for-All-/4dbf-24u2) # ### Read in data url = 'https://data.ny.gov/api/views/4dbf-24u2/rows.csv?accessType=DOWNLOAD&sorting=true' df_thruway = pd.read_csv(url) # Satirical headline classification # --------------------------------- # ### Scenario # # You are hired by a major social media platform to develop a machine # learning model that will be used to clearly mark *satirical news # articles* when they are shared on social media. 
You consider using this # dataset of 9,000 headlines from [The Onion](https://www.theonion.com/) # and 15,000 headlines from [Not The Onion on # Reddit](https://www.reddit.com/r/nottheonion/). [Link to OnionOrNot # data](https://github.com/lukefeilberg/onion) # ### Read in data # # This time, we’ll retrieve the data from Github. # !git clone https://github.com/lukefeilberg/onion.git # %cd onion df_headline = pd.read_csv("OnionOrNot.csv") # Offensive post classification # ----------------------------- # ### Scenario # # The social media platform was so impressed with your work on detection # of satirical headlines, that they asked you to work on a model to # identify posts using offensive language. As training data, they hand you # 80,000 tweets, labeled as either “hateful”, “abusive”, “spam”, or # “none”, by majority vote of five people. [Link to abusive tweets # data](https://dataverse.mpi-sws.org/dataset.xhtml?persistentId=doi:10.5072/FK2/ZDTEMN) # ### Read in data # # This time, we’ll read in data to Colab by downloading it to our own # computer from the link above, then uploading it to Colab. # # Use the interactive file upload form below to upload the # `hatespeechtwitter.csv` file. # + from google.colab import files uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) # - df_tweets = pd.read_csv('hatespeechtwitter.csv') df_tweets # Chest X-ray classification # -------------------------- # ### Scenario # # You are working for a large hospital system to develop a machine # learning model that, given a chest X-ray, should identify those that # likely have COVID-19 so that they can take proper precautions against # the spread of infection within the hospital. You consider using two # datasets together: one with several hundred images of chest X-rays of # likely COVID-19 patients, and a pre-COVID dataset of chest X-ray images. 
# [Link to COVID-19 chest X-ray # data](https://github.com/ieee8023/covid-chestxray-dataset), [Link to # pre-COVID chest X-ray # data](https://www.kaggle.com/c/rsna-pneumonia-detection-challenge/overview) # ### Read in data # # First, we will download the RSNA data from the [RSNA # website](https://www.rsna.org/en/education/ai-resources-and-training/ai-image-challenge/RSNA-Pneumonia-Detection-Challenge-2018). # # Then, we’ll also retrieve the COVID-19 data from Github. # #### RSNA data # !wget https://s3.amazonaws.com/east1.public.rsna.org/AI/2018/pneumonia-challenge-dataset-adjudicated-kaggle_2018.zip -O pneumonia-challenge-dataset-adjudicated-kaggle_2018.zip # !mkdir rsna # !unzip -j -d rsna/ pneumonia-challenge-dataset-adjudicated-kaggle_2018.zip # Now, we’ll make a list of all the image files: import glob rsna_images = glob.glob("rsna/*.dcm") len(rsna_images) rsna_images[:5] # These images are in DICOM format, a medical imaging file format. We need # to install an extra library to read them in: # !pip install pydicom import pydicom as dicom import numpy as np import matplotlib.pyplot as plt # Now, we can read in one file from the list: ref_xray = dicom.read_file(rsna_images[0]) ref_xray dir(ref_xray) # We’ll find out the dimensions of the image, then represent it as an # array of pixels, and plot it: pixel_dims = (int(ref_xray.Rows), int(ref_xray.Columns)) pixel_dims ref_xray.pixel_array.shape print(ref_xray.pixel_array) plt.imshow(ref_xray.pixel_array, cmap='bone') # #### COVID-19 data # !git clone https://github.com/ieee8023/covid-chestxray-dataset covid_metadata = pd.read_csv('covid-chestxray-dataset/metadata.csv') covid_metadata.info() covid_metadata.head() covid_metadata.modality.value_counts() covid_metadata.finding.value_counts() # We’re going to pull out a subset of the data that (1) is a chest X-ray, # not CT, and (2) has a positive COVID-19 finding, covid_xray_metadata = covid_metadata[(covid_metadata["modality"] == "X-ray") & 
(covid_metadata["finding"] == "COVID-19")] covid_xray_metadata.info() # Make a list of image files: covid_images = 'covid-chestxray-dataset/images/' + covid_xray_metadata['filename'] len(covid_images) covid_images # We’ll use the PIL library to read in JPG and PNG files, and plot one: from PIL import Image image = Image.open(covid_images[0]) image_bw = image.convert('L') # L is 8-bit pixels, black and white image_data = np.asarray(image_bw) image_data.shape plt.imshow(image_bw, cmap='bone') # #### Plot samples of each # + num_classes = 2 samples_per_class = 10 figure = plt.figure(figsize=(samples_per_class*3, num_classes*3)) # plot RSNA samples rsna_samples = np.random.choice(rsna_images, samples_per_class, replace=False) for i, sample in enumerate(rsna_samples): plt_idx = i + 1 plt.subplot(num_classes, samples_per_class, plt_idx) sample_img = dicom.read_file(sample).pixel_array plt.imshow(sample_img, cmap='bone') plt.axis('off') plt.title("Non-COVID") # plot COVID samples covid_samples = np.random.choice(covid_images, samples_per_class, replace=False) for i, sample in enumerate(covid_samples): plt_idx = samples_per_class + i + 1 plt.subplot(num_classes, samples_per_class, plt_idx) sample_img = Image.open(sample) sample_image_bw = sample_img.convert('L') plt.imshow(sample_image_bw, cmap='bone') plt.axis('off') plt.title("COVID-19") plt.show()
notebooks/1-explore-candidate-datasets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="cedf868076a2" # ##### Copyright 2020 The Cirq Developers # + cellView="form" id="906e07f6e562" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="8bbd73c03ac2" # # Transforming circuits # + [markdown] id="25eb74f260d6" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.example.org/cirq/transform"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on QuantumLib</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/transform.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/transform.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/transform.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + id="846b32703c5c" try: import cirq except ImportError: print("installing cirq...") # !pip install --quiet cirq print("installed cirq.") # + [markdown] id="9d3d49b9ca2a" # ## Circuit 
optimizers # # Cirq comes with the concept of an optimizer. Optimizers will pass over a circuit and perform tasks that will modify the circuit in place. These can be used to transform a circuit in specific ways, such as combining single-qubit gates, commuting Z gates through the circuit, or readying the circuit for certain hardware or gate set configurations. # # Optimizers will have a function `optimize_circuit()` that can be used to perform this optimization. Here is a simple example that removes empty moments: # + id="e2893a817870" import cirq c=cirq.Circuit() c.append(cirq.Moment([])) c.append(cirq.Moment([cirq.X(cirq.GridQubit(1,1))])) c.append(cirq.Moment([])) print(f'Before optimization, Circuit has {len(c)} moments') cirq.DropEmptyMoments().optimize_circuit(circuit=c) print(f'After optimization, Circuit has {len(c)} moments') # + [markdown] id="861ea1ada088" # Optimizers that come with cirq can be found in the `cirq.optimizers` package. # # A few notable examples are: # # * **ConvertToCzAndSingleGates**: Attempts to convert a circuit into CZ gates and single qubit gates. This uses gate's unitary and decompose methods to transform them into CZ + single qubit gates. # * **DropEmptyMoments** / **DropNegligible**: Removes moments that are empty or have very small effects, respectively. # * **EjectPhasedPaulis**: Pushes X, Y, and PhasedX gates towards the end of the circuit, potentially absorbing Z gates and modifying gates along the way. # * **EjectZ**: Pushes Z gates towards the end of the circuit, potentially adjusting phases of gates that they pass through. # * **ExpandComposite**: Uses `cirq.decompose` to expand composite gates. # * **MergeInteractions**: Combines series of adjacent one and two-qubit gates acting on a pair of qubits. # * **MergeSingleQubitGates**: Combines series of adjacent unitary 1-qubit operations # * **SynchronizeTerminalMeasurements**: Moves all measurements in a circuit to the final moment if possible. 
# # + [markdown] id="c6c7e3ed57ba" # ### Create your own optimizers # # You can create your own optimizers to transform and modify circuits to fit hardware, gate sets, or other requirements. Optimizers can also be used to generate noise. See [noise](noise.ipynb) for details. # # You can do this by implementing the function `optimize_circuit`. # # If your optimizer is a local optimizer and depends primarily on operator being examined, you can alternatively inherit `cirq.PointOptimizer` and implement the function `optimization_at(self, circuit, index, op)` that optimizes a single operation. # # Below is an example of implementing a simple `PointOptimizer` that removes measurements. # + id="e046ef24c70e" class RemoveMeasurements(cirq.PointOptimizer): def optimization_at(self, circuit: cirq.Circuit, index: int, op: cirq.Operation): if isinstance(op.gate, cirq.MeasurementGate): return cirq.PointOptimizationSummary(clear_span=1, new_operations=[], clear_qubits=op.qubits) else: return None q=cirq.LineQubit(0) c=cirq.Circuit(cirq.X(q), cirq.measure(q)) print('Before optimization') print(c) RemoveMeasurements().optimize_circuit(c) print('After optimization') print(c)
docs/transform.ipynb
# ##### Copyright 2021 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # bacp # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/bacp.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/examples/contrib/bacp.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010 <NAME> <EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse

from ortools.constraint_solver import pywrapcp

parser = argparse.ArgumentParser()
parser.add_argument(
    '--data', default='examples/data/bacp/bacp12.txt', help='path to data file')
# BUG FIX: the arguments were never parsed, so the `args` name used
# below was undefined (NameError at ReadData(args.data)).
args = parser.parse_args()


#----------------helper for binpacking posting----------------
def BinPacking(solver, binvars, weights, loadvars):
    """post the load constraint on bins.

    constraints
        forall j: loadvars[j] == sum_i (binvars[i] == j) * weights[i])
    """
    pack = solver.Pack(binvars, len(loadvars))
    pack.AddWeightedSumEqualVarDimension(weights, loadvars)
    solver.Add(pack)
    # The bin loads must account for the total weight.
    solver.Add(solver.SumEquality(loadvars, sum(weights)))


#------------------------------data reading-------------------
def ReadData(filename):
    """Read data from <filename>.

    Returns (credits, nb_periods, prereq), where prereq is a list of
    (course, dependent-course) index pairs.
    """
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filename) as f:
        nb_courses, nb_periods, min_credit, max_credit, nb_prereqs =\
            [int(nb) for nb in f.readline().split()]
        credits = [int(nb) for nb in f.readline().split()]
        prereq = [int(nb) for nb in f.readline().split()]
    prereq = [(prereq[i * 2], prereq[i * 2 + 1]) for i in range(nb_prereqs)]
    return (credits, nb_periods, prereq)


#------------------solver and variable declaration-------------
credits, nb_periods, prereq = ReadData(args.data)
nb_courses = len(credits)

solver = pywrapcp.Solver('Balanced Academic Curriculum Problem')

# x[i] = period assigned to course i.
x = [
    solver.IntVar(0, nb_periods - 1, 'x' + str(i)) for i in range(nb_courses)
]
# load_vars[j] = total credits scheduled in period j.
load_vars = [
    solver.IntVar(0, sum(credits), 'load_vars' + str(i))
    for i in range(nb_periods)
]

#-------------------post of the constraints--------------
# Bin Packing.
BinPacking(solver, x, credits, load_vars)
# Add dependencies: a prerequisite must come in a strictly earlier period.
for i, j in prereq:
    solver.Add(x[i] < x[j])

#----------------Objective-------------------------------
# Minimize the maximum period load (balance the curriculum).
objective_var = solver.Max(load_vars)
objective = solver.Minimize(objective_var, 1)

#------------start the search and optimization-----------
db = solver.Phase(x, solver.CHOOSE_MIN_SIZE_LOWEST_MIN,
                  solver.INT_VALUE_DEFAULT)

search_log = solver.SearchLog(100000, objective_var)
solver.Solve(db, [objective, search_log])
examples/notebook/contrib/bacp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# HIDDEN
import numpy as np
import matplotlib.pyplot as plt
# BUG FIX: `patches` is used by the plotting cells below
# (patches.Rectangle) but was never imported in the extracted source.
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
# %matplotlib inline

# BUG FIX: the plotting cells reference blue_color/red_color, which were
# presumably defined in a hidden cell lost during conversion. They must be
# 4-element RGBA sequences (later code mutates index 3, the alpha channel).
# Exact values are assumed to match the blue/red scheme described in the
# text -- TODO confirm against the original notebook.
blue_color = (0.12, 0.47, 0.71, 0.6)
red_color = (0.84, 0.15, 0.16, 0.6)

# Let's dive into the world of high-dimensional geometry!
#
# When considering high-dimensional spaces (4 dimensions or higher), we rely on mental models and intuitions from 2D or 3D objects which generalize poorly to high dimensions. This is especially true in machine learning, where estimators, decision boundaries, and pretty much everything else as well are defined in $d$-dimensional space (where $d$ is *very high*), and all our insights often collapse. This post will attempt to highlight some peculiarities of high-dimensional spaces, and their implications for machine learning applications.

# ## Volumes Concentrate on the Outside

# In high-dimensional spaces, **volume concentrates on the outside**, exponentially more so, as dimension increases.
#
# Let's first look at this fact through "hypercubes": when $d=1$, this is an interval, when $d=2$, a square, when $d=3$, a cube, and so on.
Mathematically, a hypercube with edge-length $l$ centered at the origin corresponds to the set $$\mathcal{A}_{d}(l) = \{x \in \mathbb{R}^d ~~\vert~~ \|x\|_\infty \leq \frac{l}{2}\}$$ # + # HIDDEN fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(131) ax.hlines(np.linspace(-0.05,0.05,10),-1,1,color=blue_color) #ax.hlines(np.linspace(-0.05,0.05,10),-0.33,0.33,color=red_color) ax.axis('off') ax.set_ylim(-1,1) ax.set_xlim(-1,1) ax.set_title('$d=1$') ax = fig.add_subplot(132) ax.add_patch( patches.Rectangle( (-1, -1), 2, 2, fill=True, color=blue_color# remove background ) ) ax.set_ylim(-1.5,1.5) ax.set_xlim(-1.5,1.5) ax.axis('off') ax.set_title('$d=2$ ') ax = fig.add_subplot(133, projection='3d') def plot_cube(cube_definition,color=(0,0,1,0.1)): cube_definition_array = [ np.array(list(item)) for item in cube_definition ] points = [] points += cube_definition_array vectors = [ cube_definition_array[1] - cube_definition_array[0], cube_definition_array[2] - cube_definition_array[0], cube_definition_array[3] - cube_definition_array[0] ] points += [cube_definition_array[0] + vectors[0] + vectors[1]] points += [cube_definition_array[0] + vectors[0] + vectors[2]] points += [cube_definition_array[0] + vectors[1] + vectors[2]] points += [cube_definition_array[0] + vectors[0] + vectors[1] + vectors[2]] points = np.array(points) edges = [ [points[0], points[3], points[5], points[1]], [points[1], points[5], points[7], points[4]], [points[4], points[2], points[6], points[7]], [points[2], points[6], points[3], points[0]], [points[0], points[2], points[4], points[1]], [points[3], points[6], points[7], points[5]] ] faces = Poly3DCollection(edges, linewidths=1, edgecolors='k') faces.set_facecolor(color) ax.add_collection3d(faces) # Plot the points themselves to force the scaling of the axes ax.scatter(points[:,0], points[:,1], points[:,2], s=0) ax.set_aspect('equal') ax.axis('off') cube_definition = [ (-1,-1,-1), (-1,1,-1), (1,-1,-1), (-1,-1,1) ] cube_small_definition = [ 
(-0.33,-0.33,-0.33), (-0.33,0.33,-0.33), (0.33,-0.33,-0.33), (-0.33,-0.33,0.33) ] plot_cube(cube_definition, blue_color) ax.set_title('$d=3$'); # - # Volumes in $\mathbb{R}^d$ are calculated exactly like they are in 2 or 3 dimensions: the volume of a hyper-rectangle is the product of all of the edge lengths.By these calculations, hypercubes $\mathcal{A}_d(l)$ will have volume $\prod_{k=1}^d l = l^d$. # # Now, volumes of different dimensional objects aren't directly comparable (it's like comparing apples and oranges), but what we can look at are *relative volumes*. # # Say we have two hypercubes, one of length $l$ and another of $\frac{l}{3}$, what is the relative volume of the smaller cube to the larger cube? How does this proportion change as the dimension increases? Let's first visualize in the dimensions where we can. # + # HIDDEN fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(131) ax.hlines(np.linspace(-0.05,0.05,10),-1,1,color=blue_color) ax.hlines(np.linspace(-0.05,0.05,10),-0.33,0.33,color=red_color) ax.axis('off') ax.set_ylim(-1,1) ax.set_xlim(-1,1) ax.set_title('$d=1$ Relative Volume = $\\frac{1}{3}$') ax = fig.add_subplot(132) ax.add_patch( patches.Rectangle( (-1, -1), 2, 2, fill=True, color=blue_color# remove background ) ) ax.add_patch( patches.Rectangle( (-0.33, -0.33), 0.66, 0.66, fill=True, color=red_color# remove background ) ) ax.set_ylim(-1.5,1.5) ax.set_xlim(-1.5,1.5) ax.axis('off') ax.set_title('$d=2$ Relative Volume = $\\frac{1}{9}$') ax = fig.add_subplot(133, projection='3d') def plot_cube(cube_definition,color=(0,0,1,0.1)): cube_definition_array = [ np.array(list(item)) for item in cube_definition ] points = [] points += cube_definition_array vectors = [ cube_definition_array[1] - cube_definition_array[0], cube_definition_array[2] - cube_definition_array[0], cube_definition_array[3] - cube_definition_array[0] ] points += [cube_definition_array[0] + vectors[0] + vectors[1]] points += [cube_definition_array[0] + vectors[0] + vectors[2]] 
points += [cube_definition_array[0] + vectors[1] + vectors[2]] points += [cube_definition_array[0] + vectors[0] + vectors[1] + vectors[2]] points = np.array(points) edges = [ [points[0], points[3], points[5], points[1]], [points[1], points[5], points[7], points[4]], [points[4], points[2], points[6], points[7]], [points[2], points[6], points[3], points[0]], [points[0], points[2], points[4], points[1]], [points[3], points[6], points[7], points[5]] ] faces = Poly3DCollection(edges, linewidths=1, edgecolors='k') faces.set_facecolor(color) ax.add_collection3d(faces) # Plot the points themselves to force the scaling of the axes ax.scatter(points[:,0], points[:,1], points[:,2], s=0) ax.set_aspect('equal') ax.axis('off') cube_definition = [ (-1,-1,-1), (-1,1,-1), (1,-1,-1), (-1,-1,1) ] cube_small_definition = [ (-0.33,-0.33,-0.33), (-0.33,0.33,-0.33), (0.33,-0.33,-0.33), (-0.33,-0.33,0.33) ] blue_color_transparent = list(blue_color) blue_color_transparent[3] = 0.25 plot_cube(cube_definition, blue_color_transparent) plot_cube(cube_small_definition,red_color) ax.set_title('$d=3$ Relative Volume = $\\frac{1}{27}$'); # - # Our visualizations indicate that as dimension increases, the relative volume of the smaller cube vanishes exponentially fast. We can confirm this mathematically as well with a simple calculation: # # $$\text{Relative Volume} = \frac{\text{Volume}(\mathcal{A}_{d}(\frac{l}{3}))}{\text{Volume}(\mathcal{A}_{d}(l))} = \frac{(l/3)^d}{l^d} = \left(\frac{1}{3}\right)^d$$ # # This implies that most of the volume in a hypercube lies around the edges (near the surface), and that very little volume lies in the center of the cube. # Why is this an issue for machine learning? Most optimization problems in machine learning can be written of the form: # # $$\min_{x \in U_d} ~~~f(x)$$ # # where $U_d = A_d(1)$ is a unit hypercube. 
# In many applications (including reinforcement learning), the function $f$ is sufficiently complicated that we can only evaluate *the value* of a function at a point, but have no access to gradients or higher-order data from the function. A typical solution is **exhaustive search**: we test a grid of points in the space, and choose the point that has the best value.
#
# <!--
# ```
# function exhaustive_search(f, ε):
#     # Find a solution to min f(x) with precision ε
#
#     # Generate data points ((1/ε)^d of them)
#     grid = [ (x_1, x_2, ..., x_d)
#         for x_1 in (0, ε, 2ε, ... 1-ε, 1),
#         for x_2 in (0, ε, 2ε, ... 1-ε, 1),
#         ...
#         for x_d in (0, ε, 2ε, ... 1-ε, 1),
#     ]
#
#     x_pred = arg min([f(x) for x in grid])
#     return x_pred
#
# ```
# -->

# +
# HIDDEN
# Figure: the grid of sample points exhaustive search must evaluate, in d=1, 2, 3.
fig = plt.figure(figsize=(15,5))

# d=1: a segment with 10 evenly spaced sample points.
ax = fig.add_subplot(131)
ax.add_patch(
    patches.Rectangle(
        (-1, -0.05), 2, 0.1,
        fill=True, color=blue_color# remove background
    )
)
ax.scatter(np.linspace(-1,1,10), np.zeros(10),zorder=2)
#ax.hlines(np.linspace(-0.05,0.05,10),-0.33,0.33,color=red_color)
ax.axis('off')
ax.set_ylim(-1,1)
ax.set_xlim(-1,1)
ax.set_title('$d=1$')

# d=2: a square with a 10x10 grid of sample points.
ax = fig.add_subplot(132)
ax.add_patch(
    patches.Rectangle(
        (-1, -1), 2, 2,
        fill=True, color=blue_color# remove background
    )
)
ax.set_ylim(-1.5,1.5)
ax.set_xlim(-1.5,1.5)
ax.axis('off')
ax.set_title('$d=2$ ')
x,y = np.meshgrid(np.linspace(-1,1,10),np.linspace(-1,1,10))
x,y = x.flat[:], y.flat[:]
ax.scatter(x,y,zorder=2)

# d=3: a cube with a 10x10x10 grid of sample points.
ax = fig.add_subplot(133, projection='3d')

def plot_cube(cube_definition,color=(0,0,1,0.1)):
    """Draw a 3D cube on the enclosing `ax`.

    `cube_definition` gives four corners: an origin corner plus its three
    adjacent corners; the remaining four corners are derived from the edge
    vectors. `color` is an RGBA face color.
    """
    cube_definition_array = [
        np.array(list(item))
        for item in cube_definition
    ]
    points = []
    points += cube_definition_array
    # Edge vectors from the origin corner to its three neighbors.
    vectors = [
        cube_definition_array[1] - cube_definition_array[0],
        cube_definition_array[2] - cube_definition_array[0],
        cube_definition_array[3] - cube_definition_array[0]
    ]
    # The remaining corners are sums of edge vectors from the origin corner.
    points += [cube_definition_array[0] + vectors[0] + vectors[1]]
    points += [cube_definition_array[0] + vectors[0] + vectors[2]]
    points += [cube_definition_array[0] + vectors[1] + vectors[2]]
    points += [cube_definition_array[0] + vectors[0] + vectors[1] + vectors[2]]

    points = np.array(points)

    # The six faces, each listed as four corner points.
    edges = [
        [points[0], points[3], points[5], points[1]],
        [points[1], points[5], points[7], points[4]],
        [points[4], points[2], points[6], points[7]],
        [points[2], points[6], points[3], points[0]],
        [points[0], points[2], points[4], points[1]],
        [points[3], points[6], points[7], points[5]]
    ]

    faces = Poly3DCollection(edges, linewidths=1, edgecolors='k')
    faces.set_facecolor(color)
    ax.add_collection3d(faces)

    # Plot the points themselves to force the scaling of the axes
    #ax.scatter(points[:,0], points[:,1], points[:,2], s=0)
    ax.set_aspect('equal')
    ax.axis('off')

cube_definition = [
    (-1,-1,-1), (-1,1,-1), (1,-1,-1), (-1,-1,1)
]
# NOTE(review): cube_small_definition is unused in this cell (only the outer
# cube is drawn); it appears to be carried over from the previous figure.
cube_small_definition = [
    (-0.33,-0.33,-0.33), (-0.33,0.33,-0.33), (0.33,-0.33,-0.33), (-0.33,-0.33,0.33)
]
blue_color_transparent = list(blue_color)
blue_color_transparent[3] = 0.3
plot_cube(cube_definition, blue_color_transparent)

x,y,z = np.meshgrid(np.linspace(-1,1,10),np.linspace(-1,1,10),np.linspace(-1,1,10))
x,y,z = x.flat[:], y.flat[:], z.flat[:]
ax.scatter(x,y,z)

ax.set_title('$d=3$');
# -

# The number of points we need to test to get the same accuracy scales exponentially with dimension, for the exact same argument as the volume. To get accuracy $\varepsilon$ (that is $\left|f(\hat{x})-f(x^*)\right| < \varepsilon$ where $\hat{x}$ is our estimate and $x^*$ is the optimal point), the number of points we need to test is on the order of $\left(\frac{1}{\varepsilon}\right)^d$, which is exponential in dimension (a rigorous proof can be given assuming $f$ is Lipschitz continuous). This is often referred to as optimization's *curse of dimensionality*.
#
# A similar problem exists when computing expectations of functions: a naive way one might compute an expectation is by evaluating the function on a grid of points, and averaging the values like in a Riemannian sum, and computing in this way would also take time exponential in dimension.
# ## Spheres and their Equators

# Instead of considering cubes now, let's think about spheres. In particular, we'll think about the unit sphere in $d$ dimensions, which we'll call the $(d-1)$-sphere $S^{(d-1)}$ ($d=2$, a circle, $d=3$, a sphere).
#
# $$S^{(d-1)} = \{x \in \mathbb{R}^d~~\vert~~ \|x\|_2 = 1\}$$
#
# A side note: Calling it a $(d-1)$-sphere may seem odd, but is standard mathematical notation; feel free to mentally substitute $d-1$ with $d$ if it helps improve intuition (the reason it's called a $(d-1)$-sphere is because the sphere is a manifold of dimension $d-1$)
#
# The primary question we'll concern ourselves with is the following:
#
# **What proportion of points are near the equator?**
#
# We'll approach the problem dually, by asking the question *how wide does a band around the equator need to be to capture $1-\varepsilon$ proportion of the points on the sphere?*
#
# For the time being, we'll let $\varepsilon = \frac14$ (that is we hope to capture 75% of points), and let's start by investigating $d=2$ (the unit circle)

# +
# HIDDEN
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection

# Left panel: an equatorial band of half-width h=0.5 on the unit circle
# (red arcs), which covers 1/3 of the circumference.
w = Wedge((0,0),1,0, 360, width=0.05,color=blue_color)
w_mid1 = Wedge((0,0),1,-30, 30, width=0.05,color=red_color)
w_mid2 = Wedge((0,0),1,180-30, 180+30, width=0.05,color=red_color)
fig, axes = plt.subplots(1, 2, figsize=(10,5))
ax = axes[0]
ax.add_artist(w,)
ax.add_artist(w_mid1,)
ax.add_artist(w_mid2,)
ax.hlines(0,-1,1)
ax.vlines(np.sqrt(3)/2-0.05, 0, 1/2, linestyles='--')
ax.plot([0,np.sqrt(3)/2-0.05], [0, 1/2], 'k-', linestyle='--')
ax.text(np.sqrt(3)/2-0.2, 0.25, '$h$')
ax.text(0.2, 0.02, '$\\theta= \\sin^{-1}(h)$')
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
ax.set_title("$h=0.5$ \t Proportion Covered = 0.33")
ax.axis('off');

# Right panel: the band must grow to h=0.92 to cover 75% of the circle.
theta = 67.5/180*np.pi
w = Wedge((0,0),1,0, 360, width=0.05,color=blue_color)
w_mid1 = Wedge((0,0),1, -67.5, 67.5, width=0.05,color=red_color)
w_mid2 = Wedge((0,0),1,180-67.5, 180+67.5, width=0.05,color=red_color)
ax = axes[1]
ax.add_artist(w,)
ax.add_artist(w_mid1,)
ax.add_artist(w_mid2,)
ax.hlines(0,-1,1)
ax.vlines(np.cos(theta), 0, np.sin(theta), linestyles='--')
ax.text(np.cos(theta)+0.05, np.sin(theta)/2, '$0.92$')
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
ax.set_title("$h=0.92$ \t Proportion Covered = 0.75")
ax.axis('off');
# -

# For circles ($d=2$), a band of arbitrary height $h$ covers $\frac{4\sin^{-1}(h)}{2\pi} = \frac{2}{\pi}\sin^{-1}(h)$ of the circumference (the picture above serves as a rough proof). To cover 75% of the space, we can solve to find that $h$ needs to be at least $0.92$.
#
# Now let's consider spheres ($d=3$).

# +
# HIDDEN
import matplotlib.colors as mcolors

def make_colormap(seq):
    """Return a LinearSegmentedColormap.

    seq: a sequence of floats and RGB-tuples. The floats should be
    increasing and in the interval (0,1); each float marks the position
    where the surrounding RGB-tuples are stitched together.
    """
    seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {'red': [], 'green': [], 'blue': []}
    for i, item in enumerate(seq):
        if isinstance(item, float):
            # At each breakpoint, record the color to the left (r1,g1,b1)
            # and to the right (r2,g2,b2) for each channel.
            r1, g1, b1 = seq[i - 1]
            r2, g2, b2 = seq[i + 1]
            cdict['red'].append([item, r1, r2])
            cdict['green'].append([item, g1, g2])
            cdict['blue'].append([item, b1, b2])
    return mcolors.LinearSegmentedColormap('CustomMap', cdict)

# Left panel: sphere colored so the red band (h=0.5) covers half the
# surface area; the black ring marks the band boundary.
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(121, projection='3d')
c = mcolors.ColorConverter().to_rgb
rvb = make_colormap(
    [blue_color[:3], blue_color[:3], 0.25, red_color[:3], red_color[:3], 0.49, c('black'), c('black'), 0.51, red_color[:3], red_color[:3], 0.75, blue_color[:3],])
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, cmap=rvb, rstride=1, cstride=1, linewidth=0)
ax.set_zlim(-1,1)
ax.view_init(elev=15, azim=10)
ax.text3D(1,0.1,0.15,'$h$')
ax.plot3D(np.ones(10), np.zeros(10), np.linspace(0,0.50,10),c='black',linestyle='--')
ax.set_xticks([], [])
ax.set_yticks([], [])
ax.set_zticks([], [])
ax.set_title("$h=0.5$ \t Proportion Covered = 0.5")

# Right panel: a band of h=0.75 covers 75% of the sphere's surface.
ax = fig.add_subplot(122, projection='3d')
c = mcolors.ColorConverter().to_rgb
rvb = make_colormap(
    [blue_color[:3], blue_color[:3], 0.125, red_color[:3], red_color[:3], 0.49, c('black'), c('black'), 0.51, red_color[:3], red_color[:3], 0.875, blue_color[:3],])
u = np.linspace(0, 2 * np.pi, 13)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, cmap=rvb, rstride=1, cstride=1, linewidth=0)
ax.set_zlim(-1,1)
ax.view_init(elev=15, azim=10)
ax.text3D(1,0.1,0.15,'$h$')
ax.plot3D(np.ones(10), np.zeros(10), np.linspace(0,0.75,10),c='black',linestyle='--')
ax.set_xticks([], [])
ax.set_yticks([], [])
ax.set_zticks([], [])
ax.set_title("$h=0.75$ \t Proportion Covered = 0.75");
# -

# For spheres, a band of height $h$ covers a proportion $h$ of the surface area (one can look at [spherical caps](https://en.wikipedia.org/wiki/Spherical_cap) to derive the formula). Then to cover 75% of the space, we need a band with half-width only $0.75$, which is significantly less than the $0.92$ required for a circle. This seems to indicate the following hypothesis, that we shall now investigate:
#
# **Hypothesis**: As dimension increases, more of the points on the sphere reside closer to the equator.
#
#
# Let's jump into $d$ dimensions. For low-dimensional folks like ourselves, analyzing volumes for a $(d-1)$-sphere is difficult, so we'll instead consider the problem *probabilistically*. What does it mean for a band to cover $1-\varepsilon$ proportion of the sphere? With probability, we can imagine it as saying
#
# > If we sample a point uniformly at random from the $(d-1)$-sphere, the probability that it lands in the band is $1-\varepsilon$.
#
# How can we sample a point uniformly at random from the $(d-1)$ sphere?
# If we recall the symmetry of the *multivariate Gaussian distribution* about the origin, we encounter an elegant way to sample points from the sphere, by sampling such a vector, and then normalizing it to lie on the sphere.

def sample_sphere(d):
    """Draw one point uniformly at random from the (d-1)-sphere."""
    # A standard Gaussian vector is rotationally symmetric, so its direction
    # is uniform over the sphere; normalizing keeps only the direction.
    gaussian = np.random.randn(d)
    return gaussian / np.linalg.norm(gaussian)

# We can investigate this problem empirically by sampling many points from a $(d-1)$-sphere, plot their "x"-coordinates, and find a band that contains 75% of the points. Below, we show it for d = 3 (the sphere), 9, 27, and 81.

# +
# HIDDEN
def sample_many_from_sphere(d, n=1000):
    """Draw n uniform points on the (d-1)-sphere, one per row."""
    gaussians = np.random.randn(n, d)
    return gaussians / np.linalg.norm(gaussians, axis=1, keepdims=True)

from scipy.interpolate import interp1d

fig, axes = plt.subplots(1, 4, figsize=(20, 5))
for power, ax in zip(range(1, 5), axes):
    dim = 3 ** power
    # Histogram the first coordinate of 10,000 uniform samples.
    first_coords = sample_many_from_sphere(dim, 10000)[:, 0]
    hist_vals = ax.hist(first_coords, bins=np.linspace(-1, 1, 200), density=True)
    # Half-width of the band that captures 75% of the sampled coordinates.
    point = np.sort(np.abs(first_coords))[7500]
    left = int((-1 * point) * 100 + 100)
    right = int(point * 100 + 100)
    ax.fill_between(hist_vals[1][left:right], hist_vals[0][left:right], alpha=1, color=red_color, zorder=2)
    ax.set_title('$d=%d$ h=%.02f' % (dim, point))
    ax.get_yaxis().set_visible(False)
# -

# Notice that as the dimension increases, the x-coordinates group up very close to the center, and a great majority of them can be captured by very small bands. This yields an interesting point that is not at all intuitive!
#
# **In high dimensions, almost all points lie very close to the equator**
#
# We can also examine how quickly this clusters by plotting the required height to get 75% of the points as dimension varies: this is shown below.
# +
# HIDDEN
def sample_many_from_sphere(d, n=1000):
    """Draw n uniform points on the (d-1)-sphere, one per row.

    (Re)defined here so this HIDDEN cell is self-contained.
    """
    samples = np.random.randn(n, d)
    return samples / np.linalg.norm(samples, axis=1, keepdims=True)

from scipy.interpolate import interp1d

# For each dimension, estimate the band half-width that captures 75% of
# 10,000 sampled first coordinates.
ds = []
hs = []
for dim in range(1, 200, 5):
    first_coords = sample_many_from_sphere(dim, 10000)[:, 0]
    hs.append(np.sort(np.abs(first_coords))[7500])
    ds.append(dim)

plt.plot(ds, hs)
plt.title('Band width required to capture 75% of points')
plt.xlabel('Dimension');
# -

# We can also prove how quickly points concentrate near the equator mathematically: we show that the square deviation of a point from the equator is distributed according to a Beta($\frac{1}{2}, \frac{d-1}{2}$) distribution, which shows that *points concentrate in measure around the equator* - that is, the probability that points lie outside of a band of fixed width around the equator goes to $0$ as the dimension increases. See the proof below.

# <!-- PROOF -->
# We provide some analysis of this problem.
#
#
# Consider sampling uniformly on the $(d-1)$-sphere: we can do so by sampling $(Z_1, \dots Z_d) \sim \mathcal{N}(0, I_d)$, and then normalizing to get $(X_1, \dots, X_d) = \frac{1}{\sqrt{\sum Z_k^2}}(Z_1, \dots Z_d)$. What is the distribution of $X_1$? First, let's consider what the distribution of $X_1^2$ is:
#
# $$X_1^2 = \frac{Z_1^2}{\sum Z_k^2} = \frac{Z_1^2}{Z_1^2 + \sum_{k > 1} Z_k^2}$$
#
# Now, recall that $Z_k^2$ is Gamma($r=\frac12, \lambda=\frac12$) and so by the closure of the family of Gamma distributions, $Z_1^2 \sim \text{Gamma}(r=\frac12, \lambda=\frac12)$ and $\sum_{k > 1} Z_k^2 \sim \text{Gamma}(r=\frac{d-1}{2},\lambda=\frac12)$. Gamma distributions possess the interesting property that if $X \sim \text{Gamma}(r_1, \lambda)$ and $Y \sim \text{Gamma}(r_2, \lambda)$, then $\frac{X}{X+Y} \sim \text{Beta}(r_1, r_2)$. Then we simply have that $X_1^2 \sim \text{Beta}(\frac{1}{2}, \frac{d-1}{2})$.
# # Now, this is a profound fact, and we can get a lot of insight from this formula, but for the time being, we'll use a simple Markov Bound to show that as $d \to \infty$, $X_1$ converges in probability to $0$ (that is that points come very close to the equator). For an arbitrary $\varepsilon$, # $$P(|X| > \varepsilon) = P(X^2 > \varepsilon^2) \leq \frac{E(X^2)}{\varepsilon^2} = \frac{1}{d\epsilon^2}$$ # # This completes the statement. # <!-- # # ## Gaussians in High Dimensions # # In the first section, we talked about how for a unit hypercube in high dimensions, most of the volume was contained near the outside of the hypercubes towards the surface. Probabilistically, if we sampled a point uniformly at random from a hypercube, with high probability it will be near the surface. This intuition is very powerful for bounded regions, but what happens when we sample from a probability distribution that is defined on all of $\mathbb{R}^d$? More particularly, consider specifying a random variable from the standard multivariate Gaussian distribution: $Z = (Z_1, \dots Z_{d}) \sim \mathcal{N}(\vec{0}, I_d)$. # # --> # ## Summary and Perspective: Probability Distributions and the "Typical Set" # The core tool in statistical inference is the expectation operator: most operations, whether querying the posterior distribution for Bayesian inference or computing confidence intervals for estimators or doing variational inference, etc. The core problem is then to *accurately estimate expectations* of some function $g$ with respect to some probability distribution $\pi$ where $\pi$ and $g$ are defined on some high-dimensional space ($\mathbb{R}^d$). 
# # $$\mathbb{E}_{X \sim \pi}[g(X)] = \int_{\mathbb{R}^d} g d\pi = \int_{\mathbb{R}^d} g(x) f_\pi(x) dx$$ # # In the first section, we spent a little time discussing how one may compute this expectation integral: previously, we talked about evaluating the integrand at a grid of points, and averaging (as in a Riemann sum) to arrive at our estimate. However, in practice, we don't need to evaluate at all the points, only at the points that contribute meaningfully to the integral, that is we want to only evaluate in regions of high probability (places where points concentrate). # # The previous two sections have hinted at the following fact: # > *For probability distributions in high-dimensional spaces, most of the probability concentrates in small regions (not necessarily the full space).* # # - For points sampled at uniform from inside a hypercube, with overwhelming probability, it will be near the surface of the hypercube and not in the center. # - For points sampled at uniform from the surface of a hypersphere, with overwhelming probability, the points will lie near the *equator* of the sphere. # # # This concept can be made rigorous with the **typical set**, a set $A_\epsilon$ such that $P_\pi(X \in A_{\epsilon} > 1 - \epsilon)$. Then, if $g(x)$ is well-behaved enough, we can write # # $$\mathbb{E}_{X \sim \pi}[g(X)] = \int_{\mathbb{R}^d} g d\pi = \int_{A_{\epsilon}} g d\pi + \int_{A_{\epsilon}^C} g d\pi \approx \int_{A_{\epsilon}} g d\pi$$ # # What will help us is that for most distributions, this typical set is actually rather small compared to the full high-dimensional space. In the next article, we'll consider how we can efficiently sample from the typical sets of probability distributions, which will introduce us to topics like *Markov Chain Monte Carlo*, *Metropolis-Hastings*, and *Hamiltonian Monte Carlo*.
notebooks/highdimensionalgeometry.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/j54854/myColab/blob/main/Rinko_SimPy_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Ji91oL0eGLNL" # # Pythonを用いた離散事象シミュレーション入門 #2 # # #1 はじめに # # 前回は,離散事象シミュレーションのメカニズムと,その最も基本的な実装方法について理解した.具体的には,イベントカレンダ(事象を生起タイミングの順に並べたリスト)を保持しておき,その先頭から順に事象を1つずつ取り出して,そのタイプに応じた処理を実行していくという流れであった.サンプルコードでは,モデルのrun()メソッドの中にwhileループがあり,そのループを周回するごとに,イベントカレンダから先頭の事象を取り出し,そのタイプをif文で確認し,タイプに応じた処理に誘導する,というステップを実行していた.この実装方法は,離散事象シミュレーションのメカニズムをそのまま素直にコード化する,最もナイーブなコーディングスタイルであるともいえる(以降,このスタイルをイベント志向と呼ぶことがある). # # 実は,このイベント志向のコーディングスタイルには(少なくとも)2つの問題がある.1つ目は,事象のタイプ数や対象システムの構成要素数などに応じて,run()メソッドが肥大化していくという点である.2つ目は,対象システムの状態を変化させる処理が,その変化を引き起こす主体や変化を引き受ける客体ではなく,変化を引き起こすシグナル,すなわち事象ごとにまとめられるため,(主体や客体の視点から)互いに関連する一連の変化が,細かな部分に分割され,複数の異なる箇所に記述されることになるという点である.これらはいずれも,コード全体の見通しを阻害するものであり,その影響は,対象システムの規模が大きくなるにつれてどんどん深刻になっていく. # # これらの問題を解消するための方法の1つとして,プロセス志向のコーディングスタイルを採用することが挙げられる.[PythonのSimPyモジュール](https://simpy.readthedocs.io/en/latest/)を導入すると,簡単にこのプロセス志向のスタイルで開発を進めることができるようになる.今回は,このモジュールを使った離散事象シミュレーションのコーディングの基礎を身につけよう. # # # # + [markdown] id="6BjmGiihUVHY" # #2 SimPyの導入 # # ##2.1 SimPyの概要とインストール # # 最初にすべきことはSimpyモジュールのインストールである.Google Colaboratotyでは,下のように,pipを使って簡単にインストールすることができる(なお,自分のローカルな環境にインストールする際には行頭の!は不要). # + id="qx7zQbifpTWr" # ! pip install simpy # + [markdown] id="rzrGrx4IsoF9" # [SimPyの主な構成要素](https://gitlab.com/team-simpy/simpy)は,core.pyの中にあるEnvironmentクラス,events.pyの中にあるEventクラスとそのサブクラス群,resourcesディレクトリの中にあるリソース関連のクラス群,の3つだと考えればよいだろう.また,これらに加えてジェネレータとして実装されるプロセス関数・メソッドが重要な役割を果たす.リソース関連のクラス群については次回に取り上げることにして,今回はそれ以外の3つに基づいて,プロセス志向コーディングの基本をおさえよう. 
# + [markdown] id="ecGamv3d38Ob" # ##2.2 シミュレーション環境と事象 # # Environmentクラスは,シミュレーション時間の管理やイベントカレンダの操作など,離散事象シミュレーションのための最も基本的な機能を提供してくれる.したがって,SimPyを用いてシミュレーションモデルを開発する際には,シミュレーション環境(=Environmentクラスのインスタンス)を必ず1つ生成することになる.一方,Eventクラスは,事象を表現するためのクラスであり,後で見るようにいくつかのサブクラスも用意されている. # # ある事象(=Eventクラスのインスタンス)をイベントカレンダに登録することをtriggerするという.通常のEventクラスの事象の場合,それはtriggerされたのと同時刻に生起することになる.一方,triggerしてから所定の時間遅れ後に生起するような事象を利用したいことも多い.その場合は,Eventクラスのサブクラスである,Timeoutクラスを利用すればよい. # # SimPyでは,ある事象が生起した際に実行される処理をその事象のコールバックと呼び,各事象eにコールバックの集合e.callbacksを付与することで,事象eの生起に伴って発生する処理がまとめて実行されるようになっている.簡単な例を見てみよう. # + id="K3Z_T0xa3xUe" import random import simpy class Skelton: def __init__(self, env): self.env = env # pointer to the SimPy environment self.count = 0 # an example state variable def update(self, e): self.count += 1 # increment the event counter def print_state(self): print('{} th event occurs at {}'.format(self.count, round(self.env.now))) def run(self, horizon): while True: e = simpy.Timeout(self.env, random.expovariate(1)) # create an Timeout instance e.callbacks.append(self.update) # register update() method in e's callbacks if self.env.now > horizon: # if horizen is passed break # stop simulation else: self.print_state() self.env.step() # process the next event env = simpy.Environment() model = Skelton(env) model.run(200) # + [markdown] id="yD_qHsEl86vQ" # この例では,前回のスケルトンモデルとほぼ同じ機能をSimPyのEnvironmentクラスとTimeoutクラスを利用して再現している.前回自作していたEventクラスやCalendarクラスは(それらに相当する機能をSimPyが提供してくれるので)不要である.末尾の3行を見てほしい.シミュレーション環境(=env)を生成した後,それを引数として,対象システムのモデル(=model)を生成している.そして,そのモデルのrun()メソッドを,horizon=200で実行している. # # Skeltonクラスの中身を確認すると,run()メソッドにはwhileループがあり,その各周回で,Timeoutクラスの事象(=e)を生成して,そのコールバックの集合(=e.callbacks)に,update()メソッドを登録していることがわかる.なお,update()メソッドは,countをインクリメントするだけのダミーである.また,事象のコールバックは,その事象を唯一の引数とする関数(厳密には,呼び出し可能オブジェクト)の形式でなければならない. 
# # Timeoutクラスの事象を生成する際の第1引数は,対応するシミュレーション環境env,第2引数は,時間遅れの長さ(上の例では指数分布に従う乱数で与えられている)である.なお,Timeout事象は,それを生成した際に自動的にtriggerされる(が,通常のEventクラスの事象は,後述するように,明示的にtriggerする必要がある). # # シミュレーション時間は,シミュレーション環境envの変数nowで管理されている(上のrun()メソッドの中からはself.env.nowで参照できるようになっている).この値が,引数として渡されたhorizon以上であれば,whileループを抜け,シミュレーションを終了する.そうでなければ,シミュレーション環境のstep()メソッドを呼んでいるが,これはイベントカレンダから先頭の事象eを1つ取り出して生起させる(つまり,e.callbacksに含まれているコールバックを順に実行していく)という処理に対応している. # + [markdown] id="zuj8-_HxwrER" # ##2.3 プロセス関数・メソッド # # 上の例のSkeltonクラスは,機能の一部をシミュレーション環境に任せているため,前回と比べるとかなりシンプルになっている.ただしそれだけでは,基本的・共通的な機能はSimPyが面倒を見てくれるので,自分でコーディングしなければならない部分が少なくなるということにしか過ぎない.実は,SimPyを導入することの本質的なメリットは,むしろその先にこそあるといえる. # # この本質的なメリットをもたらすものが,プロセス関数・メソッドである.これによって,SimPyでは,プロセス志向でコーディングしていくことが可能になるのである.次に,その基本的な仕組みを例を用いて説明していこう.下の例を見てほしい. # + id="-UYt2DTyo0nh" class Skelton2: def __init__(self, env): self.env = env # pointer to the SimPy environment self.count = 0 # an example state variable def print_state(self): print('{} th event occurs at {}'.format(self.count, round(self.env.now))) def process_method(self): # an example process method while True: self.print_state() yield simpy.Timeout(self.env, random.expovariate(1)) self.count += 1 # corresponding to Skelton's update() def process_func(env): # an example process function while True: env.model.print_state() yield simpy.Timeout(env, random.expovariate(1)) env.model.count += 1 # corresponding to Skelton's update() env = simpy.Environment() env.model = Skelton2(env) # simpy.Process(env, process_func(env)) # when using process function simpy.Process(env, env.model.process_method()) # when using process method env.run(until=200) # + [markdown] id="4BqAGNplpKGo" # これは,上でみた例をプロセス関数・メソッドを用いて書き直したものである.Skeltonクラスにあったrun()メソッド(とupdate()メソッド)がなくなり,Skelton2クラスには,process_method()というメソッドが新たに登場していることに気がついたと思う.これがプロセスメソッドである.なお,このプロセスメソッドは利用せず,代わりに,同じ機能を果たすプロセス関数(上の例では,process_func()関数)を用いてもよい(この例では双方とも用意されているが,実際にはどちらか一方だけでよい). 
# # process_method()やprocess_func()の中にyield文があることからわかるように,これらはPythonのジェネレータになっている.通常の関数やメソッドがreturnで結果を返して終了するのに対して,ジェネレータはyieldで結果を返すとそこで一時停止するだけで,終了はしない.そして,後で再開命令のシグナルを受け取ると,yield文の先から処理を再開する. # # このように,プロセス関数・メソッドは,Eventクラスのインスタンスをyieldする形で定義されたジェネレータであり,SimPyでは,これをプロセス志向コーディングのためのトリックとして利用している.具体的には,プロセス関数・メソッドがある事象eをyieldすると,e.callbacksに,そのプロセス関数・メソッドの再開命令が自動的に追加されるようになっているのである. # # プロセス関数・メソッドは,yieldした事象が生起すると再開されるので,その再開後の部分にその事象によって引き起こされる状態変化(この例では,countのインクリメント)を直接記述しておけばよいことになる.したがって,この例では,update()メソッドをコールバックの集合に登録することは不要になっている.この例のように,1つのTimeout事象と単純な状態変化(countのインクリメント)だけではメリットは実感しにくいかもしれないが,複数の事象の影響を受けながら,複雑に状態変化が進んでいくようなプロセスも,これによって直感的に記述できるようになる. # # なお,作成したプロセス関数・メソッドがシミュレーション内で実行されるようにするためには,それをシミュレーション環境に登録しておかなければならない.これを行っているのが,下から2行目(やコメントアウトされている3行目)である.具体的には,Processクラスのインスタンスを作成していることがわかる.この際に,該当のプロセスをスタートさせるシグナルを発する事象(Initialize事象)を生成し,triggerするという処理が,裏で自動的に実行されている. # # また,一番下の行にある,シミュレーション環境のrun()メソッドは,step()メソッドを繰り返すラッパーである.run(until=時刻)あるいはrun(until=事象)として,ある時刻まであるいはある事象が生起するまでシミュレーションを進めることができる.この例では,シミュレーション時間が200になるまでシミュレーションを進めている. # # # # # # # + [markdown] id="2WvYLEM_R8mv" # ##2.4 複数プロセスの相互作用 # # 複数のプロセス関数・メソッドを定義して,同じシミュレーションの中で互いに関連付けながら実行していくことができる.ここではその例をみておこう.下に簡単な例を示す. 
# # # + id="3y1zD_gppK2F" class Skelton3(Skelton2): def __init__(self, env): super().__init__(env) def main_process(self): while True: self.print_state() yield self.env.timeout(random.expovariate(1)) # shortcut for simpy.Timeout() self.count += 1 if self.count %3 == 0: self.env.signal4A.succeed() # signal for resuming sub process A def sub_process_A(self): self.env.signal4A = self.env.event() # create the first signal while True: yield self.env.signal4A print('> sub process A is resumed at {}'.format(round(self.env.now))) self.env.signal4A = self.env.event() # create the next signal if self.count %5 == 0: self.env.process(self.sub_process_B()) # register sub process B def sub_process_B(self): print('>> sub process B is started at {}'.format(round(self.env.now))) yield self.env.timeout(10) # shortcut for simpy.Timeout() print('>> sub process B is finished at {}'.format(round(self.env.now))) env = simpy.Environment() env.model = Skelton3(env) env.process(env.model.main_process()) # shortcut for simpy.Process() env.process(env.model.sub_process_A()) # shortcut for simpy.Process() env.run(until=200) # + [markdown] id="twV-FF-GiDtk" # Skelton3クラスの中に,main_process(),sub_pricess_A(),sub_process_B()という3つのプロセスメソッドが定義されている.これらのうち,main_process()メソッドは,末尾の2行を除くと,Skelton2クラスのprocess_method()メソッドとほぼ同じである.なお,シミュレーション環境のtimeout()メソッドは,simpy.Timeout()へのショートカットであり,引数が1つで済むためよく用いられる. # # 追加されている末尾の2行では,countの値が3で割り切れる際に,ある処理を実行していることがわかる.ここに,シミュレーション環境のsignal4Aは,sub_process_A()メソッドの1行目(および5行目)で生成されているEventクラスのインスタンス,すなわち事象である.そして,事象のsucceed()メソッドは,それをtriggerするという処理を実行するものである.したがって,この箇所は,countが3で割り切れるたびにsignal4Aをtriggerする,という機能を果たしていることになる. # # 次に,sub_process_A()メソッドの方を見てほしい.3行目でこの事象をyieldしていることから,このメソッドはこの箇所で一時停止することになる.そして,main_process()メソッドの方でsignal4Aがtriggerされ,シミュレーション環境がこの事象を生起させると,sub_process_A()メソッドが再開される,という流れになっている.この流れは,複数のプロセス関数・メソッドを関連付けるための典型的な方法の1つである. 
# # コード全体の下から2行目,3行目を見ると,main_process()メソッド,sub_process_A()メソッドは共に,シミュレーション開始前にシミュレーション環境に登録されていることがわかる.なお,シミュレーション環境のprocess()メソッドは,simpy.Process()へのショートカットであり,こちらも,引数が1つで済むためよく用いられる. # # したがって,シミュレーションが始まるとこれらのプロセスは自動的に開始され,上で定めた相互作用に則って進んでいくことになる(具体的には,まずmain_process()メソッドがスタートし,yieldまで進み,一時停止した後,sub_process_A()メソッドがスタートし,yieldまで進み,一時停止する.その後は,Timeout事象が生起すると,main_process()メソッドが再開され,その中でsignal4A事象が生起すると(その次にmain_process()メソッドが一時停止した後に)sub_process_A()メソッドが再開される,という具合である). # # 次に,sub_process_B()メソッドの方を見てみよう.こちらは,whileループをもたない,単発のプロセスになっていることがわかる.このプロセスの実行はどのように制御されているのだろうか.実は,sub_process_A()メソッドの中にその謎が隠れている.末尾の2行を見てほしい.countが5で割り切れる際に,sub_process_B()メソッドをシミュレーション環境に登録していることがわかる.これを受けて,このプロセスが自動的に実行されるようになるわけである.このように,シミュレーション環境への新たなプロセスの登録は,シミュレーション開始前だけではなく,開始後の任意の時点でも行うことができる.この流れもまた,複数のプロセス関数・メソッドを関連付けるための典型的な方法の1つである. # + [markdown] id="JpFGx6SLrKo1" # ##2.5 少し発展的な話題(への入口) # # ###2.5.1 事象のvalueとok # # 事象eはvalueという変数をもっている.e.valueのデフォルト値はNoneであるが,それに(None以外の)値をセットして,プロセス関数・メソッドに渡すことができる.そのためには,事象eをトリガーする際に, # # e.succeed(valueにセットしたい値) # # とする(Timeout事象の場合は,インスタンス生成時に,キーワード引数として,「value=valueにセットしたい値」のように指定する).そして,プロセス関数・メソッド側で,yieldの箇所に, # # v = yied e # # と書けば,vにe.valueの値が入るという仕組みである. # # さらに,事象eはokという変数ももっている.事象eをtriggerするときにsucceed()メソッドを利用すると,自動的に,e.ok=Trueとなる.これは,succeed()メソッドの名称からもわかるように,その事象が成功裏に生起したことを表す. # # 実は,事象eをtriggerするには他にも,e.fail(exception)やe.trigger(event)というメソッドを使うこともできる.前者では,e.ok=Falseとなり,その事象の生起が何らかの意味で失敗したことを示唆する.このメソッドを使った場合は,e.valueにexceptionに指定された例外が入り,事象eが処理される際にその例外が発生する(ので,待ち受けしているプロセス関数・メソッドなどで例外処理を行う).また,後者では,事象eのokやvalueの値は,引数として渡された別の事象eventと同じにセットされる. # # # + [markdown] id="E6Nyqu85WcEM" # ###2.5.2 事象待受けのあれこれ # # プロセス関数・メソッドで複数の事象の論理結合を待受けすることができる.その際は,and結合には&,or結合には|をそれぞれ利用する.例えば,3つの事象e1,e2,e3があったとして, # # values = yield (e1 | e2) & e3 # # のようにできるということである.このとき,valuesは,各事象のvalueのOrderedDictになる(もちろん,各事象のvalueの値が不要であれは,「values=」は書かなくてよい). 
# # 逆に,同じ事象を複数のプロセス関数・メソッドで待受けしてもよい.この場合は,その事象のコールバックの集合に(自動的に)再開命令が追加された順に,それらのプロセスが再開されていくことになる. # # # + [markdown] id="Nbsfv3EFZWnt" # ### 2.5.3 Processクラスについて # # プロセス関数・メソッドを登録する際に,Processクラスのインスタンスを作成していた.これを, # # p = simpy.Process(env, process_func()) # # などのように,後で参照できるようにしておくと便利なことがある. # # 実は,ProcessクラスはEventクラスを継承しているので,これも事象の一種であるとみなせる.すなわち,上のpを事象として扱うことができる(returnした際にtriggerされたとみなされ,戻り値があればそれがvalueの値となる). # # また,事象pがtriggerされる前にそのinterrupt()メソッドを呼ぶことで,対応するプロセス関数・メソッドを中断(異常終了)させることができる.これによって,そのプロセス関数・メソッドがyieldで待っている事象eのコールバックの集合から,対応する再開命令が削除される.また,このプロセス関数・メソッドに例外simpy.exceptions.Interrupt(cause)が投げ込まれるので,それを受け取って処理することによって異常終了時の挙動を指定することができる.このinterrupt()メソッドは事象e自体には影響を与えない(ので,例外処理後にその事象eを再度待受けしてもよい). # # # # + [markdown] id="V8NvwVWkw4Mk" # #3 簡単な在庫管理の例 # # もう少し具体的なイメージを掴んでもらうために,最後に,前回も取り上げた簡単な在庫管理の例を示しておこう. # + id="afXSbjVKGHPL" class Model: def __init__(self, env, op, oq, lt, init): self.env = env self.op = op # ordering point self.oq = oq # order quantity self.lt = lt # replenishment lead time self.at_hand = init # how many items you have at hand self.loss = 0 # opportunity loss self.orders = [] # list of back orders @property def total(self): return sum(self.orders) +self.at_hand def print_state(self): print('[{}] current level: {}, back order: {}, lost sales: {} '.format(round(self.env.now), self.at_hand, self.orders, self.loss)) self.env.log.extend() def seller(self): while True: yield self.env.timeout(random.expovariate(1)) if self.at_hand > 0: self.at_hand -= 1 # sell an item to the customer self.env.stocktake.succeed() # activate the stocktaker else: self.loss += 1 # sorry we are out of stock self.print_state() # state after dealing with each customer def stocktaker(self): self.env.stocktake = self.env.event() # create the first signal while True: yield self.env.stocktake if self.total <= self.op: self.orders.append(self.oq) self.env.process(self.deliverer()) # activate deliverer self.env.stocktake = self.env.event() # create the next 
signal def deliverer(self): self.print_state() # state after an order is placed yield self.env.timeout(self.lt) if len(self.orders) > 0: self.at_hand += self.orders.pop(0) self.print_state() # state after an order is fulfilled # + [markdown] id="HhR5ZXFDjAdu" # 前回のModelクラスと比較すると,run()メソッド(とその他のいくつかのメソッド)が削除され,3つのプロセスメソッドが新たに定義されていることがわかる.これらのプロセスメソッドは,それぞれ,ランダムに到着する顧客に対応する販売担当者,店頭在庫量を確認して必要に応じて発注を行う在庫管理者,発注を受けて商品を配送する配送担当者の働きにぞれぞれ対応している.これらの働きが混在して記述されていた前回のrun()メソッドと比較すると,コードの見通しが良くなったと感じられるのではないだろうか.この効果は,対象システムの規模に伴って大きくなっていく. # # SImPyを導入したのに応じて,Logクラスにも少し変更を加えておこう. # + id="f3jWd5eijBG0" import matplotlib.pyplot as plt class Log: def __init__(self, env): self.env = env self.time = [] self.at_hand = [] self.loss = [] self.total = [] self.extend() def extend(self): self.time.append(self.env.now) self.at_hand.append(self.env.model.at_hand) self.loss.append(self.env.model.loss) self.total.append(self.env.model.total) def plot_log(self): plt.plot(self.time, self.at_hand, drawstyle = "steps-post") plt.xlabel("time (minute)") plt.ylabel("number of items") plt.show() # + [markdown] id="5Z35uET4jJEF" # このシミュレーションモデルを実行してみるには,下のようにすればよい. # + id="-lLDQ_nHjJmp" env = simpy.Environment() env.model = Model(env, 10, 20, 10, 20) # op, oq, lt, init env.log = Log(env) env.process(env.model.seller()) env.process(env.model.stocktaker()) env.run(until=200) env.log.plot_log() # + [markdown] id="Bu07IQwkx5ce" # #4 演習課題 # # 前回の演習課題で作成した,飲食店のランチタイムの様子を表現したシミュレーションモデルを,SImPyを使って,プロセス志向のコードに書き換えてみよう. # # # + [markdown] id="dKFX00OXx-2_" # #5 まとめ # # 今回は,SimPyを導入し,それを用いてプロセス志向でシミュレーションモデルを構築していく方法の基礎を紹介した.次回は,リソース関連のクラス群とその使い方についてみていこう. #
Rinko_SimPy_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 ('base')
#     language: python
#     name: python3
# ---

# Variability Estimates of State Population
# -------------------------------------------

import pandas as pd

# Load the per-state murder-rate data set.
state_df = pd.read_csv('murderrate.csv')
state_df

# Standard deviation of the state populations.
state_df['Population'].std()

# Interquartile range (IQR): 75th percentile minus 25th percentile.
upper_quartile = state_df['Population'].quantile(0.75)
lower_quartile = state_df['Population'].quantile(0.25)
upper_quartile - lower_quartile

# For the scores of the students 3,5,7,9,12,21,25,30 calculate the 25th percentile
students_df = pd.read_csv('studentscores.csv')
students_df['score'].quantile(0.25)
Mar22/Statistics/variabilityestimates.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: medusa_dev_1 # language: python # name: medusa_dev_1 # --- # ### Why? # # ## Where do ensembles come from? # # In the constraint-based reconstruction and analysis (COBRA) field, there are a variety of analyses and approaches that have alternative optimal solutions. Examples of this come up often when analyses involve [integer programming](https://en.wikipedia.org/wiki/Integer_programming), where variables in a problem are switched on/off to maximize or minimize an objective value. When integer programming is applied to fill in gaps in a genome-scale metabolic model (GEM) to fulfill a function, such as production of biomass, there are generally multiple sets of equally likely solutions to the problem (e.g. there is more than one unique set of reactions of the same size that enable growth). # # Beyond examples of analyses where multiple solutions with the *exact same* objective value might exist, keeping track of multiple sub-optimal solutions might be valuable because a biologically-correct solution is almost never the exact same as the most likely numerical solution. Instead, we can hedge our bets by maintaining an ensemble of feasible solutions to a problem that all meet some minimum acceptable likelihood. # # The primary aim of medusa is to make ensemble analyses more accessible to the COBRA field so that we can start accounting for this uncertainty to improve our predictions and improve our reconstructions more efficiently. The software is written and developed with usability as the top priority, secondary to performance and novelty. Thus, user feedback is essential--please do not hesitate to provide your thoughts or ask questions via an issue on the [medusa github repository](https://github.com/gregmedlock/Medusa/).
docs/why.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Language Model Analysis # ### Model Architecture # + report_file = 'reports/encdec_200_512_2.json' log_file = 'logs/encdec_200_512_logs.json' import json import matplotlib.pyplot as plt with open(report_file) as f: report = json.loads(f.read()) with open(log_file) as f: logs = json.loads(f.read()) print'Architecture: \n\n', report['architecture'], # - # ### Perplexity on Each Dataset print('Train Perplexity: ', report['train_perplexity']) print('Valid Perplexity: ', report['valid_perplexity']) print('Test Perplexity: ', report['test_perplexity']) # ### Loss vs. Epoch # %matplotlib inline for k in logs.keys(): plt.plot(logs[k][0], logs[k][1], label=str(k) + ' (train)') plt.plot(logs[k][0], logs[k][2], label=str(k) + ' (valid)') plt.title('Loss v. Epoch') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.show() # ### Perplexity vs. Epoch # %matplotlib inline for k in logs.keys(): plt.plot(logs[k][0], logs[k][3], label=str(k) + ' (train)') plt.plot(logs[k][0], logs[k][4], label=str(k) + ' (valid)') plt.title('Perplexity v. Epoch') plt.xlabel('Epoch') plt.ylabel('Perplexity') plt.legend() plt.show() # ### Generations def print_sample(sample): enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>']) gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>']) print('Input: '+ enc_input + '\n') print('Gend: ' + sample['generated'] + '\n') print('True: ' + gold + '\n') print('\n') for sample in report['train_samples']: print_sample(sample) for sample in report['valid_samples']: print_sample(sample) for sample in report['test_samples']: print_sample(sample)
report_templates/LMReportTemplate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Curso MAT281 (2019) # # > Blog del curso MAT281 (2019) # # - toc: true # - badges: false # - comments: true # - categories: [markdown] # - image: images/course.png # # Descripción de la Asignatura # # Curso enfocado a cubrir aspectos básicos del mundo de data science, principalmente en el data analysis. # # Repositorio # # * El material está disponible en el siguiente [repositorio](https://github.com/fralfaro/mat281_20192S_santiago_modificado), para obtener el código de fuente basta con que ejecutes el siguiente comando: # # > https://github.com/fralfaro/mat281_20192S_santiago_modificado # # # * La documentación se encuentra disponible en el siguiente [link](https://gitlab.com/FAAM). # # Topicos # # * Toolkit básico: # * IDE (Integrated Development Environment) # * Git-Gihub/Gitlab # * Ambientes virtuales # * Jupyter Notebook # * Repaso Python # # * Computación científica: # * CPython (Introducción) # * Numpy, Scipy, Sympy # # * Manipulación de datos (lo meteria en analisis exploratorio) # * Introducción base de datos # * Pandas (Módulos básicos, groupby, merge, pivot, etc.) # * Sqlalchemy (syntax básica en SQL) # # * Visualización de datos: # * Introducción # * Visualización Imperativa, Declarativa, Interactiva # # * Análisis exploratorio de datos # * Introducción # * Casos Aplicados # # * Machine Learning # * Análisis Supervisado # * Análisis No Supervisado # * Análisis Overfitting # * Series Temporales (Introducción)
_notebooks/2019-07-31-mat281_2019.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.1.0 # language: julia # name: julia-1.1 # --- using BenchmarkTools # ## Starting a Cluster using Distributed procs() # change to 2 if started julia with -p2 # + addprocs(4) procs() # - # addprocs(["10.0.2.1", "10.0.2.2"]) ; # ## Communication between Julia processes # + a = remotecall(sqrt, 2, 4.0) wait(a) fetch(a) remotecall_fetch(sqrt, 2, 4.0) # - # # Programming parallel tasks # + using Pkg Pkg.add("Distributions") using Distributions # precompile @everywhere using Distributions @everywhere println(rand(Normal())) # - # ## @spawn macro # + a=@spawn randn(5,5)^2 fetch(a) b=rand(5,5) a=@spawn b^2 fetch(a) @time begin A = rand(1000,1000) Bref = @spawn A^2 fetch(Bref) end; @time begin Bref = @spawn rand(1000,1000)^2 fetch(Bref) end; # - # ## @spawnat # + r = remotecall(rand, 2, 2, 2) s = @spawnat 3 1 .+ fetch(r) fetch(s) # - # ## @parallel for # + function serial_add() s=0.0 for i = 1:1000000 s=s+randn() end return s end function parallel_add() return @distributed (+) for i=1:1000000 randn() end end @btime serial_add() @btime parallel_add() # - # ## Parallel map # + x=[rand(100,100) for i in 1:10]; @everywhere using LinearAlgebra @btime map(svd, x); @btime pmap(svd, x); # - # ## Distributed Monte Carlo # + @everywhere function darts_in_circle(N) n = 0 for i in 1:N if rand()^2 + rand()^2 < 1 n += 1 end end return n end function pi_distributed(N, loops) n = sum(pmap((x)->darts_in_circle(N), 1:loops)) 4 * n / (loops * N) end function pi_serial(n) return 4 * darts_in_circle(n) / n end @btime pi_distributed(1_000_000, 50) @btime pi_serial(50_000_000) # - # ## Distributed Arrays # + using Pkg Pkg.add("DistributedArrays") using DistributedArrays @everywhere using DistributedArrays d=dzeros(12, 12) x=rand(10,10); dx = distribute(x) @everywhere function par(I) d=(size(I[1], 1), size(I[2], 1)) m = 
fill(myid(), d) return m end m = DArray(par, (800, 800)) d.indices r = @spawnat 2 localpart(d) fetch(r) @distributed (+) for i in 1:nworkers() sum(localpart(m)) end # - # ### Game of Life # + function life_step(d::DArray) DArray(size(d),procs(d)) do I top = mod1(first(I[1])-1,size(d,1)) #outside edge bot = mod1( last(I[1])+1,size(d,1)) left = mod1(first(I[2])-1,size(d,2)) right = mod1( last(I[2])+1,size(d,2)) old = Array{Bool}(undef, length(I[1])+2, length(I[2])+2) #temp array old[1 , 1 ] = d[top , left] #get from remote old[2:end-1, 1 ] = d[I[1], left] # left old[end , 1 ] = d[bot , left] old[1 , end ] = d[top , right] old[2:end-1, end ] = d[I[1], right] # right old[end , end ] = d[bot , right] old[1 , 2:end-1] = d[top , I[2]] # top old[end , 2:end-1] = d[bot , I[2]] # bottom old[2:end-1, 2:end-1] = d[I[1], I[2]] # middle (local) life_rule(old) # Step! end end @everywhere function life_rule(old) m, n = size(old) new = similar(old, m-2, n-2) for j = 2:n-1 @inbounds for i = 2:m-1 nc = (+)(old[i-1,j-1], old[i-1,j], old[i-1,j+1], old[i ,j-1], old[i ,j+1], old[i+1,j-1], old[i+1,j], old[i+1,j+1]) new[i-1,j-1] = (nc == 3 || nc == 2 && old[i,j]) end end new end A = DArray(I->rand(Bool, length.(I)), (20,20)) using Pkg; Pkg.add("Colors") using Colors Gray.(A) B = copy(A) B = Gray.(life_step(B)) # - # # Shared Arrays # + using SharedArrays S=SharedArray{Float64}((100, 100, 5), pids=[2,3, 4, 5]); pmap(x->S[x]=myid(), eachindex(S)); S # - # ## Parallel Prefix sum function prefix_shared!(y::SharedArray) l=length(y) k=ceil(Int, log2(l)) for j=1:k @sync @distributed for i=2^j:2^j:min(l, 2^k) @inbounds y[i] = y[i-2^(j-1)] + y[i] end end for j=(k-1):-1:1 @sync @distributed for i=3*2^(j-1):2^j:min(l, 2^k) @inbounds y[i] = y[i-2^(j-1)] + y[i] end end y end # *This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
Chapter10/Chapter10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import ceo import matplotlib.pyplot as plt # %matplotlib inline gmt = ceo.GMT_MX() n = 512 src = ceo.Source('V',zenith=0,azimuth=0,rays_box_sampling=n,rays_box_size=25.5,rays_origin=[0,0,25]) src.reset() gmt.propagate(src) src.wavefront.rms(-9) imgr = ceo.Imaging(N_PX_PUPIL=n-1,N_PX_IMAGE=32) imgr.propagate(src) plt.imshow(imgr.frame.host()) imgr.propagate_cpx(src) A = np.real(imgr.cpx.host()) plt.imshow(A) plt.xlim([512-16,512+16]) plt.ylim([512-16,512+16])
imaging/Complex_Amplitude.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import simple_sim.simulation_objects as Objects import simple_sim.simulation_server as World import pybullet_data import pybullet import os import numpy as np from time import sleep import simple_sim.simulation_robot as Robots import simple_sim.simulation_controller as Controls # Start a simulation server server = World.simulationServer() # Add a heightmap pydata = pybullet_data.getDataPath() #heightmap = Objects.SimHeightmap(server, './models/heightmap.obj', meshScale = [10, 10, 10]) #plane = pybullet.loadURDF(os.path.join(pydata, 'plane.urdf')) plane = Objects.SimObject(server, os.path.join(pydata, 'plane.urdf')) # This loads a relative path robot = Robots.Robot(server, './models/SimpleWalker/urdf/SimpleWalker.urdf', basePosition = [0, 0, 2]) # Set the gravity server.setGravity([0, 0, -9.81]) # Simple loop over all joints, not ordered controller_dict = {} for joint in sorted(robot.joints.keys()): controller_dict.update( {joint : Controls.PositionControl(robot, joint, False)} ) #Controls.PositionControl(robot, joint, False) controller_Group1 = {} controller_Group2 = {} controller_keyList = ['Joint_1', 'Joint_2','Joint_3', 'Joint_4'] for controller in controller_keyList: controller_Group1.update( {controller : controller_dict[controller]} ) controller_keyList = ['Joint_13', 'Joint_14','Joint_15', 'Joint_16'] for controller in controller_keyList: controller_Group2.update( {controller : controller_dict[controller]} ) def controlJoints(time, shift, amplitude ,controlGroup): counter = 0 for joint, controller in controlGroup.items(): if counter == 0: reference = amplitude*0.3*np.pi*np.sin(time+shift) elif counter == 1: reference = amplitude*0.2*np.pi*np.sin(time+shift) else: reference = 0 controller.setReference(reference) # Start the simulation 
server.startSimulation() t = 0 positions = [] orientations = [] times = [] while t < 10: # Limit the length of the measurement to 10 if len(positions) > 10: positions = positions[:-1] # Get the base position in World frame positions.append(pybullet.getBasePositionAndOrientation(robot.id)[0]) # Get the base orientation in World frame ( Euler Angles) orientations.append( pybullet.getEulerFromQuaternion( # Index 1 == Quaternions pybullet.getBasePositionAndOrientation(robot.id)[1] ) ) # Get the current time times.append(t) # Apply control law controlJoints(t, 0, 2, controller_Group1) controlJoints(t, 1, -2, controller_Group2) # For slowing down sleep(0.2) t += 0.2 # + import pandas as pd # Convert that to a pandas data frame pos_data = pd.DataFrame(positions, columns = ['x', 'y', 'z']) orn_data = pd.DataFrame(orientations,columns = ['x', 'y', 'z']) # Store as csv file #pos_data.to_csv('test.csv') orn_data # - pybullet.resetBasePositionAndOrientation(robot.id, [2,2,2], pybullet.getQuaternionFromEuler([0, 0, 0])) def flowfield(x,y,z): return [30*x**2, 20*np.sin(y), 30] x,y,z = pybullet.getBasePositionAndOrientation(robot.id)[0] pybullet.applyExternalForce(robot.id, -1, flowfield(x,y,z), [0,0,0], 2 ) t = 0 while t < 10: for controller in robot.controller.values(): controller.setReference(reference = control(t)) t += 0.1 sleep(0.5) testy = robot.controller[1] testy def slidingMode(joint, reference): # Domain of non switching eps = 0.2 error = # Get Joint Pos - reference if error < -eps: return 10 elif error > eps: return -10 else: return 0
DumbControl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>Reinforcement Learning</h1> # <h6><NAME></h6> # Date: 08 July 2019 # + #isme humlog machine ko sikhaynge ki kab kon sa action perform karna #reinforcement me humlog model ko btaynge ki action jo liya h wo sahi h ya galat humlog ko output nhi pata, reward btaynge #state t par koi action perform kiye to wo state (t+1) me jayga.. lekin jo reward milega humlogon ko wo (t+1) ka reward milega # alpha - learning rate # gamma - discount factor # epsilon - exploration rate #agar game me(5x4) 20 pos honge to q table me rows 20 hoga #Q-learning -> Q-table -> rows- describes states, columns-action #formula -> Q(s_t , a_t)= r_(t+1) + gamma* r_(t+2) + gamma^2 * r_(T+3) #future me jo reward milne wala h usko hmlog disregard karte jaynge kyuki hmlog future me confirm nhi h # ..ki wo reward milega ki nahi humlogon ko #we are concerned with overall reward not the immediate reward #Q(s_t , a_t)= r_(t+1) + gamma* r_(t+2) + gamma^2 * r_(T+3) # Q(s_(t+1), a_(t+1)) = r_(t+2) +gamma*r_(t+3) + gamma^2 *r_(t+4) #Therefor final reward = Q(s_t , a_t) = r_(t+1) + gamma *Q(s_(t+1), a_(t+1)) # or Q(s_t , a_t) = r_(t+1) +gamma * Q_max_(s_(t+1)) Q_max me future me jitna reward milte wala h #exploration rate explores the other states for highest reward # - import gym import random import numpy as np import time from IPython.display import clear_output # + # frozen ice -> slippery # - from gym.envs.registration import register # + #https://github.com/openai/gym/blob/master/gym/envs/__init__.py par ja kr randomness off krnge try: register( id='FrozenLakeNoSlip-v0', entry_point='gym.envs.toy_text:FrozenLakeEnv', kwargs={'map_name' : '4x4', 'is_slippery':False}, max_episode_steps=100, reward_threshold=0.78, # optimum = .8196 ) except: pass # - env_name = 'FrozenLakeNoSlip-v0' 
env= gym.make(env_name) env.observation_space type(env.action_space) # + class Agent(): def __init__(self, env): self.isDiscrete =( type( env.action_space)) ==gym.spaces.discrete.Discrete def get_action(self,state):#check kareha ki discrete ya continuous if self.isDiscrete: action = random.choice(range(env.action_space.n)) else: action = np.random.uniform( env.action_space.low, env.action_space.high, env.action_space.shape) #because it is a continuous h return action agent= Agent(env) # - total_reward=0 for ep in range(10): state= env.reset() # game fresh start hoga done=False # gave abhi over nahi hua h while not done: action= agent.get_action(state) state, reward, done, info = env.step(action) env.render() time.sleep(0.05) clear_output(wait=True) env.close() # # Q-Learning # + class Agent(): def __init__(self, env, discountedRate=0.97, learningRate=0.01): self.isDiscrete =( type( env.action_space)) ==gym.spaces.discrete.Discrete self.stateSize=env.observation_space.n self.actionSize = env.action_space.n self.explorationRate=1.0 self.discountedRate= discountedRate self.learningRate= learningRate self.qTable= 1e-4*np.random.random([self.stateSize, self.actionSize]) def get_action(self,state):#check kareha ki discrete ya continuous qState= self.qTable[state] # we will get a single row with corresponding state #sbse pehle kaun se state pr h actionGreedy=np.argmax(qState) actionRandom=env.action_space.sample() return actionRandom if random.random()<self.explorationRate else actionGreedy def train(self, state, action, next_state , reward, done): #(state, action, next_state , reward, done) is called experience qStateNext=self.qTable[next_state] if done: qStateNext = np.zeros([self.actionSize]) else: qStateNext= self.qTable[next_state] qTarget = reward + self.discountedRate*np.max(qStateNext) qUpdate = qTarget - self.qTable[state, action] self.qTable[state, action]+=self.learningRate*qUpdate if done: self.explorationRate*=0.99 agent= Agent(env) # - total_reward=0 for ep 
in range(100): state= env.reset() # game fresh start hoga done=False # gave abhi over nahi hua h while not done: action= agent.get_action(state) nextState, reward, done, info = env.step(action) agent.train(state,action, nextState,reward,done) state=nextState total_reward+=reward print(f"s:{state},a:{action}") print(f'Ep:{ep},Goals:{total_reward}, Explore:{agent.explorationRate}') env.render() print(agent.qTable) time.sleep(0.05) clear_output(wait=True) env.close() # # Q-Learning with Neural Networks import tensorflow as tf # + class Agent(): def __init__(self, env, discountedRate=0.97, learningRate=0.01): self.isDiscrete =( type( env.action_space)) ==gym.spaces.discrete.Discrete self.stateSize=env.observation_space.n self.actionSize = env.action_space.n self.explorationRate=1.0 self.discountedRate= discountedRate self.learningRate= learningRate tf.reset_default_graph() #creating variables self.stateIn =tf.placeholder(tf.int32,shape=[1]) self.actionIn =tf.placeholder(tf.int32,shape=[1]) self.targetIn =tf.placeholder(tf.float32,shape=[1]) self.state =tf.one_hot(self.stateIn, depth=self.stateSize) self.action =tf.one_hot(self.actionIn, depth=self.actionSize) self.qState= tf.layers.dense(self.state, units= env.action_space.n, name='Qtable') self.qAction = tf.reduce_sum(tf.multiply(self.qState, self.action), axis=1) #Scalar self.loss=tf.reduce_sum(tf.square(self.targetIn-self.qAction)) self.optimizer=tf.train.AdamOptimizer(self.learningRate).minimize(self.loss) self.sess=tf.Session() self.sess.run(tf.global_variables_initializer()) def get_action(self,state):#check kareha ki discrete ya continuous qState= self.sess.run(self.qState,feed_dict= {self.stateIn:[state]}) # we will get a single row with corresponding state #sbse pehle kaun se state pr h actionGreedy=np.argmax(qState) actionRandom=env.action_space.sample() return actionRandom if random.random()<self.explorationRate else actionGreedy def train(self, state, action, next_state , reward, done): #(state, action, 
next_state , reward, done) is called experience if done: qStateNext = np.zeros([self.actionSize]) else: qStateNext= self.sess.run(self.qState, feed_dict= {self.stateIn:[next_state] }) qTarget = reward + self.discountedRate*np.max(qStateNext) feed ={ self.stateIn:[state],self.actionIn:[action], self.targetIn:[qTarget]} self.sess.run(self.optimizer, feed_dict=feed) if done: self.explorationRate*=0.99 def __del__(self): self.sess.close() agent= Agent(env) # - total_reward=0 for ep in range(100): state= env.reset() # game fresh start hoga done=False # gave abhi over nahi hua h while not done: action= agent.get_action(state) nextState, reward, done, info = env.step(action) agent.train(state,action, nextState,reward,done) state=nextState total_reward+=reward print(f"s:{state},a:{action}") print(f'Ep:{ep},Goals:{total_reward}, Explore:{agent.explorationRate}') env.render() with tf.variable_scope('Qtable', reuse=True): #if reuse=True weights will not reset. weights=agent.sess.run(tf.get_variable('kernel')) print(weights) time.sleep(0.05) clear_output(wait=True) env.close()
Reinforcement Learning/Reinforcement_Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # BladeX # ## Tutorial 3: Prepare a blade 3D sectional profiles # In this tutorial we show how to construct a blade 3D sectional profiles, where the planar foils undergo several transformation operation to satisfy the blade parameters `pitch`, `rake`, `skew`. In addition to that, we show the cylindrical transformations that are applied on such foils so that they resemble the blade cylindrical sections instead of the planar ones. # The foil transformations according to the pitch, rake, and skew are all given in the following transformations map. # # <img src="data/transformations.png"> # # First of all we import from BladeX the `NacaProfile` class for generating the 2D sectional profiles, and we import the `Blade` class for the 3D blade generation. # We also import `numpy` and we set `matplotlib` for the notebook. import matplotlib.pyplot as plt import numpy as np from bladex import NacaProfile, Blade # We instantiate the object **sample_blade** of the `blade` class. The arguments are as follows: # # 1. `sections` which is an array_like of class objects from the `profiles` module. Each object represents a sectional profile and holds all information of that foil. # # 1. `radii`: array_like of the radii from which the sectional profiles are taken. # # 1. `chord_lengths`: array_like of the chord lengths for the sectional profiles, so that each foil is scaled to its actual size. # # 1. `pitch`: array_like of the radial distribution of the blade pitch, given in unit length. # # 1. `rake`: array_like of radial distribution of the blade rake, given in unit length. # # 1. `skew_angles`: array_like of radial distribution of the blade skew, given in degrees. # We provide the sections as an array of 10 NacaProfile objects, all with NACA 0012 profile. 
The attributes of each object are in fact the same attributes of the NacaProfile class and its parent ProfileBase, which were clearly explained in the previous two tutorials. # # An example of how two access the NacaProfile objects attributes is given below. sections = np.asarray([NacaProfile(digits='0012') for i in range(10)]) print('NACA 4-series digits of profile 0: ', sections[0].digits) print('Number of points used to generate profile 1: ', sections[1].n_points) print('Cosine spacing of profile 3: ', sections[3].cosine_spacing) print('xup_coordinates array shape of profile 6: ', sections[6].xup_coordinates.shape) print('Chord length of profile 8: ', sections[8].chord_length) # Now we provide the remaining arguments needed for the **sample_blade** object. After that we instantiate the object. # + radii = np.arange(1.0, 11.0, 1.0) chord_lengths = np.arange(2.0, 7., 0.5) pitch = np.arange(1., 11.) rake = np.arange(0.1, 1.1, 0.1) skew_angles = np.arange(1., 21, 2.) sample_blade = Blade(sections=sections, radii=radii, chord_lengths=chord_lengths, pitch=pitch, rake=rake, skew_angles=skew_angles) # - # The following members can be accessed for the object by just printing the object itself, as in the following. print(sample_blade) # Or we can access the same attributes manually, as in the following. 
print('Blade number of sections = ', sample_blade.n_sections) print('Blade radii sections = ', sample_blade.radii) print('Chord lengths of the sectional profiles = ', sample_blade.chord_lengths) print('Radial distribution of the pitch (in unit lengths) = ', sample_blade.pitch) print('Radial distribution of the rake (in unit length) = ', sample_blade.rake) print('Radial distribution of the skew angles (in degrees) = ', sample_blade.skew_angles) print('Pitch angles (in radians) for the sections = ', sample_blade.pitch_angles) print('Induced rake from skew (in unit length) for the sections = ', sample_blade.induced_rake) # Also all the attributes associated with the `NacaProfile` or `ProfileBase` classes can be accessed through the blade sections objects, as shown in the following. We also note that this, in fact, yields the same results as if we printed the same attributes but without the parent object `sample_blade`, i.e. to call the objects attributes directly from the `sections` array of the NacaProfile objects. We also gave such example for calling the `sections` objects attributes earlier in this notebook, cell [2]. # + print('NACA digits for blade section "0": ', sample_blade.sections[0].digits) print('Number of points used to generate blade sectional profile "1": ', sample_blade.sections[1].n_points) print('Maximum thickness of the blade sectional profile "5": ', sample_blade.sections[5].max_thickness()) print('NACA digits and maximum camber of the blade sectional profile "3": ', sample_blade.sections[3].digits, sample_blade.sections[3].max_camber()) print('\nPlotting of the blade sectional profile "5"') sample_blade.sections[5].plot() # - # Now in order to apply all the transformations corresponding to the `chord_length`, `pitch`, `rake`, `skew`, we execute the `apply_transformations` method. Such method has the `reflect` argument in case we prefer to reflect all the foils about the origin before applying the transformations. 
For this tutorial we set the `reflect` off. sample_blade.apply_transformations(reflect=False) # Now we are ready to plot the final blade sectional profiles after all transformations # `elev` and `azim` sets the elevation and azimuth view angle of the plot. # None value means using the default view angle. `outfile` with None value indicates not saving the plot, otherwise # we pass valid string for the filename sample_blade.plot(elev=None, azim=None, outfile=None) # If we want to export a .ppg file that carries the information for the blade sections, then we execute the `export_ppg` method, specifying a valid string for the filename. Also the .ppg file will be generated with the following default values: # # 1. D_prop = 0.25 (for the propeller diameter) # # 1. D_hub = 0.075 (for the hub diameter) # # 1. n_blades = 5 (for the propeller number of blades) # # 1. params_normalized = False (to specify that the blade parameters are not normalized) sample_blade.export_ppg(filename='data/data_out.ppg', D_prop=0.25, D_hub=0.075, n_blades=5, params_normalized=False)
tutorials/tutorial-3-generate_blade.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch import torch.nn as nn from torch.autograd import Variable import numpy as np # # Coattention encoder # # We've constructed encoding matrices for the documents and questions, D and Q. Each document (e.g. paragraph in SQUAD) has its own D and each question has its own Q. Matrix D is of shape 300 x m+1 where m is the number of words in the document. Matrix Q is of shape 300 x n+1 where n is the number of wordsd in the question. 300 is the length of the word vectors, and the +1 comes from the sentinel vector. Let's assume we have these matrices. For now we'll use dummies of the right shape to get the process right. Also, we'll start by doing this for one document and one question. Later, we'll extend this to the whole dataset. # + n = 20 m = 40 l = 300 Q = Variable(torch.Tensor(l, n+1).zero_(), requires_grad=False) D = Variable(torch.Tensor(l, m+1).zero_(), requires_grad=False) # We don't need to calculate gradients for this part of the process, right?" # - # Calculate affinity matrix L = D.transpose(0, 1).matmul(Q) # Calculate attention weights A for Q and D softmax = nn.Softmax(dim=1) A_q = softmax(L) A_d = softmax(L.transpose(0, 1)) # Compute attention context of the document wrt each word in the question C_q = D.matmul(A_q) # Compute attention context of the question wrt each word in the document and # compute the attention context of the previous attention context wrt each # word in the document. This is the coattention context C_d. 
C_d = torch.cat((Q, C_q), dim=0).matmul(A_d) class CoattentionEncoderBiLSTM(nn.Module): def __init__(self, input_size, hidden_size, num_layers): super(CoattentionEncoderBiLSTM, self).__init__() self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bidirectional=True, batch_first=True) def forward(self, x): out = self.lstm(x) return out # + # Setup parameters input_size = 900 hidden_size = 450 num_layers = 2 batch_size = 32 learning_rate = 0.0007 num_epochs = 2 # - # Setup model model = CoattentionEncoderBiLSTM(input_size, hidden_size, num_layers) model.cuda() lossfun = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) x = torch.cat((D, C_d), dim=0) x = x.transpose(0, 1) x = x.expand(32, 41, 900) x.size() for epoch in range(num_epochs): x = x.cuda() y = x output = model(x) optimizer.zero_grad() print(output[0].size()) loss = lossfun(output[0], y) loss.backward() optimizer.step()
notebooks/03-jdb-coattention-encoder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Part 1: Path between cities
# ---
# ## Dependencies
# Before running this notebook you may need to install its dependencies:
# ```bash
# pip install --user pandas
# pip install --user tqdm
# ```

import pandas as pd
import heapq as heap
from math import sqrt
from tqdm import tqdm

# ## Importing the data

# Import
australia = pd.read_csv('australia.csv', sep=",")
# Cleaning: keep only the columns the search needs
australia = australia[['id', 'city', 'lat', 'lng']]
# Printing
australia

# ## Pre-game
# ### Data structures

# +
class City:
    """Graph node: one city plus its outgoing roads."""

    def __init__(self, id, name, lat, lng):
        self.id = id
        self.name = name
        self.lat = lat
        self.lng = lng
        self.roads = set()
        self.weight = None  # heuristic: straight-line distance to the goal

    def successors(self):
        """Yield (neighbour_city, edge_cost) pairs."""
        for road in self.roads:
            yield road.city, road.length


class Road:
    """Directed edge towards `city` with cost `length`."""

    def __init__(self, city, length):
        self.city = city
        self.length = length


def add_road_between(city1, city2):
    """Connect two cities both ways; cost is 1.1x the Euclidean distance."""
    distance = 1.1 * sqrt((city1.lat - city2.lat) ** 2 + (city1.lng - city2.lng) ** 2)
    # city1 -> city2
    city1.roads.add(Road(city2, distance))
    # city2 -> city1
    city2.roads.add(Road(city1, distance))
# -

# ### Filling the data structure

# +
cities = [None]  # padding so cities[id] works directly (ids start at 1)

# Cities
for _, row in australia.iterrows():
    cities.append(City(row['id'], row['city'], row['lat'], row['lng']))

# Roads: even ids link to (x - 1) and (x + 2); odd ids (> 2) to (x - 2) and (x + 1)
for city in cities:
    if city:
        if city.id % 2 == 0:
            # Road to (x - 1)
            add_road_between(city, cities[city.id - 1])
            # Road to (x + 2) if it exists
            if city.id + 2 < len(cities):
                add_road_between(city, cities[city.id + 2])
        elif city.id > 2:
            # Road to (x - 2)
            add_road_between(city, cities[city.id - 2])
            # Road to (x + 1) if it exists
            if city.id + 1 < len(cities):
                add_road_between(city, cities[city.id + 1])
# -

# ## Search algorithm
# ### Parameters

# +
# Choose start and goal (either the city name as a string or the id as an int)
source = '<NAME>'
destiny = 'Yulara'

# Would also work:
# source = 5
# destiny = 219
# -

# ### Adding information (informed search)

# +
# Find the goal city's node
dest_city = None
if type(destiny) == str:
    for city in cities:
        if city and city.name == destiny:
            dest_city = city
            break
else:
    for city in cities:
        if city and city.id == destiny:
            dest_city = city
            break

# Find the source city's node
src_city = None
if type(source) == str:
    for city in cities:
        if city and city.name == source:
            src_city = city
            break
else:
    for city in cities:
        if city and city.id == source:
            src_city = city
            break

if dest_city == None or src_city == None:
    print('Cidades não encontradas')

# Heuristic weight: straight-line distance from every city to the goal
for city in cities:
    if city:
        city.weight = sqrt((city.lat - dest_city.lat) ** 2 + (city.lng - dest_city.lng) ** 2)
# -

# + tags=[]
print(src_city.id, src_city.name, len(src_city.roads))
print(dest_city.id, dest_city.name, len(dest_city.roads))
# -

# ### Algorithm

# +
# A* algorithm
class Node:
    """Search-tree node, ordered by f-value with ties broken by city id."""

    def __init__(self, f_value, g_value, element, parent):
        self.f_value = f_value
        self.g_value = g_value
        self.element = element
        self.parent = parent

    def __lt__(self, other):
        if self.f_value != other.f_value:
            return self.f_value < other.f_value
        return self.element.id < other.element.id


def astar_search(initial_state, goal_test):
    """Run A* from `initial_state`; return the goal Node, or None if no path."""
    expanded = set()
    # min-heap keyed by Node.__lt__ (f-value)
    frontier = [Node(initial_state.weight, 0, initial_state, None)]
    heap.heapify(frontier)
    # hard cap on expansions; tqdm also renders a progress bar
    for _ in tqdm(range(1000000)):
        # no path
        if len(frontier) == 0:
            return None
        current = heap.heappop(frontier)
        if current.element in expanded:
            continue
        expanded.add(current.element)
        # goal reached
        if goal_test(current.element):
            return current
        # expand neighbours
        for successor, price in current.element.successors():
            if successor in expanded:
                continue
            new_g = current.g_value + price
            heap.heappush(frontier, Node(new_g + successor.weight, new_g, successor, current))
# -

# + tags=[]
def goal_test(city):
    return city == dest_city


solution_node = astar_search(src_city, goal_test)
# -

# ## Result

# + tags=[]
path = []
aux_node = solution_node
while aux_node is not None:
    path.append(aux_node.element)
    aux_node = aux_node.parent
path.reverse()

print('LENGTH: ', len(path))
for city in path:
    print(city.name, ' ->')
Projeto1-Buscas/.ipynb_checkpoints/city-path-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import matplotlib.pyplot as plt

# Line plot of four world-population samples (billions)
year = [1950, 1970, 1990, 2010]
pop = [2.519, 3.692, 5.263, 6.972]
plt.plot(year, pop)
plt.show()

# Same data shown as a scatter plot
year = [1950, 1970, 1990, 2010]
pop = [2.519, 3.692, 5.263, 6.972]
plt.scatter(year, pop)
plt.show()

help(plt.hist)

# Histogram with three bins
values = [0, 0.6, 1.4, 1.6, 2.2, 2.5, 2.6, 3.2, 3.5, 3.9, 4.2, 6]
plt.hist(values, bins=3)
plt.show()

# Yearly projection data, one value per year from 1950 through 2100
year = list(range(1950, 2101))
pop = [2.53, 2.57, 2.62, 2.67, 2.71, 2.76, 2.81, 2.86, 2.92, 2.97, 3.03, 3.08,
       3.14, 3.2, 3.26, 3.33, 3.4, 3.47, 3.54, 3.62, 3.69, 3.77, 3.84, 3.92,
       4.0, 4.07, 4.15, 4.22, 4.3, 4.37, 4.45, 4.53, 4.61, 4.69, 4.78, 4.86,
       4.95, 5.05, 5.14, 5.23, 5.32, 5.41, 5.49, 5.58, 5.66, 5.74, 5.82, 5.9,
       5.98, 6.05, 6.13, 6.2, 6.28, 6.36, 6.44, 6.51, 6.59, 6.67, 6.75, 6.83,
       6.92, 7.0, 7.08, 7.16, 7.24, 7.32, 7.4, 7.48, 7.56, 7.64, 7.72, 7.79,
       7.87, 7.94, 8.01, 8.08, 8.15, 8.22, 8.29, 8.36, 8.42, 8.49, 8.56, 8.62,
       8.68, 8.74, 8.8, 8.86, 8.92, 8.98, 9.04, 9.09, 9.15, 9.2, 9.26, 9.31,
       9.36, 9.41, 9.46, 9.5, 9.55, 9.6, 9.64, 9.68, 9.73, 9.77, 9.81, 9.85,
       9.88, 9.92, 9.96, 9.99, 10.03, 10.06, 10.09, 10.13, 10.16, 10.19, 10.22,
       10.25, 10.28, 10.31, 10.33, 10.36, 10.38, 10.41, 10.43, 10.46, 10.48,
       10.5, 10.52, 10.55, 10.57, 10.59, 10.61, 10.63, 10.65, 10.66, 10.68,
       10.7, 10.72, 10.73, 10.75, 10.77, 10.78, 10.79, 10.81, 10.82, 10.83,
       10.84, 10.85]

# Bare projection line
plt.plot(year, pop)
plt.show()

# ... with axis labels
plt.plot(year, pop)
plt.xlabel('Year')
plt.ylabel('Population')
plt.show()

# ... plus a title
plt.plot(year, pop)
plt.xlabel('Year')
plt.ylabel('Population')
plt.title('World Population Projections')
plt.show()

# ... with explicit y-axis ticks
plt.plot(year, pop)
plt.xlabel('Year')
plt.ylabel('Population')
plt.title('World Population Projections')
plt.yticks([0, 2, 4, 6, 8, 10])
plt.show()

# ... with tick labels expressed in billions
plt.plot(year, pop)
plt.xlabel('Year')
plt.ylabel('Population')
plt.title('World Population Projections')
plt.yticks([0, 2, 4, 6, 8, 10], ['0', '2B', '4B', '6B', '8B', '10B'])
plt.show()

# NOTE(review): this final cell is an exact duplicate of the previous one;
# kept to preserve the notebook's output, but it is probably accidental.
plt.plot(year, pop)
plt.xlabel('Year')
plt.ylabel('Population')
plt.title('World Population Projections')
plt.yticks([0, 2, 4, 6, 8, 10], ['0', '2B', '4B', '6B', '8B', '10B'])
plt.show()
Bagian 2 - Python Lanjutan/1. Plot Dasar menggunakan matplotlib.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Logistic Regression Example # + from __future__ import absolute_import, division, print_function import tensorflow as tf import numpy as np # + # MNIST dataset parameters. num_classes = 10 # 0 to 9 digits num_features = 784 # 28*28 # Training parameters. learning_rate = 0.01 training_steps = 1000 batch_size = 256 display_step = 50 # - # Prepare MNIST data. from tensorflow.keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() # Convert to float32. x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32) # Flatten images to 1-D vector of 784 features (28*28). x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features]) # Normalize images value from [0, 255] to [0, 1]. x_train, x_test = x_train / 255., x_test / 255. # Use tf.data API to shuffle and batch data. train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1) # + # Weight of shape [784, 10], the 28*28 image features, and total number of classes. W = tf.Variable(tf.ones([num_features, num_classes]), name="weight") # Bias of shape [10], the total number of classes. b = tf.Variable(tf.zeros([num_classes]), name="bias") # Logistic regression (Wx + b). def logistic_regression(x): # Apply softmax to normalize the logits to a probability distribution. return tf.nn.softmax(tf.matmul(x, W) + b) # Cross-Entropy loss function. def cross_entropy(y_pred, y_true): # Encode label to a one hot vector. y_true = tf.one_hot(y_true, depth=num_classes) # Clip prediction values to avoid log(0) error. y_pred = tf.clip_by_value(y_pred, 1e-9, 1.) # Compute cross-entropy. 
return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred))) # Accuracy metric. def accuracy(y_pred, y_true): # Predicted class is the index of highest score in prediction vector (i.e. argmax). correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64)) return tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Stochastic gradient descent optimizer. optimizer = tf.optimizers.SGD(learning_rate) # - # Optimization process. def run_optimization(x, y): # Wrap computation inside a GradientTape for automatic differentiation. with tf.GradientTape() as g: pred = logistic_regression(x) loss = cross_entropy(pred, y) # Compute gradients. gradients = g.gradient(loss, [W, b]) # Update W and b following gradients. optimizer.apply_gradients(zip(gradients, [W, b])) # Run training for the given number of steps. for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1): # Run the optimization to update W and b values. run_optimization(batch_x, batch_y) if step % display_step == 0: pred = logistic_regression(batch_x) loss = cross_entropy(pred, batch_y) acc = accuracy(pred, batch_y) print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
tensorflow2.0/BasicModels/logistic_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:DSC478]
#     language: python
#     name: conda-env-DSC478-py
# ---

# +
from sklearn.datasets.samples_generator import make_blobs
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from dbscan import MyDBSCAN
# -

import numpy as np

# +
# Six Gaussian blobs, standardised to zero mean / unit variance.
centers = [[1, 1], [-1, -1], [1, -1], [4, 4], [-3, -2], [2, -4]]
X, labels_true = make_blobs(n_samples=1050, centers=centers, cluster_std=0.6,
                            random_state=546456)
X = StandardScaler().fit_transform(X)
# -

print ('Running my implementation...')
my_labels = np.array(MyDBSCAN(X, eps=.4, MinPts=20, MaxPts=60))

# Mark every point as a "core" sample for plotting purposes.
core_samples_mask = np.zeros_like(my_labels, dtype=bool)
core_samples_mask[np.array(range(0, len(my_labels)))] = True
labels = my_labels

# +
# Plot result
import matplotlib.pyplot as plt

# One colour per cluster label; black is reserved for noise.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]

# +
plt.figure(figsize=(20, 10))
for k, col in zip(unique_labels, colors):
    if k == -1:
        # Black used for noise.
        col = [0, 0, 0, 1]
    class_member_mask = (my_labels == k)
    xy = X[class_member_mask & core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
             markeredgecolor='k', markersize=10)
    xy = X[class_member_mask & ~core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
             markeredgecolor='k', markersize=10)
plt.show()
# -

# Re-plot a single cluster (label 8) in isolation.
unique_labels = set([8])

# +
for k, col in zip(unique_labels, colors):
    if k == -1:
        # Black used for noise.
        col = [0, 0, 0, 1]
    class_member_mask = (my_labels == k)
    xy = X[class_member_mask & core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
             markeredgecolor='k', markersize=10)
    xy = X[class_member_mask & ~core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
             markeredgecolor='k', markersize=10)
plt.show()
# -

# Count how many points fell into each cluster label.
clusterDict = {}
for label in labels:
    clusterDict[label] = clusterDict.get(label, 0) + 1
clusterDict
.ipynb_checkpoints/LearnDBSCAN-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# Load the four CRISIS adult survey waves, indexed by participant ID.
survey_April20 = pd.read_csv('Data/ProlificAcademic/April 2020/Data/CRISIS_Adult_April_2020.csv', index_col='ID')
survey_May20 = pd.read_csv('Data/ProlificAcademic/May 2020/Data/CRISIS_Adult_May_2020.csv', index_col='ID')
survey_Nov20 = pd.read_csv('Data/ProlificAcademic/November 2020/Data/CRISIS_Adult_November_2020.csv', index_col='ID')
survey_April21 = pd.read_csv('Data/ProlificAcademic/April 2021/Data/CRISIS_Adult_April_2021.csv', index_col='ID')

# check individual row and column dimensions
print(survey_April20.shape)
print(survey_May20.shape)
print(survey_Nov20.shape)
print(survey_April21.shape)

# +
# cant use merge as columns are not same across the waves
# survey_df = pd.merge(survey_April20, survey_Nov20, how="inner", on='ID')
# -

# ### Find common participants across all groups

# Intersect the ID indices pairwise, then intersect the two results.
# assume_unique=True is safe only if participant IDs never repeat within a wave
# — presumably true for this dataset; verify if new waves are added.
series_1 = pd.Series(np.intersect1d(survey_April20.index, survey_May20.index, assume_unique=True))
series_2 = pd.Series(np.intersect1d(survey_Nov20.index, survey_April21.index, assume_unique=True))
unique_ids = list(pd.Series(np.intersect1d(series_1, series_2, assume_unique=True)))

len(unique_ids)

# Test entry in a dataframe
print('location' in survey_April20.columns)
survey_April21.loc['00db60f1', 'location']

## filter rows: keep only participants present in all four waves
survey_April20 = survey_April20.loc[unique_ids]
survey_May20 = survey_May20.loc[unique_ids]
survey_Nov20 = survey_Nov20.loc[unique_ids]
survey_April21 = survey_April21.loc[unique_ids]

## Compare shapes after filtering
print(survey_April20.shape)
print(survey_May20.shape)
print(survey_Nov20.shape)
print(survey_April21.shape)

# +
import matplotlib.pyplot as plt

plt.style.use('ggplot')

# Age distribution of the common participants (November 2020 wave).
ages = survey_Nov20['age']
bins = list(range(10, 100, 10))
median_age = survey_Nov20['age'].median()

plt.hist(ages, bins=bins, edgecolor='black')
plt.axvline(median_age, color='blue', label="Median Age")
plt.xlabel("Age")
plt.ylabel("Age Distribution")  # fixed typo: was "Age Disribution"
plt.title("Age Distribution Histogram in 2020")
plt.legend()
plt.show()
# -

# ### List Common columns across All time whose data is unlikely to change - eg. gender,age,occupation, country etc.

# +
## bar graph of categorical variables - location, use start of survey April 20 data
survey_April20['location'].value_counts().plot(kind='bar')
# -

survey_April21['location'].value_counts().plot(kind='bar')
survey-adult.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # This file contains code of the paper 'Rejecting Novel Motions in High-Density Myoelectric Pattern Recognition using Hybrid Neural Networks'

import scipy.io as sio
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, Input, BatchNormalization
from keras.models import Model
from keras.losses import categorical_crossentropy
from keras.optimizers import Adadelta
import keras

# +
# load data: one array of EMG windows per gesture
path = './data/data'
data = sio.loadmat(path)

wristPronation = data['wristPronation']
wristSupination = data['wristSupination']
wristExtension = data['wristExtension']
wristFlexion = data['wristFlexion']
handOpen = data['handOpen']
handClose = data['handClose']
shoot = data['shoot']
pinch = data['pinch']
typing = data['typing']
writing = data['writing']
mouseManipulating = data['mouseManipulating']
radialDeviation = data['radialDeviation']
ulnarDeviation = data['ulnarDeviation']
# -

# ## part1: CNN


def Spatial_Model(input_shape):
    """7-class CNN classifier over 12x8x3 EMG feature maps."""
    input_layer = Input(input_shape)
    net = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', name='conv_layer1')(input_layer)
    net = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', name='conv_layer2')(net)
    net = Flatten()(net)
    net = Dense(units=1024, activation='relu', name='dense_layer1')(net)
    net = Dropout(0.4)(net)
    net = Dense(units=512, activation='relu', name='dense_layer2')(net)
    net = Dropout(0.4)(net)
    output_layer = Dense(units=7, activation='softmax', name='output_layer')(net)
    return Model(inputs=input_layer, outputs=output_layer)


# +
def getIntermediate(layer_name, X, model):
    """Return the activations of `layer_name` for inputs X."""
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=model.get_layer(layer_name).output)
    return intermediate_layer_model.predict(X)


def getPointedGesture(X, y, flag):
    """Select the rows of X whose label equals `flag`."""
    index = np.where(y == flag)
    return X[index]


# +
classNum = 7

# Known ("inlier") gestures: the seven trained classes.
X_inliers = np.concatenate((wristPronation, wristSupination, wristExtension,
                            wristFlexion, handOpen, handClose, shoot), axis=0)
print('X_inliers.shape: ', X_inliers.shape)
y_inliers = np.concatenate((np.ones(wristPronation.shape[0])*0, np.ones(wristSupination.shape[0])*1,
                            np.ones(wristExtension.shape[0])*2, np.ones(wristFlexion.shape[0])*3,
                            np.ones(handOpen.shape[0])*4, np.ones(handClose.shape[0])*5,
                            np.ones(shoot.shape[0])*6), axis=0)
print('y_inliers.shape: ', y_inliers.shape)

# Novel ("outlier") gestures: never shown to the classifier during training.
X_outliers = np.concatenate((typing, writing, mouseManipulating, pinch), axis=0)
print('X_outliers.shape: ', X_outliers.shape)
y_outliers = np.concatenate((np.ones(typing.shape[0])*7, np.ones(writing.shape[0])*8,
                             np.ones(mouseManipulating.shape[0])*9, np.ones(pinch.shape[0])*10), axis=0)
print('y_outliers.shape: ', y_outliers.shape)
# -

model = Spatial_Model((12, 8, 3))
model.summary()

# +
trainModel = False

from sklearn.model_selection import train_test_split

# 80/20 train/test split, then a further 80/20 train/validation split.
X_train, X_test_norm, y_train, y_test_norm = train_test_split(
    X_inliers, y_inliers, test_size=0.20, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.2, random_state=1)

y_train_onehot = keras.utils.to_categorical(y_train, classNum)
y_test_onehot = keras.utils.to_categorical(y_test_norm, classNum)

model.compile(loss=categorical_crossentropy, optimizer=Adadelta(lr=0.1), metrics=['acc'])

if trainModel:
    # Two-stage training: 50 epochs at lr=0.1, then 50 more at lr=0.01.
    model.fit(x=X_train, y=y_train_onehot, batch_size=16, epochs=50,
              shuffle=True, validation_split=0.05)
    model.compile(loss=categorical_crossentropy, optimizer=Adadelta(lr=0.01), metrics=['acc'])
    model.fit(x=X_train, y=y_train_onehot, batch_size=16, epochs=50,
              shuffle=True, validation_split=0.05)
    model.save_weights('./model/modelCNN.h5')
else:
    model.load_weights('./model/modelCNN.h5')

# +
model_evaluate = []
model_evaluate.append(model.evaluate(X_test_norm, y_test_onehot))
print('model_evaluate', model_evaluate)

# Features for the autoencoders are taken from the penultimate dense layer.
layer_name = 'dense_layer2'
# Extract 512-d intermediate features (dense_layer2) for every data subset.
X_train_intermediate = getIntermediate(layer_name, X_train, model)
X_test_intermediate_norm = getIntermediate(layer_name, X_test_norm, model)
typing_intermediate = getIntermediate(layer_name, typing, model)
writing_intermediate = getIntermediate(layer_name, writing, model)
mouseManipulating_intermediate = getIntermediate(layer_name, mouseManipulating, model)
pinch_intermediate = getIntermediate(layer_name, pinch, model)
radialDeviation_intermediate = getIntermediate(layer_name, radialDeviation, model)
ulnarDeviation_intermediate = getIntermediate(layer_name, ulnarDeviation, model)

# +
## train Data: per-gesture feature matrices for the seven known motions
wristPronation_intermediate_train = getPointedGesture(X_train_intermediate, y_train, 0)
wristSupination_intermediate_train = getPointedGesture(X_train_intermediate, y_train, 1)
wristExtension_intermediate_train = getPointedGesture(X_train_intermediate, y_train, 2)
wristFlexion_intermediate_train = getPointedGesture(X_train_intermediate, y_train, 3)
handOpen_intermediate_train = getPointedGesture(X_train_intermediate, y_train, 4)
handClose_intermediate_train = getPointedGesture(X_train_intermediate, y_train, 5)
shoot_intermediate_train = getPointedGesture(X_train_intermediate, y_train, 6)

## test data
wristPronation_intermediate_test = getPointedGesture(X_test_intermediate_norm, y_test_norm, 0)
wristSupination_intermediate_test = getPointedGesture(X_test_intermediate_norm, y_test_norm, 1)
wristExtension_intermediate_test = getPointedGesture(X_test_intermediate_norm, y_test_norm, 2)
wristFlexion_intermediate_test = getPointedGesture(X_test_intermediate_norm, y_test_norm, 3)
handOpen_intermediate_test = getPointedGesture(X_test_intermediate_norm, y_test_norm, 4)
handClose_intermediate_test = getPointedGesture(X_test_intermediate_norm, y_test_norm, 5)
shoot_intermediate_test = getPointedGesture(X_test_intermediate_norm, y_test_norm, 6)

# Novel motions: all samples act as test data.
typing_intermediate_test = typing_intermediate
writing_intermediate_test = writing_intermediate
mouseManipulating_intermediate_test = mouseManipulating_intermediate
pinch_intermediate_test = pinch_intermediate
radialDeviation_intermediate_test = radialDeviation_intermediate
ulnarDeviation_intermediate_test = ulnarDeviation_intermediate

outlierData = {'typing_intermediate_test': typing_intermediate_test,
               'writing_intermediate_test': writing_intermediate_test,
               'mouseManipulating_intermediate_test': mouseManipulating_intermediate_test,
               'pinch_intermediate_test': pinch_intermediate_test}

motionNameList = ['wristPronation', 'wristSupination', 'wristExtension', 'wristFlexion',
                  'handOpen', 'handClose', 'shoot']
trainDataDict = {motionNameList[0]: wristPronation_intermediate_train, motionNameList[1]: wristSupination_intermediate_train,
                 motionNameList[2]: wristExtension_intermediate_train, motionNameList[3]: wristFlexion_intermediate_train,
                 motionNameList[4]: handOpen_intermediate_train, motionNameList[5]: handClose_intermediate_train,
                 motionNameList[6]: shoot_intermediate_train}

# Known motions first, then the six novel motions (capped at 150 samples each).
testDataNameList = ['wristPronation', 'wristSupination', 'wristExtension', 'wristFlexion',
                    'handOpen', 'handClose', 'shoot',
                    'typing', 'writing', 'mouseManipulating', 'pinch',
                    'radialDeviation', 'ulnarDeviation']
testDataDict = {testDataNameList[0]: wristPronation_intermediate_test, testDataNameList[1]: wristSupination_intermediate_test,
                testDataNameList[2]: wristExtension_intermediate_test, testDataNameList[3]: wristFlexion_intermediate_test,
                testDataNameList[4]: handOpen_intermediate_test, testDataNameList[5]: handClose_intermediate_test,
                testDataNameList[6]: shoot_intermediate_test, testDataNameList[7]: typing_intermediate_test[0:150],
                testDataNameList[8]: writing_intermediate_test[0:150], testDataNameList[9]: mouseManipulating_intermediate_test[0:150],
                testDataNameList[10]: pinch_intermediate_test[0:150], testDataNameList[11]: radialDeviation_intermediate_test[0:150],
                testDataNameList[12]: ulnarDeviation_intermediate_test[0:150]}

# +
# Validation features, used later to calibrate the rejection thresholds.
X_val_intermediate = getIntermediate(layer_name, X_val, model)
wristPronation_intermediate_val = getPointedGesture(X_val_intermediate, y_val, 0)
wristSupination_intermediate_val = getPointedGesture(X_val_intermediate, y_val, 1)
wristExtension_intermediate_val = getPointedGesture(X_val_intermediate, y_val, 2)
wristFlexion_intermediate_val = getPointedGesture(X_val_intermediate, y_val, 3)
handOpen_intermediate_val = getPointedGesture(X_val_intermediate, y_val, 4)
handClose_intermediate_val = getPointedGesture(X_val_intermediate, y_val, 5)
shoot_intermediate_val = getPointedGesture(X_val_intermediate, y_val, 6)

valDataDict = {motionNameList[0]: wristPronation_intermediate_val, motionNameList[1]: wristSupination_intermediate_val,
               motionNameList[2]: wristExtension_intermediate_val, motionNameList[3]: wristFlexion_intermediate_val,
               motionNameList[4]: handOpen_intermediate_val, motionNameList[5]: handClose_intermediate_val,
               motionNameList[6]: shoot_intermediate_val}
# -

# ## part2: autoEncoder

# +
from keras import regularizers
from keras.losses import mean_squared_error
from keras.optimizers import SGD


def autoModel(input_shape):
    """L2-regularised 512->256->64->256->512 autoencoder over CNN features."""
    input_img = Input(input_shape)
    encoded = Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.002))(input_img)
    encoded = BatchNormalization()(encoded)
    encoded = Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.002))(encoded)
    encoded = BatchNormalization()(encoded)
    decoded = Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.002))(encoded)
    decoded = BatchNormalization()(decoded)
    decoded = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.002))(decoded)
    model = Model(input_img, decoded)
    return model


# +
trainAutoFlag = False
if trainAutoFlag:
    # One autoencoder per known motion; two-stage SGD (lr 0.1 then 0.01).
    for motionId in range(len(motionNameList)):
        motionName = motionNameList[motionId]
        x_train = trainDataDict[motionName]
        x_val = valDataDict[motionName]
        autoencoder = autoModel((512,))
        autoencoder.compile(loss=mean_squared_error, optimizer=SGD(lr=0.1))
        autoencoder.fit(x_train, x_train, epochs=600, batch_size=16, shuffle=True,
                        validation_data=(x_val, x_val))
        autoencoder.compile(loss=mean_squared_error, optimizer=SGD(lr=0.01))
        autoencoder.fit(x_train, x_train, epochs=300, batch_size=16, shuffle=True,
                        validation_data=(x_val, x_val))
        autoencoder.save_weights('./model/autoencoder/Autoencoder_' + motionName + '.h5')
# -

# ### Calculate ROC curve

# +
import matplotlib
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist
from sklearn.metrics import roc_curve, auc

# Bray-Curtis reconstruction error on the VALIDATION data of each motion,
# used as the reference distribution for thresholding.
targetDict = {}
for motionId in range(len(motionNameList)):
    targetList = []
    motionName = motionNameList[motionId]
    print('motionName: ', motionName)
    # load models
    autoencoder = autoModel((512,))
    autoencoder.compile(loss=mean_squared_error, optimizer=Adadelta(lr=0.5))
    autoencoder.load_weights('./model/autoencoder/Autoencoder_' + motionName + '.h5')
    original = valDataDict[motionName]
    decoded_imgs = autoencoder.predict(original)
    num = decoded_imgs.shape[0]
    for i in range(num):
        X = np.vstack([original[i, :], decoded_imgs[i, :]])
        lose = pdist(X, 'braycurtis')
        targetList.append(lose[0])
    targetDict[motionName] = targetList
# -

# Reconstruction error of every TEST subset under every motion's autoencoder.
# NOTE(review): weight filenames here use 'AutoEncoder_' while training above
# saved 'Autoencoder_' — presumably both files exist on disk; confirm.
mdDict = {}
for motionId in range(len(motionNameList)):
    motionName = motionNameList[motionId]
    print('motionName: ', motionName)
    # load models
    autoencoder = autoModel((512,))
    autoencoder.compile(loss=mean_squared_error, optimizer=Adadelta(lr=0.5))
    autoencoder.load_weights('./model/autoencoder/AutoEncoder_' + motionName + '.h5')
    originalDict = {}
    decodedDict = {}
    for gestureId in range(len(testDataNameList)):
        originalDict[testDataNameList[gestureId]] = testDataDict[testDataNameList[gestureId]]
        decodedDict[testDataNameList[gestureId]] = autoencoder.predict(originalDict[testDataNameList[gestureId]])
    reconstruction_error = []
    for gestureID in range(len(testDataNameList)):
        original = originalDict[testDataNameList[gestureID]]
        decoded_imgs = decodedDict[testDataNameList[gestureID]]
        num = decoded_imgs.shape[0]
        for i in range(num):
            X = np.vstack([original[i, :], decoded_imgs[i, :]])
            lose = pdist(X, 'braycurtis')
            reconstruction_error.append(lose[0])
    mdDict[motionName] = reconstruction_error

# +
outlierAllNum = 150 * 6  # six novel motions, 150 samples for each motion

# Ground-truth labels: 1 = known motion, 0 = novel motion.
y_label = []
for motionId in range(len(motionNameList)):
    motionName = motionNameList[motionId]
    y_label.extend(np.ones(len(testDataDict[motionName])))
y_label.extend(np.zeros(len(testDataDict['typing'])))
y_label.extend(np.zeros(len(testDataDict['writing'])))
y_label.extend(np.zeros(len(testDataDict['mouseManipulating'])))
y_label.extend(np.zeros(len(testDataDict['pinch'])))
y_label.extend(np.zeros(len(testDataDict['radialDeviation'])))
# FIX: the original appended len(testDataDict['radialDeviation']) twice and
# never ulnarDeviation — a copy-paste bug. Both subsets are sliced to 150
# samples, so the label vector length (and all results) are unchanged.
y_label.extend(np.zeros(len(testDataDict['ulnarDeviation'])))

# Sweep the assumed outlier fraction from -1% (threshold=inf) to 100%.
outliers_fraction_List = []
P_List = []
R_List = []
F1_List = []
TPR_List = []
FPR_List = []
for outliers_i in range(-1, 101):
    outliers_fraction = outliers_i / 100
    outliers_fraction_List.append(outliers_fraction)
    y_pred = np.zeros(len(y_label))
    thresholdDict = {}
    for motionId in range(len(motionNameList)):
        motionName = motionNameList[motionId]
        distances = targetDict[motionName]
        distances = np.sort(distances)
        num = len(distances)
        if outliers_fraction >= 0:
            threshold = distances[num - 1 - int(outliers_fraction * num)]  # get threshold
        if outliers_fraction < 0:
            threshold = 10000.0  # accept everything
        if outliers_fraction == 1.0:
            threshold = 0  # reject everything
        thresholdDict[motionName] = threshold
        mdDistances = mdDict[motionName]
        # A sample is accepted if ANY motion's autoencoder reconstructs it
        # below that motion's threshold.
        y_pred_temp = (np.array(mdDistances) <= threshold) * 1
        y_pred = y_pred + y_pred_temp
    y_pred = (y_pred > 0) * 1
    TP = np.sum(y_pred[0:-outlierAllNum])
    FN = len(y_pred[0:-outlierAllNum]) - TP
    FP = np.sum(y_pred[-outlierAllNum:])
    TN = outlierAllNum - FP
    t = 0.00001  # avoids division by zero
    P = TP / (TP + FP + t)
    R = TP / (TP + FN + t)
    F1 = 2 * P * R / (P + R + t)
    TPR = TP / (TP + FN + t)
    FPR = FP / (TN + FP + t)
    P_List.append(P)
    R_List.append(R)
    F1_List.append(F1)
    TPR_List.append(TPR)
    FPR_List.append(FPR)

roc_auc = auc(FPR_List, TPR_List)
fig, ax = plt.subplots(figsize=(5, 5))
plt.plot(FPR_List, TPR_List, lw=2, label='AUC = %0.2f' % (roc_auc))
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic(ROC)')
plt.legend(loc="lower right")
plt.show()
# -

# ### calculate classification accuracies

# Per-motion reconstruction errors of every test subset, keyed by autoencoder.
resultDict = {}
for motionId in range(len(motionNameList)):
    motionName = motionNameList[motionId]
    # load models
    autoencoder = autoModel((512,))
    autoencoder.compile(loss=mean_squared_error, optimizer=Adadelta(lr=0.5))
    autoencoder.load_weights('./model/autoencoder/AutoEncoder_' + motionName + '.h5')
    # refactore data
    originalDict = {}
    decodedDict = {}
    for gestureId in range(len(testDataNameList)):
        originalDict[testDataNameList[gestureId]] = testDataDict[testDataNameList[gestureId]]
        decodedDict[testDataNameList[gestureId]] = autoencoder.predict(originalDict[testDataNameList[gestureId]])
    loseDict = {}
    for gestureID in range(len(testDataNameList)):
        loseList = []
        original = originalDict[testDataNameList[gestureID]]
        decoded_imgs = decodedDict[testDataNameList[gestureID]]
        num = decoded_imgs.shape[0]
        for i in range(num):
            X = np.vstack([original[i, :], decoded_imgs[i, :]])
            lose = pdist(X, 'braycurtis')
            loseList.append(lose[0])
        loseDict[testDataNameList[gestureID]] = loseList
    resultDict[motionName] = loseDict

# Fixed operating point: accept the 85th percentile of validation errors.
outliers_fraction = 0.15
thresholdDict = {}
for motionId in range(len(motionNameList)):
    motionName = motionNameList[motionId]
    # load model
    autoencoder = autoModel((512,))
    autoencoder.compile(loss=mean_squared_error, optimizer=Adadelta(lr=0.5))
    autoencoder.load_weights('./model/autoencoder/AutoEncoder_' + motionName + '.h5')
    # val data
    original_val = valDataDict[motionName]
    decoded_val = autoencoder.predict(original_val)
    loseList = []
    original = original_val
    decoded_imgs = decoded_val
    num = decoded_imgs.shape[0]
    for i in range(num):
        X = np.vstack([original[i, :], decoded_imgs[i, :]])
        lose = pdist(X, 'braycurtis')
        loseList.append(lose[0])
    ## calculate threshold for each task
    loseArray = np.array(loseList)
    loseArraySort = np.sort(loseArray)
    anomaly_threshold = loseArraySort[-(int((outliers_fraction * len(loseArray))) + 1)]
    thresholdDict[motionName] = anomaly_threshold
    # plot lose and threshold
    fig, ax = plt.subplots(figsize=(5, 5))
    t = np.arange(num)
    s = loseArray
    ax.scatter(t, s, label=motionName)
    ax.hlines(anomaly_threshold, 0, 150, colors="r")
    ax.set(xlabel='sample (n)', ylabel='MSE',
           title='MSEs of ' + motionName + ', threshold:' + str(anomaly_threshold))
    ax.grid()
    plt.legend(loc="lower right")
    plt.xlim(xmin=-3)
    plt.xlim(xmax=70)
    plt.show()

# +
errorSum = 0
testSum = 0
barDict = {}
outlierClass = 6
rejectMotion = {}
for motionId in range(len(testDataNameList)):
    recogList = []
    motionName = testDataNameList[motionId]
    for recogId in range(len(testDataNameList) - outlierClass):
        identyResult = resultDict[testDataNameList[recogId]]
        targetResult = np.array(identyResult[motionName])
        # each known class has its own threshold used for rejection
        recogList.append((targetResult <= thresholdDict[testDataNameList[recogId]]) * 1)
    recogArray = np.array(recogList)
    recogArray = np.sum(recogArray, axis=0)
    recogArray = (recogArray > 0) * 1
    rejectMotion[testDataNameList[motionId]] = recogArray
    # Known motions err when rejected; novel motions err when accepted.
    if motionId < (len(testDataNameList) - outlierClass):
        numError = np.sum(1 - recogArray)
    else:
        numError = np.sum(recogArray)
    numTarget = len(recogArray)
    if motionId < (len(testDataNameList) - outlierClass):
        errorSum = errorSum + numError
        testSum = testSum + numTarget
    barDict[testDataNameList[motionId]] = (numError / numTarget)
barDict['target overall'] = errorSum / testSum
print(barDict)

import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure

figure(num=None, figsize=(15, 6))
objects = ('wristPronation', 'wristSupination', 'wristExtension', 'wristFlexion',
           'handOpen', 'handClose', 'shoot', 'target overall',
           'typing', 'writing', 'mouseManipulating', 'pinch',
           'radialDeviation', 'ulnarDeviation')
y_pos = np.arange(len(objects))
proposed = []
for i in range(len(objects)):
    proposed.append(barDict[objects[i]])
bar_width = 0.35
opacity = 0.8
rects2 = plt.bar(y_pos + bar_width, proposed, bar_width, alpha=opacity, label='Proposed')
plt.xticks(y_pos + bar_width, objects)
plt.ylabel('Error Rates of Novelty Detection (%)')
plt.legend()
plt.tight_layout()
plt.show()
# -
.ipynb_checkpoints/novelRejection-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6
#     language: python
#     name: python36
# ---

import feedparser
import bs4

# Download and parse the podcast RSS feed.
url = 'https://pythonbytes.fm/rss'
feed = feedparser.parse(url)
# print(str(feed)[:100])

# +
# get descriptions from episodes
feed.get('items')[0].get('description')
# -

descriptions = [item.get('description') for item in feed.get('items')]
print(f"We found {len(descriptions)} descriptions.")

# +
# Pull every <a href> out of every episode description.
all_links = []
for d in descriptions:
    soup = bs4.BeautifulSoup(d)
    all_links.extend(a['href'] for a in soup.findAll('a'))

print(f"We found {len(all_links):,} total links.")

# +
import urllib.parse

# Step by step towards the domain names: raw links first...
domains = [l for l in all_links]
domains
# -

# ...then parsed URLs...
domains = [urllib.parse.urlparse(l) for l in all_links]
domains

# ...then just the network location (domain).
domains = [urllib.parse.urlparse(l).netloc for l in all_links]
domains

domains = [urllib.parse.urlparse(l).netloc for l in all_links]
print(f"There are {len(set(domains))} unique domains.")

# +
import collections

# Rank domains by how often they are linked.
counter = collections.Counter(domains)
most_common = counter.most_common()
top_25 = most_common[0:25]
top_25
# -
RSS_feeder/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DEMetropolis(Z): Population vs. History efficiency comparison # The idea behind `DEMetropolis` is quite simple: Over time, a population of MCMC chains converges to the posterior, therefore the population can be used to inform joint proposals. # But just like the most recent positions of an entire population converges, so does the history of each individual chain. # # In [<NAME> & Vrugt, 2008](https://doi.org/10.1007/s11222-008-9104-9) this history of posterior samples is used in the "DE-MCMC-Z" variant to make proposals. # # The implementation in PyMC3 is based on `DE-MCMC-Z`, but a few details are different. Namely, each `DEMetropolisZ` chain only looks into its own history. Also we use a different tuning scheme. # # In this notebook, a D-dimenstional multivariate normal target densities are sampled with `DEMetropolis` and `DEMetropolisZ` at different $N_{chains}$ settings. # + import pathlib import time import arviz as az import fastprogress import ipywidgets import numpy as np import pandas as pd import pymc3 as pm from matplotlib import cm from matplotlib import pyplot as plt print('Running on PyMC3 v{}'.format(pm.__version__)) # - # ## Benchmarking with a D-dimensional MVNormal model # The function below constructs a fresh model for a given dimensionality and runs either `DEMetropolis` or `DEMetropolisZ` with the given settings. The resulting trace is saved with ArviZ. # # If the saved trace is already found, it is loaded from disk. # # Note that all traces are sampled with `cores=1`. This is because parallelization of `DEMetropolis` chains is slow at $O(N_{chains})$ and the comparison would be different depending on the number of available CPUs. 
# + def get_mvnormal_model(D:int) -> pm.Model: true_mu = np.zeros(D) true_cov = np.eye(D) true_cov[:5,:5] = np.array([ [1, 0.5, 0, 0, 0], [0.5, 2, 2, 0, 0], [0, 2, 3, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 4, 5], ]) with pm.Model() as pmodel: x = pm.MvNormal('x', mu=true_mu, cov=true_cov, shape=(D,)) true_samples = x.random(size=1000) truth_id = az.data.convert_to_inference_data(true_samples[np.newaxis,:], group='random') return pmodel, truth_id def run_setting(D, N_tune, N_draws, N_chains, algorithm): savename = f'{algorithm}_{D}_{N_tune}_{N_draws}_{N_chains}.nc' print(f'Scenario filename: {savename}') if not pathlib.Path(savename).exists(): pmodel, truth_id = get_mvnormal_model(D) with pmodel: if algorithm == 'DE-MCMC': step = pm.DEMetropolis() elif algorithm == 'DE-MCMC-Z': step = pm.DEMetropolisZ() idata = pm.sample( cores=1, tune=N_tune, draws=N_draws, chains=N_chains, step=step, start={'x': [0]*D}, discard_tuned_samples=False, return_inferencedata=True ) idata.to_netcdf(savename) else: idata = az.from_netcdf(savename) return idata # - # ## Running the Benchmark Scenarios # Here a variety of different scenarios is computed and the results are aggregated in a multi-indexed DataFrame. # + df_results = pd.DataFrame(columns='algorithm,D,N_tune,N_draws,N_chains,t,idata'.split(',')) df_results = df_results.set_index('algorithm,D,N_tune,N_draws,N_chains'.split(',')) for algorithm in {'DE-MCMC', 'DE-MCMC-Z'}: for D in (10, 20, 40): N_tune = 10000 N_draws = 10000 for N_chains in (5,10,20,30,40,80): idata = run_setting(D, N_tune, N_draws, N_chains, algorithm) t = idata.posterior.sampling_time df_results.loc[(algorithm, D, N_tune, N_draws, N_chains)] = (t, idata) # - df_results[['t']] # ## Analyzing the traces # From the traces, we need to compute the absolute and relative $N_{eff}$ and the $\hat{R}$ to see if we can trust the posteriors. 
# Move N_tune/N_draws out of the index; the remaining index is (algorithm, D, N_chains).
df_temp = df_results.reset_index(['N_tune', 'N_draws'])
# Total number of posterior samples per scenario: N_draws * N_chains
# (row.Index[2] is the N_chains level of the remaining MultiIndex).
df_temp['N_samples'] = [
    row.N_draws * row.Index[2]
    for row in df_temp.itertuples()
]
# Mean effective sample size across the D dimensions of x.
df_temp['ess'] = [
    float(az.ess(idata.posterior).x.mean())
    for idata in fastprogress.progress_bar(df_temp.idata)
]
# Relative ESS: effective samples per drawn sample.
df_temp['rel_ess'] = [
    row.ess / (row.N_samples)
    for row in df_temp.itertuples()
]
# Mean R-hat across dimensions; values near 1 indicate convergence.
df_temp['r_hat'] = [
    float(az.rhat(idata.posterior).x.mean())
    for idata in fastprogress.progress_bar(df_temp.idata)
]
df_temp = df_temp.sort_index(level=['algorithm', 'D', 'N_chains'])
df_temp

# ## Visualizing Effective Sample Size
# In this diagram, we'll plot the relative effective sample size against the number of chains.
#
# Because our computation above ran everything with $N_{cores}=1$, we can't make a realistic comparison of effective sampling rates.

# +
fig, right = plt.subplots(dpi=140, ncols=1, sharey='row', figsize=(12,6))

for algorithm, linestyle in zip(['DE-MCMC', 'DE-MCMC-Z'], ['-', '--']):
    # Highest dimensionality first so the color scale runs hot-to-cold.
    dimensionalities = list(sorted(set(df_temp.reset_index().D)))[::-1]
    N_dimensionalities = len(dimensionalities)
    for d, dim in enumerate(dimensionalities):
        color = cm.autumn(d / N_dimensionalities)
        df = df_temp.loc[(algorithm, dim)].reset_index()
        right.plot(
            df.N_chains, df.rel_ess*100,
            linestyle=linestyle,
            color=color,
            label=f'{algorithm}, {dim} dimensions'
        )
right.legend()
right.set_ylabel('$S_{eff}$ [%]')
right.set_xlabel('$N_{chains}$ [-]')
right.set_ylim(0)
right.set_xlim(0)
plt.show()
# -

# ## Visualizing Computation Time
# As all traces were sampled with `cores=1`, we expect the computation time to grow linearly with the number of samples.
# + fig, ax = plt.subplots(dpi=140) for alg in ['DE-MCMC', 'DE-MCMC-Z']: df = df_temp.sort_values('N_samples').loc[alg] ax.scatter(df.N_samples/1000, df.t, label=alg) ax.legend() ax.set_xlabel('$N_{samples} / 1000$ [-]') ax.set_ylabel('$t_{sampling}$ [s]') fig.tight_layout() plt.show() # - # ## Visualizing the Traces # By comparing DE-MCMC and DE-MCMC-Z for a setting such as D=10, $N_{chains}$=5, you can see how DE-MCMC-Z has a clear advantage over a DE-MCMC that is run with too few chains. # + def plot_trace(algorithm, D, N_chains): n_plot = min(10, N_chains) fig, axs = plt.subplots(nrows=n_plot, figsize=(12,2*n_plot)) idata = df_results.loc[(algorithm, D, 10000, 10000, N_chains), 'idata'] for c in range(n_plot): samples = idata.posterior.x[c,:,0] axs[c].plot(samples, linewidth=0.5) plt.show(); return ipywidgets.interact_manual( plot_trace, algorithm=['DE-MCMC', 'DE-MCMC-Z'], D=sorted(set(df_results.reset_index().D)), N_chains=sorted(set(df_results.reset_index().N_chains)), ); # - # ## Inspecting the Sampler Stats # With the following widget, you can explore the sampler stats to better understand the tuning phase. # # The `tune=None` default setting of `DEMetropolisZ` is the most robust tuning strategy. However, setting `tune='lambda'` can improves the initial convergence by doing a swing-in that makes it diverge much faster than it would with a constant `lambda`. The downside of tuning `lambda` is that if the tuning is stopped too early, it can get stuck with a very inefficient `lambda`. # # Therefore, you should always inspect the `lambda` and rolling mean of `accepted` sampler stats when picking $N_{tune}$. 
# +
def plot_stat(*, sname:str='accepted', rolling=True, algorithm, D, N_chains):
    """Plot a sampler statistic for every chain of one benchmark scenario.

    Parameters
    ----------
    sname : str
        Name of the sampler stat to plot (e.g. 'accepted', 'lambda').
    rolling : bool
        If True, plot a 500-iteration rolling mean instead of the raw values.
    algorithm, D, N_chains
        Scenario key into df_results (N_tune/N_draws are fixed at 10000).
    """
    fig, ax = plt.subplots(ncols=1, figsize=(12,7), sharey='row')
    row = df_results.loc[(algorithm, D, 10000, 10000, N_chains)]
    # BUGFIX: use the InferenceData of the *selected* scenario. Previously the
    # body referenced an undefined `idata` (leaked from an earlier cell) and
    # iterated the chains of an unrelated trace (df_results.idata[0]).
    idata = row.idata
    for c in idata.posterior.chain:
        S = np.hstack([
            #idata.warmup_sample_stats[sname].sel(chain=c),
            idata.sample_stats[sname].sel(chain=c)
        ])
        # Rolling mean smooths binary stats like `accepted` into a rate.
        y = pd.Series(S).rolling(window=500).mean().iloc[500-1:].values if rolling else S
        ax.plot(
            y, linewidth=0.5
        )
    ax.set_xlabel('iteration')
    ax.set_ylabel(sname)
    plt.show()
    return

# Interactive stat explorer (runs on button click).
ipywidgets.interact_manual(
    plot_stat,
    sname=set(df_results.idata[0].sample_stats.keys()),
    rolling=True,
    algorithm=['DE-MCMC-Z','DE-MCMC'],
    D=sorted(set(df_results.reset_index().D)),
    N_chains=sorted(set(df_results.reset_index().N_chains)),
);
# -

# ## Conclusion
# When used with the recommended settings, `DEMetropolis` is on par with `DEMetropolisZ`. On high-dimensional problems however, `DEMetropolisZ` can achieve the same effective sample sizes with less chains.
#
# On problems where not enough CPUs are available to run $N_{chains}=2\cdot D$ `DEMetropolis` chains, the `DEMetropolisZ` should have much better scaling.

# %load_ext watermark
# %watermark -n -u -v -iv -w
docs/source/notebooks/DEMetropolisZ_EfficiencyComparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# +
class Node:
    """A node of the link graph carrying HITS authority and hub scores."""

    def __init__(self, name):
        self.name = name
        self.children = []  # nodes this node links to
        self.parents = []   # nodes that link to this node
        self.auth = 1.0
        self.hub = 1.0

    # BUGFIX: update_auth/update_hub were previously module-level functions
    # taking `self`, so `node.update_auth()` inside HITS_one_iter raised
    # AttributeError. They are now proper Node methods.
    def update_auth(self):
        # Authority score = sum of the hub scores of all in-neighbors.
        self.auth = sum(node.hub for node in self.parents)

    def update_hub(self):
        # Hub score = sum of the authority scores of all out-neighbors.
        self.hub = sum(node.auth for node in self.children)


class Graph:
    """Container for the nodes of the link graph (provides normalization)."""

    def __init__(self):
        self.nodes = []

    def normalize_auth_hub(self):
        # Scale scores so each score vector sums to 1; guard against an
        # all-zero vector (e.g. a graph with no edges) to avoid ZeroDivisionError.
        auth_sum = sum(node.auth for node in self.nodes)
        hub_sum = sum(node.hub for node in self.nodes)
        for node in self.nodes:
            if auth_sum:
                node.auth /= auth_sum
            if hub_sum:
                node.hub /= hub_sum


def HITS_one_iter(graph):
    """Run one HITS iteration: update all authorities, then all hubs, then normalize."""
    node_list = graph.nodes
    for node in node_list:
        node.update_auth()
    for node in node_list:
        node.update_hub()
    graph.normalize_auth_hub()
# -
Semester 6/DWM/Untitled1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # Quickstart # ## #Scikit-Learn大表: http://scikit-learn.org/stable/tutorial/machine_learning_map/index.html # ## Loading an example dataset: datasets.load_資料名稱() from sklearn import datasets iris = datasets.load_iris() #字典形式儲存資料 digits = datasets.load_digits() #字典形式儲存資料 type(iris) iris.keys() #用字典形式儲存的資料 iris.data[:5] #每個sample有4個feature iris.data.shape iris.target[:5] #每個sample有1個label digits.keys() digits.data[:2] #每sample(row)包含8x8=64個pixel digits.data.shape digits.target[:5] digits.target.shape digits.images[0] #第一個sample(row)的8x8=64個pixel # + import matplotlib.pyplot as plt # %matplotlib inline plt.imshow(digits.images[-1], cmap=plt.cm.gray_r) # - # ## Learning and predicting: clf = method(params), clf.predict(feature數組) from sklearn import svm clf = svm.SVC(gamma=0.001, C=100.) 
# clf stands for classifier; remember to set the model's parameters
clf.fit(digits.data[:-1], digits.target[:-1]) # train on everything except the last sample
clf.predict(digits.data[-1:]) # predict the held-out last sample with the trained model

# ## Model persistence: joblib

# +
from sklearn import svm
from sklearn import datasets

iris = datasets.load_iris()
X, y = iris.data, iris.target
clf = svm.SVC()
clf.fit(X, y) # train
# -

import pickle
from sklearn.externals import joblib
joblib.dump(clf, 'intro.pkl') # save the model as a binary pickle file
clf = joblib.load('intro.pkl') # load the binary pickle file back
clf.predict(X[0:1])
y[0]

# ## Conventions: regression --> float64

# +
import numpy as np
from sklearn import random_projection

rng = np.random.RandomState(0)
X = rng.rand(10, 2000)
X = np.array(X, dtype='float32')
X.dtype
# -

transformer = random_projection.GaussianRandomProjection()
X_new = transformer.fit_transform(X)
X_new.dtype # output is cast to float64

from sklearn import datasets
from sklearn.svm import SVC
iris = datasets.load_iris()
clf = SVC()
clf.fit(iris.data, iris.target)
list(clf.predict(iris.data[:3]))
iris.target_names
clf.fit(iris.data, iris.target_names[iris.target]) # fit on the string class names instead of indices
list(clf.predict(iris.data[:3]))

# ### Refitting and updating parameters: clf.set_params(kernel='')

# +
import numpy as np
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = rng.rand(100, 10)
y = rng.binomial(1, 0.5, 100) # np.binomial(n, p, size): n trials per draw, `size` draws
X_test = rng.rand(5, 10)
# -

clf = SVC()
clf.set_params(kernel='linear').fit(X, y)
clf.predict(X_test)
clf.set_params(kernel='rbf').fit(X, y)
clf.predict(X_test)

# ### Multiclass vs. multilabel fitting

# +
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelBinarizer

X = [[1, 2], [2, 4], [4, 5], [3, 2], [3, 1]]
y = [0, 0, 1, 1, 2]
classif = OneVsRestClassifier(estimator=SVC(random_state=0))
classif.fit(X, y).predict(X)
# -

# ### LabelBinarizer().fit_transform(y) -- similar to one-hot encoding
y = LabelBinarizer().fit_transform(y)
y # one-hot encoding of labels 0,0,1,1,2
y = LabelBinarizer().fit_transform(y)
classif.fit(X, y).predict(X)

# ### MultiLabelBinarizer().fit_transform(y)
from sklearn.preprocessing import MultiLabelBinarizer
y = [[0, 1], [0, 2], [1, 3], [0, 2, 3], [2, 4]]
y = MultiLabelBinarizer().fit_transform(y)
classif.fit(X, y).predict(X) # each prediction is a 0/1 indicator array

# # Supervised learning: Classification / Regression
# ## k-Nearest neighbors classifier: knn = KNeighborsClassifier()

# +
import numpy as np
from sklearn import datasets
iris = datasets.load_iris()
iris_X = iris.data
iris_y = iris.target
np.unique(iris_y) # only 0, 1, 2 occur -- like pure Python's set()
# -

# ### Manual shuffle: indices = np.random.permutation(len(iris_X)), then index the data with `indices`
# Split iris data in train and test data
# A random permutation, to split the data randomly
np.random.seed(0)
indices = np.random.permutation(len(iris_X))
iris_X_train = iris_X[indices[:-10]]
iris_y_train = iris_y[indices[:-10]]
iris_X_test = iris_X[indices[-10:]]
iris_y_test = iris_y[indices[-10:]]
iris_X_train[0:5]
iris_y_train[0:5]

# Create and fit a nearest-neighbor classifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(iris_X_train, iris_y_train)
knn.predict(iris_X_test)
iris_y_test

# ## Linear model: from regression to sparsity
diabetes = datasets.load_diabetes()
diabetes_X_train = diabetes.data[:-20]
diabetes_X_test = diabetes.data[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
diabetes_X_train[0] # factors influencing diabetes progression
diabetes_y_train[0]

# ### Linear regression
from sklearn import linear_model
regr = linear_model.LinearRegression()
regr.fit(diabetes_X_train, diabetes_y_train)
print(regr.coef_)

# The mean square error: np.mean((y_hat-y)**2)
np.mean((regr.predict(diabetes_X_test)-diabetes_y_test)**2)

# Explained variance score: 1 is perfect prediction and 0 means that there is no linear relationship between X and y.
# (a correlation-like goodness-of-fit measure)
regr.score(diabetes_X_test, diabetes_y_test)

# ### Shrinkage: keeps the fit stable when there are few samples, e.g. Ridge(alpha=.1)
X = np.c_[ .5, 1].T # np.c_ : think of it as column-wise concatenation
y = [.5, 1]
test = np.c_[ 0, 2].T
regr = linear_model.LinearRegression()

import matplotlib.pyplot as plt
plt.figure()

# ### Re-fit in a for loop and plot each regression line: plain OLS is strongly affected by noise
np.random.seed(0)
for _ in range(6):
    this_X = .1*np.random.normal(size=(2, 1)) + X # this_X = X perturbed with noise
    regr.fit(this_X, y) # fit once per noisy sample
    plt.plot(test, regr.predict(test))
    plt.scatter(this_X, y, s=3)

# ### linear_model.Ridge(alpha=.1) is much less affected by the noise

# +
regr = linear_model.Ridge(alpha=.1)
plt.figure()
np.random.seed(0)
for _ in range(6):
    this_X = .1*np.random.normal(size=(2, 1)) + X
    regr.fit(this_X, y)
    plt.plot(test, regr.predict(test))
    plt.scatter(this_X, y, s=3)
# -

# +
alphas = np.logspace(-4, -1, 6) # np.logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None)
from __future__ import print_function
# Try shrinkage on the original dataset: scores stay about equally stable
print([regr.set_params(alpha=alpha
            ).fit(diabetes_X_train, diabetes_y_train,
            ).score(diabetes_X_test, diabetes_y_test)
       for alpha in alphas])
# -

# ### Sparsity: linear_model.Lasso() (least absolute shrinkage and selection operator), LassoLars

# +
alphas = np.logspace(-4, -1, 6)
regr = linear_model.Lasso()
scores = [regr.set_params(alpha=alpha
            ).fit(diabetes_X_train, diabetes_y_train
            ).score(diabetes_X_test, diabetes_y_test)
          for alpha in alphas]
best_alpha = alphas[scores.index(max(scores))] # pick the alpha with the best score
regr.alpha = best_alpha # install best_alpha on the model
regr.fit(diabetes_X_train, diabetes_y_train)
# -

print(regr.coef_)

# ### Classification: linear_model.LogisticRegression (sigmoid separating class 0 from 1)
logistic = linear_model.LogisticRegression(C=1e5) # larger C means less regularization!
logistic.fit(iris_X_train, iris_y_train)
logistic.predict(iris_X_test)
iris_y_test

# ## Support vector machines (SVMs)
# ### Linear SVM (reminder: standardize the data before training)

# +
# larger C allows a larger, softer margin (less regularization)
from sklearn import svm
svc = svm.SVC(kernel='linear')
svc.fit(iris_X_train, iris_y_train)
# Normalizing data: For many estimators, including the SVMs,
# having datasets with unit standard deviation for each feature is important to get good prediction.
# -

# ## Using kernels
# ### rbf
svc = svm.SVC(kernel='rbf') # Radial Basis Function
svc.fit(iris_X_train, iris_y_train)

# ### poly, deg=3
svc = svm.SVC(kernel='poly',degree=3) # separate classes with a cubic boundary
svc.fit(iris_X_train, iris_y_train)

# # Model selection: choosing estimators and their parameters
# ### model.fit(train_X, train_y).score(test_X, test_y): higher score = better fit

# +
from sklearn import datasets, svm
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
svc = svm.SVC(C=1, kernel='linear')
svc.fit(X_digits[:-100], y_digits[:-100]).score(X_digits[-100:], y_digits[-100:])
# -

# ## K-fold cross-validation: split into K folds, train on K-1 and test on 1, rotating through all folds

# +
# hand-rolled for-loop demonstration
import numpy as np
X_folds = np.array_split(X_digits, 3) # np.array_split(, 3) splits into 3 equal parts
y_folds = np.array_split(y_digits, 3)
scores = list()
for k in range(3):
    # We use 'list' to copy, in order to 'pop' later on
    X_train = list(X_folds)
    X_test = X_train.pop(k) # remove the k-th fold for testing
    X_train = np.concatenate(X_train) # reassemble the remaining k-1 folds
    y_train = list(y_folds)
    y_test = y_train.pop(k) # remove the k-th fold for testing
    y_train = np.concatenate(y_train) # reassemble the remaining k-1 folds
    scores.append(svc.fit(X_train, y_train).score(X_test, y_test))
print(scores)
# -

X_folds # split into three parts

# ## Cross-validation generators

# +
from sklearn.model_selection import KFold, cross_val_score
X = ["a", "a", "b", "c", "c", "c"]
# define the split strategy
k_fold = KFold(n_splits=3) # splits by indices only, ignoring contents; 6 items / 3 folds = 2 test items per fold
for train_indices, test_indices in k_fold.split(X):
    print('Train: %s | test: %s' % (train_indices, test_indices))
# -

[svc.fit(X_digits[train], y_digits[train]).score(X_digits[test], y_digits[test])
 for train, test in k_fold.split(X_digits)] # train/test here are the indices produced by split(), usable directly

# ### cross_val_score
cross_val_score(svc, X_digits, y_digits, cv=k_fold, n_jobs=-1) # n_jobs=-1 means all CPUs may be used

# ### K-Fold: hold out 1 fold for test, train on the remaining k-1. GroupKFold: keeps groups out of both train and test. ShuffleSplit: splits via random permutation

# ## Grid-search: compute the CV score for each parameter setting, then read off the parameters of the best score
# ### GridSearchCV(estimator=svc, param_grid=dict(C=Cs), n_jobs=-1): pass the range of Cs as a dict

# +
from sklearn.model_selection import GridSearchCV, cross_val_score
Cs = np.logspace(-6, -1, 10) # generate candidate Cs to test
clf = GridSearchCV(estimator=svc, param_grid=dict(C=Cs),n_jobs=-1)
clf.fit(X_digits[:1000], y_digits[:1000])
# -

# ### clf.best\_score_ , clf.best\_estimator\_.C
clf.best_score_
clf.best_estimator_.C # this is the best C found

# Prediction performance on test set is not as good as on train set
clf.score(X_digits[1000:], y_digits[1000:])

# ## Cross-validated estimators: find the best parameter directly from CV

# +
from sklearn import linear_model, datasets
lasso = linear_model.LassoCV()
diabetes = datasets.load_diabetes()
X_diabetes = diabetes.data
y_diabetes = diabetes.target
lasso.fit(X_diabetes, y_diabetes)
# -

# The estimator chose automatically its lambda:
lasso.alpha_

# # Unsupervised learning
# ## Clustering: grouping observations together
# ### K-means clustering: cluster.KMeans(n_clusters=n) -- having to pick n_clusters yourself is a major drawback and introduces bias

# +
from sklearn import cluster, datasets
iris = datasets.load_iris()
X_iris = iris.data
y_iris = iris.target
k_means = cluster.KMeans(n_clusters=3)
k_means.fit(X_iris)
# -

print(k_means.labels_[::10])
print(y_iris[::10])

# ## Decompositions: from a signal to components and loadings
# ### Principal component analysis(PCA): decomposition.PCA()
# Create a signal with only 2 useful dimensions
x1 = np.random.normal(size=100)
x2 = np.random.normal(size=100)
x3 = x1 + x2
X = np.c_[x1, x2, x3]
from sklearn import decomposition
pca = decomposition.PCA()
pca.fit(X)

# ### pca.explained\_variance_
print(pca.explained_variance_) # larger values carry more information; only the first two matter in this example

# As we can see, only the 2 first components are useful
pca.n_components = 2
X_reduced = pca.fit_transform(X)
X_reduced.shape

# ### Independent Component Analysis(ICA)

# +
# Generate sample data
import numpy as np
from scipy import signal
time = np.linspace(0, 10, 2000)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1], [1.5, 1, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# -

S[0:5]
S.shape
A
X[0:5]
X.shape

# ### decomposition.FastICA() -- NOTE(review): original author marked this section as not fully understood
# Compute ICA
ica = decomposition.FastICA()
S_ = ica.fit_transform(X) # Get the estimated sources
A_ = ica.mixing_.T
np.allclose(X, np.dot(S_, A_) + ica.mean_)

# # Putting it all together
# ## Pipelining
# ### pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
# ### estimator = GridSearchCV(pipe, dict(pca\__n_components=n_components, logistic\__C=Cs))
# ### estimator.best\_estimator\_.named\_steps['pca'].n_components : the best n\_components parameter

# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV

#Pipelining:
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)]) # run PCA first, then logistic regression

digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target

# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')

# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)

# Parameters of pipelines can be set using ‘__’ separated parameter names: step name and parameter are joined with '__'
estimator = GridSearchCV(pipe,
                         dict(pca__n_components=n_components,
                              logistic__C=Cs))
estimator.fit(X_digits, y_digits)

plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
            linestyle=':',
            label='n_components chosen') # draw a vertical line at the chosen best number
plt.legend(prop=dict(size=12))
plt.show()
# -

# # Working With Text Data
# ## Loading the 20 newsgroups dataset

# +
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med'] # just pick 4 categories
from sklearn.datasets import fetch_20newsgroups
twenty_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)
# -

twenty_train.keys()
np.unique(twenty_train.target)
twenty_train.target[0:5] # target holds the indices of the text labels
twenty_train.target_names # target_names holds the actual label strings
len(twenty_train.data)
len(twenty_train.filenames)
print("\n".join(twenty_train.data[0].split("\n")[:3]))
print(twenty_train.target[0])
print(twenty_train.target_names[twenty_train.target[0]])
for t in twenty_train.target[:10]:
    print(twenty_train.target_names[t])

# ## Extracting features from text files (high-dimensional sparse datasets)
# ### Tokenizer: count_vect = CountVectorizer(), count_vect.fit_transform(twenty_train.data)
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(twenty_train.data)
X_train_counts.shape

# #### count\_vect.vocabulary\_.get
count_vect.vocabulary_.get(u'algorithm')
count_vect

# ### From occurrences to frequencies
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
X_train_tf.shape
X_train_tf[0]
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
X_train_tfidf.shape
X_train_tfidf[0]

# ## Training a classifier: clf = MultinomialNB().fit(X_train_tfidf, twenty_train.target)
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(X_train_tfidf, twenty_train.target)
docs_new = ['God is love', 'OpenGL on the GPU is fast']
X_new_counts = count_vect.transform(docs_new)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
predicted = clf.predict(X_new_tfidf)
for doc, category in zip(docs_new, predicted):
    print('%r => %s' % (doc, twenty_train.target_names[category]))

# ## Building a pipeline: vect --> tfidf --> clf
from sklearn.pipeline import Pipeline
text_clf = Pipeline([('vect', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', MultinomialNB()),
])
text_clf.fit(twenty_train.data, twenty_train.target)

# ## Evaluation of the performance on the test set
import numpy as np
twenty_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42)
docs_test = twenty_test.data
predicted = text_clf.predict(docs_test)
np.mean(predicted == twenty_test.target)

# +
from sklearn.linear_model import SGDClassifier
text_clf = Pipeline([('vect', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', SGDClassifier(loss='hinge', penalty='l2',
                                           alpha=1e-3, random_state=42,
                                           max_iter=5, tol=None)),
])
text_clf.fit(twenty_train.data, twenty_train.target)
predicted = text_clf.predict(docs_test)
np.mean(predicted == twenty_test.target)
# -

# ### metrics.classification_report
from sklearn import metrics
print(metrics.classification_report(twenty_test.target, predicted,
                                    target_names=twenty_test.target_names))

# ### metrics.confusion_matrix
metrics.confusion_matrix(twenty_test.target, predicted)

# ## Parameter tuning using grid search
from sklearn.model_selection import GridSearchCV
parameters = {'vect__ngram_range': [(1, 1), (1, 2)],
              'tfidf__use_idf': (True, False),
              'clf__alpha': (1e-2, 1e-3),
}
gs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)
gs_clf = gs_clf.fit(twenty_train.data[:400], twenty_train.target[:400])
twenty_train.target_names[gs_clf.predict(['God is love'])[0]]
gs_clf.best_score_
for param_name in sorted(parameters.keys()):
    print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
Skl_Official_Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Matching Model # To find the expected effect of the intervention on the population, we match each treated individual with one or more untreated individuals which are "almost the same" as him or her. from causallib.estimation import IPW, Matching import matplotlib.pyplot as plt import seaborn as sb import pandas as pd import numpy as np from causallib.evaluation.weight_evaluator import calculate_covariate_balance from sklearn.linear_model import LogisticRegression from causallib.preprocessing.transformers import PropensityTransformer, MatchingTransformer from causallib.evaluation import PropensityEvaluator from causallib.datasets import load_nhefs # %matplotlib inline # #### Data: # The effect of quitting smoking on weight loss. # Data example is taken from [Hernan and Robins Causal Inference Book](https://www.hsph.harvard.edu/miguel-hernan/causal-inference-book/) # When we are looking for nearby data points to match against, the one hot encoding may not be the best choice. Augmented features are also not needed and may introduce bias (e.g., if we include `age^2`, we will call one pair of subjects more distant in the age variable than another pair if the base age is older, even though the difference is the same). So we do not augment the continuous variables, and instead of one hot encoding the categorical variables, we binarize them to "high/low" values.
# +
def binarize(df, column_name):
    """Binarize one column of df into a 0/1 'below threshold' indicator.

    The threshold is chosen among {median-1, median, median+1} as the value
    whose split is closest to 50/50. The column is renamed to record the
    threshold, e.g. 'education' -> 'education<12.0'.
    """
    df = df.copy()
    m = df[column_name].median()

    def balance(i):
        # Distance of the split fraction from a perfect 50/50 split.
        return np.abs(0.5 - (df[column_name] < i).sum()/len(df))
    mstar = min([m-1, m, m+1], key=balance)

    df = df.assign(**{column_name: (df[column_name] < mstar).astype(int)})
    df = df.rename(columns={column_name: column_name + f"<{mstar}"})
    return df


def get_matching_data():
    """Load the NHEFS data without one-hot/augmented features and binarize
    the ordinal categorical columns for distance-based matching."""
    data = load_nhefs(onehot=False, augment=False)
    data.X = binarize(data.X, "education")
    data.X = binarize(data.X, "exercise")
    data.X = binarize(data.X, "active")
    return data


binarized_data = get_matching_data()
X, a, y = binarized_data.X, binarized_data.a, binarized_data.y
# -

binarized_data.X.join(binarized_data.a).join(binarized_data.y).head()

# We can run either a Euclidean or a Mahalanobis metric match and predict the individual outcome using `MatchingIndividualOutcomeEstimator`:
m_euclid = Matching(metric="euclidean").fit(X, a, y)
m_mahalanobis = Matching(metric="mahalanobis").fit(X, a, y)
Y_euclid = m_euclid.estimate_individual_outcome(X, a)
Y_mahalanobis = m_mahalanobis.estimate_individual_outcome(X, a)

# We can see that the two metrics lead to very similar results on a population level.
Y_euclid.assign(ATE=Y_euclid[1]-Y_euclid[0]).mean()

Y_mahalanobis.assign(ATE=Y_mahalanobis[1]-Y_mahalanobis[0]).mean()

# If we inspect the individual counterfactuals, we find, as expected that both metrics return the same value for the observed outcome but differ in the unobserved outcome:
Y_euclid.join(Y_mahalanobis, lsuffix="_euclidean", rsuffix="_mahalanobis").join(a).sample(10)

# # Propensity Matching
#
# To do propensity score matching, we can supply a transformer that replaces the covariates with a learned propensity model, using a given learner.
propensity_transform = PropensityTransformer(
    learner=LogisticRegression(
        solver="liblinear", class_weight="balanced"),
    include_covariates=False)

# In this case we will want to use the augmented data to improve the accuracy of the propensity model. We can calculate the ATE:
augmented_data = load_nhefs()
X, a, y = augmented_data.X, augmented_data.a, augmented_data.y
matcher = Matching(propensity_transform=propensity_transform)
matcher.fit(X, a, y)
matcher.estimate_population_outcome(X, a)

# We have also provided a convenience subclass `PropensityMatching` which makes this common task straightforward:

# +
from causallib.estimation import PropensityMatching
pm = PropensityMatching(learner=LogisticRegression(
    solver="liblinear", class_weight="balanced"))
pm.fit(X, a, y)
pm.estimate_population_outcome(X, a)
# -

# ## Multiple neighbor match (with replacement)
# As long as we permit replacement, we can allow multiple neighbors to match. We now check how the number of neighbors impacts the ATE.
for n in range(1, 10):
    matcher.n_neighbors = n
    matcher.fit(X, a, y)
    Y = matcher.estimate_population_outcome(X, a)
    print(f"Using {n} neighbors, the effect is: {(Y[1] - Y[0]):.3f}")

# ## Replacement
#
# Until now, we have executed all of the matching with replacement, meaning that we can select the same treated sample as a match for multiple control samples or vice versa. If we want to only allow each sample to be used once, we must disallow replacement.
#
# If we mix in-sample and out-of-sample data, we would end up generating different estimated counterfactuals for a set of samples if they were checked all at once compared to if they were checked in subsets. Because of this, we have restricted no-replacement matching to operate on a single dataset only as a `PopulationOutcomeEstimator` called `Matching`.
matcher = Matching(with_replacement=True, propensity_transform=propensity_transform) matcher.fit(X, a, y) match_df_with = matcher.match(X, a) ATE_with_replacement = matcher.estimate_population_outcome(X, a).diff()[1] matcher = Matching(with_replacement=False, propensity_transform=propensity_transform) matcher.fit(X, a, y) match_df_without = matcher.match(X, a) ATE_without_replacement = matcher.estimate_population_outcome(X, a).diff()[1] print( f"With replacement we find:\n{ATE_with_replacement:.3f}\nWithout replacement we find:\n{ATE_without_replacement:.3f}") # + ipw = IPW(LogisticRegression(solver="liblinear")) ipw.fit(augmented_data.X, augmented_data.a) Yipw = ipw.estimate_population_outcome( augmented_data.X, augmented_data.a, augmented_data.y) ATE_ipw = Yipw[1] - Yipw[0] ATE_naive = y[a == 1].mean() - y[a == 0].mean() print( f"With IPW we find:\n{ATE_ipw:.3f}\nand the naive estimate is:\n{ATE_naive:.3f}") # - # ## Caliper # # Often we want to impose a restriction on the proximity of examples so that we do not permit any match if there is more than a distance $\kappa$ between the samples. 
We call this a "caliper" and we can see how it impacts the predicted effect here: # + caliper = np.logspace(-3, 0, 20) def check_caliper(c, with_replacement=True): matcher = Matching(propensity_transform=propensity_transform, caliper=c, with_replacement=with_replacement) matcher.fit(augmented_data.X, augmented_data.a, augmented_data.y) Y = matcher.estimate_population_outcome( augmented_data.X, augmented_data.a,) p = matcher.samples_used_.sum() / len(augmented_data.y) return p, (Y[1] - Y[0]) p_with, ATE_with = zip( *[check_caliper(c, with_replacement=True) for c in caliper]) p_without, ATE_without = zip( *[check_caliper(c, with_replacement=False) for c in caliper]) # - with sb.axes_style("dark") as s: f, ax = plt.subplots(1, 2, figsize=(16, 6)) ax[0].semilogx(caliper, p_with, "blue", label="with replacement") ax[0].semilogx(caliper, p_without, "purple", label="no replacement") ax[0].set_ylabel("fraction of data matched ", color="blue") ax[0].legend() ax[0].set_xlabel("caliper") ax[1].semilogx(caliper, ATE_with, "green", label="matching (with replacement)") ax[1].semilogx(caliper, ATE_without, "orange", label="matching (no replacement)") ax[1].set_ylabel("ATE", color="green") ax[1].hlines(xmin=caliper.min(), xmax=caliper.max(), y=ATE_ipw, ls="--", color="green", label="ipw") ax[1].hlines(xmin=caliper.min(), xmax=caliper.max(), y=ATE_naive, ls=":", color="green", label="naive") ax[1].legend(loc=4) ax[1].set_xlabel("caliper"); # # Intermediate results # The `Matching` object implements `IndividualOutcomeEstimator`, specifically the `fit` and `estimate_individual_outcome` methods. Because matching has many uses, `Matching` also outputs a DataFrame of matches and a vector of weights. These can be used for filtering data before applying regression, for example, or for assessing the quality of the covariate balancing (as shown above). 
# A matcher fitted without replacement; ``match`` exposes the raw pairing.
matcher = Matching(with_replacement=False,
                   propensity_transform=propensity_transform)
matcher.fit(X, a, y)
match_df = matcher.match(X, a)
match_df

# The matching DataFrame can be understood in the following way: when sample
# `sample_id` searched for a match with treatment value `match_to_treatment`
# it found the samples indexed in `matches` which were located at distances
# according to the `distances` entry. If `matches` is empty it means
# `sample_id` had no match in treatment class `match_to_treatment`. This
# allows us to handle the case with multiple matches, as well as with uneven
# numbers of matches due to the caliper constraint:

matcher = Matching(with_replacement=True,
                   propensity_transform=propensity_transform,
                   n_neighbors=3,
                   caliper=0.001)
matcher.fit(X, a, y)
match_df_3 = matcher.match(X, a)
match_df_3

# We can also output weights which can be useful for comparing with other
# methods or preparing the data for further processing:

weights_df = matcher.matches_to_weights(match_df)
weights_df

# We distinguish between matching from 0 to 1 and from 1 to 0 because in
# general they are distinct processes which can have different weights. For
# the case with no replacement the two columns are identical and always only
# 1 or 0. We see this when we examine the weights obtained with replacement,
# caliper and 3 neighbors.

weights_df_3 = matcher.matches_to_weights(match_df_3)
weights_df_3

# The columns no longer match because a given treatment sample may be
# selected multiple times when matched from the set of control samples. Each
# selection increases its weight. However, if it is one of $n$ chosen
# samples it is only increased by $1/n$.

# ## Compare Covariate Balancing with IPW
# Using the weights we can compare how well the matching algorithm balances
# the covariate distributions, compared to IPW. Even though IPW is a
# population effect estimator and matching is an individual outcome
# estimator, their results can both be expressed as weights and compared
# using `calculate_covariate_balance`.
# Fit a plain IPW model to obtain weights for the covariate-balance
# comparison below.
ipw = IPW(LogisticRegression(solver="liblinear"))
ipw.fit(X, a)
ipw_binarized_weights = ipw.compute_weights(X, a)

# +
# Matching weights (with replacement, tight caliper) for the same comparison.
matcher = Matching(with_replacement=True,
                   propensity_transform=propensity_transform,
                   caliper=0.01).fit(X, a, y)
match_df = matcher.match(X, a)
weights_df = matcher.matches_to_weights(match_df)

# One covariate-balance table per weighting scheme.
covbal = {}
covbal["match_control_to_treatment"] = calculate_covariate_balance(
    X, a, w=weights_df.control_to_treatment)
covbal["match_treatment_to_control"] = calculate_covariate_balance(
    X, a, w=weights_df.treatment_to_control)
covbal["ipw_binarized"] = calculate_covariate_balance(
    X, a, w=ipw_binarized_weights)
covbal["match_both"] = calculate_covariate_balance(
    X, a, w=weights_df.sum(axis=1))

# Keep the "unweighted" reference column only once (on the "match_both"
# table) so it does not repeat in the concatenated frame.
# (Idiom fix: was ``not "both" in k``.)
for k in covbal:
    covbal[k] = covbal[k].drop(
        columns="unweighted") if "both" not in k else covbal[k]
covbal_df = pd.concat(covbal, axis=1)
covbal_df.columns = list(covbal.keys()) + ["unweighted"]
covbal_df
# -

import matplotlib.pyplot as plt
import seaborn as sb
sb.set("notebook")
f, axes = plt.subplots(figsize=(8, 6))
sb.violinplot(data=covbal_df, ax=axes, cut=0)
axes.set_xticklabels(axes.get_xticklabels(), rotation=90);
sb.heatmap(data=covbal_df);

# Comparing the results of the IPW weights and the matching weights, we see
# that while both lead to substantially better weighted covariates, the IPW
# does a better job in general. In the Lalonde matching notebook, we will
# show how matching and IPW can be used together to obtain even better
# covariate distribution balancing.
examples/matching.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.6 64-bit # metadata: # interpreter: # hash: 8185b0a4f39b92c69e514520d2d9d7dc664d18802dd476fd1a58e7b3cf02750f # name: python3 # --- import pandas as pd import numpy as np # df_log=pd.read_pickle('src/data/testA/totalExposureLog.pkl') # df=pd.read_pickle('src/data/testA/user_data.pkl') df=pd.read_pickle('src/data/testA/ad_operation.pkl') # df=pd.read_pickle('src/data/testA/ad_static_feature.pkl') # df=pd.read_pickle('src/data/testA/test_sample.pkl') df=pd.read_csv('src/data/testA/ad_operation.dat', sep='\t',names=['aid','request_timestamp','type','op_type','value']).sort_values(by='request_timestamp') df['request_time'] = df.apply(lambda x:(pd.to_datetime('20190228000000') if x['request_timestamp'] == 20190230000000 else (pd.to_datetime(x['request_timestamp']) if x['request_timestamp'] == 0 else pd.to_datetime(str(x['request_timestamp'])))), axis=1 ) df.to_pickle('src/data/testA/ad_operation.pkl') df df.info() df.nunique() df.isnull().any(axis=0)# 查看各列是否存在空值,True表示有空值,axis=0代表是查看列,axis=1是查看行是否存在控制 df.isnull().any(axis=0).sum()#计算含有空值的列数,若把axis=0改成axis=1,则是计算含有空值的行数 df['ad_size'].isnull().sum()#将某一列中为空或者NA的个数统计出来,把column_name改成字段名 # 缺失值有三种常用的处理方法,分别是删除法、替换法、插补法。 # # 2.11 删除法 # 删除法可以通过删除观测样本或者删除变量来实现。删除法适用于变量有较大缺失且对研究目标影响不大的情况。如果删除了之后应该很多不建议用删除法。在kaggle中有人这样处理缺失数据,如果数据的缺失达到15%,且并没有发现该变量有多大作用,就删除该变量。 # # `del data['column_name'] #删除某一列`<br> # `data['column_name'].dropna() #删除某一行为空值或者NA的元素`<br> # `data.drop(data.columns[[0,1]],axis=1,inplace=True) #删除第1,2列,inplace=True表示直接就在内存中替换了,不用二次赋值生效。`<br> # `data.dropna(axis=0) #删除带有空值的行 `<br> # `data.dropna(axis=1) #删除带有空值的列 `<br> # # # 2.12 替换法 # 如果缺失值所在变量为数值型,一般用均值来替换;如果为非数值型变量,则使用中位数或者是众数来替换。 # # `data['column_name']=data['column_name'].fillna(num) #将该列中的空值或者NA填充为num.其中num是某位数字,这个代码用于用数字进行替换。`<br> # 
# `data['column_name'][data['column_name'].isnull()]=data['column_name'].dropna().mode().values # for a string column, assign the most frequent string to the nulls; mode() returns the most common element`<br>
# `data['column_name'].fillna(method='pad') # forward-fill: replace a null/NA with the nearest preceding non-null value`<br>
# `data['column_name'].fillna(method='bfill',limit=1) # backward-fill; limit=1 means that within a run of consecutive nulls only the nearest one is filled`<br>
# `data['column_name'].fillna(data['column_name'].mean()) # fill with the column mean`<br>
# `data= data.fillna(data.mean()) # replace every missing value with its column mean; by this point string features have usually been converted to numbers`<br>
#
# # 2.13 Interpolation
# Deletion and replacement can waste information and change the data
# structure, which may bias the final statistics. Interpolation avoids these
# problems. Common approaches include regression interpolation, multiple
# imputation and Lagrange interpolation; the choice depends on the
# situation, so no code is included here.
# On Lagrange interpolation see:
# https://blog.csdn.net/sinat_22510827/article/details/80972389

import matplotlib.pyplot as plt

# NOTE(review): the columns below ('good_type', 'good_id') do not exist in
# the ad-operation table loaded above -- this cell looks copied from another
# data set; confirm before running.
plt.boxplot(df['good_type'],vert = False)  # box plot
plt.show()
plt.plot(df['good_id'], df['good_type'], 'o', color='black')  # scatter plot
df['good_type'].describe()  # descriptive statistics


# +
def count_box(Q_3, Q_1):
    """Print and return the Tukey outlier fences for a box plot.

    Parameters
    ----------
    Q_3 : the 75th percentile (third quartile).
    Q_1 : the 25th percentile (first quartile).

    Returns
    -------
    (lower fence, upper fence), i.e. ``Q_1 - 1.5*IQR`` and ``Q_3 + 1.5*IQR``.
    Returning the fences is new and backward compatible -- the original
    only printed them, so callers could not use the values programmatically.
    """
    IQR = Q_3 - Q_1
    down_line = Q_1 - 1.5 * IQR
    up_line = Q_3 + 1.5 * IQR
    print("异常值上限:", up_line, " 异常值下限:", down_line)
    return down_line, up_line


count_box(18, 3)
ad_operation_insight.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %autosave 2

from threading import Thread

import numpy as np


# +
def f(x, a, b):
    """Straight line ``a*x + b`` -- the model whose parameters are re-fit below."""
    return a * x + b


n = 1000001
n_threads = 8
x = np.linspace(0, 1, n)
# Design matrix for the least-squares fit: columns [x, 1].
A = np.vstack((x, np.ones_like(x))).T
# -

# Sequential baseline: generate noisy data and fit it n_threads times.
# %%timeit
a_ = []
b_ = []
for _ in range(n_threads):
    y = (f(x, np.pi, np.exp(1))
         + np.random.normal(scale=0.5, size=n))
    # rcond=None opts into the current default cutoff for small singular
    # values; omitting it (as the original did) triggers a FutureWarning.
    a, b = np.linalg.lstsq(A, y, rcond=None)[0]
    a_.append(a)
    b_.append(b)
print(np.mean(a_), np.mean(b_))

# +
# Threaded variant: NumPy's heavy native routines typically release the
# GIL, so the random generation and the solves can overlap.
# %%timeit
a_ = []
b_ = []


def worker():
    y = (f(x, np.pi, np.exp(1))
         + np.random.normal(scale=0.5, size=n))
    a, b = np.linalg.lstsq(A, y, rcond=None)[0]
    # list.append is atomic under the GIL, so no explicit lock is needed.
    a_.append(a)
    b_.append(b)


threads = []
for _ in range(n_threads):
    t = Thread(target=worker)
    t.start()
    threads.append(t)
for t in threads:
    t.join()
print(np.mean(a_), np.mean(b_))
# -

# Timing of the random generation alone, sequential vs threaded.
# %%timeit
for _ in range(n_threads):
    y = np.random.normal(scale=0.5, size=n)

# +
# %%timeit
def worker():
    y = np.random.normal(scale=0.5, size=n)


threads = []
for _ in range(n_threads):
    t = Thread(target=worker)
    t.start()
    threads.append(t)
for t in threads:
    t.join()
# -
misc/jupyter_notebooks/17.12.08/threading.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Concatenate

# ### Task

# #### Concatenate
#
# Two or more arrays can be concatenated together using the concatenate function with a tuple of the arrays to be joined:
#
#     import numpy
#
#     array_1 = numpy.array([1,2,3])
#     array_2 = numpy.array([4,5,6])
#     array_3 = numpy.array([7,8,9])
#
#     print numpy.concatenate((array_1, array_2, array_3))
#
#     #Output
#     [1 2 3 4 5 6 7 8 9]
#
# If an array has more than one dimension, it is possible to specify the axis along which multiple arrays are concatenated. By default, it is along the first dimension.
#
#     import numpy
#
#     array_1 = numpy.array([[1,2,3],[0,0,0]])
#     array_2 = numpy.array([[0,0,0],[7,8,9]])
#
#     print numpy.concatenate((array_1, array_2), axis = 1)
#
#     #Output
#     [[1 2 3 0 0 0]
#      [0 0 0 7 8 9]]
#
# #### Task
#
# You are given two integer arrays of size N X P and M X P (N & M are the
# rows, and P is the column). Your task is to concatenate the arrays along
# axis 0.
#
# Input Format
#
# The first line contains space separated integers N, M and P.
# The next N lines contain the space separated elements of the P columns.
# After that, the next M lines contain the space separated elements of the
# P columns.
#
# Output Format
#
# Print the concatenated array of size (N + M) X P.
#
# Sample Input
#
#     4 3 2
#     1 2
#     1 2
#     1 2
#     1 2
#     3 4
#     3 4
#     3 4
#
# Sample Output
#
#     [[1 2]
#      [1 2]
#      [1 2]
#      [1 2]
#      [3 4]
#      [3 4]
#      [3 4]]

# +
import numpy as np

# Read dimensions: N rows for the first array, M for the second, P columns each.
N, M, P = map(int, input().split())
array_1 = np.array([list(map(int, input().split())) for i in range(N)])
array_2 = np.array([list(map(int, input().split())) for i in range(M)])
# Stack row-wise. Renamed the result variable: the original bound it to
# ``sum``, shadowing the built-in sum().
result = np.concatenate((array_1, array_2), axis=0)
print(result)
# -
UoH_HackerRank_Challenge/Problem_9.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## PCA analysis of two lung cancer sets
#
# Here we are performing a PCA of two different datasets within the TCGA. We will first merge the two datasets and subsequently try to separate the samples based on their principal components.
#
# First we retrieve our two TCGA lung cancer datasets from cbioportal.org. One of the sets is from [Lung Adenocarcinomas](https://en.wikipedia.org/wiki/Adenocarcinoma_of_the_lung) and the other is from [Lung Squamous Cell Carcinomas](https://en.wikipedia.org/wiki/Squamous-cell_carcinoma_of_the_lung). The code for the retrieval of this data set is not important for the understanding of the analysis, but the code is kept for completeness. Execute the code and proceed to next step.

# +
import pandas as pd
import seaborn as sns
import numpy as np
import tarfile
import requests


def track_dl(url, tar):
    """Stream the file at ``url`` into the local file ``tar``."""
    response = requests.get(url, stream=True)
    # Fail loudly on HTTP errors instead of silently saving an error page.
    response.raise_for_status()
    with open(tar, "wb") as handle:
        # Download in 64 KiB chunks; the original iter_content() call used
        # the default chunk size of one byte, which is extremely slow for
        # multi-megabyte archives.
        for data in response.iter_content(chunk_size=65536):
            handle.write(data)


def get_tar(url, path):
    """Open the tar archive at ``path``, downloading it from ``url`` if needed."""
    try:
        tf = tarfile.open(path)
    except (OSError, tarfile.ReadError):
        # Narrowed from a bare ``except:`` -- only a missing or unreadable
        # archive should trigger the download fallback.
        track_dl(url, path)
        tf = tarfile.open(path)
    return tf


def get_expression_data(path, url, file):
    """Return the expression table indexed by gene symbol, columns sorted."""
    df = get_data(path, url, file)
    df.dropna(axis=0, how='any', inplace=True)
    df.set_index('Hugo_Symbol', inplace=True)
    df.drop(columns=['Unnamed: 0', 'Entrez_Gene_Id'], inplace=True)
    # df.drop(columns=['Entrez_Gene_Id'], inplace=True)
    df = df.reindex(sorted(df.columns), axis=1)
    return df


def get_data(path, url, file):
    """Read the cached TSV at ``path``; on a cache miss, fetch and extract it."""
    try:
        df = pd.read_csv(path, sep="\t")
    except OSError:
        # Narrowed from a bare ``except:`` -- a missing/unreadable cache
        # file is the expected miss condition; other errors should propagate.
        tf = get_tar(url, "my.tar.gz")
        tf.extract(file)
        df = pd.read_csv(file, sep="\t")
        df.to_csv(path, sep="\t")
        # df.to_csv(gzip.open(path,'wb'), sep="\t")
    return df


luad = get_expression_data("../../data/luad.tsv.gz",
                           'http://download.cbioportal.org/luad_tcga_pan_can_atlas_2018.tar.gz',
                           "data_RNA_Seq_v2_expression_median.txt")
lusc = get_expression_data("../../data/lusc.tsv.gz",
                           'http://download.cbioportal.org/lusc_tcga_pan_can_atlas_2018.tar.gz',
                           "data_RNA_Seq_v2_expression_median.txt")
# -

# We now merge the datasets, and see to it that we only include transcripts that are measured in all the carcinomas with a count larger than 0.

combined = pd.concat([lusc, luad], axis=1, sort=False)
combined.dropna(axis=0, how='any', inplace=True)
combined = combined.loc[~(combined <= 0.0).any(axis=1)]
combined = pd.DataFrame(data=np.log2(combined), index=combined.index,
                        columns=combined.columns)

# Make a PCA with a SVD.

from numpy.linalg import svd

X = combined.values
# Center every transcript (row) on its mean before decomposing.
Xm = np.tile(np.mean(X, axis=1)[np.newaxis].T, (1, X.shape[1]))
U, S, Vt = svd(X - Xm, full_matrices=True, compute_uv=True)

# First we plot the Eigen genes. These illustrate the linear combinations of genes that explain the variance of the genes. The first one describes the most, the second explains most of the variance when the variance of the first gene-combination is removed. Here we only explore the first two components, but one could plot the other ones as well.

# +
transformed_patients = pd.DataFrame(data=Vt[0:2, :].T,
                                    columns=["Eigen gene 1", "Eigen gene 2"],
                                    index=list(lusc.columns) + list(luad.columns))
transformed_patients["Set"] = (["LUSC" for _ in lusc.columns]
                               + ["LUAD" for _ in luad.columns])
sns.set(rc={'figure.figsize': (10, 10)})
sns.set_style("white")
# sns.set_context("talk")
sns.lmplot("Eigen gene 1", "Eigen gene 2", hue='Set',
           data=transformed_patients, fit_reg=False)
# -

# Here we see an incomplete separation of the patients based on their two first eigengenes. This means that the patients' gene expression differs and that difference is covered by the first two principal components.
#
# Let's explore which genes are most responsible for that difference. We can do so by investigating their Eigen patients.
# Project the genes onto the first two left singular vectors ("eigen
# patients") and plot them.
eigen_cols = ["Eigen patient 1", "Eigen patient 2"]
transformed_genes = pd.DataFrame(data=U[:, 0:2],
                                 index=combined.index,
                                 columns=eigen_cols)
sns.lmplot("Eigen patient 1", "Eigen patient 2",
           data=transformed_genes, fit_reg=False)

# The genes pointing in a positive direction for the two components are:

transformed_genes.idxmax(axis=0)

# The genes pointing in a negative direction for the two components are:

transformed_genes.idxmin(axis=0)
nb/pca/PCAofCarcinomas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc="true" # # Table of Contents # <p><div class="lev2 toc-item"><a href="#Common-Layers" data-toc-modified-id="Common-Layers-01"><span class="toc-item-num">0.1&nbsp;&nbsp;</span>Common Layers</a></div><div class="lev3 toc-item"><a href="#Convolution-Layers" data-toc-modified-id="Convolution-Layers-011"><span class="toc-item-num">0.1.1&nbsp;&nbsp;</span>Convolution Layers</a></div><div class="lev4 toc-item"><a href="#tf.nn.depthwise_conv2d" data-toc-modified-id="tf.nn.depthwise_conv2d-0111"><span class="toc-item-num">0.1.1.1&nbsp;&nbsp;</span>tf.nn.depthwise_conv2d</a></div><div class="lev4 toc-item"><a href="#tf.nn.separable_conv2d" data-toc-modified-id="tf.nn.separable_conv2d-0112"><span class="toc-item-num">0.1.1.2&nbsp;&nbsp;</span>tf.nn.separable_conv2d</a></div><div class="lev4 toc-item"><a href="#tf.nn.conv2d_transpose" data-toc-modified-id="tf.nn.conv2d_transpose-0113"><span class="toc-item-num">0.1.1.3&nbsp;&nbsp;</span>tf.nn.conv2d_transpose</a></div><div class="lev3 toc-item"><a href="#Activation-Functions" data-toc-modified-id="Activation-Functions-012"><span class="toc-item-num">0.1.2&nbsp;&nbsp;</span>Activation Functions</a></div><div class="lev4 toc-item"><a href="#tf.nn.relu" data-toc-modified-id="tf.nn.relu-0121"><span class="toc-item-num">0.1.2.1&nbsp;&nbsp;</span>tf.nn.relu</a></div><div class="lev4 toc-item"><a href="#tf.sigmoid" data-toc-modified-id="tf.sigmoid-0122"><span class="toc-item-num">0.1.2.2&nbsp;&nbsp;</span>tf.sigmoid</a></div><div class="lev4 toc-item"><a href="#tf.tanh" data-toc-modified-id="tf.tanh-0123"><span class="toc-item-num">0.1.2.3&nbsp;&nbsp;</span>tf.tanh</a></div><div class="lev4 toc-item"><a href="#tf.nn.dropout" data-toc-modified-id="tf.nn.dropout-0124"><span 
class="toc-item-num">0.1.2.4&nbsp;&nbsp;</span>tf.nn.dropout</a></div><div class="lev3 toc-item"><a href="#Pooling-Layers" data-toc-modified-id="Pooling-Layers-013"><span class="toc-item-num">0.1.3&nbsp;&nbsp;</span>Pooling Layers</a></div><div class="lev4 toc-item"><a href="#tf.nn.max_pool" data-toc-modified-id="tf.nn.max_pool-0131"><span class="toc-item-num">0.1.3.1&nbsp;&nbsp;</span>tf.nn.max_pool</a></div><div class="lev4 toc-item"><a href="#tf.nn.avg_pool" data-toc-modified-id="tf.nn.avg_pool-0132"><span class="toc-item-num">0.1.3.2&nbsp;&nbsp;</span>tf.nn.avg_pool</a></div><div class="lev3 toc-item"><a href="#Normalization" data-toc-modified-id="Normalization-014"><span class="toc-item-num">0.1.4&nbsp;&nbsp;</span>Normalization</a></div><div class="lev4 toc-item"><a href="#tf.nn.local_response_normalization-(tf.nn.lrn)" data-toc-modified-id="tf.nn.local_response_normalization-(tf.nn.lrn)-0141"><span class="toc-item-num">0.1.4.1&nbsp;&nbsp;</span>tf.nn.local_response_normalization (tf.nn.lrn)</a></div><div class="lev3 toc-item"><a href="#High-Level-Layers" data-toc-modified-id="High-Level-Layers-015"><span class="toc-item-num">0.1.5&nbsp;&nbsp;</span>High Level Layers</a></div><div class="lev4 toc-item"><a href="#tf.contrib.layers.convolution2d" data-toc-modified-id="tf.contrib.layers.convolution2d-0151"><span class="toc-item-num">0.1.5.1&nbsp;&nbsp;</span>tf.contrib.layers.convolution2d</a></div><div class="lev4 toc-item"><a href="#tf.contrib.layers.fully_connected" data-toc-modified-id="tf.contrib.layers.fully_connected-0152"><span class="toc-item-num">0.1.5.2&nbsp;&nbsp;</span>tf.contrib.layers.fully_connected</a></div><div class="lev4 toc-item"><a href="#Layer-Input" data-toc-modified-id="Layer-Input-0153"><span class="toc-item-num">0.1.5.3&nbsp;&nbsp;</span>Layer Input</a></div> # - # ## Common Layers # # For a neural network architecture to be considered a CNN, it requires at least one convolution layer (`tf.nn.conv2d`). 
There are practical uses for a single layer CNN (edge detection), for image recognition and categorization it is common to use different layer types to support a convolution layer. These layers help reduce over-fitting, speed up training and decrease memory usage. # # The layers covered in this chapter are focused on layers commonly used in a CNN architecture. A CNN isn't limited to use only these layers, they can be mixed with layers designed for other network architectures. # setup-only-ignore import tensorflow as tf import numpy as np # setup-only-ignore sess = tf.InteractiveSession() # ### Convolution Layers # # One type of convolution layer has been covered in detail (`tf.nn.conv2d`) but there are a few notes which are useful to advanced users. The convolution layers in TensorFlow don't do a full convolution, details can be found in [the TensorFlow API documentation](https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#convolution). In practice, the difference between a convolution and the operation TensorFlow uses is performance. TensorFlow uses a technique to speed up the convolution operation in all the different types of convolution layers. # # There are use cases for each type of convolution layer but for `tf.nn.conv2d` is a good place to start. The other types of convolutions are useful but not required in building a network capable of object recognition and classification. A brief summary of each is included. # #### tf.nn.depthwise_conv2d # # Used when attaching the output of one convolution to the input of another convolution layer. An advanced use case is using a `tf.nn.depthwise_conv2d` to create a network following the [inception architecture](http://arxiv.org/abs/1512.00567). # # #### tf.nn.separable_conv2d # # Similar to `tf.nn.conv2d` but not a replacement. For large models, it speeds up training without sacrificing accuracy. For small models, it will converge quickly with worse accuracy. 
# # #### tf.nn.conv2d_transpose # # Applies a kernel to a new feature map where each section is filled with the same values as the kernel. As the kernel strides over the new image, any overlapping sections are summed together. There is a great explanation on how `tf.nn.conv2d_transpose` is used for learnable upsampling in [Stanford's CS231n Winter 2016: Lecture 13](https://www.youtube.com/watch?v=ByjaPdWXKJ4&t=20m00s). # ### Activation Functions # # These functions are used in combination with the output of other layers to generate a feature map. They're used to smooth (or differentiate) the results of certain operations. The goal is to introduce non-linearity into the neural network. Non-linearity means that the input is a curve instead of a straight line. Curves are capable of representing more complex changes in input. For example, non-linear input is capable of describing input which stays small for the majority of the time but periodically has a single point at an extreme. Introduction of non-linearity in a neural network allows it to train on the complex patterns found in data. # # TensorFlow has [multiple activation functions](https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#activation-functions) available. With CNNs, `tf.nn.relu` is primarily used because of its performance although it sacrifices information. When starting out, using `tf.nn.relu` is recommended but advanced users may create their own. When considering if an activation function is useful there are a few primary considerations. # # 1. The function is [**monotonic**](https://en.wikipedia.org/wiki/Monotonic_function), so its output should always be increasing or decreasing along with the input. This allows gradient descent optimization to search for local minima. # 2. The function is [**differentiable**](https://en.wikipedia.org/wiki/Differentiable_function), so there must be a derivative at any point in the function's domain. 
This allows gradient descent optimization to properly work using the output from this style of activation function. # # Any functions which satisfy those considerations could be used as activation functions. In TensorFlow there are a few worth highlighting which are common to see in CNN architectures. A brief summary of each is included with a small sample code illustrating their usage. # #### tf.nn.relu # # A rectifier (rectified linear unit) called a ramp function in some documentation and looks like a skateboard ramp when plotted. ReLU is linear and keeps the same input values for any positive numbers while setting all negative numbers to be 0. It has the benefits that it doesn't suffer from [gradient vanishing](https://en.wikipedia.org/wiki/Vanishing_gradient_problem) and has a range of <span class="math-tex" data-type="tex">\\([0,+\infty)\\)</span>. A drawback of ReLU is that it can suffer from neurons becoming saturated when too high of a learning rate is used. features = tf.range(-2, 3) # Keep note of the value for negative features sess.run([features, tf.nn.relu(features)]) # In this example, the input in a rank one tensor (vector) of integer values between <span class="math-tex" data-type="tex">\\([-2, 3]\\)</span>. A `tf.nn.relu` is ran over the values the output highlights that any value less than 0 is set to be 0. The other input values are left untouched. # #### tf.sigmoid # # A sigmoid function returns a value in the range of <span class="math-tex" data-type="tex">\\([0.0, 1.0]\\)</span>. Larger values sent into a `tf.sigmoid` will trend closer to 1.0 while smaller values will trend towards 0.0. The ability for sigmoids to keep a values between <span class="math-tex" data-type="tex">\\([0.0, 1.0]\\)</span> is useful in networks which train on probabilities which are in the range of <span class="math-tex" data-type="tex">\\([0.0, 1.0]\\)</span>. 
# The reduced range of output values can cause trouble with input becoming saturated and changes in input becoming exaggerated.

# Note, tf.sigmoid (tf.nn.sigmoid) is currently limited to float values
features = tf.to_float(tf.range(-1, 3))
sess.run([features, tf.sigmoid(features)])

# In this example, a range of integers is converted to be float values (`1` becomes `1.0`) and a sigmoid function is run over the input features. The result highlights that when a value of 0.0 is passed through a sigmoid, the result is 0.5, which is the midpoint of the sigmoid's domain. It's useful to note that with 0.5 being the sigmoid's midpoint, negative values can be used as input to a sigmoid.

# #### tf.tanh
#
# A hyperbolic tangent function (tanh) is a close relative to `tf.sigmoid` with some of the same benefits and drawbacks. The main difference between `tf.sigmoid` and `tf.tanh` is that `tf.tanh` has a range of <span class="math-tex" data-type="tex">\\([-1.0, 1.0]\\)</span>. The ability to output negative values may be useful in certain network architectures.

# Note, tf.tanh (tf.nn.tanh) is currently limited to float values
features = tf.to_float(tf.range(-1, 3))
sess.run([features, tf.tanh(features)])

# In this example, all the setup is the same as the `tf.sigmoid` example but the output shows an important difference. In the output of `tf.tanh` the midpoint is 0.0 with negative values. This can cause trouble if the next layer in the network isn't expecting negative input or input of 0.0.
features = tf.constant([-0.1, 0.0, 0.1, 0.2]) # Note, the output should be different on almost ever execution. Your numbers won't match # this output. sess.run([features, tf.nn.dropout(features, keep_prob=0.5)]) # In this example, the output has a 50% probability of being kept. Each execution of this layer will have different output (most likely, it's somewhat random). When an output is dropped, its value is set to 0.0. # ### Pooling Layers # # Pooling layers reduce over-fitting and improving performance by reducing the size of the input. They're used to scale down input while keeping important information for the next layer. It's possible to reduce the size of the input using a `tf.nn.conv2d` alone but these layers execute much faster. # #### tf.nn.max_pool # # Strides over a tensor and chooses the maximum value found within a certain kernel size. Useful when the intensity of the input data is relevant to importance in the image. # # ![Example Max Pooling](./images/chapter-05-object-recognition-and-classification/layers/max-pooling.png) # # The same example is modeled using example code below. The goal is to find the largest value within the tensor. # + # Usually the input would be output from a previous layer and not an image directly. batch_size=1 input_height = 3 input_width = 3 input_channels = 1 layer_input = tf.constant([ [ [[1.0], [0.2], [1.5]], [[0.1], [1.2], [1.4]], [[1.1], [0.4], [0.4]] ] ]) # The strides will look at the entire input by using the image_height and image_width kernel = [batch_size, input_height, input_width, input_channels] max_pool = tf.nn.max_pool(layer_input, kernel, [1, 1, 1, 1], "VALID") sess.run(max_pool) # - # The `layer_input` is a tensor with a shape similar to the output of `tf.nn.conv2d` or an activation function. The goal is to keep only one value, the largest value in the tensor. In this case, the largest value of the tensor is `1.5` and is returned in the same format as the input. 
If the `kernel` were set to be smaller, it would choose the largest value in each kernel size as it strides over the image. # # Max-pooling will commonly be done using `2x2` receptive field (kernel with a height of 2 and width of 2) which is often written as a "2x2 max-pooling operation". One reason to use a `2x2` receptive field is that it's the smallest amount of downsampling which can be done in a single pass. If a `1x1` receptive field were used then the output would be the same as the input. # #### tf.nn.avg_pool # # Strides over a tensor and averages all the values at each depth found within a kernel size. Useful when reducing values where the entire kernel is important, for example, input tensors with a large width and height but small depth. # # ![Example Average Pool](./images/chapter-05-object-recognition-and-classification/layers/avg-pool.png) # # The same example is modeled using example code below. The goal is to find the average of all the values within the tensor. # + batch_size=1 input_height = 3 input_width = 3 input_channels = 1 layer_input = tf.constant([ [ [[1.0], [1.0], [1.0]], [[1.0], [0.5], [0.0]], [[0.0], [0.0], [0.0]] ] ]) # The strides will look at the entire input by using the image_height and image_width kernel = [batch_size, input_height, input_width, input_channels] max_pool = tf.nn.avg_pool(layer_input, kernel, [1, 1, 1, 1], "VALID") sess.run(max_pool) # - # Doing a summation of all the values in the tensor, then divide them by the size of the number of scalars in the tensor: # # <br /> # <span class="math-tex" data-type="tex">\\(\dfrac{1.0 + 1.0 + 1.0 + 1.0 + 0.5 + 0.0 + 0.0 + 0.0 + 0.0}{9.0}\\)</span> # # This is exactly what the example code did above but by reducing the size of the kernel, it's possible to adjust the size of the output. # ### Normalization # # Normalization layers are not unique to CNNs and aren't used as often. When using `tf.nn.relu`, it is useful to consider normalization of the output. 
Since ReLU is unbounded, it's often useful to utilize some form of normalization to identify high-frequency features. # #### tf.nn.local_response_normalization (tf.nn.lrn) # # Local response normalization is a function which shapes the output based on a summation operation best explained in [TensorFlow's documentation](https://www.tensorflow.org/versions/master/api_docs/python/nn.html#local_response_normalization). # # > ... Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius. # # One goal of normalization is to keep the input in a range of acceptable numbers. For instance, normalizing input in the range of <span class="math-tex" data-type="tex">\\([0.0,1.0]\\)</span> where the full range of possible values is normalized to be represented by a number greater than or equal to `0.0` and less than or equal to `1.0`. Local response normalization normalizes values while taking into account the significance of each value. # # [Cuda-Convnet](https://code.google.com/p/cuda-convnet/wiki/LayerParams) includes further details on why using local response normalization is useful in some CNN architectures. [ImageNet](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf) uses this layer to normalize the output from `tf.nn.relu`. # + # Create a range of 3 floats. # TensorShape([batch, image_height, image_width, image_channels]) layer_input = tf.constant([ [[[ 1.]], [[ 2.]], [[ 3.]]] ]) lrn = tf.nn.local_response_normalization(layer_input) sess.run([layer_input, lrn]) # - # In this example code, the layer input is in the format `[batch, image_height, image_width, image_channels]`. The normalization reduced the output to be in the range of <span class="math-tex" data-type="tex">\\([-1.0, 1.0]\\)</span>. For `tf.nn.relu`, this layer will reduce its unbounded output to be in the same range. 
# ### High Level Layers # # TensorFlow has introduced high level layers designed to make it easier to create fairly standard layer definitions. These aren't required to use but they help avoid duplicate code while following best practices. While getting started, these layers add a number of non-essential nodes to the graph. It's worth waiting until the basics are comfortable before using these layers. # #### tf.contrib.layers.convolution2d # # The `convolution2d` layer will do the same logic as `tf.nn.conv2d` while including weight initialization, bias initialization, trainable variable output, bias addition and adding an activation function. Many of these steps haven't been covered for CNNs yet but should be familiar. A kernel is a trainable variable (the CNN's goal is to train this variable), weight initialization is used to fill the kernel with values (`tf.truncated_normal`) on its first run. The rest of the parameters are similar to what have been used before except they are reduced to short-hand version. Instead of declaring the full kernel, now it's a simple tuple `(1,1)` for the kernel's height and width. # + image_input = tf.constant([ [ [[0., 0., 0.], [255., 255., 255.], [254., 0., 0.]], [[0., 191., 0.], [3., 108., 233.], [0., 191., 0.]], [[254., 0., 0.], [255., 255., 255.], [0., 0., 0.]] ] ]) conv2d = tf.contrib.layers.convolution2d( image_input, num_outputs=4, kernel_size=(1,1), # It's only the filter height and width. activation_fn=tf.nn.relu, stride=(1, 1), # Skips the stride values for image_batch and input_channels. trainable=True) # It's required to initialize the variables used in convolution2d's setup. sess.run(tf.global_variables_initializer()) sess.run(conv2d) # - # This example setup a full convolution against a batch of a single image. All the parameters are based off of the steps done throughout this chapter. The main difference is that `tf.contrib.layers.convolution2d` does a large amount of setup without having to write it all again. 
This can be a great time saving layer for advanced users. # # **NOTE**: `tf.to_float` should not be used if the input is an image, instead use `tf.image.convert_image_dtype` which will properly change the range of values used to describe colors. In this example code, float values of `255.` were used which aren't what TensorFlow expects when it sees an image using float values. TensorFlow expects an image with colors described as floats to stay in the range of <span class="math-tex" data-type="tex">\\([0,1]\\)</span>. # #### tf.contrib.layers.fully_connected # # A fully connected layer is one where every input is connected to every output. This is a fairly common layer in many architectures but for CNNs, the last layer is quite often fully connected. The `tf.contrib.layers.fully_connected` layer offers a great short-hand to create this last layer while following best practices. # # Typical fully connected layers in TensorFlow are often in the format of `tf.matmul(features, weight) + bias` where `feature`, `weight` and `bias` are all tensors. This short-hand layer will do the same thing while taking care of the intricacies involved in managing the `weight` and `bias` tensors. # + features = tf.constant([ [[1.2], [3.4]] ]) fc = tf.contrib.layers.fully_connected(features, num_outputs=2) # It's required to initialize all the variables first or there'll be an error about precondition failures. sess.run(tf.global_variables_initializer()) sess.run(fc) # - # This example created a fully connected layer and associated the input tensor with each neuron of the output. There are plenty of other parameters to tweak for different fully connected layers. # #### Layer Input # # Each layer serves a purpose in a CNN architecture. It's important to understand them at a high level (at least) but without practice they're easy to forget. A crucial layer in any neural network is the input layer, where raw input is sent to be trained and tested.
For object recognition and classification, the input layer is a `tf.nn.conv2d` layer which accepts images. The next step is to use real images in training instead of example input in the form of `tf.constant` or `tf.range` variables.
chapters/05_object_recognition_and_classification/Chapter 5 - 03 Layers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
# -*- coding: utf-8 -*-
# @author: alison
#
# CNN tweet classifier (HatEval Task B, English): predicts Hate Speech (HS),
# Target Range (TR) and Aggressiveness (AG) for tweets, using pretrained word
# embeddings and a two-branch Conv2D (Kim-style) architecture.
import re
import string
import pickle
import keras
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from nltk.stem import PorterStemmer, SnowballStemmer
from nltk.tokenize import TweetTokenizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential, Model
from keras.layers import Dense, Flatten, Activation
from keras.layers import Conv2D, MaxPool2D, Reshape
from keras.layers import Input, concatenate, Dropout
from keras.layers import Embedding, Concatenate
from keras.optimizers import Adam, SGD, RMSprop
from keras import optimizers
from keras import regularizers

# +
import nltk
nltk.download('stopwords')

# +
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# +
# List the dataset files available in the shared Drive folder.
file_list = drive.ListFile({'q': "'1JsHac7_mxx_M8BapYSzcDKnzl4M9e1Yb' in parents and trashed=false"}).GetList()
for file1 in file_list:
    print('title: %s, id: %s' % (file1['title'], file1['id']))

# +
# Download the train/dev/test TSVs by Drive file id.
train_downloaded = drive.CreateFile({'id': '1tv4kRZtCJu7F4WtkBvaMWZMXZZDYF-yv'})
train_downloaded.GetContentFile('train_en.tsv')

dev_downloaded = drive.CreateFile({'id': '116R1Q_P_m75ZagpkE7cZXfXHiDSDwrJy'})
dev_downloaded.GetContentFile('dev_en.tsv')

test_downloaded = drive.CreateFile({'id': '1Z-kJ95PF2VZiGAn_5Piyovk8SZ-bYQJv'})
test_downloaded.GetContentFile('test_en.tsv')

# +
train = pd.read_csv('train_en.tsv', delimiter='\t', encoding='utf-8')
dev = pd.read_csv('dev_en.tsv', delimiter='\t', encoding='utf-8')
#test = pd.read_csv('test_en.tsv', delimiter='\t',encoding='utf-8')

# +
# Pre-processing stage.
def clean_tweets(tweet):
    """Strip @mentions and 'via '/'RT ' markers, then lower-case the tweet.

    Bug fix: the original pattern was written in a NON-raw string, so its
    trailing '\\b' was a literal backspace character (0x08) and the mention
    regex could never match. A raw string restores the intended word-boundary
    semantics.
    """
    tweet = re.sub(r'@(\w{1,15})\b', '', tweet)
    # NOTE(review): these replacements run before lower(), so lower-case
    # "rt "/"via " prefixes survive — preserved as-is, confirm if intended.
    tweet = tweet.replace("via ", "")
    tweet = tweet.replace("RT ", "")
    tweet = tweet.lower()
    return tweet


def clean_url(tweet):
    """Remove URLs (anything starting with 'http')."""
    tweet = re.sub(r'http\S+', '', tweet, flags=re.MULTILINE)
    return tweet


def remove_stop_words(tweet):
    """Drop English stopwords, punctuation and tokens shorter than 3 chars."""
    stops = set(stopwords.words("english"))
    stops.update(['.', ',', '"', "'", '?', ':', ';', '(', ')', '[', ']', '{', '}'])
    toks = [tok for tok in tweet if not tok in stops and len(tok) >= 3]
    return toks


def stemming_tweets(tweet):
    """Stem every token with the English Snowball stemmer."""
    stemmer = SnowballStemmer('english')
    stemmed_words = [stemmer.stem(word) for word in tweet]
    return stemmed_words


def remove_number(tweet):
    """Delete digit runs from the tweet."""
    newTweet = re.sub(r'\d+', '', tweet)
    return newTweet


def remove_hashtags(tweet):
    """Keep hashtag/mention words but drop their leading '#'/'@' marker."""
    result = ''
    for word in tweet.split():
        if word.startswith('#') or word.startswith('@'):
            result += word[1:]
            result += ' '
        else:
            result += word
            result += ' '
    return result


# +
def preprocessing(tweet, swords=True, url=True, stemming=True,
                  ctweets=True, number=True, hashtag=True):
    """Apply the full cleaning pipeline to one tweet and return it as a
    single space-joined string of processed tokens. Each flag toggles one
    cleaning step."""
    if ctweets:
        tweet = clean_tweets(tweet)
    if url:
        tweet = clean_url(tweet)
    if hashtag:
        tweet = remove_hashtags(tweet)
    twtk = TweetTokenizer(strip_handles=True, reduce_len=True)
    if number:
        tweet = remove_number(tweet)
    tokens = [w.lower() for w in twtk.tokenize(tweet) if w != "" and w is not None]
    if swords:
        tokens = remove_stop_words(tokens)
    if stemming:
        tokens = stemming_tweets(tokens)
    text = " ".join(tokens)
    return text


# +
train_text = train['text'].map(lambda x: preprocessing(x, swords=True, url=True, stemming=True, ctweets=True, number=True, hashtag=True))
hs_train = train['HS']
id_train = train['id']
tr_train = train['TR']
# NOTE(review): this compares against the STRING '1'; if pandas parsed the AG
# column as integers everything maps to 0 — verify against the TSV dtype
# (the dev AG column below is used raw).
ag_train = [1 if k == '1' else 0 for k in train['AG']]

test_text = dev['text'].map(lambda x: preprocessing(x, swords=True, url=True, stemming=True, ctweets=True, number=True, hashtag=True))
hs_test = dev['HS']
id_test = dev['id']
tr_test = dev['TR']
ag_test = dev['AG']

# +
# List the word-embedding files available in the shared Drive folder.
file_list = drive.ListFile({'q': "'1sBKK1i4JXIluelnAPqjOU4xVIIPA3Vmy' in parents and trashed=false"}).GetList()
for file1 in file_list:
    print('title: %s, id: %s' % (file1['title'], file1['id']))

# +
# Embedding selection: fastText wiki vectors (GloVe alternative kept commented).
#we = {'id': '1FRfAM3GouOxelBo_gwj5n4YNWDMP6CkL', 'file': 'glove.6B.300d_en.txt'}
we = {'id': '18zPA_9tWmlNZChdfMKHTp0Ka1SQsnkj9', 'file': 'wiki_fasttext_en.vec'}

# +
word_vector = drive.CreateFile({'id': we['id']})
word_vector.GetContentFile(we['file'])

# +
def word_embeddings(word_index, num_words, word_embedding_dim):
    """Build a (num_words x word_embedding_dim) embedding matrix from the
    downloaded vector file; rows for out-of-vocabulary words stay zero.

    NOTE: relies on the globals `we`, `max_features` and `tqdm` (imported in
    a later cell, but before this function is first called).
    """
    embeddings_index = {}
    # 'with' guarantees the (large) vector file is closed even on error.
    with open(we['file'], 'r', encoding='utf-8') as f:
        for line in tqdm(f):
            values = line.rstrip().rsplit(' ')
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    matrix = np.zeros((num_words, word_embedding_dim))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            matrix[i] = embedding_vector
    return matrix


# +
# Hyper-parameters.
embedding_dim = 300
max_features = 25000
maxlen = 100
batch_size = 32
epochs = 5
filter_sizes = [3, 4]
num_filters = 512
drop = 0.5


def code_data(train_text, test_text, maxlen, max_features):
    """Fit a tokenizer on train+test text; return the padded sequences for
    the combined data (`tweets`), for the test split alone (`x_test`) and the
    fitted word index."""
    data = np.concatenate((train_text, test_text), axis=0)
    # Fit a tokenizer on the combined data.
    tokenizer = Tokenizer(num_words=max_features)
    tokenizer.fit_on_texts(data)
    # Tokenize the data.
    X = tokenizer.texts_to_sequences(data)
    Y = tokenizer.texts_to_sequences(test_text)
    tweets = sequence.pad_sequences(X, maxlen=maxlen)
    x_test = sequence.pad_sequences(Y, maxlen=maxlen)
    word_index = tokenizer.word_index
    return tweets, x_test, word_index


# +
tweets, x_test, word_index = code_data(train_text, test_text, maxlen, max_features)

# +
num_words = min(max_features, len(word_index) + 1)

# +
from tqdm import tqdm
embedding_matrix = word_embeddings(word_index, num_words, embedding_dim)

# +
# Classification phase: Kim-style CNN with two parallel convolution branches
# (filter heights 3 and 4) over the embedded tweet, max-pooled and merged.
tweet_input = Input(shape=(maxlen,), dtype='int32')
embedding = Embedding(num_words, embedding_dim, weights=[embedding_matrix], input_length=maxlen, trainable=True)(tweet_input)
reshape = Reshape((maxlen, embedding_dim, 1))(embedding)

cnn1 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), padding='valid', kernel_initializer='normal', activation='tanh')(reshape)
max1 = MaxPool2D(pool_size=(maxlen - filter_sizes[0] + 1, 1), strides=(1, 1), padding='valid')(cnn1)

cnn2 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), padding='valid', kernel_initializer='normal', activation='tanh')(reshape)
max2 = MaxPool2D(pool_size=(maxlen - filter_sizes[1] + 1, 1), strides=(1, 1), padding='valid')(cnn2)

concatenated_tensor = Concatenate(axis=1)([max1, max2])
flatten = Flatten()(concatenated_tensor)
dropout = Dropout(drop)(flatten)
dens = Dense(num_filters, activation='relu')(dropout)
output = Dense(1, activation='sigmoid')(dens)

model = Model(inputs=tweet_input, outputs=output)
opt = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

# +
# Training and prediction - Hate Speech (HS)
# NOTE(review): the SAME model instance is reused for all three tasks below,
# so TR and AG start from the weights learned on the previous task — confirm
# this transfer is intended rather than rebuilding the model per task.
classes = np.concatenate((hs_train, hs_test), axis=0)
x_train, x_val, y_train, y_val = train_test_split(tweets, classes, test_size=0.25, random_state=None)
model.summary()
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, validation_data=(x_val, y_val))
hs = (model.predict(x_test, batch_size=batch_size) > .5).astype(int)

# +
# Training and prediction - Target Range (TR)
classes = np.concatenate((tr_train, tr_test), axis=0)
x_train, x_val, y_train, y_val = train_test_split(tweets, classes, test_size=0.25, random_state=None)
model.summary()
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, validation_data=(x_val, y_val))
tr = (model.predict(x_test, batch_size=batch_size) > .5).astype(int)

# +
# Training and prediction - Aggressiveness (AG)
classes = np.concatenate((ag_train, ag_test), axis=0)
x_train, x_val, y_train, y_val = train_test_split(tweets, classes, test_size=0.25, random_state=None)
model.summary()
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, validation_data=(x_val, y_val))
ag = (model.predict(x_test, batch_size=batch_size) > .5).astype(int)

# +
def func(pred):
    """Flatten the (n, 1) prediction array into a flat list of scalars."""
    output = []
    for array in pred:
        output.append(array[0])
    return output


hs_pred = func(hs)
tr_pred = func(tr)
ag_pred = func(ag)
print(len(hs_pred), len(tr_pred), len(ag_pred))

# +
from google.colab import files

# Write predictions as "id<TAB>HS<TAB>TR<TAB>AG" rows and download the file.
with open("en_b.tsv", "w") as file:
    for i in range(len(hs_pred)):
        file.write(str(id_test[i]))
        file.write('\t')
        file.write(str(hs_pred[i]))
        file.write('\t')
        file.write(str(tr_pred[i]))
        file.write('\t')
        file.write(str(ag_pred[i]))
        file.write('\n')
files.download('en_b.tsv')
English/TaskB/CNN_en_b.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# # Chapter 3 - Boston House Pricing Regression

# +
import random
import numpy as np
from keras import Sequential
from keras.layers import Dense
from keras.datasets import boston_housing
from matplotlib import pyplot as plt
from keras.utils.vis_utils import plot_model

# + [markdown]
# ## Loading the Dataset

# +
(train_data, train_labels), (test_data, test_labels) = boston_housing.load_data()

# + [markdown]
# ## Normalizing the Data

# +
def normalize(data):
    """Standardize `data` in place, feature-wise: x -> (x - mean) / std.

    Bug fixes vs. the original:
      * it computed the std from the global `train_labels` (the 1-D target
        vector) instead of from `data` itself;
      * it divided by the std BEFORE subtracting the mean, which does not
        produce zero-mean / unit-variance features.

    NOTE(review): best practice is to normalize the test set with the
    TRAINING set's statistics; this keeps the original one-argument
    interface and normalizes each dataset with its own mean/std — confirm
    which is wanted.
    """
    mean = data.mean(axis=0)
    std = data.std(axis=0)
    data -= mean
    data /= std
    return data


# + [markdown]
# ## Building the Model

# +
def build_model(dim):
    """Return a compiled 2x64-unit ReLU MLP regressor for `dim` input features."""
    model = Sequential()
    # Add layers
    model.add(Dense(units=64, activation='relu', input_shape=(dim, )))
    model.add(Dense(units=64, activation='relu'))
    model.add(Dense(units=1))
    # Compile model: MSE loss with MAE as the human-readable metric.
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model


# +
def train_model(model, x_train, y_train, epochs, batch_size, validation_data):
    """Fit `model` silently (verbose=0) and return the Keras History object."""
    history = model.fit(x_train, y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=0,
                        validation_data=validation_data)
    return history


# + [markdown]
# ## Evaluating the Model

# +
def evaluate_model(model, x_test, y_test):
    """Return [loss, mae] of `model` on the held-out test set."""
    results = model.evaluate(x_test, y_test, verbose=0)
    return results


# + [markdown]
# ## Manual K-Fold Cross Validation

# +
def k_means_validation(k, train_data, epochs):
    """Run manual K-FOLD cross-validation and return the per-fold history of
    validation MAE (one list of `epochs` values per fold).

    The function is misnamed (it performs k-fold CV, not k-means clustering);
    the name is kept so existing callers keep working.

    NOTE(review): reads the global `train_labels` while `train_data` is a
    parameter — confirm both always refer to the same split.
    """
    num_samples = len(train_data) // k
    history_mae = []
    for i in range(k):
        print(f'Processing Fold #{i}...')
        # Validation split: the i-th contiguous chunk.
        val_data = train_data[i * num_samples: (i + 1) * num_samples]
        val_labels = train_labels[i * num_samples: (i + 1) * num_samples]
        # Remaining training data: everything outside the i-th chunk.
        partial_train_data = np.concatenate(
            [train_data[: i * num_samples], train_data[(i + 1) * num_samples:]], axis=0)
        partial_train_labels = np.concatenate(
            [train_labels[: i * num_samples], train_labels[(i + 1) * num_samples:]], axis=0)
        # Build a fresh model for every fold so folds are independent.
        model = build_model(dim=train_data.shape[1])
        # Train the model on the remaining data, validating on the held-out fold.
        history = train_model(
            model=model,
            x_train=partial_train_data,
            y_train=partial_train_labels,
            validation_data=(val_data, val_labels),
            epochs=epochs,
            batch_size=1,
        )
        history_mae.append(history.history['val_mae'])
    return history_mae


# + [markdown]
# ## Plotting the Training Process

# +
def plot(ax, value, metric):
    """Draw the per-epoch curve of `metric` on the given axes with a legend."""
    ax.set_title(f'{metric} per Epoch')
    ax.plot(value, label=metric)
    ax.legend()


# + [markdown]
# ## Making Predictions

# +
# NOTE: the original re-defined an identical `evaluate_model` here; the
# duplicate definition was removed — the one under "Evaluating the Model"
# remains the single source of truth.

# + [markdown]
# ## Wrap Up

# +
# Normalize data
train_data = normalize(train_data)
test_data = normalize(test_data)

# +
k = 4
epochs = 100

# K-fold cross-validation
history_mae = k_means_validation(k=k, train_data=train_data, epochs=epochs)

# Average the validation MAE across folds, per epoch.
avg_mae_history = [np.mean([x[i] for x in history_mae]) for i in range(epochs)]

# +
# Plot
fig, ax = plt.subplots(1, figsize=(5, 3))

# Graph training process
plot(ax, avg_mae_history, 'mae')
plt.show()
Deep Learning with Python/Chapter 3 - Boston House Pricing Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Ejercicio 1 Prácticas LC. Curso 2020-2021
# ### <NAME>

# IMPORTS
from collections import defaultdict

# Source sentence: each token is "word/POS-category".
st = "El/DT perro/N come/V carne/N de/P la/DT carnicería/N y/C de/P la/DT nevera/N y/C canta/V el/DT la/N la/N la/N ./Fp"


# ## Ejercicio 1

def ej1(st):
    """Return a dict mapping each POS category to its number of occurrences."""
    r = {}
    for subst in st.split(" "):
        word, category = subst.split("/")
        r[category] = r.get(category, 0) + 1
    return r


print("Ej 1 \n------------------")
r = ej1(st)
rs = sorted(r.keys(), key=lambda x: x.lower())
for key in rs:
    print(f"{key} {r[key]}")


# ## Ejercicio 2

def ej2(st):
    """Return, per lower-cased word, the string
    "<count(w)> <cat1> <count(w,cat1)> <cat2> <count(w,cat2)> ...".

    A single O(N) pass over the tokens collects all the counts; a second
    pass over the distinct words formats the result strings.
    """
    r = {}
    cat_f = {}
    words_cats = defaultdict(dict)
    for subst in st.split(" "):
        word, category = subst.split("/")
        word = word.lower()
        r[word] = r.get(word, 0) + 1
        words_cats[word][category] = 1
        cat_f[(word, category)] = cat_f.get((word, category), 0) + 1
    for word in r.keys():
        r[word] = (
            f"{r[word]} "
            + "".join([f"{category} {cat_f[(word, category)]} " for category in words_cats[word]]).strip()
        )
    return r


print("Ej 2 \n------------------")
r = ej2(st)
rs = sorted(r.keys(), key=lambda x: x.lower())
for key in rs:
    print(f"{key} {r[key]}")


# ## Ejercicio 3

def ej3(st):
    """Return the frequency of every POS-category bigram, padding the
    sentence with the boundary markers <S> and </S>."""
    cats = ["<S>"] + [subst.split("/")[1] for subst in st.split(" ")] + ["</S>"]
    r = {}
    prev_cat = cats[0]
    for i in range(1, len(cats)):
        r[(prev_cat, cats[i])] = r.get((prev_cat, cats[i]), 0) + 1
        prev_cat = cats[i]
    return r


print("Ej 3 \n------------------")
r = ej3(st)
for key in r:
    print(f"{key} {r[key]}")


# ## Ejercicio 4

def ej4(st, w):
    """Print, for each category c seen with word w, the lexical probability
    P(c|w) = count(w,c)/count(w) and the emission probability
    P(w|c) = count(w,c)/count(c).

    Bug fixes vs. the original:
      * it read `r2[w][0]` — the FIRST CHARACTER of the summary string —
        as the word count, which breaks as soon as count(w) >= 10;
      * it sliced `split(" ")[2:]`, skipping one element too many, which
        misaligned the (category, count) pairs and raised ValueError
        (`int('N')`) for any word with more than one category;
      * it labelled the emission probabilities as "P( c | w )".
    """
    r1 = ej1(st)  # category -> count(c)
    r2 = ej2(st)  # word -> "count(w) cat1 count(w,cat1) ..."
    # Reject words that never occur in the string.
    if w not in r2.keys():
        print("Palabra desconocida")
        return
    parts = r2[w].split(" ")
    w_count = parts[0]
    # Pair up (category, count(w, c)) from the alternating tail of the string.
    info = zip(parts[1::2], parts[2::2])
    tmp = {}
    for c_tmp, n_tmp in info:
        tmp[c_tmp] = n_tmp
    # Lexical probabilities: P(c|w) = count(w, c) / count(w).
    for c in tmp.keys():
        p_c_w = int(tmp[c]) / int(w_count)
        print(f"P( {c} | {w} )= {p_c_w}")
    # Emission probabilities: P(w|c) = count(w, c) / count(c).
    for c in tmp.keys():
        p_w_c = int(tmp[c]) / int(r1[c])
        print(f"P( {w} | {c} )= {p_w_c}")


print("Ej 4 \n------------------")
ej4(st, "la")

st = "La/DT mamá/N de/P Pedro/N tiene/V tres/DNC tristes/Adj tigres/N que/C comen/V trigo/N en/P un/Pr triste/Adj trigal/N ./Fp"
LC/lab1/lab1.ipynb