code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # + # Dependencies and Setup import pandas as pd import numpy as np # File to Load (Remember to Change These) file_to_load = "Resources/purchase_data.csv" # Read Purchasing File and store into Pandas data frame purchase_data_df = pd.read_csv(file_to_load) purchase_data_df # - # ## Player Count # * Display the total number of players # total_players_unique = purchase_data_df["SN"].nunique() total_players_df = pd.DataFrame([{"Total players": total_players_unique}]) total_players_df # ## Purchasing Analysis (Total) # * Run basic calculations to obtain number of unique items, average price, etc. 
# # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # # + items_unique = purchase_data_df["Item Name"].nunique() average_price = purchase_data_df["Price"].mean() number_of_purchases = purchase_data_df["Purchase ID"].count() Total_Revenue = purchase_data_df["Price"].sum() Summary_df = pd.DataFrame({"Number of Unique items": [items_unique], "Average Price": [average_price], "Number of Purchases": [number_of_purchases], "Total Revenue": Total_Revenue}) Summary_df["Average Price"] = Summary_df["Average Price"].map('${:,.2f}'.format) Summary_df["Total Revenue"] = Summary_df["Total Revenue"].map('${:,.2f}'.format) Summary_df # - # ## Gender Demographics # * Percentage and Count of Male Players # # # * Percentage and Count of Female Players # # # * Percentage and Count of Other / Non-Disclosed # # # # + grouped_df_unique = purchase_data_df.groupby(["Gender"]).nunique() count = grouped_df_unique["SN"].unique() percentage = (grouped_df_unique["SN"]/total_players_unique)*100 Gender_df = pd.DataFrame({"Percentage of Players": percentage, "Count":count}) Gender_df.sort_values("Count", ascending = False) Gender_df["Percentage of Players"] = Gender_df["Percentage of Players"].map('{:.2f}%'.format) Gender_df # - # # ## Purchasing Analysis (Gender) # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
by gender # # # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # + grouped_df = purchase_data_df.groupby(["Gender"]) purchase_count = grouped_df["Purchase ID"].count() average_pp = grouped_df["Price"].mean() total_purchase = grouped_df["Price"].sum() avg_purchase_per_person = total_purchase/count Purchase_analysis_df = pd.DataFrame({"Purchase Count": purchase_count, "Average Purchase Price": average_pp, "Total Purchase Value": total_purchase, "Avg Total Purchase per Person":avg_purchase_per_person}) Purchase_analysis_df["Average Purchase Price"] = Purchase_analysis_df["Average Purchase Price"].map('${:,.2f}'.format) Purchase_analysis_df["Total Purchase Value"] = Purchase_analysis_df["Total Purchase Value"].map('${:,.2f}'.format) Purchase_analysis_df["Avg Total Purchase per Person"] = Purchase_analysis_df["Avg Total Purchase per Person"].map('${:,.2f}'.format) Purchase_analysis_df # - # ## Age Demographics # * Establish bins for ages # # # * Categorize the existing players using the age bins. 
Hint: use pd.cut() # # # * Calculate the numbers and percentages by age group # # # * Create a summary data frame to hold the results # # # * Optional: round the percentage column to two decimal points # # # * Display Age Demographics Table # # + age_bins = [0,9,14,19,24,29,34,39,70] age_labels = ["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"] unique_df = purchase_data_df.drop_duplicates("SN") unique_df["Category"] = pd.cut(unique_df["Age"],age_bins, labels=age_labels) age_total_count = unique_df["Category"].value_counts() age_percentage = age_total_count/total_players_unique * 100 # Creating DataFrame age_demographics_df = pd.DataFrame({"Total Count": age_total_count, "Percentage of Players": age_percentage}) age_demographics_df.sort_index # Formatting age_demographics_df["Percentage of Players"] = age_demographics_df["Percentage of Players"].map('{:.2f}%'.format) age_demographics_df # - # ## Purchasing Analysis (Age) # * Bin the purchase_data data frame by age # # # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
in the table below # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # + purchase_data_df1 = purchase_data_df.copy() age_bins = [0,9,14,19,24,29,34,39,70] age_labels = ["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"] purchase_data_df1["Age Ranges"] = pd.cut(purchase_data_df1["Age"],age_bins, labels=age_labels) grouped_age_df = purchase_data_df1.groupby("Age Ranges") purchase_count = grouped_age_df["Purchase ID"].count() average_pp = grouped_age_df["Price"].mean() total_purchase = grouped_age_df["Price"].sum() avg_purchase_per_person = total_purchase/age_total_count purchase_age_df = pd.DataFrame({"Purchase Count": purchase_count, "Average Purchase Price": average_pp, "Total Purchase Value": total_purchase, "Avg Total Purchase per Person":avg_purchase_per_person}) purchase_age_df["Average Purchase Price"] = purchase_age_df["Average Purchase Price"].map('${:,.2f}'.format) purchase_age_df["Total Purchase Value"] = purchase_age_df["Total Purchase Value"].map('${:,.2f}'.format) purchase_age_df["Avg Total Purchase per Person"] = purchase_age_df["Avg Total Purchase per Person"].map('${:,.2f}'.format) purchase_age_df # - # ## Top Spenders # * Run basic calculations to obtain the results in the table below # # # * Create a summary data frame to hold the results # # # * Sort the total purchase value column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the summary data frame # # # + grouped_SN_df = purchase_data_df.groupby("SN") p_count = grouped_SN_df["Purchase ID"].count() avg_price = grouped_SN_df["Price"].mean() total_value = grouped_SN_df["Price"].sum() top_spenders_df = pd.DataFrame({"Purchase Count": p_count, "Average Purchase Price": avg_price, "Total Purchase Value": total_value}) top_spenders_df = top_spenders_df.sort_values("Total Purchase Value",ascending=False).head() 
top_spenders_df["Average Purchase Price"] = top_spenders_df["Average Purchase Price"].map('${:,.2f}'.format) top_spenders_df["Total Purchase Value"] = top_spenders_df["Total Purchase Value"].map('${:,.2f}'.format) top_spenders_df # - # ## Most Popular Items # * Retrieve the Item ID, Item Name, and Item Price columns # # # * Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value # # # * Create a summary data frame to hold the results # # # * Sort the purchase count column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the summary data frame # # # + most_popular_df = purchase_data_df.groupby(["Item ID","Item Name"]) most_count = most_popular_df["Purchase ID"].count() item_price = most_popular_df["Price"].mean() most_total = most_popular_df["Price"].sum() most_popular_df = pd.DataFrame({"Purchase Count": most_count, "Item Price": item_price, "Total Purchase Value": most_total}) most_popular_df = most_popular_df.sort_values("Purchase Count", ascending=False) formatted_df = most_popular_df.copy() formatted_df["Item Price"] = formatted_df["Item Price"].map('${:,.2f}'.format) formatted_df["Total Purchase Value"] = formatted_df["Total Purchase Value"].map('${:,.2f}'.format) formatted_df.head() # - # ## Most Profitable Items # * Sort the above table by total purchase value in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the data frame # # # + sorted_df = most_popular_df.sort_values("Total Purchase Value", ascending=False) sorted_df["Item Price"] = sorted_df["Item Price"].map('${:,.2f}'.format) sorted_df["Total Purchase Value"] = sorted_df["Total Purchase Value"].map('${:,.2f}'.format) sorted_df.head() # - # # Heroes of Pymoli: Analysis # # The three observable trends in the data are: # - As can be seen from the genders dempgraphic table, Heroes of Pymoli have 6 times as many male players as 
female players. However, female players spend 40 cents more on average on a purchase. # - The largest population of players is in the 20-24 age bracket. However their average purchase per person is less than both the 35-39 and the less than 10 age bracket # - The most popular item in the game is 'Final Critic' which is also the most profitable item in the game.
HeroesOfPymoli/HeroesOfPymoli.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: sentiment # language: python # name: sentiment # --- # # Models subpackage tutorial # The NeuralModel class is a generic class used to manage neural networks implemented with Keras. It offers methods to save, load, train and use for classification the neural networks. # # Melusine provides two built-in Keras model : cnn_model and rnn_model based on the models used in-house at Maif. However the user is free to implement neural networks tailored for its needs. # ## The dataset # The NeuralModel class can take as input either : # - a text input : a cleaned text, usually the cleaned body or the concatenation of the cleaned body and the cleaned header. # - a text input and a metadata input : the metadata input has to be dummified. # #### Text input # + import ast import pandas as pd df_emails_preprocessed = pd.read_csv('./data/emails_preprocessed.csv', encoding='utf-8', sep=';') df_emails_preprocessed['clean_header'] = df_emails_preprocessed['clean_header'].astype(str) df_emails_preprocessed['clean_body'] = df_emails_preprocessed['clean_body'].astype(str) df_emails_preprocessed['attachment'] = df_emails_preprocessed['attachment'].apply(ast.literal_eval) # - df_emails_preprocessed.columns # The new clean_text column is the concatenation of the clean_header column and the clean_body column : # + df_emails_preprocessed['clean_text'] = df_emails_preprocessed['clean_header'] + " " + df_emails_preprocessed['clean_body'] # - df_emails_preprocessed.clean_text[0] # #### Metadata input # By default the metadata used are : # - the extension : gmail, outlook, wanadoo.. # - the day of the week at which the email has been sent # - the hour at which the email has been sent # - the minute at which the email has been sent # - the attachment types : pdf, png .. 
df_meta = pd.read_csv('./data/metadata.csv', encoding='utf-8', sep=';') df_meta.columns df_meta.head() # #### Defining X and y # X is a Pandas dataframe with a clean_text column that will be used for the text input and columns containing the dummified metadata. X = pd.concat([df_emails_preprocessed['clean_text'],df_meta],axis=1) # y is a numpy array containing the encoded labels : from sklearn.preprocessing import LabelEncoder y = df_emails_preprocessed['label'] le = LabelEncoder() y = le.fit_transform(y) y # ## The NeuralModel class from melusine.models.train import NeuralModel # The NeuralModel class is a generic class used to manage neural networks implemented with Keras. It offers methods to save, load, train and use for classification the neural networks. # # Its arguments are : # - **architecture_function :** a function returning a Model instance from Keras. # - **pretrained_embedding :** the pretrained embedding matrix as an numpy array. # - **text_input_column :** the name of the column that will provide the text input, by default clean_text. # - **meta_input_list :** the list of the names of the columns containing the metadata. If empty list or None the model is used without metadata. Default value, ['extension', 'dayofweek', 'hour', 'min']. # - **vocab_size :** the size of vocabulary for neurol network model. Default value, 25000. # - **seq_size :** the maximum size of input for neural model. Default value, 100. # - **loss :** the loss function for training. Default value, 'categorical_crossentropy'. # - **batch_size :** the size of batches for the training of the neural network model. Default value, 4096. # - **n_epochs :** the number of epochs for the training of the neural network model. Default value, 15. # #### architecture_function from melusine.models.neural_architectures import cnn_model, rnn_model # **architecture_function** is a function returning a Model instance from Keras. 
# Melusine provides two built-in neural networks : **cnn_model** and **rnn_model** based on the models used in-house at Maif. # #### pretrained_embedding # The embedding have to be trained on the user's dataset. from melusine.nlp_tools.embedding import Embedding pretrained_embedding = Embedding().load('./data/embedding.pickle') # ### NeuralModel used with text and metadata input # This neural network model will use the **clean_text** column for the text input and the dummified **extension**, **dayofweek**, **hour** and **min** as metadata input : nn_model = NeuralModel(architecture_function=cnn_model, pretrained_embedding=pretrained_embedding, text_input_column="clean_text", meta_input_list=['extension','attachment_type', 'dayofweek', 'hour', 'min'], n_epochs=10) # #### Training the neural network # During the training, logs are saved in "train" situated in the data directory. Use tensorboard to follow training using # - "tensorboard --logdir data" from your terminal # - directly from a notebook with "%load_ext tensorboard" and "%tensorboard --logdir data" magics command (see https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks) nn_model.fit(X,y,tensorboard_log_dir="./data") # ![title](../docs/_static/tensorboard.png) # #### Saving the neural network # The **save_nn_model** method saves : # - the Keras model as a json file # - the weights as a h5 file nn_model.save_nn_model("./data/nn_model") # Once the **save_nn_model** used the NeuralModel object can be saved as a pickle file : import joblib _ = joblib.dump(nn_model,"./data/nn_model.pickle",compress=True) # #### Loading the neural network # The NeuralModel saved as a pickle file has to be loaded first : nn_model = joblib.load("./data/nn_model.pickle") # Then the Keras model and its weights can be loaded : nn_model.load_nn_model("./data/nn_model") # #### Making predictions y_res = nn_model.predict(X) y_res = le.inverse_transform(y_res) y_res # ### NeuralModel used with only text input X = 
df_emails_preprocessed[['clean_text']] nn_model = NeuralModel(architecture_function=cnn_model, pretrained_embedding=pretrained_embedding, text_input_column="clean_text", meta_input_list=None, n_epochs=10) nn_model.fit(X,y) y_res = nn_model.predict(X) y_res = le.inverse_transform(y_res) y_res
tutorial/tutorial07_models.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // --- // # Updates and GDPR using Delta Lake - Scala // // In this notebook, we will review Delta Lake's end-to-end capabilities in Scala. You can also look at the original Quick Start guide if you are not familiar with [Delta Lake](https://github.com/delta-io/delta) [here](https://docs.delta.io/latest/quick-start.html). It provides code snippets that show how to read from and write to Delta Lake tables from interactive, batch, and streaming queries. // // In this notebook, we will cover the following: // // - Creating sample mock data containing customer orders // - Writing this data into storage in Delta Lake table format (or in short, Delta table) // - Querying the Delta table using functional and SQL // - The Curious Case of Forgotten Discount - Making corrections to data // - Enforcing GDPR on your data // - Oops, enforced it on the wrong customer! - Looking at the audit log to find mistakes in operations // - Rollback all the way! // - Closing the loop - 'defrag' your data // # Creating sample mock data containing customer orders // // For this tutorial, we will setup a sample file containing customer orders with a simple schema: (order_id, order_date, customer_name, price). 
spark.sql("DROP TABLE IF EXISTS input"); spark.sql(""" CREATE TEMPORARY VIEW input AS SELECT 1 order_id, '2019-11-01' order_date, 'Saveen' customer_name, 100 price UNION ALL SELECT 2, '2019-11-01', 'Terry', 50 UNION ALL SELECT 3, '2019-11-01', 'Priyanka', 100 UNION ALL SELECT 4, '2019-11-02', 'Steve', 10 UNION ALL SELECT 5, '2019-11-03', 'Rahul', 10 UNION ALL SELECT 6, '2019-11-03', 'Niharika', 75 UNION ALL SELECT 7, '2019-11-03', 'Elva', 90 UNION ALL SELECT 8, '2019-11-04', 'Andrew', 70 UNION ALL SELECT 9, '2019-11-05', 'Michael', 20 UNION ALL SELECT 10, '2019-11-05', 'Brigit', 25""") var orders = spark.sql("SELECT * FROM input") orders.show() orders.printSchema() // # Writing this data into storage in Delta Lake table format (or in short, Delta table) // // To create a Delta Lake table, you can write a DataFrame out in the **delta** format. You can use existing Spark SQL code and change the format from parquet, csv, json, and so on, to delta. These operations create a new Delta Lake table using the schema that was inferred from your DataFrame. // // If you already have existing data in Parquet format, you can do an "in-place" conversion to Delta Lake format. The code would look like following: // // DeltaTable.convertToDelta(spark, $"parquet.`{path_to_data}`"); // // //Confirm that the converted data is now in the Delta format // DeltaTable.isDeltaTable(parquetPath) val r = new scala.util.Random val sessionId = r. 
nextInt(1000) val path = s"/delta/delta-table-$sessionId"; path // + // Here's how you'd do this in Parquet: // orders.repartition(1).write().format("parquet").save(path) orders.repartition(1).write.format("delta").save(path) // - // # Querying the Delta table using functional and SQL // var ordersDataFrame = spark.read.format("delta").load(path) ordersDataFrame.show() ordersDataFrame.createOrReplaceTempView("ordersDeltaTable") spark.sql("SELECT * FROM ordersDeltaTable").show // # Understanding Meta-data // // In Delta Lake, meta-data is no different from data i.e., it is stored next to the data. Therefore, an interesting side-effect here is that you can peek into meta-data using regular Spark APIs. spark.read.text(s"$path/_delta_log/").collect.foreach(println); // # The Curious Case of Forgotten Discount - Making corrections to data // // Now that you are able to look at the orders table, you realize that you forgot to discount the orders that came in on November 1, 2019. Worry not! You can quickly make that correction. // + import io.delta.tables._ import org.apache.spark.sql.functions._ var table = DeltaTable.forPath(path) // Update every transaction that took place on November 1, 2019 and apply a discount of 10% table.update( condition = expr("order_date == '2019-11-01'"), set = Map("price" -> expr("price - price*0.1"))) // - table.toDF // When you now inspect the meta-data, what you will notice is that the original data is over-written. Well, not in a true sense but appropriate entries are added to Delta's transaction log so it can provide an "illusion" that the original data was deleted. We can verify this by re-inspecting the meta-data. You will see several entries indicating reference removal to the original data. spark.read.text(s"$path/_delta_log/").collect.foreach(println) // # Enforcing GDPR on your data // // One of your customers wanted their data to be deleted. 
But wait, you are working with data stored on an immutable file system (e.g., HDFS, ADLS, WASB). How would you delete it? Using Delta Lake's Delete API. // // Delta Lake provides programmatic APIs to conditionally update, delete, and merge (upsert) data into tables. For more information on these operations, see [Table Deletes, Updates, and Merges](https://docs.delta.io/latest/delta-update.html). // Delete the appropriate customer table.delete(condition = expr("customer_name == 'Saveen'")) table.toDF.show // # Oops, enforced it on the wrong customer! - Looking at the audit/history log to find mistakes in operations // // Delta's most powerful feature is the ability to allow looking into history i.e., the changes that were made to the underlying Delta Table. The cell below shows how simple it is to inspect the history. // table.history.drop("userId", "userName", "job", "notebook", "clusterId", "isolationLevel", "isBlindAppend").show(20, 1000, false) // # Rollback all the way using Time Travel! // // You can query previous snapshots of your Delta Lake table by using a feature called Time Travel. If you want to access the data that you overwrote, you can query a snapshot of the table before you overwrote the first set of data using the versionAsOf option. // // Once you run the cell below, you should see the first set of data, from before you overwrote it. Time Travel is an extremely powerful feature that takes advantage of the power of the Delta Lake transaction log to access data that is no longer in the table. Removing the version 0 option (or specifying version 1) would let you see the newer data again. For more information, see [Query an older snapshot of a table (time travel)](https://docs.delta.io/latest/delta-batch.html#deltatimetravel). 
spark.read.format("delta").option("versionAsOf", "1").load(path).write.mode("overwrite").format("delta").save(path) // Delete the correct customer - REMOVE table.delete(condition = expr("customer_name == 'Rahul'")) table.toDF.show table.history.drop("userId", "userName", "job", "notebook", "clusterId", "isolationLevel", "isBlindAppend").show(20, 1000, false) // # Closing the loop - 'defrag' your data // // + spark.conf.set("spark.databricks.delta.retentionDurationCheck.enabled", "false") table.vacuum(0.01) // Alternate Syntax: spark.sql($"VACUUM delta.`{path}`").show
Notebooks/Scala/Updates and GDPR using Delta Lake - Scala.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>Run SEIRHUD</h1> from model import SEIRHUD import csv import numpy as np import pandas as pd import time import warnings from tqdm import tqdm warnings.filterwarnings('ignore') data = pd.read_csv("../data/salvador.csv") data.head() def bootWeig(series, times): series = np.diff(series) series = np.insert(series, 0, 1) results = [] for i in range(0,times): results.append(np.random.multinomial(n = sum(series), pvals = series/sum(series))) return np.array(results) #using bootstrap to infeclists = bootWeig(data["cases"], 500) deathslists = bootWeig(data["deaths"], 500) #Define empty lists to recive results ypred = [] dpred = [] spred = [] epred = [] beta1 = [] beta2 = [] gammaH = [] gammaU = [] delta = [] ia0 = [] t1 = [] e0 = [] is0 = [] # + #define fixed parameters: kappa = 1/4 p = 0.2 gammaA = 1/3.5 gammaS = 1/4 muH = 0.15 muU = 0.4 xi = 0.53 omega_U = 0.29 omega_H = 0.14 N = 2872347 #Bound b1, b2?, t1, gmH gmU d h ia0 is0 e0 bound = ([0,0,0,1/14,1/14,0,0.05,0,0,0], [30,1.5,1,1/5,1/5,1,0.35,10/N,10/N,10/N]) # - for cases, deaths in tqdm(zip(infeclists, deathslists)): model = SEIRHUD(tamanhoPop = N, numeroProcessadores = 8) model.fit(x = range(1,len(data["cases"]) + 1), y = np.cumsum(cases), d = np.cumsum(deaths), bound = bound, kappa = kappa, p = p, gammaA = gammaA, gammaS = gammaS, muH = muH, muU = muU, xi = xi, omegaU = omega_U, omegaH = omega_H, stand_error = True, ) results = model.predict(range(1,len(data["cases"]) + 200)) coef = model.getCoef() #Append predictions ypred.append(results["pred"]) dpred.append(results["death"]) spred.append(results["susceptible"]) epred.append(results["exposed"]) #append parameters beta1.append(coef["beta1"]) beta2.append(coef["beta2"]) gammaH.append(coef["gammaH"]) gammaU.append(coef["gammaU"]) 
delta.append(coef["delta"]) t1.append(coef["dia_mudanca"]) ia0.append(coef["ia0"]) e0.append(coef["e0"]) is0.append(coef["is0"]) def getConfidenceInterval(series, length): series = np.array(series) #Compute mean value meanValue = [np.mean(series[:,i]) for i in range(0,length)] #Compute deltaStar deltaStar = meanValue - series #Compute lower and uper bound deltaL = [np.quantile(deltaStar[:,i], q = 0.025) for i in range(0,length)] deltaU = [np.quantile(deltaStar[:,i], q = 0.975) for i in range(0,length)] #Compute CI lowerBound = np.array(meanValue) + np.array(deltaL) UpperBound = np.array(meanValue) + np.array(deltaU) return [meanValue, lowerBound, UpperBound] #Get confidence interval for prediction for i, pred in tqdm(zip([ypred, dpred, epred, spred], ["Infec", "deaths", "exposed", "susceptible"])): Meanvalue, lowerBound, UpperBound = getConfidenceInterval(i, len(data["cases"]) + 199) df = pd.DataFrame.from_dict({pred + "_mean": Meanvalue, pred + "_lb": lowerBound, pred + "_ub": UpperBound}) df.to_csv("../results/Salvador/" + pred + ".csv", index = False) #Exprort parametes parameters = pd.DataFrame.from_dict({"beta1": beta1, "beta2": beta2, "gammaH": gammaH, "gammaU": gammaU, "delta": delta, "ia0":ia0, "e0": e0, "t1": t1, "is0":is0}) parameters.to_csv("../results/Salvador/Parameters.csv", index = False)
Reproducibility of published results/Evaluating the burden of COVID-19 in Bahia, Brazil: A modeling analysis of 14.8 million individuals/script/SEIRHUDSalvador.ipynb
# --- # jupyter: # jupytext: # split_at_heading: true # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide # !pip install -Uqq fastbook import fastbook fastbook.setup_book() #hide from fastbook import * # # A Language Model from Scratch # ## The Data from fastai.text.all import * path = untar_data(URLs.HUMAN_NUMBERS) #hide Path.BASE_PATH = path path.ls() lines = L() with open(path/'train.txt') as f: lines += L(*f.readlines()) with open(path/'valid.txt') as f: lines += L(*f.readlines()) lines text = ' . '.join([l.strip() for l in lines]) text[:100] tokens = text.split(' ') tokens[:10] vocab = L(*tokens).unique() vocab word2idx = {w:i for i,w in enumerate(vocab)} nums = L(word2idx[i] for i in tokens) nums # ## Our First Language Model from Scratch L((tokens[i:i+3], tokens[i+3]) for i in range(0,len(tokens)-4,3)) seqs = L((tensor(nums[i:i+3]), nums[i+3]) for i in range(0,len(nums)-4,3)) seqs bs = 64 cut = int(len(seqs) * 0.8) dls = DataLoaders.from_dsets(seqs[:cut], seqs[cut:], bs=64, shuffle=False) # ### Our Language Model in PyTorch class LMModel1(Module): def __init__(self, vocab_sz, n_hidden): self.i_h = nn.Embedding(vocab_sz, n_hidden) self.h_h = nn.Linear(n_hidden, n_hidden) self.h_o = nn.Linear(n_hidden,vocab_sz) def forward(self, x): h = F.relu(self.h_h(self.i_h(x[:,0]))) h = h + self.i_h(x[:,1]) h = F.relu(self.h_h(h)) h = h + self.i_h(x[:,2]) h = F.relu(self.h_h(h)) return self.h_o(h) learn = Learner(dls, LMModel1(len(vocab), 64), loss_func=F.cross_entropy, metrics=accuracy) learn.fit_one_cycle(4, 1e-3) n,counts = 0,torch.zeros(len(vocab)) for x,y in dls.valid: n += y.shape[0] for i in range_of(vocab): counts[i] += (y==i).long().sum() idx = torch.argmax(counts) idx, vocab[idx.item()], counts[idx].item()/n # ### Our First Recurrent Neural Network class LMModel2(Module): def __init__(self, vocab_sz, n_hidden): 
self.i_h = nn.Embedding(vocab_sz, n_hidden) self.h_h = nn.Linear(n_hidden, n_hidden) self.h_o = nn.Linear(n_hidden,vocab_sz) def forward(self, x): h = 0 for i in range(3): h = h + self.i_h(x[:,i]) h = F.relu(self.h_h(h)) return self.h_o(h) learn = Learner(dls, LMModel2(len(vocab), 64), loss_func=F.cross_entropy, metrics=accuracy) learn.fit_one_cycle(4, 1e-3) # ## Improving the RNN # ### Maintaining the State of an RNN class LMModel3(Module): def __init__(self, vocab_sz, n_hidden): self.i_h = nn.Embedding(vocab_sz, n_hidden) self.h_h = nn.Linear(n_hidden, n_hidden) self.h_o = nn.Linear(n_hidden,vocab_sz) self.h = 0 def forward(self, x): for i in range(3): self.h = self.h + self.i_h(x[:,i]) self.h = F.relu(self.h_h(self.h)) out = self.h_o(self.h) self.h = self.h.detach() return out def reset(self): self.h = 0 m = len(seqs)//bs m,bs,len(seqs) def group_chunks(ds, bs): m = len(ds) // bs new_ds = L() for i in range(m): new_ds += L(ds[i + m*j] for j in range(bs)) return new_ds cut = int(len(seqs) * 0.8) dls = DataLoaders.from_dsets( group_chunks(seqs[:cut], bs), group_chunks(seqs[cut:], bs), bs=bs, drop_last=True, shuffle=False) learn = Learner(dls, LMModel3(len(vocab), 64), loss_func=F.cross_entropy, metrics=accuracy, cbs=ModelResetter) learn.fit_one_cycle(10, 3e-3) # ### Creating More Signal sl = 16 seqs = L((tensor(nums[i:i+sl]), tensor(nums[i+1:i+sl+1])) for i in range(0,len(nums)-sl-1,sl)) cut = int(len(seqs) * 0.8) dls = DataLoaders.from_dsets(group_chunks(seqs[:cut], bs), group_chunks(seqs[cut:], bs), bs=bs, drop_last=True, shuffle=False) [L(vocab[o] for o in s) for s in seqs[0]] class LMModel4(Module): def __init__(self, vocab_sz, n_hidden): self.i_h = nn.Embedding(vocab_sz, n_hidden) self.h_h = nn.Linear(n_hidden, n_hidden) self.h_o = nn.Linear(n_hidden,vocab_sz) self.h = 0 def forward(self, x): outs = [] for i in range(sl): self.h = self.h + self.i_h(x[:,i]) self.h = F.relu(self.h_h(self.h)) outs.append(self.h_o(self.h)) self.h = self.h.detach() return 
torch.stack(outs, dim=1) def reset(self): self.h = 0 def loss_func(inp, targ): return F.cross_entropy(inp.view(-1, len(vocab)), targ.view(-1)) learn = Learner(dls, LMModel4(len(vocab), 64), loss_func=loss_func, metrics=accuracy, cbs=ModelResetter) learn.fit_one_cycle(15, 3e-3) # ## Multilayer RNNs # ### The Model class LMModel5(Module): def __init__(self, vocab_sz, n_hidden, n_layers): self.i_h = nn.Embedding(vocab_sz, n_hidden) self.rnn = nn.RNN(n_hidden, n_hidden, n_layers, batch_first=True) self.h_o = nn.Linear(n_hidden, vocab_sz) self.h = torch.zeros(n_layers, bs, n_hidden) def forward(self, x): res,h = self.rnn(self.i_h(x), self.h) self.h = h.detach() return self.h_o(res) def reset(self): self.h.zero_() learn = Learner(dls, LMModel5(len(vocab), 64, 2), loss_func=CrossEntropyLossFlat(), metrics=accuracy, cbs=ModelResetter) learn.fit_one_cycle(15, 3e-3) # ### Exploding or Disappearing Activations # ## LSTM # ### Building an LSTM from Scratch class LSTMCell(Module): def __init__(self, ni, nh): self.forget_gate = nn.Linear(ni + nh, nh) self.input_gate = nn.Linear(ni + nh, nh) self.cell_gate = nn.Linear(ni + nh, nh) self.output_gate = nn.Linear(ni + nh, nh) def forward(self, input, state): h,c = state h = torch.cat([h, input], dim=1) forget = torch.sigmoid(self.forget_gate(h)) c = c * forget inp = torch.sigmoid(self.input_gate(h)) cell = torch.tanh(self.cell_gate(h)) c = c + inp * cell out = torch.sigmoid(self.output_gate(h)) h = out * torch.tanh(c) return h, (h,c) class LSTMCell(Module): def __init__(self, ni, nh): self.ih = nn.Linear(ni,4*nh) self.hh = nn.Linear(nh,4*nh) def forward(self, input, state): h,c = state # One big multiplication for all the gates is better than 4 smaller ones gates = (self.ih(input) + self.hh(h)).chunk(4, 1) ingate,forgetgate,outgate = map(torch.sigmoid, gates[:3]) cellgate = gates[3].tanh() c = (forgetgate*c) + (ingate*cellgate) h = outgate * c.tanh() return h, (h,c) t = torch.arange(0,10); t t.chunk(2) # ### Training a Language Model 
Using LSTMs class LMModel6(Module): def __init__(self, vocab_sz, n_hidden, n_layers): self.i_h = nn.Embedding(vocab_sz, n_hidden) self.rnn = nn.LSTM(n_hidden, n_hidden, n_layers, batch_first=True) self.h_o = nn.Linear(n_hidden, vocab_sz) self.h = [torch.zeros(n_layers, bs, n_hidden) for _ in range(2)] def forward(self, x): res,h = self.rnn(self.i_h(x), self.h) self.h = [h_.detach() for h_ in h] return self.h_o(res) def reset(self): for h in self.h: h.zero_() learn = Learner(dls, LMModel6(len(vocab), 64, 2), loss_func=CrossEntropyLossFlat(), metrics=accuracy, cbs=ModelResetter) learn.fit_one_cycle(15, 1e-2) # ## Regularizing an LSTM # ### Dropout class Dropout(Module): def __init__(self, p): self.p = p def forward(self, x): if not self.training: return x mask = x.new(*x.shape).bernoulli_(1-p) return x * mask.div_(1-p) # ### Activation Regularization and Temporal Activation Regularization # ### Training a Weight-Tied Regularized LSTM class LMModel7(Module): def __init__(self, vocab_sz, n_hidden, n_layers, p): self.i_h = nn.Embedding(vocab_sz, n_hidden) self.rnn = nn.LSTM(n_hidden, n_hidden, n_layers, batch_first=True) self.drop = nn.Dropout(p) self.h_o = nn.Linear(n_hidden, vocab_sz) self.h_o.weight = self.i_h.weight self.h = [torch.zeros(n_layers, bs, n_hidden) for _ in range(2)] def forward(self, x): raw,h = self.rnn(self.i_h(x), self.h) out = self.drop(raw) self.h = [h_.detach() for h_ in h] return self.h_o(out),raw,out def reset(self): for h in self.h: h.zero_() learn = Learner(dls, LMModel7(len(vocab), 64, 2, 0.5), loss_func=CrossEntropyLossFlat(), metrics=accuracy, cbs=[ModelResetter, RNNRegularizer(alpha=2, beta=1)]) learn = TextLearner(dls, LMModel7(len(vocab), 64, 2, 0.4), loss_func=CrossEntropyLossFlat(), metrics=accuracy) learn.fit_one_cycle(15, 1e-2, wd=0.1) # ## Conclusion # ## Questionnaire # 1. If the dataset for your project is so big and complicated that working with it takes a significant amount of time, what should you do? # 1. 
Why do we concatenate the documents in our dataset before creating a language model? # 1. To use a standard fully connected network to predict the fourth word given the previous three words, what two tweaks do we need to make to ou model? # 1. How can we share a weight matrix across multiple layers in PyTorch? # 1. Write a module that predicts the third word given the previous two words of a sentence, without peeking. # 1. What is a recurrent neural network? # 1. What is "hidden state"? # 1. What is the equivalent of hidden state in ` LMModel1`? # 1. To maintain the state in an RNN, why is it important to pass the text to the model in order? # 1. What is an "unrolled" representation of an RNN? # 1. Why can maintaining the hidden state in an RNN lead to memory and performance problems? How do we fix this problem? # 1. What is "BPTT"? # 1. Write code to print out the first few batches of the validation set, including converting the token IDs back into English strings, as we showed for batches of IMDb data in <<chapter_nlp>>. # 1. What does the `ModelResetter` callback do? Why do we need it? # 1. What are the downsides of predicting just one output word for each three input words? # 1. Why do we need a custom loss function for `LMModel4`? # 1. Why is the training of `LMModel4` unstable? # 1. In the unrolled representation, we can see that a recurrent neural network actually has many layers. So why do we need to stack RNNs to get better results? # 1. Draw a representation of a stacked (multilayer) RNN. # 1. Why should we get better results in an RNN if we call `detach` less often? Why might this not happen in practice with a simple RNN? # 1. Why can a deep network result in very large or very small activations? Why does this matter? # 1. In a computer's floating-point representation of numbers, which numbers are the most precise? # 1. Why do vanishing gradients prevent training? # 1. Why does it help to have two hidden states in the LSTM architecture? 
# What is the purpose of each one?
# 1. What are these two states called in an LSTM?
# 1. What is tanh, and how is it related to sigmoid?
# 1. What is the purpose of this code in `LSTMCell`: `h = torch.cat([h, input], dim=1)`
# 1. What does `chunk` do in PyTorch?
# 1. Study the refactored version of `LSTMCell` carefully to ensure you understand how and why it does the same thing as the non-refactored version.
# 1. Why can we use a higher learning rate for `LMModel6`?
# 1. What are the three regularization techniques used in an AWD-LSTM model?
# 1. What is "dropout"?
# 1. Why do we scale the weights with dropout? Is this applied during training, inference, or both?
# 1. What is the purpose of this line from `Dropout`: `if not self.training: return x`
# 1. Experiment with `bernoulli_` to understand how it works.
# 1. How do you set your model in training mode in PyTorch? In evaluation mode?
# 1. Write the equation for activation regularization (in math or code, as you prefer). How is it different from weight decay?
# 1. Write the equation for temporal activation regularization (in math or code, as you prefer). Why wouldn't we use this for computer vision problems?
# 1. What is "weight tying" in a language model?
# ### Further Research
# 1. In `LMModel2`, why can `forward` start with `h=0`? Why don't we need to say `h=torch.zeros(...)`?
# 1. Write the code for an LSTM from scratch (you may refer to <<lstm>>).
# 1. Search the internet for the GRU architecture and implement it from scratch, and try training a model. See if you can get results similar to those we saw in this chapter. Compare your results to the results of PyTorch's built-in `GRU` module.
# 1. Take a look at the source code for AWD-LSTM in fastai, and try to map each of the lines of code to the concepts shown in this chapter.
deep_learning_for_coders/lesson8/clean/12_nlp_dive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Make reasonable subset of `virustrack.db` for testing # + import os import numpy as np import pandas as pd from covidvu.utils import autoReloadCode; autoReloadCode() from covidvu.cryostation import Cryostation from covidvu.pipeline.vugrowth import computeGrowth, TODAY_DATE from tqdm.auto import tqdm # - MASTER_DATABASE = '../../database/virustrack.db' TEST_DB = 'test-virustrack-v2.db' storageTest = Cryostation(TEST_DB) storageTest['US'] = { 'key': 'US' } storageTest.close() # Get 3 US states with Cryostation(TEST_DB) as cryostationTest: with Cryostation(MASTER_DATABASE) as cryostation: unitedStates = cryostation['US'] california = {'confirmed': unitedStates['provinces']['California']['confirmed']} newYork = {'confirmed': unitedStates['provinces']['New York']['confirmed']} newJersey = {'confirmed': unitedStates['provinces']['New Jersey']['confirmed']} item = {'confirmed':unitedStates['confirmed'], 'provinces':{'California': california, 'New York': newYork, 'New Jersey': newJersey, }} item['key'] = 'US' cryostationTest['US'] = item # Append 2 other countries with Cryostation(TEST_DB) as cryostationTest: with Cryostation(MASTER_DATABASE) as cryostation: italy = {'confirmed': cryostation['Italy']['confirmed']} italy['key'] = 'Italy' uk = {'confirmed': cryostation['United Kingdom']['confirmed']} uk['key'] = 'United Kingdom' cryostationTest['Italy'] = italy cryostationTest['United Kingdom'] = uk with Cryostation(TEST_DB) as cryostationTest: with Cryostation(MASTER_DATABASE) as cryostation: assert cryostationTest['US']['provinces']['New York']['confirmed'] == cryostation['US']['provinces']['New York']['confirmed'] assert cryostationTest['US']['confirmed'] == cryostation['US']['confirmed'] assert cryostationTest['Italy']['confirmed'] == 
cryostation['Italy']['confirmed'] # --- # ## Add growth countryName = 'US' stateName = 'New York' with Cryostation(TEST_DB) as cryostationTest: country = cryostationTest[countryName] province = country['provinces'][stateName] province['growth'] = dict() country['provinces'][stateName] = province cryostationTest[countryName] = country with Cryostation(TEST_DB) as cryostationTest: x = cryostationTest[countryName]['provinces']['New York']['growth'] x # --- # # Remove growth # TODO: Complete this from tinydb import where cryostationTest = Cryostation(TEST_DB) cryostationTest.close()
work/resources/test_databases/subset-virustrack-db.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Week 11 quiz solution: pandas import pandas as pd filepath = "enso_data.csv" df = pd.read_csv(filepath) df = pd.read_csv(filepath, index_col=0, parse_dates=True) df_enso=df.tail(5) df_enso = df_enso[['Nino12','Nino3','Nino4']] # ## Given the following DataFrame: df_enso # Write down the numerical values that would be printed for each of the following statements: df_enso.loc['2021-02-01'] df_enso['Nino3']['2021-03-01'] df_enso.columns df_enso.iloc[4,2]
wk11/wk11_quiz_sol.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Classification Trees

# The construction of a classification tree is very similar to that of a regression tree. For a fuller description of the code below, please see the regression tree code on the previous page.

# +
## Import packages
import numpy as np
from itertools import combinations
import matplotlib.pyplot as plt
import seaborn as sns

## Load data
penguins = sns.load_dataset('penguins')
penguins.dropna(inplace = True)
X = np.array(penguins.drop(columns = ['species','island']))
y = np.array(penguins['species'])

## Train-test split
np.random.seed(1)
test_frac = 0.25
test_size = int(len(y)*test_frac)
test_idxs = np.random.choice(np.arange(len(y)), test_size, replace = False)
X_train = np.delete(X, test_idxs, 0)
y_train = np.delete(y, test_idxs, 0)
X_test = X[test_idxs]
y_test = y[test_idxs]
# -

# We will build our classification tree on the {doc}`penguins </content/appendix/data>` dataset from `seaborn`. This dataset has a categorical response variable—penguin breed—with both quantitative and categorical predictors.

# ## 1. Helper Functions

# Let's first create our loss functions. The Gini index and cross-entropy calculate the loss for a single node while the `split_loss()` function creates the weighted loss of a split.

# +
## Loss Functions
def gini_index(y):
    """Gini impurity of label vector `y`: sum_k p_k * (1 - p_k).

    Zero for a pure node; larger values mean more class mixing.
    """
    size = len(y)
    # Only the class counts are needed; the original also bound the class
    # labels to an unused local, which has been removed.
    counts = np.unique(y, return_counts = True)[1]
    pmk = counts/size
    return np.sum(pmk*(1-pmk))

def cross_entropy(y):
    """Cross-entropy (in bits) of label vector `y`: -sum_k p_k * log2(p_k)."""
    size = len(y)
    counts = np.unique(y, return_counts = True)[1]
    pmk = counts/size
    # pmk > 0 always holds (every unique class occurs at least once),
    # so log2 never sees a zero.
    return -np.sum(pmk*np.log2(pmk))

def split_loss(child1, child2, loss = cross_entropy):
    """Size-weighted average of `loss` over the two child label vectors."""
    return (len(child1)*loss(child1) + len(child2)*loss(child2))/(len(child1) + len(child2))
# -

# Next, let's define a few miscellaneous helper functions.
# As in the regression tree construction, `all_rows_equal()` checks if all of a bud's rows (observations) are equal across all predictors. If this is the case, this bud will not be split and instead becomes a terminal leaf. The second function, `possible_splits()`, returns all possible ways to divide the classes in a categorical predictor into two. Specifically, it returns all possible sets of values which can be used to funnel observations into the "left" child node. An example is given below for a predictor with four categories, $a$ through $d$. The set $\{a, b\}$, for instance, would imply observations where that predictor equals $a$ or $b$ go to the left child and other observations go to the right child. (Note that this function requires the `itertools` package).

# +
## Helper Functions
def all_rows_equal(X):
    """True (NumPy bool) iff every row of X equals the first row."""
    # Returns a NumPy boolean on purpose: callers negate it with `~`.
    return np.all(X == X[0])

def possible_splits(x):
    """Every candidate "left-child" value set for the categories in x:
    all combinations of up to floor(len(x)/2) of its values, as tuples,
    ordered by subset size and then lexicographically within a size."""
    max_size = len(x)//2
    return [subset
            for size in range(1, max_size + 1)
            for subset in combinations(x, size)]

possible_splits(['a','b','c','d'])
# -

# ## 2. Helper Classes

# Next, we define two classes to help our main decision tree classifier. These classes are essentially identical to those discussed in the regression tree page. The only difference is the loss function used to evaluate a split.

# +
class Node:
    """A tree node: the data subset it holds plus tree bookkeeping."""
    def __init__(self, Xsub, ysub, ID, depth = 0, parent_ID = None, leaf = True):
        # Data reaching this node.
        self.Xsub = Xsub
        self.ysub = ysub
        self.size = len(ysub)
        # Position within the tree.
        self.ID = ID
        self.parent_ID = parent_ID
        self.depth = depth
        self.leaf = leaf

class Splitter:
    """Running record of the best split found so far for one bud."""
    def __init__(self):
        # Infinitely bad starting loss so any genuine split improves on it.
        self.loss = np.inf
        self.no_split = True

    def _replace_split(self, loss, d, dtype = 'quant', t = None, L_values = None):
        # Adopt a better split: predictor index `d`, its type, and either a
        # numeric threshold `t` (quantitative) or the left-child value set
        # `L_values` (categorical).
        self.loss = loss
        self.d = d
        self.dtype = dtype
        self.t = t
        self.L_values = L_values
        self.no_split = False
# -

# ## 3. Main Class

# Finally, we create the main class for our classification tree. This again is essentially identical to the regression tree class.
# In addition to differing in the loss function used to evaluate splits, this tree differs from the regression tree in how it forms predictions. In regression trees, the fitted value for a test observation was the average outcome variable of the training observations landing in the same leaf. In the classification tree, since our outcome variable is categorical, we instead use the most common class among training observations landing in the same leaf.
#

# +
class DecisionTreeClassifier:
    """Binary-split classification tree.

    Grows greedily: at each layer every eligible leaf ("bud") is split on the
    predictor/value pair that minimizes the weighted child loss (Gini or
    cross-entropy). Prediction routes each test row to a leaf and returns the
    leaf's most common training class.
    """

    #############################
    ######## 1. TRAINING ########
    #############################

    ######### FIT ##########
    def fit(self, X, y, loss_func = cross_entropy, max_depth = 100, min_size = 2, C = None):
        """Fit the tree on training data.

        X          : 2-D array of predictors (columns may mix quantitative and
                     categorical values; per-column type is inferred below).
        y          : 1-D array of class labels.
        loss_func  : node impurity function (cross_entropy or gini_index).
        max_depth  : maximum number of split layers.
        min_size   : minimum observations a bud needs to be split.
        C          : if not None, number of predictors sampled per split
                     (random-forest-style feature subsampling — TODO confirm
                     intended use against caller).
        """

        ## Add data
        self.X = X
        self.y = y
        self.N, self.D = self.X.shape
        # Re-infer each column's dtype individually: when X mixes strings and
        # numbers the array as a whole is object-typed, but rebuilding one
        # column via np.array(list(...)) recovers its own numeric/str dtype.
        dtypes = [np.array(list(self.X[:,d])).dtype for d in range(self.D)]
        self.dtypes = ['quant' if (dtype == float or dtype == int) else 'cat' for dtype in dtypes]

        ## Add model parameters
        self.loss_func = loss_func
        self.max_depth = max_depth
        self.min_size = min_size
        self.C = C

        ## Initialize nodes
        # Nodes are stored flat in a dict keyed by integer ID; current_ID is
        # the next free ID. The root always has ID 0.
        self.nodes_dict = {}
        self.current_ID = 0
        initial_node = Node(Xsub = X, ysub = y, ID = self.current_ID, parent_ID = None)
        self.nodes_dict[self.current_ID] = initial_node
        self.current_ID += 1

        # Build
        self._build()

    ###### BUILD TREE ######
    def _build(self):
        """Grow the tree layer by layer until max_depth or no eligible buds."""

        # NOTE(review): this initial assignment is immediately overwritten on
        # the first loop iteration; it is kept only as a harmless placeholder.
        eligible_buds = self.nodes_dict
        for layer in range(self.max_depth):

            ## Find eligible nodes for layer iteration
            # A bud is eligible if it is still a leaf, is large enough, has
            # non-identical rows, and contains more than one class.
            # NOTE(review): `&` and `~` are used instead of `and`/`not`; this
            # works because the operands are NumPy booleans (a plain Python
            # bool under `~` would yield -2). Do not "simplify" these.
            eligible_buds = {ID:node for (ID, node) in self.nodes_dict.items() if
                                (node.leaf == True) &
                                (node.size >= self.min_size) &
                                (~all_rows_equal(node.Xsub)) &
                                (len(np.unique(node.ysub)) > 1)}
            if len(eligible_buds) == 0:
                break

            ## split each eligible parent
            for ID, bud in eligible_buds.items():

                ## Find split
                self._find_split(bud)

                ## Make split
                # A bud may find no loss-reducing split, in which case it
                # simply stays a leaf.
                if not self.splitter.no_split:
                    self._make_split()

    ###### FIND SPLIT ######
    def _find_split(self, bud):
        """Search every eligible predictor/value pair for the lowest-loss
        split of `bud`, storing the winner in self.splitter."""

        ## Instantiate splitter
        splitter = Splitter()
        splitter.bud_ID = bud.ID

        ## For each (eligible) predictor...
        if self.C is None:
            eligible_predictors = np.arange(self.D)
        else:
            # Sample C predictors without replacement for this split.
            eligible_predictors = np.random.choice(np.arange(self.D), self.C, replace = False)
        for d in sorted(eligible_predictors):
            Xsub_d = bud.Xsub[:,d]
            dtype = self.dtypes[d]
            # A constant column cannot separate anything — skip it.
            if len(np.unique(Xsub_d)) == 1:
                continue

            ## For each value...
            if dtype == 'quant':
                # Thresholds: every unique value except the largest (which
                # would put all rows in the left child).
                for t in np.unique(Xsub_d)[:-1]:
                    ysub_L = bud.ysub[Xsub_d <= t]
                    ysub_R = bud.ysub[Xsub_d > t]
                    loss = split_loss(ysub_L, ysub_R, loss = self.loss_func)
                    if loss < splitter.loss:
                        splitter._replace_split(loss, d, 'quant', t = t)
            else:
                # Categorical: try each candidate left-child value set.
                for L_values in possible_splits(np.unique(Xsub_d)):
                    ysub_L = bud.ysub[np.isin(Xsub_d, L_values)]
                    ysub_R = bud.ysub[~np.isin(Xsub_d, L_values)]
                    loss = split_loss(ysub_L, ysub_R, loss = self.loss_func)
                    if loss < splitter.loss:
                        splitter._replace_split(loss, d, 'cat', L_values = L_values)

        ## Save splitter
        self.splitter = splitter

    ###### MAKE SPLIT ######
    def _make_split(self):
        """Apply self.splitter: mark the parent internal and create its two
        child nodes from the rows satisfying / failing the split condition."""
        ## Update parent node
        parent_node = self.nodes_dict[self.splitter.bud_ID]
        parent_node.leaf = False
        parent_node.child_L = self.current_ID
        parent_node.child_R = self.current_ID + 1
        parent_node.d = self.splitter.d
        parent_node.dtype = self.splitter.dtype
        parent_node.t = self.splitter.t
        parent_node.L_values = self.splitter.L_values

        ## Get X and y data for children
        if parent_node.dtype == 'quant':
            L_condition = parent_node.Xsub[:,parent_node.d] <= parent_node.t
        else:
            L_condition = np.isin(parent_node.Xsub[:,parent_node.d], parent_node.L_values)
        Xchild_L = parent_node.Xsub[L_condition]
        ychild_L = parent_node.ysub[L_condition]
        Xchild_R = parent_node.Xsub[~L_condition]
        ychild_R = parent_node.ysub[~L_condition]

        ## Create child nodes
        child_node_L = Node(Xchild_L, ychild_L, depth = parent_node.depth + 1,
                            ID = self.current_ID, parent_ID = parent_node.ID)
        child_node_R = Node(Xchild_R, ychild_R, depth = parent_node.depth + 1,
                            ID = self.current_ID+1, parent_ID = parent_node.ID)
        self.nodes_dict[self.current_ID] = child_node_L
        self.nodes_dict[self.current_ID + 1] = child_node_R
        # Two IDs consumed: left child and right child.
        self.current_ID += 2

    #############################
    ####### 2. PREDICTING #######
    #############################

    ###### LEAF MODES ######
    def _get_leaf_modes(self):
        """Map each leaf ID to its most common training class (ties break
        toward the first value in np.unique's sorted order)."""
        self.leaf_modes = {}
        for node_ID, node in self.nodes_dict.items():
            if node.leaf:
                values, counts = np.unique(node.ysub, return_counts=True)
                self.leaf_modes[node_ID] = values[np.argmax(counts)]

    ####### PREDICT ########
    def predict(self, X_test):
        """Route each row of X_test from the root (ID 0) to a leaf and
        return the array of that leaf's modal class."""

        # Calculate leaf modes
        self._get_leaf_modes()

        yhat = []
        for x in X_test:
            node = self.nodes_dict[0]
            while not node.leaf:
                # Follow the same rule used to build the split: threshold
                # comparison for quantitative predictors, set membership for
                # categorical ones.
                if node.dtype == 'quant':
                    if x[node.d] <= node.t:
                        node = self.nodes_dict[node.child_L]
                    else:
                        node = self.nodes_dict[node.child_R]
                else:
                    if x[node.d] in node.L_values:
                        node = self.nodes_dict[node.child_L]
                    else:
                        node = self.nodes_dict[node.child_R]
            yhat.append(self.leaf_modes[node.ID])
        return np.array(yhat)
# -

# A classification tree is built on the `penguins` dataset. We evaluate the predictions on a test set and find that roughly 90% of observations are correctly classified.

# +
## Load data
penguins = sns.load_dataset('penguins')
penguins.dropna(inplace = True)
X = np.array(penguins.drop(columns = ['species']))
y = 1*np.array(penguins['species'] == 'Adelie')
y[y == 0] = -1

## Train-test split
np.random.seed(1)
test_frac = 0.25
test_size = int(len(y)*test_frac)
test_idxs = np.random.choice(np.arange(len(y)), test_size, replace = False)
X_train = np.delete(X, test_idxs, 0)
y_train = np.delete(y, test_idxs, 0)
X_test = X[test_idxs]
y_test = y[test_idxs]
# +
## Build classifier
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train, max_depth = 10, min_size = 10)
y_test_hat = tree.predict(X_test)

## Evaluate on test data
np.mean(y_test_hat == y_test)
content/c6/s2/classification_tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #本章需导入的模块 import numpy as np import pandas as pd import warnings warnings.filterwarnings(action = 'ignore') import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams['font.sans-serif']=['SimHei'] #解决中文显示乱码问题 plt.rcParams['axes.unicode_minus']=False from sklearn.model_selection import train_test_split,KFold,cross_val_score from sklearn import tree import sklearn.linear_model as LM from sklearn import ensemble from sklearn.datasets import make_classification,make_circles,make_regression from sklearn.metrics import zero_one_loss,r2_score,mean_squared_error # + N=800 X,Y=make_circles(n_samples=N,noise=0.2,factor=0.5,random_state=123) unique_lables=set(Y) fig,axes=plt.subplots(nrows=1,ncols=2,figsize=(15,6)) colors=plt.cm.Spectral(np.linspace(0,1,len(unique_lables))) markers=['o','*'] for k,col,m in zip(unique_lables,colors,markers): x_k=X[Y==k] #plt.plot(x_k[:,0],x_k[:,1],'o',markerfacecolor=col,markeredgecolor="k",markersize=8) axes[0].scatter(x_k[:,0],x_k[:,1],color=col,s=30,marker=m) axes[0].set_title('%d个样本观测点的分布情况'%N) axes[0].set_xlabel('X1') axes[0].set_ylabel('X2') dt_stump = tree.DecisionTreeClassifier(max_depth=1, min_samples_leaf=1) B=500 adaBoost = ensemble.AdaBoostClassifier(base_estimator=dt_stump,n_estimators=B,algorithm="SAMME",random_state=123) adaBoost.fit(X,Y) adaBoostErr = np.zeros((B,)) for b,Y_pred in enumerate(adaBoost.staged_predict(X)): adaBoostErr[b] = zero_one_loss(Y,Y_pred) axes[1].plot(np.arange(B),adaBoostErr,linestyle='-') axes[1].set_title('迭代次数与训练误差') axes[1].set_xlabel('迭代次数') axes[1].set_ylabel('训练误差') fig = plt.figure(figsize=(15,12)) data=np.hstack((X.reshape(N,2),Y.reshape(N,1))) data=pd.DataFrame(data) data.columns=['X1','X2','Y'] data['Weight']=[1/N]*N for b,Y_pred in enumerate(adaBoost.staged_predict(X)): 
data['Y_pred']=Y_pred data.loc[data['Y']!=data['Y_pred'],'Weight'] *= (1.0-adaBoost.estimator_errors_[b])/adaBoost.estimator_errors_[b] if b in [5,10,20,450]: axes = fig.add_subplot(2,2,[5,10,20,450].index(b)+1) for k,col,m in zip(unique_lables,colors,markers): tmp=data.loc[data['Y']==k,:] tmp['Weight']=10+tmp['Weight']/(tmp['Weight'].max()-tmp['Weight'].min())*100 axes.scatter(tmp['X1'],tmp['X2'],color=col,s=tmp['Weight'],marker=m) axes.set_xlabel('X1') axes.set_ylabel('X2') axes.set_title("高权重的样本观测点(迭代次数=%d)"%b) # - # 说明:这里基于模拟数据直观观察提升策略下高权重样本观测随迭代次数的变化情况。 # 1、利用make_circles生成样本量等于800,有两个输入变量,输出变量为二分类的数据集。图形显示两分类的边界大致呈圆形。 # 2、以树深度等于1的分类树为基础学习器,采用提升策略进行集成学习。随迭代次数的增加,前期训练误差快速下降,大约30次后下降不明显并保持在一个基本稳定的水平。 # 3、为探索迭代过程中高权重样本观测的变化情况,计算每次迭代后各个样本观测的权重。 # 4、以点的大小展示迭代5次,10次,20次和450次时样本观测的权重大小。高权重(预测误差)的样本观测主要集中在两类的圆形边界上。
chapter6-3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/dlsun/pods/blob/master/Chapter_01_The_Data_Ecosystem/Chapter_1.2_Tabular_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="N4l5tHGE3TJK" # # Ch.1 Tabular Data # # What does data look like? For most people, the first image that comes to mind is a spreadsheet, where each row represents something being measured and each column a type of measurement. This stereotype exists for a reason; many real-world data sets can indeed be organized this way. Data that can be represented using rows and columns is called _tabular data_. The rows are also called _observations_ or _records_, while the columns are called _variables_ or _fields_. The different terms reflect the diverse communities within data science, and their origins are summarized in the table below. # # | | Rows | Columns | # |---------------------|----------------|-------------| # | Statisticians | "observations" | "variables" | # | Computer Scientists | "records" | "fields" | # # ## 1.1 Pandas DataFrames # # The table below is an example of a # data set that can be represented in tabular form. # This is a sample of user profiles in the # San Francisco Bay Area from the online dating website # OKCupid. In this case, each observation is an OKCupid user, and the variables include age, body type, height, and # (relationship) status. Although a # `DataFrame` can contain values of all types, the # values within a column are typically all of the same # type---the age and height columns store # numbers, while the body type and # status columns store strings. 
Some values may be missing, such as body type for the first user # and diet for the second. # # | age | body type | diet | ... | smokes | height | status | # |-----|-----------|-------------------|-----|--------|--------|--------| # | 31 | | mostly vegetarian | ... | no | 67 | single | # | 31 | average | | ... | no | 66 | single | # | 43 | curvy | | ... | trying to quit | 65 | single | # | ... | ... | ... | ... | ... | ... | ... | # | 60 | fit | | ... | no | 57 | single | # # Within Python, tabular data is typically stored in # a special type of object called a `DataFrame`. A `DataFrame` is optimized for storing tabular data; for example, it uses the fact that the values within a column are all the same type to save memory and speed up computations. Unfortunately, the `DataFrame` is not built into base Python, a reminder that Python is a general-purpose programming language. To be able to work with `DataFrame`s, we have to import a data science package called `pandas`, which essentially does one thing---define a data structure called a `DataFrame` for storing tabular data. But this data structure is so fundamental to data science that importing `pandas` is the very first line of many Jupyter notebooks and Python scripts: # + colab={} colab_type="code" id="P6aZVeKg9ZGb" import pandas as pd # + [markdown] colab_type="text" id="6T1LDcxO9Z0D" # This command makes `pandas` objects and utilities # available under the abbreviation `pd`. # + [markdown] colab_type="text" id="KYdirVaS523q" # ### 1.1.1 Reading From CSV # # How do we get data, which is ordinarily stored in a file on disk, # into a `pandas` `DataFrame`? `pandas` provides # several utilities for reading data. For example, # the OKCupid data in # the table above is stored as a _comma-separated values_ (CSV) file on # the web, available at the URL https://dlsun.github.io/pods/data/okcupid.csv. 
# # We can read in this file from the web using the `read_csv` function in `pandas`: # + colab={} colab_type="code" id="iQuzKGgy54by" data_dir = "https://dlsun.github.io/pods/data/" df_okcupid = pd.read_csv(data_dir + "okcupid.csv") df_okcupid.head() # + [markdown] colab_type="text" id="btNgamkR6bxr" # The `read_csv` function is also able # to read in a file from disk. It automatically infers # where to look based on the file path. # Unless the path is obviously a URL (e.g., it begins with `http://`), it looks for the file # on the local machine. # + [markdown] colab_type="text" id="OTAXMMVLqc9G" # Notice above how missing values are represented in a `pandas` `DataFrame`. Each missing value is represented by a `NaN`, which is short for "not a number". As we will see, most `pandas` operations simply ignore `NaN` values. # + [markdown] colab_type="text" id="7pgZHERn8zmO" # ### 1.1.2 Exercises # + [markdown] colab_type="text" id="metKlkS6stFX" # 1. Download the OKCupid data set above to your workstation and use `read_csv` to read in the file from your local machine. # # 2. Read in the Framingham Heart Study data set, # which is available at the URL `https://dlsun.github.io/pods/data/framingham_long.csv`. Be sure to give the `DataFrame` an # informative variable name.
TesterBook/01_Data_Ecosystem/.ipynb_checkpoints/Chapter_1.2_Tabular_Data-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Import packages
from scipy import optimize,arange
from numpy import array
import numpy as np
import matplotlib.pyplot as plt
import random
import sympy as sm
from math import *
# %matplotlib inline
from IPython.display import Markdown, display
import pandas as pd

# +
# Symbols for the Cournot problem: demand intercept a, demand slope b,
# marginal cost c_vec, firm i's quantity q_i, and the opponents' quantity.
a = sm.symbols('a')
b = sm.symbols('b')
c_vec = sm.symbols('c_vec')
q_vec = sm.symbols('q_i')       # quantity chosen by firm i
q_minus = sm.symbols('q_{-i}')  # combined quantity of the opponents

# Firm i's profit: quantity times (inverse-demand price minus marginal cost).
Pi_i = q_vec*((a-b*(q_vec+q_minus))-c_vec)
# First-order condition: derivative of profit with respect to q_i.
foc = sm.diff(Pi_i,q_vec)
foc
# -

# In order to use this in our solution, we rewrite $x_{i}+x_{-i} = \sum x_{i}$ using np.sum and then define a function for the foc

def foc1(a,b,q_vec,c_vec):
    """Numeric first-order condition taken from the sympy derivative above,
    with q_i + q_{-i} written as np.sum(q_vec)."""
    total_q = np.sum(q_vec)
    return -b*q_vec + a - b*total_q - c_vec
modelproject/rod.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.0 64-bit (''py37athena'': conda)' # name: python370jvsc74a57bd081098997110362167705b61d21e46dda767ff2050d805c22b6ba90fec7e1aa35 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # # Optimizing runtime performance on GPT-2 model inference with ONNXRuntime on CPU # # In this tutorial, you'll be introduced to how to load a GPT2 model from PyTorch, convert it to ONNX with one step search, and inference it using ONNX Runtime with/without IO Binding. GPT-2 model inference is optimized by compiling one-step beam search into the onnx compute graph, which speeds up the runtime significantly. # ## Prerequisites # If you have Jupyter Notebook, you may directly run this notebook. We will use pip to install or upgrade [PyTorch](https://pytorch.org/), [OnnxRuntime](https://microsoft.github.io/onnxruntime/) and other required packages. # # Otherwise, you can setup a new environment. First, we install [Anaconda](https://www.anaconda.com/distribution/). Then open an AnaConda prompt window and run the following commands: # # ```console # conda create -n cpu_env python=3.8 # conda activate cpu_env # conda install jupyter # jupyter notebook # ``` # # The last command will launch Jupyter Notebook and we can open this notebook in browser to continue. # + # Install PyTorch 1.7.0 and OnnxRuntime 1.7.0 for CPU-only. import sys if sys.platform == 'darwin': # Mac # !{sys.executable} -m pip install --upgrade torch torchvision else: # !{sys.executable} -m pip install --upgrade torch==1.7.0+cpu torchvision==0.8.1+cpu -f https://download.pytorch.org/whl/torch_stable.html # !{sys.executable} -m pip install onnxruntime==1.7.2 # Install other packages used in this notebook. 
# !{sys.executable} -m pip install transformers==4.3.1 # !{sys.executable} -m pip install onnx onnxconverter_common psutil pytz pandas py-cpuinfo py3nvml # + import os # Create a cache directory to store pretrained model. cache_dir = os.path.join(".", "cache_models") if not os.path.exists(cache_dir): os.makedirs(cache_dir) # - # ## Convert GPT2 model from PyTorch to ONNX with one step search ## # # We have a script [convert_to_onnx.py](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/transformers/convert_to_onnx.py) that could help you to convert GPT2 with past state to ONNX. # # The script accepts a pretrained model name or path of a checkpoint directory as input, and converts the model to ONNX. It also verifies that the ONNX model could generate same input as the pytorch model. The usage is like # ``` # python -m onnxruntime.transformers.convert_to_onnx -m model_name_or_path \ # --model_class=GPT2LMHeadModel_BeamSearchStep|GPT2LMHeadModel_ConfigurableOneStepSearch \ # --output gpt2_onestepsearch.onnx -o -p fp32|fp16|int8 # ``` # The -p option can be used to choose the precision: fp32 (float32), fp16 (mixed precision) or int8 (quantization). The -o option will generate optimized model, which is required for fp16 or int8. 
# # Here we use a pretrained model as example: # + from packaging import version from onnxruntime import __version__ as ort_verison if version.parse(ort_verison) >= version.parse('1.12.0'): from onnxruntime.transformers.models.gpt2.gpt2_beamsearch_helper import Gpt2BeamSearchHelper, GPT2LMHeadModel_BeamSearchStep else: from onnxruntime.transformers.gpt2_beamsearch_helper import Gpt2BeamSearchHelper, GPT2LMHeadModel_BeamSearchStep from transformers import AutoConfig import torch model_name_or_path = "gpt2" config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir) model = GPT2LMHeadModel_BeamSearchStep.from_pretrained(model_name_or_path, config=config, batch_size=1, beam_size=4, cache_dir=cache_dir) device = torch.device("cpu") model.eval().to(device) print(model.config) num_attention_heads = model.config.n_head hidden_size = model.config.n_embd num_layer = model.config.n_layer # - onnx_model_path = "gpt2_one_step_search.onnx" Gpt2BeamSearchHelper.export_onnx(model, device, onnx_model_path) # add parameter use_external_data_format=True when model size > 2 GB # ## ONNX Runtime Inference ## # # We can use ONNX Runtime to inference. The inputs are dictionary with name and numpy array as value, and the output is list of numpy array. Note that both input and output are in CPU. When you run the inference in GPU, it will involve data copy between CPU and GPU for input and output. # # Let's create an inference session for ONNX Runtime given the exported ONNX model, and see the output. 
# +
import onnxruntime
import numpy
from transformers import AutoTokenizer

# Default prompt used throughout the notebook (batch of one sentence).
EXAMPLE_Text = ['best hotel in bay area.']


def get_tokenizer(model_name_or_path, cache_dir):
    """Load the GPT-2 tokenizer configured for left-padded batch generation.

    Left padding keeps the last (most recent) token of every sequence aligned,
    and the EOS token doubles as the pad token since GPT-2 has no PAD token.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)
    tokenizer.padding_side = "left"
    tokenizer.pad_token = tokenizer.eos_token
    # tokenizer.add_special_tokens({'pad_token': '[PAD]'})  # alternative: a dedicated PAD token (not used)
    return tokenizer


def get_example_inputs(prompt_text=EXAMPLE_Text):
    """Tokenize *prompt_text* and build model inputs for the first decoding step.

    Returns (input_ids, attention_mask, position_ids, empty_past) where
    empty_past is one zero-length past-state tensor per transformer layer.
    Relies on notebook globals defined in earlier cells: model_name_or_path,
    cache_dir, num_attention_heads, hidden_size, num_layer, device.
    """
    tokenizer = get_tokenizer(model_name_or_path, cache_dir)
    encodings_dict = tokenizer.batch_encode_plus(prompt_text, padding=True)

    input_ids = torch.tensor(encodings_dict['input_ids'], dtype=torch.int64)
    attention_mask = torch.tensor(encodings_dict['attention_mask'], dtype=torch.float32)
    # Position ids: running count of real (non-pad) tokens, clamped at 0 so
    # padding positions don't get negative positions.
    position_ids = (attention_mask.long().cumsum(-1) - 1)
    position_ids.masked_fill_(position_ids < 0, 0)

    # Empty Past State for generating first word
    empty_past = []
    batch_size = input_ids.size(0)
    sequence_length = input_ids.size(1)  # NOTE(review): unused below
    # Past shape: [2 (key/value), batch, heads, past_seq_len=0, head_dim].
    past_shape = [2, batch_size, num_attention_heads, 0, hidden_size // num_attention_heads]
    for i in range(num_layer):
        empty_past.append(torch.empty(past_shape).type(torch.float32).to(device))

    return input_ids, attention_mask, position_ids, empty_past


# Run one beam-search step with plain ONNX Runtime: every input is copied to a
# contiguous numpy array on CPU.
input_ids, attention_mask, position_ids, empty_past = get_example_inputs()
beam_select_idx = torch.zeros([1, input_ids.shape[0]]).long()
input_log_probs = torch.zeros([input_ids.shape[0], 1])
input_unfinished_sents = torch.ones([input_ids.shape[0], 1], dtype=torch.bool)
prev_step_scores = torch.zeros([input_ids.shape[0], 1])

onnx_model_path = "gpt2_one_step_search.onnx"
session = onnxruntime.InferenceSession(onnx_model_path)
ort_inputs = {
    'input_ids': numpy.ascontiguousarray(input_ids.cpu().numpy()),
    'attention_mask': numpy.ascontiguousarray(attention_mask.cpu().numpy()),
    'position_ids': numpy.ascontiguousarray(position_ids.cpu().numpy()),
    'beam_select_idx': numpy.ascontiguousarray(beam_select_idx.cpu().numpy()),
    'input_log_probs': numpy.ascontiguousarray(input_log_probs.cpu().numpy()),
    'input_unfinished_sents': numpy.ascontiguousarray(input_unfinished_sents.cpu().numpy()),
    'prev_step_results': numpy.ascontiguousarray(input_ids.cpu().numpy()),
    'prev_step_scores': numpy.ascontiguousarray(prev_step_scores.cpu().numpy()),
}
for i, past_i in enumerate(empty_past):
    ort_inputs[f'past_{i}'] = numpy.ascontiguousarray(past_i.cpu().numpy())
ort_outputs = session.run(None, ort_inputs)
# -

# ## ONNX Runtime Inference with IO Binding ##
#
# To avoid data copy for input and output, ONNX Runtime also supports IO Binding. User could provide some buffer for input and outputs. For GPU inference, the buffer can be in GPU to reduce memory copy between CPU and GPU. This is helpful for high performance inference in GPU. For GPT-2, IO Binding might help the performance when batch size or (past) sequence length is large.


def inference_with_io_binding(session, config, input_ids, position_ids, attention_mask, past, beam_select_idx, input_log_probs, input_unfinished_sents, prev_step_results, prev_step_scores, step, context_len):
    """Run one beam-search step through ONNX Runtime using IO binding.

    Pre-computes output shapes for this step, allocates reusable output
    buffers on `device`, binds all inputs/outputs, then runs the session and
    returns the outputs as tensors (return_numpy=False).
    """
    output_shapes = Gpt2BeamSearchHelper.get_output_shapes(batch_size=1,
                                                           context_len=context_len,
                                                           past_sequence_length=past[0].size(3),
                                                           sequence_length=input_ids.size(1),
                                                           beam_size=4,
                                                           step=step,
                                                           config=config,
                                                           model_class="GPT2LMHeadModel_BeamSearchStep")
    output_buffers = Gpt2BeamSearchHelper.get_output_buffers(output_shapes, device)

    io_binding = Gpt2BeamSearchHelper.prepare_io_binding(session, input_ids, position_ids, attention_mask, past,
                                                         output_buffers, output_shapes, beam_select_idx,
                                                         input_log_probs, input_unfinished_sents,
                                                         prev_step_results, prev_step_scores)
    session.run_with_iobinding(io_binding)

    outputs = Gpt2BeamSearchHelper.get_outputs_from_io_binding_buffer(session, output_buffers, output_shapes,
                                                                      return_numpy=False)
    return outputs


# We can see that the result is exactly same with/without IO Binding:

# Repeat the first decoding step via IO binding and compare against the plain
# ORT run above (ort_outputs); outputs[-2] holds the step results.
input_ids, attention_mask, position_ids, empty_past = get_example_inputs()
beam_select_idx = torch.zeros([1, input_ids.shape[0]]).long()
input_log_probs = torch.zeros([input_ids.shape[0], 1])
input_unfinished_sents = torch.ones([input_ids.shape[0], 1], dtype=torch.bool)
prev_step_scores = torch.zeros([input_ids.shape[0], 1])
outputs = inference_with_io_binding(session, config, input_ids, position_ids, attention_mask, empty_past,
                                    beam_select_idx, input_log_probs, input_unfinished_sents, input_ids,
                                    prev_step_scores, 0, input_ids.shape[-1])
assert torch.eq(outputs[-2], torch.from_numpy(ort_outputs[-2])).all()
print("IO Binding result is good")

# ## Batch Text Generation ##
#
# Here is an example for text generation using ONNX Runtime with/without IO Binding.


# +
def update(output, step, batch_size, beam_size, context_length, prev_attention_mask, device):
    """
    Update the inputs for next inference.
    """
    # Outputs may be numpy arrays (plain ORT run) or tensors (IO binding run);
    # normalize each one to a tensor on `device`.
    last_state = (torch.from_numpy(output[0]).to(device)
                  if isinstance(output[0], numpy.ndarray) else output[0].clone().detach().cpu())

    # Next-step input ids: one row per (batch, beam) hypothesis.
    input_ids = last_state.view(batch_size * beam_size, -1).to(device)

    # Index of 'input_unfinished_sents' counted from the end of the output list;
    # the two entries before it are beam_select_idx and input_log_probs.
    input_unfinished_sents_id = -3
    prev_step_results = (torch.from_numpy(output[-2]).to(device)
                         if isinstance(output[-2], numpy.ndarray) else output[-2].clone().detach().to(device))
    # All hypotheses share the same next position: context_length + step - 1.
    position_ids = (torch.tensor([context_length + step - 1]).unsqueeze(0).repeat(batch_size * beam_size, 1).to(device))

    # After the first step the batch grows from batch_size to batch_size*beam_size,
    # so the previous mask must be tiled before appending a column of ones.
    if prev_attention_mask.shape[0] != (batch_size * beam_size):
        prev_attention_mask = prev_attention_mask.repeat(batch_size * beam_size, 1)
    attention_mask = torch.cat(
        [
            prev_attention_mask,
            torch.ones([batch_size * beam_size, 1]).type_as(prev_attention_mask),
        ],
        1,
    ).to(device)

    beam_select_idx = (torch.from_numpy(output[input_unfinished_sents_id - 2]).to(device) if isinstance(
        output[input_unfinished_sents_id - 2], numpy.ndarray) else
                       output[input_unfinished_sents_id - 2].clone().detach().to(device))
    input_log_probs = (torch.from_numpy(output[input_unfinished_sents_id - 1]).to(device) if isinstance(
        output[input_unfinished_sents_id - 1], numpy.ndarray) else
                       output[input_unfinished_sents_id - 1].clone().detach().to(device))
    input_unfinished_sents = (torch.from_numpy(output[input_unfinished_sents_id]).to(device) if isinstance(
        output[input_unfinished_sents_id], numpy.ndarray) else
                              output[input_unfinished_sents_id].clone().detach().to(device))
    prev_step_scores = (torch.from_numpy(output[-1]).to(device)
                        if isinstance(output[-1], numpy.ndarray) else output[-1].clone().detach().to(device))

    # Collect per-layer past states (outputs 1..n_layer).
    past = []
    if isinstance(output[1], tuple):  # past in torch output is tuple
        past = list(output[1])
    else:
        for i in range(model.config.n_layer):
            past_i = (torch.from_numpy(output[i + 1])
                      if isinstance(output[i + 1], numpy.ndarray) else output[i + 1].clone().detach())
            past.append(past_i.to(device))

    # Two parallel input dicts: tensors for the IO-binding path, numpy arrays
    # for the plain session.run path.
    inputs = {
        'input_ids': input_ids,
        'attention_mask': attention_mask,
        'position_ids': position_ids,
        'beam_select_idx': beam_select_idx,
        'input_log_probs': input_log_probs,
        'input_unfinished_sents': input_unfinished_sents,
        'prev_step_results': prev_step_results,
        'prev_step_scores': prev_step_scores,
    }
    ort_inputs = {
        'input_ids': numpy.ascontiguousarray(input_ids.cpu().numpy()),
        'attention_mask': numpy.ascontiguousarray(attention_mask.cpu().numpy()),
        'position_ids': numpy.ascontiguousarray(position_ids.cpu().numpy()),
        'beam_select_idx': numpy.ascontiguousarray(beam_select_idx.cpu().numpy()),
        'input_log_probs': numpy.ascontiguousarray(input_log_probs.cpu().numpy()),
        'input_unfinished_sents': numpy.ascontiguousarray(input_unfinished_sents.cpu().numpy()),
        'prev_step_results': numpy.ascontiguousarray(prev_step_results.cpu().numpy()),
        'prev_step_scores': numpy.ascontiguousarray(prev_step_scores.cpu().numpy()),
    }
    for i, past_i in enumerate(past):
        ort_inputs[f'past_{i}'] = numpy.ascontiguousarray(past_i.cpu().numpy())

    return inputs, ort_inputs, past


def test_generation(tokenizer, input_text, use_onnxruntime_io, ort_session = None, num_tokens_to_produce = 30):
    """Generate up to *num_tokens_to_produce* tokens for *input_text*.

    Runs the one-step beam-search model in a loop, feeding each step's outputs
    back in via update(); stops early once no hypothesis is unfinished.
    use_onnxruntime_io selects the IO-binding path vs. plain session.run.
    """
    print("Text generation using", "OnnxRuntime with IO binding" if use_onnxruntime_io else "OnnxRuntime", "...")
    input_ids, attention_mask, position_ids, past = get_example_inputs(input_text)
    beam_select_idx = torch.zeros([1, input_ids.shape[0]]).long()
    input_log_probs = torch.zeros([input_ids.shape[0], 1])
    input_unfinished_sents = torch.ones([input_ids.shape[0], 1], dtype=torch.bool)
    prev_step_scores = torch.zeros([input_ids.shape[0], 1])

    inputs = {
        'input_ids': input_ids,
        'attention_mask': attention_mask,
        'position_ids': position_ids,
        'beam_select_idx': beam_select_idx,
        'input_log_probs': input_log_probs,
        'input_unfinished_sents': input_unfinished_sents,
        'prev_step_results': input_ids,
        'prev_step_scores': prev_step_scores,
    }
    ort_inputs = {
        'input_ids': numpy.ascontiguousarray(input_ids.cpu().numpy()),
        'attention_mask': numpy.ascontiguousarray(attention_mask.cpu().numpy()),
        'position_ids': numpy.ascontiguousarray(position_ids.cpu().numpy()),
        'beam_select_idx': numpy.ascontiguousarray(beam_select_idx.cpu().numpy()),
        'input_log_probs': numpy.ascontiguousarray(input_log_probs.cpu().numpy()),
        'input_unfinished_sents': numpy.ascontiguousarray(input_unfinished_sents.cpu().numpy()),
        'prev_step_results': numpy.ascontiguousarray(input_ids.cpu().numpy()),
        'prev_step_scores': numpy.ascontiguousarray(prev_step_scores.cpu().numpy()),
    }
    for i, past_i in enumerate(past):
        ort_inputs[f'past_{i}'] = numpy.ascontiguousarray(past_i.cpu().numpy())
    batch_size = input_ids.size(0)
    beam_size = 4  # must match the beam_size the model was exported with
    context_length = input_ids.size(-1)

    for step in range(num_tokens_to_produce):
        if use_onnxruntime_io:
            outputs = inference_with_io_binding(ort_session, config, inputs['input_ids'], inputs['position_ids'],
                                                inputs['attention_mask'], past, inputs['beam_select_idx'],
                                                inputs['input_log_probs'], inputs['input_unfinished_sents'],
                                                inputs['prev_step_results'], inputs['prev_step_scores'], step,
                                                context_length)
        else:
            outputs = ort_session.run(None, ort_inputs)
        inputs, ort_inputs, past = update(outputs, step, batch_size, beam_size, context_length,
                                          inputs['attention_mask'], device)

        # Stop early when every beam hypothesis has finished.
        if not inputs['input_unfinished_sents'].any():
            break

    print("------------")
    # Decode the best (first) hypothesis of the final step.
    print(tokenizer.decode(inputs['prev_step_results'][0], skip_special_tokens=True))
# -

tokenizer = get_tokenizer(model_name_or_path, cache_dir)
input_text = EXAMPLE_Text
test_generation(tokenizer, input_text, use_onnxruntime_io=False, ort_session=session)

# Next, we use ONNX Runtime with IO binding to run again and we can see that the result is exactly same.

test_generation(tokenizer, input_text, use_onnxruntime_io=True, ort_session=session)
onnxruntime/python/tools/transformers/notebooks/Inference_GPT2-OneStepSearch_OnnxRuntime_CPU.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Notebook for Collaborative Filtering with both ALS and NCF models for 1M rows

# In this notebook, we implement ALS and NCF models for Movie Recommendation System for 1M movie ratings. The 1M reviews dataset contains 1 million movie ratings made by 4,000 users on 6,000 movies.

# +
# Initialization
# NOTE(review): this notebook runs on a Python 2 kernel (see header) and uses
# Python 2 `print` statements below.
import os
import time
import datetime as dt

import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")

# spark sql imports
from pyspark.sql import SparkSession, SQLContext, Row
from pyspark.sql.functions import UserDefinedFunction, explode, desc
from pyspark.sql.types import *

# spark ml imports
from pyspark.ml.recommendation import ALS
from pyspark.ml.linalg import Vectors
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.evaluation import RegressionEvaluator

# spark bigdl, analytics zoo imports
from zoo.models.recommendation import UserItemFeature
from zoo.models.recommendation import NeuralCF
from zoo.common.nncontext import init_nncontext
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.dataset import movielens
from bigdl.util.common import *

# data science imports
import math
import numpy as np
import pandas as pd
from sklearn import metrics
from operator import itemgetter
# -

# Location of the MovieLens 1M files on HDFS.
data_path = 'hdfs:///user/andrew/'

sc = init_nncontext("NCF Example")

# ## Data Preparation

# +
sqlContext = SQLContext(sc)

# Row formats for imported data
Rating = Row("userId", "movieId", "rating")  # "timespamp" ignored
# NOTE(review): Item declares 3 fields but the movies pipeline below supplies
# only (movieId, title) — verify genres is intentionally dropped.
Item = Row("movieId", "title", "genres")

# Load data — MovieLens .dat files are '::'-delimited.
ratings = sc.textFile(data_path + "ratings.dat") \
    .map(lambda line: line.split("::")[0:3]) \
    .map(lambda line: (int(line[0]), int(line[1]), float(line[2]))) \
    .map(lambda r: Rating(*r))
ratings = sqlContext.createDataFrame(ratings)

movies = sc.textFile(data_path + "movies.dat") \
    .map(lambda line: line.split("::")[0:2]) \
    .map(lambda line: (int(line[0]), line[1])) \
    .map(lambda r: Item(*r))
movies = sqlContext.createDataFrame(movies)
# -

# Create training and validation sets
ratings_train, ratings_val = ratings.randomSplit([0.8, 0.2], seed = 42)

print(ratings_train.count())
ratings_train.take(3)

# +
# Prepare the RDDs of Sample for the NCF model
# train and val will be used now, and full will be used later
def build_sample(user_id, item_id, rating):
    # Wrap one (user, item, rating) triple as a BigDL Sample inside a
    # UserItemFeature, the input type NeuralCF expects.
    sample = Sample.from_ndarray(np.array([user_id, item_id]), np.array([rating]))
    return UserItemFeature(user_id, item_id, sample)

fullPairFeatureRdds = ratings.rdd.map(lambda x: build_sample(x[0], x[1], x[2]))
trainPairFeatureRdds = ratings_train.rdd.map(lambda x: build_sample(x[0], x[1], x[2]))
valPairFeatureRdds = ratings_val.rdd.map(lambda x: build_sample(x[0], x[1], x[2]))

full_rdd = fullPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
train_rdd = trainPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
val_rdd = valPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
# -

print(train_rdd.count())
train_rdd.take(3)

# ## ALS and NCF Model Training and Validation on Training Data

# ### Train and evaluate the ALS model with training data

# %%time
# Create the ALS models and set parameters
als = ALS(seed = 42, regParam = 0.1, maxIter = 15, rank = 12, userCol = "userId", itemCol = "movieId", ratingCol = "rating")
# Using MAE for the scoring metric
evaluator = RegressionEvaluator(metricName="mae", labelCol="rating", predictionCol="prediction")
# Train and evaluate the models - if training error is more than slightly less than validation error, the model has been overfit
# fillna(0) handles cold-start users/items whose ALS prediction is NaN.
als_model = als.fit(ratings_train)
print 'Training Error (MAE):', evaluator.evaluate(als_model.transform(ratings_train))
print 'Validation Error (MAE):', evaluator.evaluate(als_model.transform(ratings_val).fillna(0))

# ### Train and evaluate the NCF model with training data

# +
# %%time
# Set parameters for the NCF model.
# Batch size should be a multiple of the total number of cores in the Spark environment
# max_user_id and max_movie_id are used for matching ids to embedding values
batch_size = 2560
max_user_id = ratings.agg({'userId': 'max'}).collect()[0]['max(userId)']
max_movie_id = ratings.agg({'movieId': 'max'}).collect()[0]['max(movieId)']

# Set other parameters and initialize the model
# class_num=5: ratings 1-5 treated as 5 classes; include_mf=False: MLP-only NCF.
ncf = NeuralCF(user_count=max_user_id, item_count=max_movie_id, class_num=5, hidden_layers=[20, 10], include_mf = False)

# Define the model optimizer
optimizer = Optimizer(
    model=ncf,
    training_rdd=train_rdd,
    criterion=ClassNLLCriterion(),
    end_trigger=MaxEpoch(10),
    batch_size=batch_size,  # 16 executors, 16 cores each
    optim_method=Adam(learningrate=0.001))

# Set the validation method for the optimizer
optimizer.set_validation(
    batch_size=batch_size,  # 16 executors, 16 cores each
    val_rdd=val_rdd,
    trigger=EveryEpoch(),
    val_method=[MAE(), Loss(ClassNLLCriterion())]
)

# Train the model
optimizer.optimize()
# -

# %%time
# Evaluate the model using MAE as the scoring metric
train_res = ncf.evaluate(train_rdd, batch_size, [MAE()])
val_res = ncf.evaluate(val_rdd, batch_size, [MAE()])
# If training error is more than slightly less than validation error, the model has been overfit
print 'Training Error (MAE):', train_res[0]
print 'Validation Error (MAE):', val_res[0]

# ## ALS and NCF Model Training and Validation on the entire dataset

# Create a sparse matrix of all userIds and movieIds.
# userIds are the rows and movieIds are the columns. Any position without an explicit rating is given a value of 0.
ratings_df = ratings.toPandas()
ratings_matrix = ratings_df.pivot(index='userId',columns='movieId',values='rating').fillna(0)

# Transform the userId x movieId back into three column format. (Will be much larger now)
# NOTE(review): list.remove() returns None, so value_vars is passed as None and
# pd.melt falls back to "all non-id_vars columns" — the result is the same here,
# but the expression does not do what it appears to.
ratings_matrix['userId'] = ratings_matrix.index
ratings_df_2 = pd.melt(ratings_matrix, id_vars = ['userId'], value_vars = list(ratings_matrix.columns).remove('userId'))
ratings_df_2.columns = ['userId', 'movieId', 'rating']
ratings_df_2.shape

# remove the userId, movieId pairs that already have ratings
# (keep only rating == 0, i.e. the unrated pairs to predict)
ratings_blanks_df = ratings_df_2.iloc[np.where(ratings_df_2.rating == 0)]
ratings_blanks_df.shape

# %%time
# Convert to spark dataframe
# This will be used by the ALS model for recommendations
ratings_blanks = sqlContext.createDataFrame(ratings_blanks_df)
# Free the large pandas intermediates.
del ratings_df, ratings_matrix, ratings_df_2, ratings_blanks_df

# Build the RDDs of Sample for the unrated userId, movieId pairs
# This will be used by the NCF model for recommendations
blankPairFeatureRdds = ratings_blanks.rdd.map(lambda x: build_sample(x[0], x[1], x[2]))

# %%time
# Set parameters and train the ALS on the full rated dataset
als = ALS(seed = 42, regParam = 0.1, maxIter = 15, rank = 12, userCol = "userId", itemCol = "movieId", ratingCol = "rating")
evaluator = RegressionEvaluator(metricName="mae", labelCol="rating", predictionCol="prediction")
als_model = als.fit(ratings)
# Evaluate the model on the training set
print 'Model Error (MAE):', evaluator.evaluate(als_model.transform(ratings))

# %%time
# Set parameters and train the NCF on the full rated dataset, keep the previously defined batch_size
max_user_id = ratings.agg({'userId': 'max'}).collect()[0]['max(userId)']
max_movie_id = ratings.agg({'movieId': 'max'}).collect()[0]['max(movieId)']
ncf = NeuralCF(user_count=max_user_id, item_count=max_movie_id, class_num=5, hidden_layers=[20, 10], include_mf = False)
# Define the optimizer
optimizer = Optimizer(
    model=ncf,
    training_rdd=full_rdd,
    criterion=ClassNLLCriterion(),
    end_trigger=MaxEpoch(10),
    batch_size=batch_size,  # 16 executors, 16 cores each
    optim_method=Adam(learningrate=0.001))
# Train the model
optimizer.optimize()
# Evaluate the model on the training set
full_res = ncf.evaluate(full_rdd, batch_size, [MAE()])
print 'Model Error (MAE):', full_res[0]

# ### Predictions Comparison
#
# Compare the prediction between ALS and NCF for one specific user. The user id is specified in the final two cells

# %%time
# Create recommendations for all users.
# The NCF model allows the number of recommendations to be limited to a top set, in this case the top 10 recommendations.
als_pair_preds = als_model.transform(ratings_blanks)
ncf_pair_preds = ncf.recommend_for_user(blankPairFeatureRdds, 10).toDF()

als_pair_preds.show(5)
ncf_pair_preds.show(5)

# Select the userId, movieId, and prediction columns
# The predictions are the predicted rating for the userId, movieId pair
als_preds = als_pair_preds.select('userId', 'movieId', 'prediction').toDF('userId', 'movieId', 'als_pred')
ncf_preds_topN = ncf_pair_preds.select('user_id', 'item_id', 'prediction').toDF('userId', 'movieId', 'ncf_pred')
del als_pair_preds, ncf_pair_preds

# +
# Filtering the ALS recommendations to the top 10
from pyspark.sql.window import Window
from pyspark.sql.functions import rank, col, row_number

# Create a window for each userId, with sorted predictions
window = Window.partitionBy(als_preds['userId']).orderBy(als_preds['als_pred'].desc())
# For each userId, keep the top 10 rows
als_preds_topN = als_preds.select(col('*'), row_number().over(window).alias('row_number')).where(col('row_number') <= 10)
# -

# Combine the recommendations with movie information, this will make the recommendations more understandable
als_preds_topN_labeled = als_preds_topN.join(movies, how = 'left', on = 'movieId')
ncf_preds_topN_labeled = ncf_preds_topN.join(movies, how = 'left', on = 'movieId')
del window, als_preds, als_preds_topN, ncf_preds_topN

# Keep only essential columns
als_final = als_preds_topN_labeled.select('userId', 'movieId', 'als_pred', 'title').sort(col("userId"))
ncf_final = ncf_preds_topN_labeled.select('userId', 'movieId', 'ncf_pred', 'title').sort(col("userId"))

als_final.show(10)
ncf_final.show(10)

# Convert to pandas so specific userIds can be viewed easily
als_final_df = als_final.toPandas()
ncf_final_df = ncf_final.toPandas()

als_final_df.iloc[np.where(als_final_df.userId == 5000)]

ncf_final_df.iloc[np.where(ncf_final_df.userId == 5000)]

# +
# sc.stop()
Model_Building/Collaborative_Filtering_ML_ALS_vs_BigDL_NCF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="XTA7OUaCMr_D"
# # Welcome to the Sendy Logistics Challenge - Team 9 JHB

# + colab={} colab_type="code" id="IbDV-D25M3yV"
# import the pandas module
import pandas as pd
# import the seaborn module
import seaborn as sns
# import the matplotlib module
import matplotlib.pyplot as plt
# import the numpy module
import numpy as np
# -

# ## Train Dataset

# ### Dataset used to train prediction model

# Train Dataset
train_df = pd.read_csv('data/Train.csv')
train_df.head()

# Work on a copy so the raw training frame stays untouched.
df = train_df.copy()

df.info()

# ## Summary Statistics

df.columns

# Replacement column names (same order as the CSV's original columns),
# lower-cased below for consistent snake_case access.
columns = ['Order_No', 'User_Id', 'Vehicle_Type', 'Platform_Type',
           'Personal_or_Business', 'Placement_Day_of_Month', 'Placement_Weekday',
           'Placement_Time', 'Confirmation_Day', 'Confirmation_Weekday',
           'Confirmation_Time', 'Arrival_at_Pickup_Day', 'Arrival_at_Pickup_Weekday',
           'Arrival_at_Pickup_Time', 'Pickup_Day', 'Pickup_Weekday', 'Pickup_Time',
           'Arrival_at_Destination_Day', 'Arrival_at_Destination_Weekday',
           'Arrival_at_Destination_Time', 'Distance', 'Temperature', 'Precipitation',
           'Pickup_Lat', 'Pickup_Long', 'Destination_Lat', 'Destination_Long',
           'Rider_Id', 'Time_from_Pickup_to_Arrival']
df.columns = [name.lower() for name in columns]

summary = df.describe()
summary

df.head()

# Distribution / outlier checks on the target and numeric features.
df.boxplot(column = 'time_from_pickup_to_arrival')

df.boxplot(column = ['distance','temperature','precipitation'])

df.time_from_pickup_to_arrival.hist()

df.distance.hist()

df.temperature.hist()

df.corr()

# Heatmap of pairwise correlations between numeric variables.
plt.figure(figsize=(10,6))
sns.heatmap(df.corr())
plt.title('Correlation between num variables')
plt.show()

pd.pivot_table(df, index=['distance','temperature'], values ='time_from_pickup_to_arrival')

# Target distribution split by day-of-week categories.
g = sns.FacetGrid(df, col="confirmation_weekday")
g.map(plt.hist, "time_from_pickup_to_arrival");
plt.show()

d = sns.FacetGrid(df, col="arrival_at_destination_weekday")
d.map(plt.hist, "time_from_pickup_to_arrival");
plt.show()

d = sns.FacetGrid(df, col="arrival_at_destination_weekday")
d.map(plt.hist, "temperature");
plt.show()

d = sns.FacetGrid(df, col="arrival_at_destination_weekday")
d.map(plt.hist, "distance");
plt.show()

# Violin/box plots of the target and distance against categorical features.
plt.figure(figsize=(18,5))
sns.violinplot(data=df[['arrival_at_destination_weekday','time_from_pickup_to_arrival']],
               x='arrival_at_destination_weekday', y='time_from_pickup_to_arrival')
plt.show()

plt.figure(figsize=(18,5))
sns.violinplot(data=df[['arrival_at_destination_weekday','distance']],
               x='arrival_at_destination_weekday', y='distance')
plt.show()

plt.figure(figsize=(18,5))
sns.violinplot(data=df[['personal_or_business','distance']],
               x='personal_or_business', y='distance')
plt.show()

plt.figure(figsize=(18,5))
sns.violinplot(data=df[['personal_or_business','time_from_pickup_to_arrival']],
               x='personal_or_business', y='time_from_pickup_to_arrival')
plt.show()

plt.figure(figsize=(18,5))
sns.boxplot(data=df[['platform_type','distance']], x='platform_type', y='distance')
plt.show()

plt.figure(figsize=(18,5))
sns.boxplot(data=df[['platform_type','time_from_pickup_to_arrival']],
            x='platform_type', y='time_from_pickup_to_arrival')
plt.show()

plt.figure(figsize=(18,5))
sns.violinplot(data=df[['vehicle_type','time_from_pickup_to_arrival']],
               x='vehicle_type', y='time_from_pickup_to_arrival')
plt.show()

# Test Dataset
test_df = pd.read_csv('data/Test.csv')
test_df.head()

# ## Riders Dataset

# Riders Dataset
riders_df = pd.read_csv('data/Riders.csv')
riders_df.head()

# Normalize rider column names to lowercase; first column becomes 'rider_id'.
columns= list(riders_df.columns)
columns = [x.lower() for x in columns]
columns[0] = 'rider_id'
riders_df.columns = columns
riders_df.head()

# Strip the 'Rider_Id_' prefix, keeping only the numeric suffix as the id.
riders_df['rider_id'] = riders_df['rider_id'].apply(lambda x: x.split('_')[-1])
riders_df.head()

# ## Variable Definitions for Datasets

# Variable Definitions Dataset
var_def_df = pd.read_csv('data/VariableDefinitions.csv')
var_def_df

# # Sample submission

# Sample Submission
sub_sample_df = pd.read_csv('data/SampleSubmission.csv')
sub_sample_df.head()
EDA_Draft.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DataMinati/TSA-Fauj/blob/main/ARMA_Approach_to_Index2k18_Stocks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="SM6K3NSYqh_P"
# ### 1. Importing the necessary packages

# + colab={"base_uri": "https://localhost:8080/"} id="gHkVJAFLqa9c" outputId="37de06f6-df3f-4dc8-c4b2-6d7aba15c5b2"
# NOTE(review): statsmodels.tsa.arima_model.ARMA is deprecated (removed in
# statsmodels 0.13); this notebook pins an older statsmodels.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.graphics.tsaplots as sgt
import statsmodels.tsa.stattools as sts
from statsmodels.tsa.arima_model import ARMA
from scipy.stats.distributions import chi2
import seaborn as sns
sns.set()
import warnings
warnings.filterwarnings("ignore")
print("All necessary packages have been imported successfully!")

# + [markdown] id="8O3aHySiq6ys"
# ### 2. Importing the Dataset

# + colab={"base_uri": "https://localhost:8080/", "height": 356} id="trGur-Y-qzFe" outputId="cf950e13-3f39-42fd-f805-0c8a72a91258"
raw_csv_data = pd.read_csv("https://raw.githubusercontent.com/MainakRepositor/Datasets-/master/Index2018.csv")
df_comp=raw_csv_data.copy()
df_comp.head(10)

# + [markdown] id="O-NAuVCqrMuu"
# ### 3. Pre-processing of Data

# + id="E7RaDNVsrDLV"
# Parse dates, set a business-day frequency, forward-fill gaps, keep only the
# FTSE series as 'market_value', then take the first 80% as the training set.
df_comp.date = pd.to_datetime(df_comp.date, dayfirst = True)
df_comp.set_index("date", inplace=True)
df_comp=df_comp.asfreq('b')
df_comp=df_comp.fillna(method='ffill')
df_comp['market_value']=df_comp.ftse
del df_comp['spx']
del df_comp['dax']
del df_comp['ftse']
del df_comp['nikkei']
size = int(len(df_comp)*0.8)
df, df_test = df_comp.iloc[:size], df_comp.iloc[size:]

# + [markdown] id="911Sv3kXrY0f"
# ### 4. The LLR Test

# + id="7bEiX_ITrTGm"
# Log-likelihood ratio test between two nested models; returns the chi-squared
# p-value with DF degrees of freedom. NOTE(review): defined but not called in
# the cells below (models are compared by LL/AIC printouts instead).
def LLR_test(mod_1, mod_2, DF = 1):
    L1 = mod_1.fit().llf
    L2 = mod_2.fit().llf
    LR = (2*(L2-L1))
    p = chi2.sf(LR, DF).round(3)
    return p

# + [markdown] id="1uGcyTZ0rdyj"
# ### 5. Creating Returns

# + id="p4FxyxA4rblo"
# Percentage returns; first row is NaN, hence the [1:] slices below.
df['returns'] = df.market_value.pct_change(1)*100

# + [markdown] id="u7YUuu64risT"
# ### 6. ARMA(1,1)

# + colab={"base_uri": "https://localhost:8080/", "height": 337} id="-g3lubB0rgXW" outputId="2e053944-f922-414e-ff37-61ce6307c275"
model_ret_ar_1_ma_1 = ARMA(df.returns[1:], order=(1,1))
results_ret_ar_1_ma_1 = model_ret_ar_1_ma_1.fit()
results_ret_ar_1_ma_1.summary()

# + id="PZVbuiFhrlr7"
# Pure AR(1) and MA(1) baselines (constructed but not fitted here).
model_ret_ar_1 = ARMA(df.returns[1:], order = (1,0))
model_ret_ma_1 = ARMA(df.returns[1:], order = (0,1))

# + [markdown] id="NMFvwU_RsI4M"
# ### 7. Higher-Lag ARMA Models

# + id="COT6g6ALrpQh"
model_ret_ar_3_ma_3 = ARMA(df.returns[1:], order=(3,3))
results_ret_ar_3_ma_3 = model_ret_ar_3_ma_3.fit()

# + colab={"base_uri": "https://localhost:8080/", "height": 499} id="UfRUWjgbsOgz" outputId="07550b4a-c798-43d3-b494-08ccd42b1665"
results_ret_ar_3_ma_3.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="BOUwu_ZKsTT0" outputId="0a476763-70ab-48ca-f9f3-6933738cee6c"
model_ret_ar_3_ma_2 = ARMA(df.returns[1:], order=(3,2))
results_ret_ar_3_ma_2 = model_ret_ar_3_ma_2.fit()
results_ret_ar_3_ma_2.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="gUSJXs2qsVxh" outputId="8229fc35-1ccb-4ac9-fc91-f4a087f5ffde"
model_ret_ar_2_ma_3 = ARMA(df.returns[1:], order=(2,3))
results_ret_ar_2_ma_3 = model_ret_ar_2_ma_3.fit()
results_ret_ar_2_ma_3.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="u-sbWz0lsfHo" outputId="f90c6e68-8e71-443b-c897-5bc41389fdb3"
model_ret_ar_3_ma_1 = ARMA(df.returns[1:], order=(3,1))
results_ret_ar_3_ma_1 = model_ret_ar_3_ma_1.fit()
results_ret_ar_3_ma_1.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="8arKZcsLsmMq" outputId="ce515ead-f11c-4247-b375-67fa24fcbf54"
model_ret_ar_2_ma_2 = ARMA(df.returns[1:], order=(2,2))
results_ret_ar_2_ma_2 = model_ret_ar_2_ma_2.fit()
results_ret_ar_2_ma_2.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="GOYa8m3Hsosb" outputId="d286f4cc-9d19-4651-a04e-008720058223"
model_ret_ar_1_ma_3 = ARMA(df.returns[1:], order=(1,3))
results_ret_ar_1_ma_3 = model_ret_ar_1_ma_3.fit()
results_ret_ar_1_ma_3.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="VSL5ALPDstvO" outputId="299eefc8-fe0f-49f6-a1c8-2edc64c84675"
# Compare the two leading candidates by log-likelihood and AIC.
print("\n ARMA(3,2): \tLL = ", results_ret_ar_3_ma_2.llf, "\tAIC = ", results_ret_ar_3_ma_2.aic)
print("\n ARMA(1,3): \tLL = ", results_ret_ar_1_ma_3.llf, "\tAIC = ", results_ret_ar_1_ma_3.aic)

# + [markdown] id="EGxYj4yOs2M9"
# ### 8. Residuals for Returns

# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="ygWUprU6swHI" outputId="c4904740-8e96-414a-a887-798b71e33cff"
df['res_ret_ar_3_ma_2'] = results_ret_ar_3_ma_2.resid[1:]
df.res_ret_ar_3_ma_2.plot(figsize = (20,5))
plt.title("Residuals of Returns", size=24)
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 293} id="SH2hfHwItCua" outputId="84caa239-81cf-4571-b624-a980e8442cc6"
sgt.plot_acf(df.res_ret_ar_3_ma_2[2:], zero = False, lags = 40)
plt.title("ACF Of Residuals for Returns",size=24)
plt.show()

# + [markdown] id="hXQqZDNLtHnq"
# ### 9. Reevaluating Model Selection

# + colab={"base_uri": "https://localhost:8080/", "height": 660} id="QXM2yxW0tEvn" outputId="983977b1-681e-4bd6-cf41-db16f093a7cf"
model_ret_ar_5_ma_5 = ARMA(df.returns[1:], order=(5,5))
results_ret_ar_5_ma_5 = model_ret_ar_5_ma_5.fit()
results_ret_ar_5_ma_5.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 499} id="CqAF4l8btKVQ" outputId="073b6776-a09e-40a1-bd75-cae73e376beb"
model_ret_ar_5_ma_1 = ARMA(df.returns[1:], order=(5,1))
results_ret_ar_5_ma_1 = model_ret_ar_5_ma_1.fit()
results_ret_ar_5_ma_1.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 499} id="-2qZA1PBtQvY" outputId="87de45c3-6b76-42a0-beca-af3d52e40765"
model_ret_ar_1_ma_5 = ARMA(df.returns[1:], order=(1,5))
results_ret_ar_1_ma_5 = model_ret_ar_1_ma_5.fit()
results_ret_ar_1_ma_5.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="6b77cDIVtTYy" outputId="2af18b1a-1b47-4c10-cbe9-0be832160c5e"
print("ARMA(5,1): \t LL = ",results_ret_ar_5_ma_1.llf,"\t AIC = ",results_ret_ar_5_ma_1.aic)
print("ARMA(1,5): \t LL = ",results_ret_ar_1_ma_5.llf,"\t AIC = ",results_ret_ar_1_ma_5.aic)

# + colab={"base_uri": "https://localhost:8080/"} id="2EZHj-p5tVzZ" outputId="c4f18805-a27a-4294-b625-c3fe5cf34b0c"
print("ARMA(3,2): \t LL = ",results_ret_ar_3_ma_2.llf,"\t AIC = ",results_ret_ar_3_ma_2.aic)

# + [markdown] id="j92O2od2tasj"
# ### 10. Residuals for the New Model

# + colab={"base_uri": "https://localhost:8080/", "height": 293} id="tJ1uQRJutXR8" outputId="d2c23fce-9694-44f4-80d8-c9060ea26673"
df['res_ret_ar_5_ma_1'] = results_ret_ar_5_ma_1.resid
sgt.plot_acf(df.res_ret_ar_5_ma_1[1:], zero = False, lags = 40)
plt.title("ACF of Residuals for Returns",size=24)
plt.show()

# + [markdown] id="9j230JRotgwd"
# ### 11. ARMA Models for Prices

# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="7KGqL_7GteFX" outputId="68d1b572-cccc-4f8d-eb08-a306a8f44132"
sgt.plot_acf(df.market_value, unbiased=True, zero = False, lags = 40)
plt.title("Autocorrelation Function for Prices",size=20)
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="PuWfyPsxtkTw" outputId="b56f69cb-c954-4f72-c09a-597c2ff8f4dd"
sgt.plot_pacf(df.market_value, lags = 40, alpha = 0.05, zero = False , method = ('ols'))
plt.title("Partial Autocorrelation Function for Prices",size=20)
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 337} id="neyUdlURtnTw" outputId="059ecf70-0a04-4943-ec0a-2c696be3dc84"
model_ar_1_ma_1 = ARMA(df.market_value, order=(1,1))
results_ar_1_ma_1 = model_ar_1_ma_1.fit()
results_ar_1_ma_1.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="wQQ-Gt9Jtpk1" outputId="08601521-5f67-4a31-f94f-888b1d0106de"
df['res_ar_1_ma_1'] = results_ar_1_ma_1.resid
sgt.plot_acf(df.res_ar_1_ma_1, zero = False, lags = 40)
plt.title("ACF Of Residuals of Prices",size=20)
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 700} id="8ntxvL61tr_j" outputId="a4967658-462a-42cc-cff9-c6e9dd7baed7"
# start_ar_lags helps the MLE starting-parameter search converge for
# higher-order models on the (non-stationary) price series.
model_ar_5_ma_6 = ARMA(df.market_value, order=(5,6))
results_ar_5_ma_6 = model_ar_5_ma_6.fit(start_ar_lags = 7)
results_ar_5_ma_6.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 539} id="0KXCvLaBtuRV" outputId="405fa5e3-fee6-4352-8434-a8b9e7e90f95"
model_ar_6_ma_1 = ARMA(df.market_value, order=(6,1))
results_ar_6_ma_1 = model_ar_6_ma_1.fit(start_ar_lags = 7)
results_ar_6_ma_1.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="cu1qXUg-t1F0" outputId="885f5a83-247d-4ce7-e069-056426f38594"
print("ARMA(5,6): \t LL = ", results_ar_5_ma_6.llf, "\t AIC = ", results_ar_5_ma_6.aic)
print("ARMA(6,1): \t LL = ", results_ar_6_ma_1.llf, "\t AIC = ", results_ar_6_ma_1.aic)

# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="P4QjzgBGt39g" outputId="bda1bd01-4036-48fa-bda5-060f3907cf16"
df['res_ar_5_ma_6'] = results_ar_5_ma_6.resid
sgt.plot_acf(df.res_ar_5_ma_6, zero = False, lags = 40)
plt.title("ACF Of Residuals of Prices",size=20)
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="jMjSq4Twt5hl" outputId="5c85ce03-45f5-4e94-d013-8912bbd8be3b"
# NOTE(review): this compares a price-model LL/AIC against a returns-model
# LL/AIC — the two are fit to different series, so the comparison is not
# like-for-like.
print("ARMA(5,6): \t LL = ", results_ar_5_ma_6.llf, "\t AIC = ", results_ar_5_ma_6.aic)
print("ARMA(5,1): \t LL = ", results_ret_ar_5_ma_1.llf, "\t AIC = ", results_ret_ar_5_ma_1.aic)

# + id="M8HUUap5t7bj"
ARMA_Approach_to_Index2k18_Stocks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: batopt # language: python # name: batopt # --- # + #default_exp clean # - # # Data Cleaning # # <br> # # ### Imports # + #exports import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import KFold from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor import os import glob from ipypb import track from batopt import utils, retrieval # - from IPython.display import JSON # <br> # # ### User Inputs raw_data_dir = '../data/raw' cache_data_dir = '../data/nb-cache' # <br> # # ### Loading the Raw Data # # We'll start by loading in the demand data, first we have to determine the latest training set that is available for us to work with #exports def identify_latest_set_num(data_dir): set_num = max([ int(f.split('_set')[1].replace('.csv', '')) for f in os.listdir(data_dir) if 'set' in f ]) return set_num # + set_num = identify_latest_set_num(raw_data_dir) set_num # - # <br> # # We'll then load in and clean the datetime index of the dataset # + #exports def reindex_df_dt_idx(df, freq='30T'): full_dt_idx = pd.date_range(df.index.min(), df.index.max(), freq=freq) df = df.reindex(full_dt_idx) return df def load_training_dataset(raw_data_dir: str, dataset_name: str='demand', set_num=None, parse_dt_idx: bool=True, dt_idx_freq: str='30T') -> pd.DataFrame: if set_num is None: set_num = identify_latest_set_num(raw_data_dir) allowed_datasets = ['demand', 'pv', 'weather'] assert dataset_name in allowed_datasets, f"`dataset_name` must be one of: {', '.join(allowed_datasets)} - not {dataset_name}" df = pd.read_csv(glob.glob(f'{raw_data_dir}/{dataset_name}*set{set_num}.csv')[0].replace('\\', '/')) if parse_dt_idx == 
True: assert 'datetime' in df.columns, 'if `parse_dt_idx` is True then `datetime` must be a column in the dataset' df['datetime'] = pd.to_datetime(df['datetime'], utc=True) df = df.set_index('datetime').pipe(reindex_df_dt_idx, freq=dt_idx_freq).sort_index(axis=1) df.index.name = 'datetime' return df # + df_demand = load_training_dataset(raw_data_dir, 'demand') df_demand.head() # - # <br> # # Then the pv # + df_pv = load_training_dataset(raw_data_dir, 'pv') df_pv.head() # - # <br> # # And finally the weather # + df_weather = load_training_dataset(raw_data_dir, 'weather', dt_idx_freq='H') df_weather.head(3) # - # <br> # # We'll also create a function that reads all of the datasets in at once and then combines them #exports def combine_training_datasets(raw_data_dir, set_num=None): # Loading provided training datasets single_datasets = dict() dataset_names = ['demand', 'pv', 'weather'] for dataset_name in dataset_names: single_datasets[dataset_name] = load_training_dataset(raw_data_dir, dataset_name, set_num=set_num) # Constructing date range min_dt = min([df.index.min() for df in single_datasets.values()]) max_dt = max([df.index.max() for df in single_datasets.values()]) + pd.Timedelta(minutes=30) dt_rng = pd.date_range(min_dt, max_dt, freq='30T') # Constructing combined dataframe df_combined = pd.DataFrame(index=dt_rng, columns=dataset_names) for dataset_name in dataset_names: df_single_dataset = single_datasets[dataset_name] cols_to_be_overwritten = set(df_combined.columns) - (set(df_combined.columns) - set(df_single_dataset.columns)) assert len(cols_to_be_overwritten) == 0, f"The following columns exist in multiple datasets meaning data would be overwritten: {', '.join(cols_to_be_overwritten)}" df_combined[df_single_dataset.columns] = df_single_dataset df_combined = df_combined.sort_index() # Adding holiday dates s_holidays = retrieval.load_holidays_s(raw_data_dir) s_cropped_holidays = s_holidays[max(df_combined.index.min(), s_holidays.index.min()): 
min(df_combined.index.max(), s_holidays.index.max())] df_combined.loc[s_cropped_holidays.index, 'holidays'] = s_cropped_holidays return df_combined # + df_combined = combine_training_datasets(raw_data_dir) df_combined.head(3) # - # # <br> # # ### Identifying Missing Values # # We'll quickly inspect the datasets and check their coverage over the full date range when aggregated by dataset #exports def identify_df_dt_entries(df_demand, df_pv, df_weather): min_dt = min(df_demand.index.min(), df_pv.index.min(), df_weather.index.min()) max_dt = max(df_demand.index.max(), df_pv.index.max(), df_weather.index.max()) dt_rng = pd.date_range(min_dt, max_dt, freq='30T') df_nulls = pd.DataFrame(index=dt_rng) df_nulls['demand'] = df_demand.reindex(dt_rng).isnull().mean(axis=1).astype(int) df_nulls['pv'] = df_pv.reindex(dt_rng).isnull().mean(axis=1).astype(int) df_nulls['weather'] = df_weather.reindex(dt_rng).ffill(limit=1).isnull().mean(axis=1).astype(int) df_entries = 1 - df_nulls return df_entries # + df_entries = identify_df_dt_entries(df_demand, df_pv, df_weather) # Plotting fig, ax = plt.subplots(dpi=150) sns.heatmap(df_entries.T, ax=ax, cmap=plt.cm.binary) utils.set_date_ticks(ax, df_entries.index.min().strftime('%Y-%m-%d'), df_entries.index.max().strftime('%Y-%m-%d'), axis='x', freq='Qs', date_format='%b %y') # - # <br> # # We'll also determine the null percentage in each individual column df_demand.isnull().mean() # <br> # # We can see that all of the PV data columns are missing some data df_pv.isnull().mean() # <br> # # Locations 1 and 2 are also missing some solar data, with 4 missing temperature data df_weather.isnull().mean() # <br> # # ### Handling Missing Values # # We'll start by interpolating the missing PV data, first checking the number of variables that have null values for each time period # + s_pv_num_null_vals = df_pv.isnull().sum(axis=1).replace(0, np.nan).dropna().astype(int) s_pv_num_null_vals.value_counts() # - # <br> # # `pv_power_mw` and 
# `irradiance_Wm-2` have the same average number of null values, there are also
# no time-periods where only 2 variables have null values - it's therefore
# likely that power and irradiance are always missing at the same time, which
# makes them harder to interpolate. We'll quickly check this hypothesis:

(df_pv['pv_power_mw'].isnull() == df_pv['irradiance_Wm-2'].isnull()).mean() == 1

# <br>
#
# It appears as though the `pv_power_mw` and `irradiance_Wm-2` missing values
# form a single time-block that coincides with a larger set of missing values
# within `panel_temp_C`.

df_pv[df_pv['pv_power_mw'].isnull()]

# <br>
#
# Looking at the `panel_temp_C` data we can see there are 3 time-blocks where
# observations are missing

df_pv['panel_temp_C'].isnull().astype(int).plot()

# <br>
#
# One option might be to replace the missing panel temperatures with the
# temperatures observed at the surrounding weather grid locations. As a first
# step we build a feature frame holding every grid temperature alongside its
# rolling average and the PV site's panel temperature.

#exports
def construct_df_temp_features(df_weather, df_pv):
    """Return a half-hourly frame of weather-grid temperatures, their
    3-period rolling means, and the PV panel temperature (`site_temp`)."""
    # Up-sample the hourly weather data onto a 30-minute grid, carrying each
    # observation forward by at most one step.
    half_hourly_idx = pd.date_range(df_weather.index.min(), df_weather.index.max(), freq='30T')
    weather_30T = df_weather.reindex(half_hourly_idx).ffill(limit=1)

    temp_cols = weather_30T.columns[weather_30T.columns.str.contains('temp')]

    features = weather_30T[temp_cols].copy()
    features['site_temp'] = df_pv['panel_temp_C']

    # One smoothed companion column per grid-temperature column.
    rolling_means = features[temp_cols].rolling(3).mean()
    for col in temp_cols:
        features[col + '_rolling'] = rolling_means[col]

    return features.sort_index(axis=1)

# +
df_temp_features = construct_df_temp_features(df_weather, df_pv).dropna()

df_temp_features.head()
# -

# <br>
#
# We'll now check the correlation

sns.heatmap(df_temp_features.corr())

# <br>
#
# The correlation drops off quickly when it gets to the site temperature;
# looking at the full distributions we can see that the site measurements get
# far higher.
This is because the panel is absorbing heat that raises its temperature above that of the surrounding area, again making it more difficult to simply fill in with the nearby temperature measurements. # + sns.histplot(df_temp_features['site_temp'], color='C0', label='Panel') sns.histplot(df_temp_features.drop('site_temp', axis=1).min(axis=1), color='C1', label='MERRA Min') sns.histplot(df_temp_features.drop('site_temp', axis=1).max(axis=1), color='C2', label='MERRA Max') plt.legend(frameon=False) # - # + # Could use an RF to estimate the panel temp based on the weather grid temps? # Potential features: current average surrounding temp, average surrounding temp over the last 3 hours # + #exports def split_X_y_data(df, target_col='site_temp'): df = df.dropna() X_cols = df.drop(target_col, axis=1).columns X = df[X_cols].values y = df[target_col].values return X, y def split_X_y_data_with_index(df, target_col='site_temp'): df = df.dropna() X_cols = df.drop(target_col, axis=1).columns X = df[X_cols].values y = df[target_col].values index = df.index return X, y, index # + X, y = split_X_y_data(df_temp_features) X.shape, y.shape # - #exports def generate_kfold_preds( X, y, model=LinearRegression(), kfold_kwargs={'n_splits': 5, 'shuffle': True}, index=None ): kfold = KFold(**kfold_kwargs) df_pred = pd.DataFrame(columns=['pred', 'true'], index=np.arange(X.shape[0])) for train_idxs, test_idxs in kfold.split(X): X_train, y_train = X[train_idxs], y[train_idxs] X_test, y_test = X[test_idxs], y[test_idxs] model.fit(X_train, y_train) df_pred.loc[test_idxs, 'true'] = y_test df_pred.loc[test_idxs, 'pred'] = model.predict(X_test) df_pred = df_pred.sort_index() if index is not None: assert len(index) == df_pred.shape[0], 'The passed index must be the same length as X and y' df_pred.index = index return df_pred # + df_pred = generate_kfold_preds(X, y) df_pred.head() # - #exports def evaluate_models(X, y, models, post_pred_proc_func=None, index=None): model_scores = dict() for 
model_name, model in track(models.items()): df_pred = generate_kfold_preds(X, y, model, index=index) if post_pred_proc_func is not None: df_pred['pred'] = post_pred_proc_func(df_pred['pred']) model_scores[model_name] = { 'mae': mean_absolute_error(df_pred['true'], df_pred['pred']), 'rmse': np.sqrt(mean_squared_error(df_pred['true'], df_pred['pred'])) } df_model_scores = pd.DataFrame(model_scores) df_model_scores.index.name = 'metric' df_model_scores.columns.name = 'model' return df_model_scores # + models = { 'std_linear': LinearRegression(), 'random_forest': RandomForestRegressor(), 'boosted': GradientBoostingRegressor() } rerun_panel_temp_model = False model_scores_filename = 'panel_temp_interp_model_results.csv' if (rerun_panel_temp_model == True) or (model_scores_filename not in os.listdir(cache_data_dir)): df_model_scores = evaluate_models(X, y, models) df_model_scores.to_csv(f'{cache_data_dir}/{model_scores_filename}') else: df_model_scores = pd.read_csv(f'{cache_data_dir}/{model_scores_filename}', index_col='metric') df_model_scores # - # + top_model = df_model_scores.T['rmse'].idxmin() df_pred = generate_kfold_preds(X, y, models[top_model]) df_pred.head() # - # + s_residuals = df_pred.diff(1, axis=1).dropna(axis=1).iloc[:, 0] s_residuals.plot(linewidth=0.3) # - # + plt.scatter(df_pred['true'], df_pred['pred'], s=1) plt.xlabel('Obervation') plt.ylabel('Prediction') # - #exports def interpolate_missing_panel_temps(df_pv, df_weather, model=RandomForestRegressor()): missing_panel_temp_dts = df_pv.index[df_pv['panel_temp_C'].isnull()] if len(missing_panel_temp_dts) == 0: # i.e. 
no missing values return df_pv df_temp_features = construct_df_temp_features(df_weather, df_pv) missing_dt_X = df_temp_features.loc[missing_panel_temp_dts].drop('site_temp', axis=1).values X, y = split_X_y_data(df_temp_features, 'site_temp') model.fit(X, y) df_pv.loc[missing_panel_temp_dts, 'panel_temp_C'] = model.predict(missing_dt_X) assert df_pv['panel_temp_C'].isnull().sum() == 0, 'There are still null values for the PV panel temperature' return df_pv # + df_pv = interpolate_missing_panel_temps(df_pv, df_weather) df_pv.isnull().mean() # - #exports def construct_df_irradiance_features(df_weather, df_pv): df_weather = df_weather.reindex(pd.date_range(df_weather.index.min(), df_weather.index.max(), freq='30T')).ffill(limit=1) temp_loc_cols = df_weather.columns[df_weather.columns.str.contains('solar')] df_irradiance_features = (df_weather .copy() [temp_loc_cols] .assign(site_solar=df_pv['irradiance_Wm-2']) .pipe(lambda df: df.assign(hour=df.index.hour + df.index.minute/60)) ) df_irradiance_features = df_irradiance_features.sort_index(axis=1) return df_irradiance_features # + df_irradiance_features = construct_df_irradiance_features(df_weather, df_pv) df_irradiance_features.head() # - # + models = { 'std_linear': LinearRegression(), 'random_forest': RandomForestRegressor(), 'boosted': GradientBoostingRegressor() } rerun_site_irradiance_model = False model_scores_filename = 'site_irradiance_interp_model_results.csv' X, y = split_X_y_data(df_irradiance_features, 'site_solar') if (rerun_site_irradiance_model == True) or (model_scores_filename not in os.listdir(cache_data_dir)): df_model_scores = evaluate_models(X, y, models) df_model_scores.to_csv(f'{cache_data_dir}/{model_scores_filename}') else: df_model_scores = pd.read_csv(f'{cache_data_dir}/{model_scores_filename}', index_col='metric') df_model_scores # - # + top_model = df_model_scores.T['rmse'].idxmin() df_pred = generate_kfold_preds(X, y, models[top_model]) df_pred.head() # - # + plt.scatter(df_pred['true'], 
df_pred['pred'], s=1) plt.xlabel('Obervation') plt.ylabel('Prediction') # - #exports def interpolate_missing_site_irradiance(df_pv, df_weather, model=RandomForestRegressor()): missing_site_irradiance_dts = df_pv.index[df_pv['irradiance_Wm-2'].isnull()] if len(missing_site_irradiance_dts) == 0: # i.e. no missing values return df_pv df_irradiance_features = construct_df_irradiance_features(df_weather, df_pv) missing_dt_X = df_irradiance_features.loc[missing_site_irradiance_dts].drop('site_solar', axis=1).values X, y = split_X_y_data(df_irradiance_features, 'site_solar') model.fit(X, y) df_pv.loc[missing_site_irradiance_dts, 'irradiance_Wm-2'] = model.predict(missing_dt_X) assert df_pv['irradiance_Wm-2'].isnull().sum() == 0, 'There are still null values for the solar site irradiance' return df_pv # + df_pv = interpolate_missing_site_irradiance(df_pv, df_weather) df_pv.isnull().mean() # - # <br> # # Now that we have the irradiance and temperature we're ready to start filling in the missing values for power output, again using the same regression interpolation method #exports def construct_df_power_features(df_pv): df_power_features = (df_pv .pipe(lambda df: df.assign(hour=df.index.hour + df.index.minute/60)) .sort_index(axis=1) ) return df_power_features # + df_power_features = construct_df_power_features(df_pv) df_power_features.head() # + models = { 'std_linear': LinearRegression(), 'random_forest': RandomForestRegressor(), 'boosted': GradientBoostingRegressor() } rerun_site_power_model = False model_scores_filename = 'site_power_interp_model_results.csv' X, y, dates = split_X_y_data_with_index(df_power_features, 'pv_power_mw') if (rerun_site_power_model == True) or (model_scores_filename not in os.listdir(cache_data_dir)): df_model_scores = evaluate_models(X, y, models) df_model_scores.to_csv(f'{cache_data_dir}/{model_scores_filename}') else: df_model_scores = pd.read_csv(f'{cache_data_dir}/{model_scores_filename}', index_col='metric') df_model_scores # + top_model 
= df_model_scores.T['rmse'].idxmin() df_pred = generate_kfold_preds(X, y, models[top_model]) df_pred.head() # + plt.scatter(df_pred['true'], df_pred['pred'], s=1) plt.xlabel('Obervation') plt.ylabel('Prediction') # - # ##### Anomalous data points in PV data # # The PV data shows a number of points where the observed value is 0 but the prediction is much higher. # # First let's try and identify them (setting the tolerance to be lower will capture more values as anomalous). # + def identify_anomalies_pv(df_pred, tolerance=0.1): foo = df_pred.copy() foo['difference'] = foo.pred - foo.true foo = foo[(foo.difference > tolerance) & (foo.true == 0)] return foo.index anomalous_dates = dates[identify_anomalies_pv(df_pred)] anomalous_df = df_power_features[df_power_features.index.isin(anomalous_dates)] plt.hist(anomalous_df.hour) # Check this histogram to eyeball if any unreasonable anomalous values are caught by the tolerance (e.g. late at night) # - # Replace these values in `df_power_features`. df_power_features_clean = df_power_features.copy() df_power_features_clean.loc[df_power_features_clean.index.isin(anomalous_dates), 'pv_power_mw'] = np.nan # Rerun the previous model fitting and check the pred vs. actual graph. 
# +
# Re-evaluate the candidate models on the cleaned power data (anomalous zero
# readings removed above), loading cached scores when available.
models = {
    'std_linear': LinearRegression(),
    'random_forest': RandomForestRegressor(),
    'boosted': GradientBoostingRegressor()
}

rerun_site_power_model = False
model_scores_filename = 'site_power_interp_clean_model_results.csv'

X, y, dates = split_X_y_data_with_index(df_power_features_clean, 'pv_power_mw')

if (rerun_site_power_model == True) or (model_scores_filename not in os.listdir(cache_data_dir)):
    df_model_scores = evaluate_models(X, y, models)
    df_model_scores.to_csv(f'{cache_data_dir}/{model_scores_filename}')
else:
    df_model_scores = pd.read_csv(f'{cache_data_dir}/{model_scores_filename}', index_col='metric')

top_model = df_model_scores.T['rmse'].idxmin()
df_pred = generate_kfold_preds(X, y, models[top_model])

plt.scatter(df_pred['true'], df_pred['pred'], s=1)
plt.xlabel('Observation')  # BUG FIX: label was misspelled 'Obervation'
plt.ylabel('Prediction')
# -

# The above graph looks to be cleaner with tolerance at 0.1. It looks like
# there might still be some anomalies which aren't caught though. Consider
# lowering the tolerance.

#exports
def pv_anomalies_to_nan(df_pv, model=None, tolerance=0.1):
    """
    Identify places where cross-validated predictions for `pv_power_mw` are
    much larger than the observed values *and* the observed value is 0 (we
    expect these are anomalies), and set those readings to NaN.

    Parameters:
        df_pv: PV dataframe containing a `pv_power_mw` column
        model: sklearn regressor used for the k-fold predictions; defaults
               to a fresh GradientBoostingRegressor
        tolerance: minimum (pred - true) gap for a zero reading to be
                   flagged as anomalous

    Returns:
        df_pv with the anomalous `pv_power_mw` readings set to NaN
        (mutated in place and returned)
    """
    # BUG FIX: the default was `model=GradientBoostingRegressor()`, a single
    # estimator instance created at import time and shared (and re-fitted)
    # across every call to this function - create a fresh one per call.
    if model is None:
        model = GradientBoostingRegressor()

    df_power_features = construct_df_power_features(df_pv)
    X, y, dates = split_X_y_data_with_index(df_power_features, 'pv_power_mw')

    df_pred = generate_kfold_preds(X, y, model)
    df_pred['difference'] = df_pred.pred - df_pred.true
    df_pred['datetime'] = dates
    df_pred = df_pred.set_index('datetime')

    anomalous_idx = df_pred[(df_pred.difference > tolerance) & (df_pred.true == 0)].index
    df_pv.loc[df_pv.index.isin(anomalous_idx), 'pv_power_mw'] = np.nan

    return df_pv

df_pv = pv_anomalies_to_nan(df_pv)

#exports
def interpolate_missing_site_power(df_pv, model=None):
    """
    Fill missing `pv_power_mw` values by fitting `model` on the rows where
    power is known and predicting at the missing timestamps.

    Returns df_pv with no remaining `pv_power_mw` nulls.
    """
    if model is None:  # fresh estimator per call (avoids shared mutable default)
        model = RandomForestRegressor()

    missing_site_power_dts = df_pv.index[df_pv['pv_power_mw'].isnull()]

    if len(missing_site_power_dts) == 0:  # i.e. no missing values
        return df_pv

    df_power_features = construct_df_power_features(df_pv)
    missing_dt_X = df_power_features.loc[missing_site_power_dts].drop('pv_power_mw', axis=1).values
    X, y = split_X_y_data(df_power_features, 'pv_power_mw')

    model.fit(X, y)
    df_pv.loc[missing_site_power_dts, 'pv_power_mw'] = model.predict(missing_dt_X)

    assert df_pv['pv_power_mw'].isnull().sum() == 0, 'There are still null values for the solar site power'

    return df_pv

# +
df_pv = interpolate_missing_site_power(df_pv)

df_pv.isnull().mean()
# -

#exports
def interpolate_missing_weather_solar(df_pv, df_weather, weather_col='solar_location2', model=None):
    """
    Fill missing values in the weather dataset's `weather_col` solar series
    using the remaining irradiance features (site irradiance excluded).

    Returns df_weather with no remaining nulls in `weather_col`.
    """
    if model is None:  # fresh estimator per call (avoids shared mutable default)
        model = RandomForestRegressor()

    missing_weather_solar_dts = df_weather.index[df_weather[weather_col].isnull()]

    if len(missing_weather_solar_dts) == 0:  # i.e. no missing values
        # BUG FIX: the original returned `df_pv` here even though this
        # function interpolates (and otherwise returns) `df_weather`.
        return df_weather

    df_irradiance_features = construct_df_irradiance_features(df_weather, df_pv).drop('site_solar', axis=1)
    missing_dt_X = df_irradiance_features.loc[missing_weather_solar_dts].drop(weather_col, axis=1).values
    X, y = split_X_y_data(df_irradiance_features, weather_col)

    model.fit(X, y)
    df_weather.loc[missing_weather_solar_dts, weather_col] = model.predict(missing_dt_X)

    assert df_weather[weather_col].isnull().sum() == 0, 'There are still null values for the weather dataset solar observations'

    return df_weather

# +
df_weather = interpolate_missing_weather_solar(df_pv, df_weather, model=LinearRegression())

df_weather.isnull().mean()
# -

# <br>
#
# ### Interpolate Missing Temp Data

#export
def interpolate_missing_temps(df_weather, temp_variable, model=None):
    """
    Use the other temperature locations to predict missing values of
    `temp_variable`.

    For test_2: a full day is missing for temp_location4 on 2018-11-18

    Returns df_weather with no remaining nulls in `temp_variable`.
    """
    if model is None:  # fresh estimator per call (avoids shared mutable default)
        model = RandomForestRegressor()

    missing_temp_dts = df_weather.index[df_weather[temp_variable].isnull()]

    if len(missing_temp_dts) == 0:  # i.e. no missing values
        return df_weather

    temp_loc_cols = df_weather.columns[df_weather.columns.str.contains('temp')]
    df_temp_features = df_weather.filter(temp_loc_cols)

    missing_dt_X = df_temp_features.loc[missing_temp_dts].drop(temp_variable, axis=1).values
    X, y = split_X_y_data(df_temp_features, temp_variable)

    model.fit(X, y)
    df_weather.loc[missing_temp_dts, temp_variable] = model.predict(missing_dt_X)

    df_weather = df_weather.ffill(limit=1)

    # BUG FIX: the assertion message previously said "for the PV panel
    # temperature" - copy-pasted from the panel-temperature helper.
    assert df_weather[temp_variable].isnull().sum() == 0, f'There are still null values for {temp_variable}'

    return df_weather

foo = interpolate_missing_temps(df_weather, 'temp_location4', model=LinearRegression())
foo.isnull().mean()

# <br>
#
# Finally we'll export the relevant code to our `batopt` module

# +
#hide
from nbdev.export import notebook2script

notebook2script()
# -
nbs/02-cleaning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/st24hour/tutorial/blob/master/dcgan_tutorial_answer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="_jQ1tEQCxwRx" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" colab_type="code" id="V_sgB_5dx1f1" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="rF2x3qooyBTI" # # Deep Convolutional Generative Adversarial Network # + [markdown] colab_type="text" id="ITZuApL56Mny" # 이 튜토리얼에서 우리는 [Deep Convolutional Generative Adversarial Network](https://arxiv.org/pdf/1511.06434.pdf) (DCGAN)을 사용하여 어떻게 handwritten 숫자들을 만드는 네트워크를 학습시킬지를 배울 것입니다. # # + [markdown] colab_type="text" id="2MbKJY38Puy9" # ## GAN이란? # [Generative Adversarial Networks](https://arxiv.org/abs/1406.2661) (GANs) 는 최근 computer science 분야에서 가장 흥미로운 아이디어 중에 하나입니다. 두 개의 모델은 적대적인 과정을 통해 동시에 훈련됩니다. *Generator* ("예술가")는 진짜처럼 보이는 이미지를 만드는 법을 배우고, *Discriminator*("비평가")는 진짜 이미지와 가짜 이미지를 구별하게 되는 것을 배웁니다. 
# # ![A diagram of a generator and discriminator](https://tensorflow.org/beta/tutorials/generative/images/gan1.png) # # 훈련과정 동안 *Generator*는 점차 실제같은 이미지를 더 잘 생성하게 되고, *Discriminator*는 점차 진짜와 가짜를 더 잘 구별하게됩니다. 이 과정은 *Discriminator*가 가짜 이미지에서 진짜 이미지를 더이상 구별하지 못하게 될때, 평형상태에 도달하게 됩니다. # # ![A second diagram of a generator and discriminator](https://tensorflow.org/beta/tutorials/generative/images/gan2.png) # # 이 코드를 통해 우리는 MNIST 데이터셋을 사용하여 GAN을 학습해볼 것입니다. 아래의 애니메이션은 *Generator*로부터 50 epoch을 거쳐 만들어진 여러 개의 이미지들입니다. 이 이미지들은 random noise로부터 시작해서 점점 시간이 갈 수록 손으로 쓴 숫자의 형상을 갖추게 됩니다. # # ![sample output](https://tensorflow.org/images/gan/dcgan.gif) # + [markdown] id="uKumTwKVgePx" colab_type="text" # ## DCGAN 튜토리얼 # # 이제부터 어떻게 네트워크를 만들고 GAN학습을 시킬지를 배워봅시다. # 순서는 다음과 같습니다. # 1. Tensorflow와 다른 library들을 불러온다. # 2. 데이터셋을 불러온다. # 3. 모델을 만든다. # 4. loss와 optimizer를 정의한다. # 5. Training loop를 정의한다. # 6. Train # 7. Test # # # + [markdown] colab_type="text" id="e1_Y75QXJS6h" # ### Import Tensorflow and other libraries # # # # + colab_type="code" id="J5oue0oqCkZZ" colab={} from __future__ import absolute_import, division, print_function, unicode_literals # + colab_type="code" id="g5RstiiB8V-z" colab={} # !pip install -q tensorflow-gpu==2.0.0-rc1 #tensorflow gpu 버전을 설치합니다 # + colab_type="code" id="WZKbyU2-AiY-" colab={} import tensorflow as tf # tensorflow를 import해줍니다 # + colab_type="code" id="wx-zNbLqB4K8" colab={} tf.__version__ # import한 tensorflow의 버전을 나타냅니다 # + colab_type="code" id="YzTlj4YdCip_" colab={} # 나중에 GIF를 생성하기 위해서 import해줍니다 # !pip install imageio # + colab_type="code" id="YfIk2es3hJEd" colab={} import glob import imageio import matplotlib.pyplot as plt import numpy as np import os import PIL from tensorflow.keras import layers import time from IPython import display # + [markdown] colab_type="text" id="iYn4MdZnKCey" # ### Load and prepare the dataset # # MNIST 데이터셋을 이용하여 Generator와 Discriminator를 학습시켜봅시다. 학습이 끝나면 Generator는 MNIST 데이터를 닮은 손글씨 숫자들을 생성할 것입니다. 
# # 주어진 정보들을 이용하여 빈 칸을 채워보세요! ([ ]가 빈 칸을 나타냅니다. 괄호를 지우고 알맞은 코드를 써주세요) # # 1. MNIST 데이터는 28x28x1의 dimension을 갖고 있습니다. 받아온 train_images를 (train_image의 갯수, width, height, channel)로 reshape 해줍니다. # 2. [0, 255] 범위로 이루어진 train_images를 [-1, 1]의 범위로 normalize해주기 위해 이미지에서 127.5를 뺀 뒤 127.5로 나눠줍니다. train_images에서 바로 빼고 나눠주시면 됩니다. # (픽셀의 값이 0일 경우 (0 - 127.5)/127.5=-1, 255일 경우 (255 - 127.5)/127.5=1이 됩니다. 이 과정을 통해 기존의 0과 255사이의 값으로 이루어진 이미지가 -1과 1사이의 값을 가지도록 normalize를 해주는 것입니다.) # 3. BUFFER SIZE는 60000입니다. # 4. BATCH SIZE는 256입니다. # + colab_type="code" id="a4fYMGxGhrna" colab={} (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data() # + colab_type="code" id="NFC2ghIdiZYE" colab={} train_images = train_images.reshape(train_images.shape[0],28,28,1).astype('float32') # 1번 train_images = (train_images-127.5)/127.5 # 2번 # + colab_type="code" id="S4PIDhoDLbsZ" colab={} BUFFER_SIZE = 60000 # 3번 BATCH_SIZE = 256 # 4번 # + colab_type="code" id="-yKCCQOoJ7cn" colab={} # 데이터를 shuffle하고 batch를 받아옵니다 train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) # + [markdown] colab_type="text" id="THY-sZMiQ4UV" # ### Create the models # 이제 Generator와 Discriminator 모델을 만들어봅시다! # Generator와 Discriminator들은 모두 [Keras Sequential API](https://www.tensorflow.org/guide/keras#sequential_model).를 사용하여 정의할 것입니다. # # 우리가 만들 Generator와 Discriminator의 구조는 아래 그림들과 같습니다. # # 빈 칸을 채워서 그림과 맞는 모델을 만들어보세요. # + [markdown] colab_type="text" id="-tEyxE-GMC48" # ### The Generator # # Generator는 `tf.keras.layers.Conv2DTranspose` (unsampling) 레이어를 사용하여 seed(random noise)로부터 이미지를 생성합니다. 먼저 `Dense` 레이어에서 이 seed를 input으로 받은 뒤에 원본 이미지 크기(28x28x1)에 도달할 때까지 unsampling을 여러번 반복합니다. `tf.keras.layers.BatchNormalization`과 `tf.keras.layers.LeakyReLU` activation을 각 블럭의 마지막에 추가해줍니다. 
# ![generator](https://docs.google.com/uc?export=download&id=12hZMMHiHbqsiDwGKi7J87T-Ic_y8s6p5) # + colab_type="code" id="6bpTcDqoLWjY" colab={} def make_generator_model(): model = tf.keras.Sequential() model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,))) # 빈칸을 채워주세요 model.add(layers.BatchNormalization()) # 빈칸을 채워주세요 model.add(layers.LeakyReLU()) # 빈칸을 채워주세요 model.add(layers.Reshape((7,7,256))) # 빈칸을 채워주세요 assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size model.add(layers.Conv2DTranspose(128, (5,5), strides=(1,1), padding='same', use_bias=False)) # 빈칸을 채워주세요 assert model.output_shape == (None, 7, 7, 128) model.add(layers.BatchNormalization()) # 빈칸을 채워주세요 model.add(layers.LeakyReLU()) # 빈칸을 채워주세요 model.add(layers.Conv2DTranspose(64, (5,5), strides=(2,2), padding='same', use_bias=False)) # 빈칸을 채워주세요 assert model.output_shape == (None, 14, 14, 64) model.add(layers.BatchNormalization()) # 빈칸을 채워주세요 model.add(layers.LeakyReLU()) # 빈칸을 채워주세요 model.add(layers.Conv2DTranspose(1, (5,5), strides=(2,2), padding='same', use_bias=False, activation='tanh')) # 빈칸을 채워주세요 assert model.output_shape == (None, 28, 28, 1) return model # + [markdown] colab_type="text" id="GyWgG09LCSJl" # Generator를 사용하여 이미지를 생성하도록 합니다. # 1. 1x100 dimension을 가진 noise를 정의해줍시다. # 2. 만들어진 이미지 generated_image는 generator에 noise를 input으로 넣어줌으로써 생성됩니다. # + colab_type="code" id="gl7jcC7TdPTG" colab={} generator = make_generator_model() noise = tf.random.normal([1,100]) # 1번 generated_image = generator(noise, training=False) # 2번 plt.imshow(generated_image[0, :, :, 0], cmap='gray') # + [markdown] colab_type="text" id="D0IKnaCtg6WE" # ### The Discriminator # Discriminator는 CNN-based 이미지 분류기입니다. # # 빈 칸을 채워 Discriminator의 구성을 완료합시다. 
# ![generator](https://docs.google.com/uc?export=download&id=18fiJbjonI34sO0CJJ-XxhLHXpFBIAlnU)

# + colab_type="code" id="dw2tPLmk2pEP" colab={}
def make_discriminator_model():
    """CNN-based classifier mapping a 28x28x1 image to a single real/fake logit."""
    return tf.keras.Sequential([
        # 28x28x1 -> 14x14x64
        layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        # 14x14x64 -> 7x7x128
        layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        # Flatten and score the image with a single logit.
        layers.Flatten(),
        layers.Dense(1),
    ])


# + [markdown] colab_type="text" id="QhPneagzCaQv"
# Run the (as yet untrained) discriminator on the generated image to classify
# it as real or fake. Once trained, the model will output positive values for
# real images and negative values for fake ones.
#
# 1. Feed the generated image (`generated_image`) to the discriminator
#    (`discriminator`) to obtain a decision value (`decision`).

# + colab_type="code" id="gDkA05NE6QMs" colab={}
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print(decision)

# + [markdown] colab_type="text" id="0FMYgY_mPfTi"
# ## Define the loss and optimizers
#
# Define the loss functions and optimizers for both models.

# + colab_type="code" id="psQfmXxYKU3X" colab={}
# Helper computing binary cross-entropy between labels and logits.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

# + [markdown] colab_type="text" id="PKY_iPSPNWoj"
# ### Discriminator loss
#
# This loss quantifies how well the discriminator separates real images from
# fakes: its predictions on real images are compared against an array of ones,
# and its predictions on generated (fake) images against an array of zeros.
#
# 1. Define `real_loss` as the cross-entropy between `real_output` and an
#    all-ones array of the same shape (hint: `tf.ones_like`).
# 2. Define `fake_loss` as the cross-entropy between `fake_output` and an
#    all-zeros array of the same shape (hint: `tf.zeros_like`).
# 3. Define `total_loss` as the sum of `real_loss` and `fake_loss`.
# + colab_type="code" id="wkMNfBWlT-PV" colab={}
def discriminator_loss(real_output, fake_output):
    """Sum of cross-entropy on real images (vs. ones) and fakes (vs. zeros)."""
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake


# + [markdown] colab_type="text" id="Jd-3GCUEiKtv"
# ### Generator loss
#
# The generator's loss quantifies how well it fooled the discriminator.
# Intuitively, if the generator is doing well, the discriminator will classify
# the fake images as real (i.e. output values close to 1). We therefore compare
# the discriminator's scores on the generated images against an array of ones.
#
# 1. Define a function computing the cross-entropy between `fake_output` and an
#    all-ones array of the same shape.

# + colab_type="code" id="90BIcCKcDMxz" colab={}
def generator_loss(fake_output):
    """Cross-entropy between the discriminator's scores on fakes and all ones."""
    target_all_real = tf.ones_like(fake_output)
    return cross_entropy(target_all_real, fake_output)


# + [markdown] colab_type="text" id="MgIc7i0th_Iu"
# Because the discriminator and the generator are trained separately, each one
# needs its own optimizer.
#
# 1. Define both optimizers as Adam with learning rate 1e-4
#    (hint: `tf.keras.optimizers.Adam(learning_rate)`).

# + colab_type="code" id="iWCn_PVdEJZ7" colab={}
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

# + [markdown] colab_type="text" id="mWtinsGDPJlV"
# ### Save checkpoints
#
# Create a checkpoint so the models can be saved and restored later.

# + colab_type="code" id="CA1w-7s2POEy" colab={}
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)

# + [markdown] colab_type="text" id="Rw1fkAczTQYh"
# ## Define the training loop
#
# 1. We will train for 50 epochs.
# 2. The noise vectors we use have dimension 100.
# + colab_type="code" id="NS2GWywBbAWo" colab={} EPOCHS = 50 # 1번 noise_dim = 100 # 2번 num_examples_to_generate = 16 # animated GIF를 생성하기 위한 seed 생성 seed = tf.random.normal([num_examples_to_generate, noise_dim]) # + [markdown] colab_type="text" id="jylSonrqSWfi" # Training loop는 Generator가 random noise를 input으로 받으면서부터 시작됩니다. seed는 이미지를 만들기 위해 쓰입니다. Discriminator는 진짜 이미지(Training set에 있는)와 가짜 이미지(Generator가 만든)를 구분하도록 학습합니다. 각 model의 loss를 각각 계산한뒤 gradient를 이용하여 Generator와 Discriminator를 업데이트합시다. # # 1. Batch Size x Noise Dimension 크기의 noise를 생성합시다. (Hint: tf.random.normal) # 2. 위에서 정의한 generator에 noise를 input으로 주어 generated_images를 생성합니다. (Hint: training=True를 설정해줍시다) # 3. 위에서 정의한 discriminator에 진짜 이미지(images)를 넣어 real_output을 만듭니다. (Hint: training=True를 설정해줍시다) # 4. 위에서 정의한 discriminator에 가짜 이미지를 넣어(generated_images) fake_output을 만듭니다. (Hint: training=True를 설정해줍시다) # 5. 위에서 정의한 generator_loss에 fake_output을 넣어 gen_loss를 계산합니다. # 5. real_output과 fake_output을 이용하여 disc_loss를 계산합니다. # # + colab_type="code" id="3t5ibNo05jCB" colab={} # `tf.function`이 어떻게 사용되는지 주목해 주세요. # 이 데코레이터는 함수를 "컴파일"합니다. 
@tf.function
def train_step(images):
    """One GAN training step: update generator and discriminator on one batch of real images."""
    noise = tf.random.normal([BATCH_SIZE,noise_dim])  # 1
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)  # 2
        real_output = discriminator(images, training=True)  # 3
        fake_output = discriminator(generated_images, training=True)  # 4
        gen_loss = generator_loss(fake_output)  # 5
        disc_loss = discriminator_loss(real_output, fake_output)  # 6
    # Each model is updated only from its own tape/loss.
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

# + [markdown] id="uCnEK4kHrW67" colab_type="text"
# ## Define the train function

# + colab_type="code" id="2M7LmLtGEMQJ" colab={}
def train(dataset, epochs):
    """Run train_step over `dataset` for `epochs` epochs, saving preview images and checkpoints."""
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        # Produce images for the GIF as we go
        display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)
        # Save the model every 15 epochs
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix = checkpoint_prefix)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
    # Generate after the final epoch
    display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)

# + [markdown] colab_type="text" id="2aFF7Hk3XdeW"
# **Generate and save images**
#

# + colab_type="code" id="RmdVsmvhPxyy" colab={}
def generate_and_save_images(model, epoch, test_input):
    """Generate images from `test_input`, plot a 4x4 grid and save it as a numbered PNG."""
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4,4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i+1)
        # Map generator output from [-1, 1] back to [0, 255] grayscale for display.
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()

# + [markdown] colab_type="text" id="dZrd4CdjR-Fp"
# ## Train the model
#
# Call the `train()` function defined above to train the generator and the discriminator
# simultaneously. Training GANs is tricky: it is important that the generator and the
# discriminator do not overpower each other.
#
# The images produced at the start of training look like random noise, but as training
# progresses the generated digits begin to look real. After 50 epochs the images should
# resemble MNIST digits.
#
# 1. Start training by passing the required arguments to the train function defined above.

# + colab_type="code" id="Ly3UN0SLLY2l" colab={}
# %%time
train(train_dataset, EPOCHS)  # 1

# + [markdown] colab_type="text" id="rfM4YcPVPkNO"
# When training has finished, restore the latest checkpoint and test it.

# + colab_type="code" id="XhXsd0srPo8c" colab={}
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

# + [markdown] colab_type="text" id="P4M_vIbUi7c0"
# ## Create a GIF
#

# + colab_type="code" id="WfO5wCdclHGL" colab={}
# Display a single image using the epoch number
def display_image(epoch_no):
    return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))

# + colab_type="code" id="5x3q9_Oe5q0A" colab={}
display_image(EPOCHS)

# + [markdown] colab_type="text" id="NywiH3nL8guF"
# Use the `imageio` library to build a GIF from the images saved during training.
# # + colab_type="code" id="IGKQgENQ8lEI" colab={} anim_file = 'dcgan.gif' with imageio.get_writer(anim_file, mode='I') as writer: filenames = glob.glob('image*.png') filenames = sorted(filenames) last = -1 for i,filename in enumerate(filenames): frame = 2*(i**0.5) if round(frame) > round(last): last = frame else: continue image = imageio.imread(filename) writer.append_data(image) image = imageio.imread(filename) writer.append_data(image) import IPython if IPython.version_info > (6,2,0,''): display.Image(filename=anim_file) # + colab_type="code" id="uV0yiKpzNP1b" colab={} try: from google.colab import files except ImportError: pass else: files.download(anim_file) # + [markdown] colab_type="text" id="k6qC-SbjK0yW" # ## Report # # + [markdown] colab_type="text" id="xjjkT9KAK6H7" # 1. 레포트에 학습시킨 모델의 1epoch, 5epoch, 15epoch, 30epoch, 50epoch의 결과 이미지들을 첨부해주세요. # 2. noise의 dimension을 10, 50, 200으로 바꾸어 학습시켜본 뒤 결과 이미지를 첨부해주세요. # # <EMAIL>로 레포트를 제출해주세요!
dcgan_tutorial_answer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''intermine'': conda)'
#     name: python3
# ---

# # GOterms Import
#
# Through InterMine.
#
# This is just for MouseMine, and mouse specific GO terms.
#
# But for those who are using this as a template for other organisms, feel free to go dig in to <code>./mousemine/mousemine_query.py</code>.
#
# ----
#
# Author : <NAME>
#
# Affiliation : Kyoto University

import os
import pandas as pd

from mousemine.mousemine_query import grab, save_to_csv

# Astrocyte-related GO terms to pull from MouseMine.
terms = [
    'astrocyte activation',
    'astrocyte differentiation',
    'astrocyte development',
]

# Run the MouseMine query and write the results as CSV next to this notebook.
go_term_data = grab(terms)
save_to_csv(go_term_data, filepath=os.path.curdir)
query.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf
import numpy as np

# Widths of the hidden layers of the MLP classifier.
mnist_hidden_dim = [512, 128]

# Simple fully-connected classifier: Flatten -> (Dense+Dropout) per hidden width -> softmax over 10 digits.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten())
for units in mnist_hidden_dim:
    model.add(tf.keras.layers.Dense(units, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))

model.compile(
    # Fix: use the Keras-native optimizer instead of the TF1-only
    # tf.train.AdamOptimizer, which was removed in TensorFlow 2. Same
    # algorithm and learning rate, and consistent with the
    # tf.keras.optimizers.Adam usage elsewhere in this file.
    optimizer=tf.keras.optimizers.Adam(1e-3),
    loss=tf.keras.losses.sparse_categorical_crossentropy,
    metrics=['accuracy'])

# Load MNIST and scale pixel values from [0, 255] to [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255.
x_test = x_test / 255.

model.fit(x_train, y_train, epochs=5, batch_size=100, verbose=1)
# evaluate() returns [loss, accuracy] on the held-out test split.
print(model.evaluate(x_test, y_test))
study2/0_highlevel/0_1_mnist_keras_sequential.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import accuracy_score, confusion_matrix from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import MinMaxScaler import sklearn from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit from keras.models import Sequential from keras.layers.core import Dense, Activation, Dropout from keras.optimizers import SGD from sklearn import datasets from sklearn.model_selection import train_test_split import lightgbm as lgb from tqdm import tqdm import os import gc from itertools import combinations, chain from datetime import datetime print(os.listdir("../input")) # Any results you write to the current directory are saved as output. # + [markdown] _uuid="1fc04f3cb613ee725c681a18f8bbcbcdb88af57d" # # summary # + [markdown] _uuid="968a5603b4bf10a83bc0054c21465141c8a9fcce" # ## model summary # We created a total of 10 learning models and stacked their predicted by LightGBM. 
# # table of contents # # # + [markdown] _uuid="fc395abfff45e2d6f729efe6ac68a8262aa5e863" # # nadare's kernel # + _uuid="9e91ca19c0142cb111e1acabaeb5a27c22eef5a4" train_df = pd.read_csv("../input/train.csv") test_df = pd.read_csv("../input/test.csv") smpsb = pd.read_csv("../input/sample_submission.csv") # + [markdown] _uuid="885a2a8875b9a7f134c3cb64673fe32d1bb1d7c2" # ## preprocessing # + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" # ### EDA & leader board hacking # + _uuid="bdbb097f03fc4d277079c5fccc01fd9574728ece" # First of all, let's see the distribution of each variable. # You can see that there is a big difference in distribution between training data and test data. from scipy.stats import gaussian_kde def compare_dist(ax, feature, i=0): sns.kdeplot(train_df[feature], label="train", ax=ax) sns.kdeplot(test_df[feature], label="test", ax=ax) def numeric_tile(plot_func): fig, axs = plt.subplots(2, 5, figsize=(24, 6)) axs = axs.flatten() for i, (ax, col) in enumerate(zip(axs, train_df.columns.tolist()[1:11])): plot_func(ax, col, i) ax.set_title(col) plt.tight_layout() numeric_tile(compare_dist) # + _uuid="3ed05251dda674f52765dfb71298c93c2ee979f6" # For the training data, display the distribution of variables for each target. # Please pay attention to "Elevation". The difference between the training data and the test data distribution is # thought to be due to the difference between the proportion of the target variables in the training data and the test data. 
def compare_target(ax, feature, i=0):
    # Train/test KDEs plus one KDE per Cover_Type class (1..7) on the training data.
    sns.kdeplot(train_df.loc[:, feature], label="train", ax=ax)
    sns.kdeplot(test_df.loc[:, feature], label="test", ax=ax)
    for target in range(1, 8):
        sns.kdeplot(train_df.loc[train_df["Cover_Type"] == target, feature],
                    label=target, alpha=0.5, lw=1, ax=ax)

numeric_tile(compare_target)

# + _uuid="b3f75b110dc9d589866d8d6de4e0a1d6fcb25316"
# I was able to obtain the distribution of the test data by submitting prediction data with all the same purpose variables.
"""
smpsb = pd.read_csv("../input/sample_submission.csv")
for i in range(1, 8):
    smpsb["Cover_Type"] = i
    smpsb.to_csv("all_{}.csv".format(i), index=None)"""

# and this is the magic number of this competition.
# (Per-class proportion of the test set, recovered from the all-same-class submissions above.)
type_ratio = np.array([0.37053, 0.49681, 0.05936, 0.00103, 0.01295, 0.02687, 0.03242])
class_weight = {k: v for k, v in enumerate(type_ratio, start=1)}

# By using these numbers, you can mimic the distribution of the test data from the training data.
def compare_balanced_dist(ax, feature, i=0):
    # Re-weight the per-class training KDEs by the test-set class ratio to
    # approximate the test distribution ("balanced" curve).
    min_ = min(train_df[feature].min(), test_df[feature].min())
    max_ = max(train_df[feature].max(), test_df[feature].max())
    X = np.linspace(min_, max_, 1000)
    sns.kdeplot(train_df[feature], label="train", ax=ax)
    sns.kdeplot(test_df[feature], label="test", ax=ax)
    btest = np.zeros(1000)
    for target in range(1, 8):
        btest += gaussian_kde(train_df.loc[train_df["Cover_Type"] == target, feature])(X) * type_ratio[target-1]
    ax.plot(X, btest, label="balanced")
    ax.legend()

numeric_tile(compare_balanced_dist)

# + _uuid="e00a36ba7782b587a997871baf314f3d934fdd78"
# By using the following functions, it is possible to perform almost the same evaluation
# as the leader board even in the local environment.
def balanced_accuracy_score(y_true, y_pred):
    # Accuracy re-weighted by the (recovered) public-LB class ratio so local CV
    # approximates the leaderboard score. y_true is 1-based, hence y_true-1.
    return accuracy_score(y_true, y_pred,
                          sample_weight=np.apply_along_axis(lambda x: type_ratio[x], 0, y_true-1))

# + [markdown] _uuid="11d9cb5a1a6a43a5e8e7b7a3e1b777822de29b12"
# ### feature engineering 1

# + [markdown] _uuid="6e90c46e4445d14719f145e7a2cea75270424680"
# I will explain some of the features I consider important or unique.

# + [markdown] _uuid="8c2d853cbc8c1e198bb5c5c009bc6cb44652bc1e"
# #### Aspect

# + _uuid="b595095fcd707c0a30ff4e3234c7f5bc5ad9c934"
# The angle can be divided into sine and cosine
sin_ = np.sin(np.pi*train_df["Aspect"]/180)
cos_ = np.cos(np.pi*train_df["Aspect"]/180)

# However, if this feature quantity alone, the effect seems to be light.
# (Each class is drawn on its own radius ring so the angular distributions don't overlap.)
plt.figure(figsize=(5, 4))
for i in range(1, 8):
    cat = np.where(train_df["Cover_Type"] == i)[0]
    r = (.5+0.2*i)
    plt.scatter(cos_[cat]*(r), sin_[cat]*(r), alpha=0.02*r, s=6, label=i)
plt.xlim(-2, 3)
plt.legend()
plt.savefig("aspect.png")

# + [markdown] _uuid="a1199fd2d205a22f5d91bc988b2591de4e1922c5"
# #### degree to hydrology

# + _uuid="933e53d39a16bab8ce40efe36e5a909211f4e6d4"
# this may be good feature but unfortunally i forgot to add my data
# NOTE(review): the _h/_v names look swapped (hydro_h holds the Vertical distance) — harmless
# here since they are only used symmetrically, but confirm before reusing.
hydro_h = train_df["Vertical_Distance_To_Hydrology"]
hydro_v = train_df["Horizontal_Distance_To_Hydrology"]

# + _uuid="7e7c1fc8875a1b0ac9e70dc60a15cb60509543f2"
plt.scatter(hydro_h, hydro_v, s=1, c=train_df["Cover_Type"], cmap="Set1", alpha=0.3)

# + _uuid="b48277e4eb95cdd55c415a4d6e25dc687447b0de"
# Slope angle toward the nearest surface water (epsilon added to avoid 0/0).
hydro_arctan = np.arctan((hydro_h+0.0001) / (hydro_v+0.0001))
for i in range(1, 8):
    cat = np.where(train_df["Cover_Type"] == i)[0]
    sns.kdeplot(hydro_arctan[cat])

# + _uuid="64ffc12e5f344534a878b0aea2e2c99bacde16ae"
plt.scatter(hydro_arctan, np.pi*train_df["Slope"]/180, c=train_df["Cover_Type"], cmap="Set1", s=1.5, alpha=0.7)

# + [markdown] _uuid="da688e40e90ed0f0b0a3bc2abd96a444d64a3318"
# #### target_encoding

# + _uuid="ea912ece3fa7e6022d5202a54c727ed3e9aaae4f"
# this is the ratio of Wilderness_Area
plt.figure(figsize=(6, 6))
train_df.filter(regex="Wilder").sum(axis=0).plot("pie")

# + _uuid="51deabb9bcfa6bc33ca903deeafa9f75ce1b9d8f"
# and this is the ratio of "Cover_Type" in each "Wilderness_Area"
wilder = (train_df.filter(regex="Wilder") * np.array([1, 2, 3, 4])).sum(axis=1)
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
axs = axs.flatten()
for i, ax in enumerate(axs, start=1):
    train_df.loc[wilder==i, "Cover_Type"].value_counts().sort_index().plot("pie", ax=ax)
    ax.set_title(i)

# + _uuid="e9de8a38df1a7a5206740188264757cc53ec70bf"
# This shows the expression of Soil_Type for the objective variable.
plt.figure(figsize=(12, 4))
sns.heatmap(train_df.iloc[:, -41:].sort_values(by="Cover_Type").iloc[:, :-1].T, cmap="Greys_r")
for i in np.linspace(0, train_df.shape[0], 8)[1:]:
    plt.axvline(i, c="r")

# + [markdown] _uuid="0459bec5c4616fa7cffaaa6ca41800962e9fd976"
# As indicated above, category values are considered to have a major role in classification.
#
# Therefore, in order to handle category values effectively, the ratio of object variables in each category value is added as a feature quantity.
#
# In order to prevent data leakage and not to excessively trust category values which have only a small number, we added values for 10 data as prior distribution to each category.
# + _uuid="89a5c509e52da8f96bf25b285472e86f579682af"
# this is the code
def categorical_post_mean(x):
    # Posterior class probabilities for one category row of counts `x`:
    # weight counts by the test-set class ratio, then add 10 pseudo-observations
    # distributed as type_ratio as a prior before renormalizing.
    p = (x.values)*type_ratio
    p = p/p.sum()*x.sum() + 10*type_ratio
    return p/p.sum()

# + [markdown] _uuid="85cf81e78ae32a4c0b98caf4262c3b67ad4c70c9"
# #### summarizes preprocessing

# + _uuid="31f82acb720296e835f66fc6d3cb9dc7abd2cc27"
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
smpsb = pd.read_csv("../input/sample_submission.csv")

def main(train_df, test_df):
    # Build the combined train+test feature table (trigonometric aspect features,
    # hillshade pair combinations and summary stats, hydrology ratio, and
    # prior-smoothed target encodings for Wilderness_Area and Soil_Type).
    # this is public leaderboard ratio
    start = datetime.now()
    type_ratio = np.array([0.37053, 0.49681, 0.05936, 0.00103, 0.01295, 0.02687, 0.03242])
    total_df = pd.concat([train_df.iloc[:, :-1], test_df])
    # Aspect
    total_df["Aspect_Sin"] = np.sin(np.pi*total_df["Aspect"]/180)
    total_df["Aspect_Cos"] = np.cos(np.pi*total_df["Aspect"]/180)
    print("Aspect", (datetime.now() - start).seconds)
    # Hillshade
    hillshade_col = ["Hillshade_9am", "Hillshade_Noon", "Hillshade_3pm"]
    for col1, col2 in combinations(hillshade_col, 2):
        total_df[col1 + "_add_" + col2] = total_df[col2] + total_df[col1]
        total_df[col1 + "_dif_" + col2] = total_df[col2] - total_df[col1]
        total_df[col1 + "_div_" + col2] = (total_df[col2]+0.01) / (total_df[col1]+0.01)
        total_df[col1 + "_abs_" + col2] = np.abs(total_df[col2] - total_df[col1])
    total_df["Hillshade_mean"] = total_df[hillshade_col].mean(axis=1)
    total_df["Hillshade_std"] = total_df[hillshade_col].std(axis=1)
    total_df["Hillshade_max"] = total_df[hillshade_col].max(axis=1)
    total_df["Hillshade_min"] = total_df[hillshade_col].min(axis=1)
    print("Hillshade", (datetime.now() - start).seconds)
    # Hydrology ** I forgot to add arctan
    total_df["Degree_to_Hydrology"] = ((total_df["Vertical_Distance_To_Hydrology"] + 0.001) /
                                       (total_df["Horizontal_Distance_To_Hydrology"] + 0.01))
    # Holizontal
    horizontal_col = ["Horizontal_Distance_To_Hydrology",
                      "Horizontal_Distance_To_Roadways",
                      "Horizontal_Distance_To_Fire_Points"]
    # NOTE(review): likely bug — this loop iterates hillshade_col again instead of the
    # horizontal_col defined just above, so it merely recomputes (and overwrites) the
    # identical Hillshade pair features and never combines the horizontal distances.
    # Fixing it would introduce new columns and shift the positional
    # categorical_feature indices ([29, 38] and [33, 42]) used by the LightGBM cells
    # further down, so confirm and update those together before changing.
    for col1, col2 in combinations(hillshade_col, 2):
        total_df[col1 + "_add_" + col2] = total_df[col2] + total_df[col1]
        total_df[col1 + "_dif_" + col2] = total_df[col2] - total_df[col1]
        total_df[col1 + "_div_" + col2] = (total_df[col2]+0.01) / (total_df[col1]+0.01)
        total_df[col1 + "_abs_" + col2] = np.abs(total_df[col2] - total_df[col1])
    print("Holizontal", (datetime.now() - start).seconds)

    # Local copy shadows the module-level categorical_post_mean (same logic).
    def categorical_post_mean(x):
        p = (x.values)*type_ratio
        p = p/p.sum()*x.sum() + 10*type_ratio
        return p/p.sum()

    # Wilder
    wilder = pd.DataFrame([(train_df.iloc[:, 11:15] * np.arange(1, 5)).sum(axis=1), train_df.Cover_Type]).T
    wilder.columns = ["Wilder_Type", "Cover_Type"]
    wilder["one"] = 1
    piv = wilder.pivot_table(values="one", index="Wilder_Type", columns="Cover_Type", aggfunc="sum").fillna(0)
    tmp = pd.DataFrame(piv.apply(categorical_post_mean, axis=1).tolist()).reset_index()
    tmp["index"] = piv.sum(axis=1).index
    tmp.columns = ["Wilder_Type"] + ["Wilder_prob_ctype_{}".format(i) for i in range(1, 8)]
    tmp["Wilder_Type_count"] = piv.sum(axis=1).values
    total_df["Wilder_Type"] = (total_df.filter(regex="Wilder") * np.arange(1, 5)).sum(axis=1)
    total_df = total_df.merge(tmp, on="Wilder_Type", how="left")
    # Unseen categories fall back to the global test-set class ratio / zero count.
    for i in range(7):
        total_df.loc[:, "Wilder_prob_ctype_{}".format(i+1)] = total_df.loc[:, "Wilder_prob_ctype_{}".format(i+1)].fillna(type_ratio[i])
    total_df.loc[:, "Wilder_Type_count"] = total_df.loc[:, "Wilder_Type_count"].fillna(0)
    print("Wilder_type", (datetime.now() - start).seconds)
    # Soil type
    soil = pd.DataFrame([(train_df.iloc[:, -41:-1] * np.arange(1, 41)).sum(axis=1), train_df.Cover_Type]).T
    soil.columns = ["Soil_Type", "Cover_Type"]
    soil["one"] = 1
    piv = soil.pivot_table(values="one", index="Soil_Type", columns="Cover_Type", aggfunc="sum").fillna(0)
    tmp = pd.DataFrame(piv.apply(categorical_post_mean, axis=1).tolist()).reset_index()
    tmp["index"] = piv.sum(axis=1).index
    tmp.columns = ["Soil_Type"] + ["Soil_prob_ctype_{}".format(i) for i in range(1, 8)]
    tmp["Soil_Type_count"] = piv.sum(axis=1).values
    total_df["Soil_Type"] = (total_df.filter(regex="Soil") * np.arange(1, 41)).sum(axis=1)
    total_df = total_df.merge(tmp, on="Soil_Type", how="left")
    for i in range(7):
        total_df.loc[:, "Soil_prob_ctype_{}".format(i+1)] = total_df.loc[:, "Soil_prob_ctype_{}".format(i+1)].fillna(type_ratio[i])
    total_df.loc[:, "Soil_Type_count"] = total_df.loc[:, "Soil_Type_count"].fillna(0)
    print("Soil_type", (datetime.now() - start).seconds)
    # Downcast to 32-bit to halve memory.
    icol = total_df.select_dtypes(np.int64).columns
    fcol = total_df.select_dtypes(np.float64).columns
    total_df.loc[:, icol] = total_df.loc[:, icol].astype(np.int32)
    total_df.loc[:, fcol] = total_df.loc[:, fcol].astype(np.float32)
    return total_df

total_df = main(train_df, test_df)
# Drop the raw one-hot Soil_Type*/Wilderness_Area* indicator columns.
one_col = total_df.filter(regex="(Type\d+)|(Area\d+)").columns
total_df = total_df.drop(one_col, axis=1)

# + _uuid="e4fc31a4e0bf23728d886147a3430ff10de919b7"
# Ids 1..15120 are the labeled training rows; the rest are the test rows.
y = train_df["Cover_Type"].values
X = total_df[total_df["Id"] <= 15120].drop("Id", axis=1)
X_test = total_df[total_df["Id"] > 15120].drop("Id", axis=1)

# + _uuid="0504ef9435ceaba8b37bfbde45ec416ac7e7ff34"
gc.collect()

# + [markdown] _uuid="02527a3340e8079cdb7a7f978d06ffe36feacba4"
# ### KNN features and Decision tree feature

# + [markdown] _uuid="4db26e4628d5a1933c745629c2f1cd1a38499708"
# For the variable created up to the above, the decision tree and the k-nearest neighbor method are applied after narrowing down the number of variables and adding the prediction probability as the feature amount.
#
# I decided the combination of variables to be used last and the setting of parameters based on Multi-class logarithmic loss while considering diversity.
# + [markdown] _uuid="5b664180bc7f55a5f05226caf1956bb50b3a5f98"
# #### KNN_feature

# + _uuid="eaf1fe074875abe7c8d997aa2c3b3355825f7d27"
# Each entry is [feature-or-"_PLUS_"-joined feature combo, n_neighbors for KNN].
all_set = [['Elevation', 500],
           ['Horizontal_Distance_To_Roadways', 500],
           ['Horizontal_Distance_To_Fire_Points', 500],
           ['Horizontal_Distance_To_Hydrology', 500],
           ['Hillshade_9am', 500],
           ['Aspect', 500],
           ['Hillshade_3pm', 500],
           ['Slope', 500],
           ['Hillshade_Noon', 500],
           ['Vertical_Distance_To_Hydrology', 500],
           ['Elevation_PLUS_Vertical_Distance_To_Hydrology', 200],
           ['Elevation_PLUS_Hillshade_9am_add_Hillshade_Noon', 200],
           ['Elevation_PLUS_Aspect', 200],
           ['Elevation_PLUS_Hillshade_Noon_dif_Hillshade_3pm', 200],
           ['Elevation_PLUS_Hillshade_Noon_abs_Hillshade_3pm', 200],
           ['Elevation_PLUS_Hillshade_9am', 200],
           ['Elevation_PLUS_Horizontal_Distance_To_Hydrology', 200],
           ['Elevation_PLUS_Horizontal_Distance_To_Roadways', 100],
           ['Elevation_PLUS_Vertical_Distance_To_Hydrology', 200],
           ['Wilder_Type_PLUS_Elevation', 500],
           ['Wilder_Type_PLUS_Hillshade_Noon_div_Hillshade_3pm', 500],
           ['Wilder_Type_PLUS_Degree_to_Hydrology', 200],
           ['Wilder_Type_PLUS_Hillshade_9am_div_Hillshade_3pm', 500],
           ['Wilder_Type_PLUS_Aspect_Cos', 500],
           ['Hillshade_9am_dif_Hillshade_Noon_PLUS_Hillshade_Noon_dif_Hillshade_3pm', 200],
           ['Hillshade_Noon_PLUS_Hillshade_3pm', 200],
           ['Hillshade_Noon_add_Hillshade_3pm_PLUS_Hillshade_Noon_dif_Hillshade_3pm', 200]]

def simple_feature_scores2(clf, cols, test=False, **params):
    # Out-of-fold predict_proba features for classifier `clf` trained on the
    # standard-scaled column subset `cols` (10-fold stratified CV). With
    # test=True, also refit on all training rows and predict the test set.
    # scores/bscores/lscores are returned empty (scoring lines are commented out).
    scores = []
    bscores = []
    lscores = []
    X_preds = np.zeros((len(y), 7))
    scl = StandardScaler().fit(X.loc[:, cols])
    for train, val in StratifiedKFold(n_splits=10, shuffle=True, random_state=2018).split(X, y):
        X_train = scl.transform(X.loc[train, cols])
        X_val = scl.transform(X.loc[val, cols])
        y_train = y[train]
        y_val = y[val]
        C = clf(**params)
        C.fit(X_train, y_train)
        X_preds[val] = C.predict_proba(X_val)
        #scores.append(accuracy_score(y_val, C.predict(X_val)))
        #bscores.append(balanced_accuracy_score(y_val, C.predict(X_val)))
        #lscores.append(log_loss(y_val, C.predict_proba(X_val), labels=list(range(1, 8))))
    if test:
        X_test_select = scl.transform(X_test.loc[:, cols])
        C = clf(**params)
        C.fit(scl.transform(X.loc[:, cols]), y)
        X_test_preds = C.predict_proba(X_test_select)
    else:
        X_test_preds = None
    return scores, bscores, lscores, X_preds, X_test_preds

# + _uuid="d5cc637ad06e0e1d3fe5924b5dbf15f1ee3d2a38"
import warnings
import gc
# NOTE(review): Pool is imported but never used below.
from multiprocessing import Pool
warnings.filterwarnings("ignore")

preds = []
test_preds = []
for colname, neighbor in tqdm(all_set):
    gc.collect()
    #print(colname, depth)
    ts, tbs, ls, pred, test_pred = simple_feature_scores2(KNeighborsClassifier,
                                                          colname.split("_PLUS_"),
                                                          test=True,
                                                          n_neighbors=neighbor)
    preds.append(pred)
    test_preds.append(test_pred)

# + _uuid="1c301a98315266944aae3498c863a9005623ec87"
# Stack all per-combo class-probability blocks side by side: 7 columns per combo.
cols = list(chain.from_iterable([[col[0] + "_KNN_{}".format(i) for i in range(1, 8)] for col in all_set]))
knn_train_df = pd.DataFrame(np.hstack(preds)).astype(np.float32)
knn_train_df.columns = cols
knn_test_df = pd.DataFrame(np.hstack(test_preds)).astype(np.float32)
knn_test_df.columns = cols

# + [markdown] _uuid="cc362ea934b2b07aaa900a3f1510118aecd3eecc"
# #### DT_features

# + _uuid="00347f67de008070fcdc4caf7d3e8bace8ba2dbd"
# Same idea as all_set above, but the second element is the decision-tree max_depth.
# (Name deliberately reuses `all_set`; the KNN version is no longer needed.)
all_set = [['Elevation', 4],
           ['Horizontal_Distance_To_Roadways', 4],
           ['Horizontal_Distance_To_Fire_Points', 3],
           ['Horizontal_Distance_To_Hydrology', 4],
           ['Hillshade_9am', 3],
           ['Vertical_Distance_To_Hydrology', 3],
           ['Slope', 4],
           ['Aspect', 4],
           ['Hillshade_3pm', 3],
           ['Hillshade_Noon', 3],
           ['Degree_to_Hydrology', 3],
           ['Hillshade_Noon_dif_Hillshade_3pm', 3],
           ['Hillshade_Noon_abs_Hillshade_3pm', 3],
           ['Elevation_PLUS_Hillshade_9am_add_Hillshade_Noon', 5],
           ['Elevation_PLUS_Hillshade_max', 5],
           ['Elevation_PLUS_Horizontal_Distance_To_Hydrology', 5],
           ['Aspect_Sin_PLUS_Aspect_Cos_PLUS_Elevation', 5],
           ['Elevation_PLUS_Horizontal_Distance_To_Fire_Points', 5],
           ['Wilder_Type_PLUS_Elevation', 5],
           ['Elevation_PLUS_Hillshade_9am', 5],
           ['Elevation_PLUS_Degree_to_Hydrology', 5],
           ['Wilder_Type_PLUS_Horizontal_Distance_To_Roadways', 5],
           ['Wilder_Type_PLUS_Hillshade_9am_add_Hillshade_Noon', 4],
           ['Wilder_Type_PLUS_Horizontal_Distance_To_Hydrology', 5],
           ['Wilder_Type_PLUS_Hillshade_Noon_abs_Hillshade_3pm', 4],
           ['Hillshade_9am_add_Hillshade_Noon_PLUS_Hillshade_std', 4],
           ['Hillshade_9am_PLUS_Hillshade_9am_add_Hillshade_Noon', 4],
           ['Hillshade_9am_add_Hillshade_Noon_PLUS_Hillshade_Noon_add_Hillshade_3pm', 5]]

def simple_feature_scores(clf, cols, test=False, **params):
    # Same out-of-fold feature builder as simple_feature_scores2 but without
    # standard scaling (trees don't need it).
    scores = []
    bscores = []
    lscores = []
    X_preds = np.zeros((len(y), 7))
    for train, val in StratifiedKFold(n_splits=10, shuffle=True, random_state=2018).split(X, y):
        X_train = X.loc[train, cols]
        X_val = X.loc[val, cols]
        y_train = y[train]
        y_val = y[val]
        C = clf(**params)
        C.fit(X_train, y_train)
        X_preds[val] = C.predict_proba(X_val)
        #scores.append(accuracy_score(y_val, C.predict(X_val)))
        #bscores.append(balanced_accuracy_score(y_val, C.predict(X_val)))
        #lscores.append(log_loss(y_val, C.predict_proba(X_val), labels=list(range(1, 8))))
    if test:
        X_test_select = X_test.loc[:, cols]
        C = clf(**params)
        C.fit(X.loc[:, cols], y)
        X_test_preds = C.predict_proba(X_test_select)
    else:
        X_test_preds = None
    return scores, bscores, lscores, X_preds, X_test_preds

# + _uuid="882ab485d7b1be411b67d0d244cee7e9e078d226"
preds = []
test_preds = []
for colname, depth in tqdm(all_set):
    #print(colname, depth)
    ts, tbs, ls, pred, test_pred = simple_feature_scores(DecisionTreeClassifier,
                                                         colname.split("_PLUS_"),
                                                         test=True,
                                                         max_depth=depth)
    preds.append(pred)
    test_preds.append(test_pred)

cols = list(chain.from_iterable([[col[0] + "_DT_{}".format(i) for i in range(1, 8)] for col in all_set]))
dt_train_df = pd.DataFrame(np.hstack(preds)).astype(np.float32)
dt_train_df.columns = cols
dt_test_df = pd.DataFrame(np.hstack(test_preds)).astype(np.float32)
dt_test_df.columns = cols

# + _uuid="650ae398572fa740879dc785a5b461317086ef8c"
# target encoding features(1.2.3)
te_train_df = total_df.filter(regex="ctype").iloc[:len(train_df)]
te_test_df = total_df.filter(regex="ctype").iloc[len(train_df):]

# + _uuid="c4c32b082b39c89180db5b547ecf2b0571aaf861"
# Level-2 (stacking) frames: start with Id and append each model's OOF probabilities.
train_level2 = train_df[["Id"]]
test_level2 = test_df[["Id"]]

# + [markdown] _uuid="f349e101f81a8f3d4af953d227943254a760413e"
# ## modeling

# + [markdown] _uuid="095ef00c2099aba8fd04b9e9d1694249d2feb2c5"
# I have created 6 models
#
# without KNN&DT features
# * Random Forest Classifier
# * PCA & K-nearest Neighbors Classifier
# * LightGBM
#
# with KNN & DT features
# * Random Forest Classifier
# * Logistic Regression
# * LightGBM
#
# Using these learning machines, data for stacking was created using 10-fold cross validation.

# + [markdown] _uuid="f0d1dd30a7e2af14a04f8250b92cb14eda93de18"
# ### without KNN&DT feature

# + _uuid="ca42a3cd6f0f284ed94472a66e0610801e909552"
y = train_df["Cover_Type"].values
X = total_df[total_df["Id"] <= 15120].drop("Id", axis=1)
X_test = total_df[total_df["Id"] > 15120].drop("Id", axis=1)
type_ratio = np.array([0.37053, 0.49681, 0.05936, 0.00103, 0.01295, 0.02687, 0.03242])
class_weight = {k: v for k, v in enumerate(type_ratio, start=1)}

# + [markdown] _uuid="efd524163b32998c065c279e8cf10cd48b9c2142"
# #### Random forest classifier

# + _uuid="719e2a31d0667c8dfd2b332349a8367bbb57691c"
RFC1_col = ["RFC1_{}_proba".format(i) for i in range(1, 8)]
for col in RFC1_col:
    train_level2.loc[:, col] = 0
    test_level2.loc[:, col] = 0

# + _uuid="5bf8085715c17f5117f867d2df9cab077b167714"
rfc = RandomForestClassifier(n_estimators=150, max_depth=12, class_weight=class_weight, n_jobs=-1)
confusion = np.zeros((7, 7))
scores = []
for train, val in tqdm(StratifiedKFold(n_splits=10, random_state=2434, shuffle=True).split(X, y)):
    X_train = X.iloc[train, :]
    X_val = X.iloc[val, :]
    y_train = y[train]
    y_val = y[val]
    rfc.fit(X_train, y_train)
    y_val_pred = rfc.predict(X_val)
    y_val_proba = rfc.predict_proba(X_val)
    confusion += confusion_matrix(y_val, y_val_pred)
    train_level2.loc[val, RFC1_col] = y_val_proba
    scores.append(balanced_accuracy_score(y_val, y_val_pred))
rfc.fit(X, y)
test_level2.loc[:, RFC1_col] = rfc.predict_proba(X_test)
#smpsb.loc[:, "Cover_Type"] = rfc.predict(X_test)
#smpsb.to_csv("RFC1.csv", index=None)

# + _uuid="040b3893919ef650c0be96b562393fdff12ed226"
print(np.mean(scores))
sns.heatmap(confusion, cmap="Blues")

# + [markdown] _uuid="0bbae34ea9b2bf5a479c0e9aae0d7a13dcafd168"
# #### PCA & KNN

# + _uuid="524d158a396206814880ee7d682406dd8eb2aa37"
KNN1_col = ["KNN1_{}_proba".format(i) for i in range(1, 8)]
for col in KNN1_col:
    train_level2.loc[:, col] = 0
    test_level2.loc[:, col] = 0

# + _uuid="65537269007f3bee16e8079db738e065e704e2ef"
# Categorical columns to exclude before scaling/PCA.
cat_col = X.filter(regex="Soil_Type|Wilderness").columns.tolist()[:-1] + ["Wilder_Type"]

# + _uuid="32e8c05574e03cc14beed390ac5b1c520163c126"
# Scaler and PCA are deliberately fitted on the (larger) test matrix, then
# applied to both train and test.
knn = KNeighborsClassifier(n_neighbors=2, n_jobs=-1)
scl = StandardScaler().fit(X_test.drop(cat_col, axis=1))
X_scl = scl.transform(X.drop(cat_col, axis=1))
X_test_scl = scl.transform(X_test.drop(cat_col, axis=1))
pca = PCA(n_components=23).fit(X_test_scl)
X_pca = pca.transform(X_scl)
X_test_pca = pca.transform(X_test_scl)
confusion = np.zeros((7, 7))
scores = []
for train, val in tqdm(StratifiedKFold(n_splits=10, random_state=2434, shuffle=True).split(X, y)):
    X_train = X_pca[train]
    X_val = X_pca[val]
    y_train = y[train]
    y_val = y[val]
    knn.fit(X_train, y_train)
    y_val_pred = knn.predict(X_val)
    y_val_proba = knn.predict_proba(X_val)
    confusion += confusion_matrix(y_val, y_val_pred)
    train_level2.loc[val, KNN1_col] = y_val_proba
    scores.append(balanced_accuracy_score(y_val, y_val_pred))
knn.fit(X_pca, y)
test_level2.loc[:, KNN1_col] = knn.predict_proba(X_test_pca)
#smpsb.loc[:, "Cover_Type"] = knn.predict(X_test_pca)
#smpsb.to_csv("KNN1.csv", index=None)

# + _uuid="1e1987f27e1b1a580c146796a16b56921ee16ce6"
print(np.mean(scores))
sns.heatmap(confusion, cmap="Blues")

# + [markdown] _uuid="6ef7c413eab43090eaa2ac07a6bb3b46c2e25ed3"
# #### LightGBM

# + _uuid="bd9a9af0ce09640f0b90e349d1d69539651fd873"
LGBM1_col = ["LGBM1_{}_proba".format(i) for i in range(1, 8)]
for col in LGBM1_col:
    train_level2.loc[:, col] = 0
    test_level2.loc[:, col] = 0

# + _uuid="c35f7a3011697bbf95e34193c1f26c1a755c9448"
cat_col = X.filter(regex="Soil_Type|Wilderness").columns.tolist()[:-1] + ["Wilder_Type"]
# NOTE(review): positional indices into lgbm_col — they break if main() ever
# produces a different column layout (see the combinations() note there).
categorical_feature = [29, 38]
lgbm_col = X.drop(cat_col[:-2], axis=1).columns.tolist()
class_weight_lgbm = {i: v for i, v in enumerate(type_ratio)}

# + _uuid="2134226ea0195d71d61f10d53b00a325bb23dbee"
# NOTE(review): n_estimators=15 is passed alongside num_boost_round=370; in the
# sklearn wrapper these overlap — confirm which one actually took effect in the
# original run before tuning.
gbm = lgb.LGBMClassifier(n_estimators=15,
                         num_class=7,
                         learning_rate=0.1,
                         bagging_fraction=0.6,
                         num_boost_round=370,
                         max_depth=8,
                         max_cat_to_onehot=40,
                         class_weight=class_weight_lgbm,
                         device="cpu",
                         n_jobs=4,
                         silent=-1,
                         verbose=-1)
confusion = np.zeros((7, 7))
scores = []
for train, val in tqdm(StratifiedKFold(n_splits=10, random_state=2434, shuffle=True).split(X, y)):
    X_train = X.loc[train, lgbm_col]
    X_val = X.loc[val, lgbm_col]
    y_train = y[train]
    y_val = y[val]
    gbm.fit(X_train, y_train,
            eval_set=[(X_train, y_train), (X_val, y_val)],
            verbose=50, categorical_feature=categorical_feature)
    y_val_pred = gbm.predict(X_val)
    y_val_proba = gbm.predict_proba(X_val)
    scores.append(balanced_accuracy_score(y_val, y_val_pred))
    confusion += confusion_matrix(y_val, y_val_pred)
    train_level2.loc[val, LGBM1_col] = y_val_proba
X_all = X.loc[:, lgbm_col]
X_test_lgbm = X_test.loc[:, lgbm_col]
gbm.fit(X_all, y, verbose=50, categorical_feature=categorical_feature)
test_level2.loc[:, LGBM1_col] = gbm.predict_proba(X_test_lgbm)
#smpsb["Cover_Type"] = gbm.predict(X_test_lgbm)
#smpsb.to_csv("LGBM1.csv")

# + _uuid="a97ac783c210d78101df455ade63265a17379134"
print(np.mean(scores))
sns.heatmap(confusion, cmap="Blues")

# + [markdown] _uuid="aa06d3aff18a8f46592c976296539f14b380f148"
# ### with KNN & DT features

# + _uuid="25308199c65d017343ea96fb02f668e6cb426dd8"
# Feature matrix for the "with KNN & DT" models: OOF KNN probs + OOF DT probs +
# target-encoding features.
X_p = pd.concat([knn_train_df, dt_train_df, te_train_df], axis=1).astype(np.float32)
X_test_p = pd.concat([knn_test_df, dt_test_df, te_test_df.reset_index(drop=True)], axis=1).astype(np.float32)

# + [markdown] _uuid="e4e05bc017ab0deaf36bec4d86840b5f2b12366c"
# #### RandomForestClassifier

# + _uuid="e5bfffb307a40b0c67c31d53a8a08033e5b91381"
KNNDT_RF_col = ["KNNDT_RF_{}_proba".format(i) for i in range(1, 8)]
for col in KNNDT_RF_col:
    train_level2.loc[:, col] = 0
    test_level2.loc[:, col] = 0

# + _uuid="b624a15e4a0c11612205ead52e2c8711951ca31b"
rfc = RandomForestClassifier(n_jobs=-1,
                             n_estimators=200,
                             max_depth=None,
                             max_features=.7,
                             max_leaf_nodes=220,
                             class_weight=class_weight)
confusion = np.zeros((7, 7))
scores = []
for train, val in tqdm(StratifiedKFold(n_splits=10, shuffle=True, random_state=2434).split(X_p, y)):
    X_train = X_p.iloc[train, :]
    y_train = y[train]
    X_val = X_p.iloc[val, :]
    y_val = y[val]
    rfc.fit(X_train, y_train)
    y_pred = rfc.predict(X_val)
    scores.append(balanced_accuracy_score(y_val, y_pred))
    confusion += confusion_matrix(y_val, y_pred)
    train_level2.loc[val, KNNDT_RF_col] = rfc.predict_proba(X_val)
rfc.fit(X_p, y)
test_level2.loc[:, KNNDT_RF_col] = rfc.predict_proba(X_test_p)

# + _uuid="bcd8c782f669a2ef57cf14b0ac1d3f5079d3dde0"
print(np.mean(scores))
sns.heatmap(confusion, cmap="Blues")

# + [markdown] _uuid="c283aadde8a935479ca9b3efa466d14e3abd4050"
# #### Logistic Regression

# + _uuid="15db3d3366a7a6f6fe9bf02aec83782b748cb2eb"
KNNDT_LR_col = ["KNNDT_LR_{}_proba".format(i) for i in range(1, 8)]
for col in KNNDT_LR_col:
    train_level2.loc[:, col] = 0
    test_level2.loc[:, col] = 0

# + _uuid="72ad8f5bb755924ea308a365f701ad86b1f09597"
confusion = np.zeros((7, 7))
scores = []
for train, val in tqdm(StratifiedKFold(n_splits=10, shuffle=True, random_state=2434).split(X, y)):
    X_train = X_p.iloc[train, :]
    y_train = y[train]
    X_val = X_p.iloc[val, :]
    y_val = y[val]
    # C=10**9 makes the regularization effectively negligible.
    lr = LogisticRegression(n_jobs=-1, multi_class="multinomial", C=10**9, solver="saga", class_weight=class_weight)
    lr.fit(X_train, y_train)
    y_val_pred = lr.predict(X_val)
    train_level2.loc[val, KNNDT_LR_col] = lr.predict_proba(X_val)
    scores.append(balanced_accuracy_score(y_val, y_val_pred))
    confusion += confusion_matrix(y_val, y_val_pred)
lr.fit(X_p, y)
test_level2.loc[:, KNNDT_LR_col] = lr.predict_proba(X_test_p)

# + _uuid="8aeac651853a98927d9d3a5c255e83d5d8dfc37f"
print(np.mean(scores))
sns.heatmap(confusion, cmap="Blues")

# + [markdown] _uuid="91077b39bcbb10180ef2a0424b19c1791db9b776"
# #### LightGBM

# + _uuid="d09a32de613b485bef804a563d082e3dbe1ff787"
KNNDT_LGB_col = ["KNNDT_LGB_{}_proba".format(i) for i in range(1, 8)]
for col in KNNDT_LGB_col:
    train_level2.loc[:, col] = 0
    test_level2.loc[:, col] = 0

# + _uuid="bb284b9b2d18193fab68f211ea66f7994696d200"
X = total_df[total_df["Id"] <= 15120].drop("Id", axis=1)
X_test = total_df[total_df["Id"] > 15120].drop("Id", axis=1).reset_index(drop=True)
X_d = pd.concat([X.drop(total_df.filter(regex="Type\d+").columns, axis=1), knn_train_df, dt_train_df], axis=1)
X_test_d = pd.concat([X_test.drop(total_df.filter(regex="Type\d+").columns, axis=1), knn_test_df, dt_test_df], axis=1)
fcol = X_d.select_dtypes(np.float64).columns
X_d.loc[:, fcol] = X_d.loc[:, fcol].astype(np.float32)
X_d = X_d.values.astype(np.float32)
X_test_d.loc[:, fcol] = X_test_d.loc[:, fcol].astype(np.float32)
X_test_d = X_test_d.values.astype(np.float32)

# + _uuid="742567953206af0aeae3a560941b9a5687737514"
class_weight_lgbm = {i: v for i, v in enumerate(type_ratio)}
# NOTE(review): num_class=8 although there are only 7 cover types (the first
# LGBM above uses num_class=7). The sklearn wrapper infers the class count
# from y, so this parameter is most likely ignored — confirm, then drop or
# align both for consistency. The categorical_feature=[33, 42] indices below
# are positional into X_d and are coupled to main()'s column layout.
gbm = lgb.LGBMClassifier(n_estimators=300,
                         num_class=8,
                         num_leaves=32,
                         feature_fraction=0.3,
                         min_child_samples=20,
                         learning_rate=0.05,
                         num_boost_round=430,
                         max_depth=-1,
                         class_weight=class_weight_lgbm,
                         device="cpu",
                         n_jobs=4,
                         silent=-1,
                         verbose=-1)
confusion = np.zeros((7, 7))
scores = []
for train, val in tqdm(StratifiedKFold(n_splits=10, shuffle=True, random_state=2434).split(X_p, y)):
    X_train = X_d[train]
    X_val = X_d[val]
    y_train = y[train]
    y_val = y[val]
    gbm.fit(X_train, y_train, categorical_feature=[33, 42])
    y_pred = gbm.predict(X_val)
    scores.append(balanced_accuracy_score(y_val,
y_pred)) confusion += confusion_matrix(y_val, y_pred) train_level2.loc[val, KNNDT_LGB_col] = gbm.predict_proba(X_val) gbm.fit(X_d, y, categorical_feature=[33, 42]) test_level2.loc[:, KNNDT_LGB_col] = gbm.predict_proba(X_test_d) # + _uuid="3b496508a7b0b892ed6e33011e2c9d1a922fef50" print(np.mean(scores)) sns.heatmap(confusion, cmap="Blues") # + [markdown] _uuid="acfd60853a6f5c166f59a29e1bc66a345ba4126e" # # ykskks's kernel # + _uuid="6c91ea9786eebe34c0dc9e772139dd12cfb81cc9" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) #import warnings #warnings.filterwarnings('ignore') # Any results you write to the current directory are saved as output. 
from matplotlib import pyplot as plt import seaborn as sns # %matplotlib inline from sklearn.model_selection import StratifiedKFold, GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.svm import SVC import lightgbm as lgb # + _uuid="e48eb826e4756b0dde940764177f5ada1169ba97" train=pd.read_csv('../input/train.csv') test=pd.read_csv('../input/test.csv') # + _uuid="e1a71c69e138722e7bd744a940ee5acf811286a4" #drop columns that have the same value in every row train.drop(['Soil_Type7', 'Soil_Type15'], axis=1, inplace=True) test.drop(['Soil_Type7', 'Soil_Type15'], axis=1, inplace=True) # + [markdown] _uuid="cfecd1b31f2b47ab71661ffda9a1632c72183ab3" # The feature enginnering ideas I used here are based on [Lathwal's amazing kernel ](https://www.kaggle.com/codename007/forest-cover-type-eda-baseline-model). # # I removed 'slope_hyd' feature from the original one beacause it did'nt seem to be that useful for prediction. 
# # + _uuid="90502edbe566c397c1b998e62d4d72c4e82cf915" train['HF1'] = train['Horizontal_Distance_To_Hydrology']+train['Horizontal_Distance_To_Fire_Points'] train['HF2'] = abs(train['Horizontal_Distance_To_Hydrology']-train['Horizontal_Distance_To_Fire_Points']) train['HR1'] = abs(train['Horizontal_Distance_To_Hydrology']+train['Horizontal_Distance_To_Roadways']) train['HR2'] = abs(train['Horizontal_Distance_To_Hydrology']-train['Horizontal_Distance_To_Roadways']) train['FR1'] = abs(train['Horizontal_Distance_To_Fire_Points']+train['Horizontal_Distance_To_Roadways']) train['FR2'] = abs(train['Horizontal_Distance_To_Fire_Points']-train['Horizontal_Distance_To_Roadways']) train['ele_vert'] = train.Elevation-train.Vertical_Distance_To_Hydrology train['Mean_Amenities']=(train.Horizontal_Distance_To_Fire_Points + train.Horizontal_Distance_To_Hydrology + train.Horizontal_Distance_To_Roadways) / 3 train['Mean_Fire_Hyd']=(train.Horizontal_Distance_To_Fire_Points + train.Horizontal_Distance_To_Hydrology) / 2 # + _uuid="7cea9c1dfad87d4d961fbfed2c124824f485c185" test['HF1'] = test['Horizontal_Distance_To_Hydrology']+test['Horizontal_Distance_To_Fire_Points'] test['HF2'] = abs(test['Horizontal_Distance_To_Hydrology']-test['Horizontal_Distance_To_Fire_Points']) test['HR1'] = abs(test['Horizontal_Distance_To_Hydrology']+test['Horizontal_Distance_To_Roadways']) test['HR2'] = abs(test['Horizontal_Distance_To_Hydrology']-test['Horizontal_Distance_To_Roadways']) test['FR1'] = abs(test['Horizontal_Distance_To_Fire_Points']+test['Horizontal_Distance_To_Roadways']) test['FR2'] = abs(test['Horizontal_Distance_To_Fire_Points']-test['Horizontal_Distance_To_Roadways']) test['ele_vert'] = test.Elevation-test.Vertical_Distance_To_Hydrology test['Mean_Amenities']=(test.Horizontal_Distance_To_Fire_Points + test.Horizontal_Distance_To_Hydrology + test.Horizontal_Distance_To_Roadways) / 3 test['Mean_Fire_Hyd']=(test.Horizontal_Distance_To_Fire_Points + test.Horizontal_Distance_To_Hydrology) / 2 # 
+ _uuid="483b4ae89f0770f861bea38823276331e0f692c6" #Id for later use Id_train=train['Id'] Id_test=test['Id'] train.drop('Id', axis=1, inplace=True) test.drop('Id', axis=1, inplace=True) # + _uuid="fa8c2be8de8ddbbc41de1a52b558dfa9d6c44fc7" x_train=train.drop('Cover_Type', axis=1) y_train=train['Cover_Type'] # + [markdown] _uuid="8cb0bc06ea06f7cd01023d76c17a18e81df0d61a" # ## randomforest # + _uuid="1ebfc5f7a08fde1f4746234c72511194af5d265f" #prepare df to store pred proba x_train_L2=pd.DataFrame(Id_train) x_test_L2=pd.DataFrame(Id_test) rf_cul=['rf'+str(i+1) for i in range(7)] #prepare cols to store pred proba for i in rf_cul: x_train_L2.loc[:, i]=0 x_test_L2.loc[:, i]=0 rf=RandomForestClassifier(max_depth=None, max_features=20,n_estimators=500, random_state=1) #StratifiedKfold to avoid leakage for train_index, val_index in tqdm(StratifiedKFold(n_splits=10, shuffle=True, random_state=1).split(x_train, y_train)): x_train_L1=x_train.iloc[train_index, :] y_train_L1=y_train.iloc[train_index] x_val_L1=x_train.iloc[val_index, :] y_val_L1=y_train.iloc[val_index] rf.fit(x_train_L1, y_train_L1) y_val_proba=rf.predict_proba(x_val_L1) x_train_L2.loc[val_index, rf_cul]=y_val_proba rf.fit(x_train, y_train) x_test_L2.loc[:, rf_cul]=rf.predict_proba(test) #prepare df for submission #submit_df=pd.DataFrame(rf.predict(test)) #submit_df.columns=['Cover_Type'] #submit_df['Id']=Id_test #submit_df=submit_df.loc[:, ['Id', 'Cover_Type']] #submit_df.to_csv('rf.csv', index=False) #0.75604 # + [markdown] _uuid="293cb1e3271a078108834aed9118fc18ff31672a" # ## LightGBM # + _uuid="ceef4c46d42a9701b28ea63fe8dd4bf8fa64cbf1" #prepare df to store pred proba #x_train_L2=pd.DataFrame(Id_train) #x_test_L2=pd.DataFrame(Id_test) lgbm_cul=['lgbm'+str(i+1) for i in range(7)] #prepare cols to store pred proba for i in lgbm_cul: x_train_L2.loc[:, i]=0 x_test_L2.loc[:, i]=0 lgbm=lgb.LGBMClassifier(learning_rate=0.3, max_depth=-1, min_child_samples=20, n_estimators=300, num_leaves=200, random_state=1, n_jobs=4) 
#StratifiedKfold to avoid leakage for train_index, val_index in tqdm(StratifiedKFold(n_splits=10, shuffle=True, random_state=1).split(x_train, y_train)): x_train_L1=x_train.iloc[train_index, :] y_train_L1=y_train.iloc[train_index] x_val_L1=x_train.iloc[val_index, :] y_val_L1=y_train.iloc[val_index] lgbm.fit(x_train_L1, y_train_L1) y_val_proba=lgbm.predict_proba(x_val_L1) x_train_L2.loc[val_index, lgbm_cul]=y_val_proba lgbm.fit(x_train, y_train) x_test_L2.loc[:, lgbm_cul]=lgbm.predict_proba(test) #prepare df for submission #submit_df=pd.DataFrame(lgbm.predict(test)) #submit_df.columns=['Cover_Type'] #submit_df['Id']=Id_test #submit_df=submit_df.loc[:, ['Id', 'Cover_Type']] #submit_df.to_csv('lgbm.csv', index=False) # + [markdown] _uuid="f6cfa93e9d087406d2331e830e63ef877312893a" # ## LR # + _uuid="f9ce60a286c0998db755fa87c58c14a715c4524d" lr_cul=['lr'+str(i+1) for i in range(7)] #prepare cols to store pred proba for i in lr_cul: x_train_L2.loc[:, i]=0 x_test_L2.loc[:, i]=0 pca=PCA(n_components=40) x_train_pca=pd.DataFrame(pca.fit_transform(x_train)) test_pca=pd.DataFrame(pca.transform(test)) pipeline=Pipeline([('scaler', StandardScaler()), ('lr', LogisticRegression(C=10, solver='newton-cg', multi_class='multinomial',max_iter=500))]) #StratifiedKfold to avoid leakage for train_index, val_index in tqdm(StratifiedKFold(n_splits=10, shuffle=True, random_state=1).split(x_train_pca, y_train)): x_train_L1=x_train_pca.iloc[train_index, :] y_train_L1=y_train.iloc[train_index] x_val_L1=x_train_pca.iloc[val_index, :] y_val_L1=y_train.iloc[val_index] pipeline.fit(x_train_L1, y_train_L1) y_val_proba=pipeline.predict_proba(x_val_L1) x_train_L2.loc[val_index, lr_cul]=y_val_proba pipeline.fit(x_train_pca, y_train) x_test_L2.loc[:, lr_cul]=pipeline.predict_proba(test_pca) #prepare df for submission #submit_df=pd.DataFrame(pipeline.predict(test_pca)) #submit_df.columns=['Cover_Type'] #submit_df['Id']=Id_test #submit_df=submit_df.loc[:, ['Id', 'Cover_Type']] #submit_df.to_csv('lr.csv', 
index=False) # + [markdown] _uuid="39561fd3170fdc43962d89ec5d552d188e181134" # ## SVM # + _uuid="cfa2a2e75d833bff82cd51fb73ee38ddba7e3205" svm_cul=['svm'+str(i+1) for i in range(7)] #prepare cols to store pred proba for i in svm_cul: x_train_L2.loc[:, i]=0 x_test_L2.loc[:, i]=0 #pca=PCA(n_components=40) #x_train_pca=pca.fit_transform(x_train) #test_pca=pca.transform(test) pipeline=Pipeline([('scaler', StandardScaler()), ('svm', SVC(C=10, gamma=0.1, probability=True))]) #StratifiedKfold to avoid leakage for train_index, val_index in tqdm(StratifiedKFold(n_splits=10, shuffle=True, random_state=1).split(x_train_pca, y_train)): x_train_L1=x_train_pca.iloc[train_index, :] y_train_L1=y_train.iloc[train_index] x_val_L1=x_train_pca.iloc[val_index, :] y_val_L1=y_train.iloc[val_index] pipeline.fit(x_train_L1, y_train_L1) y_val_proba=pipeline.predict_proba(x_val_L1) x_train_L2.loc[val_index, svm_cul]=y_val_proba pipeline.fit(x_train_pca, y_train) x_test_L2.loc[:, svm_cul]=pipeline.predict_proba(test_pca) #prepare df for submission #submit_df=pd.DataFrame(pipeline.predict(test_pca)) #submit_df.columns=['Cover_Type'] #submit_df['Id']=Id_test #submit_df=submit_df.loc[:, ['Id', 'Cover_Type']] #submit_df.to_csv('svm.csv', index=False) # + [markdown] _uuid="dc4dd658ece053364a668653ab3c35a5e48f83b6" # # stacking # + [markdown] _uuid="d42deaa383b95ea9fadc4e05bbb908fc702d7caa" # ## Level1 summary # + _uuid="310a00ca38bfc3d2149b14620203dd8b83cad06d" # concatenate two data train_L2 = pd.concat([x_train_L2.iloc[:, 1:].reset_index(drop=True), train_level2.iloc[:, 1:].reset_index(drop=True)], axis=1) test_L2 = pd.concat([x_test_L2.iloc[:, 1:].reset_index(drop=True), test_level2.iloc[:, 1:].reset_index(drop=True)], axis=1) train_L2.to_csv("Wtrain_L2.csv", index=False) test_L2.to_csv("Wtest_L2.csv", index=False) # + _uuid="28b75db2a27aa91fc6da91cd65b32fb614b5be3e" # each models score y = pd.read_csv("../input/train.csv")["Cover_Type"].values model_scores = {} text = [] for i in range(10): 
y_pred = np.argmax(train_L2.iloc[:, 7*i:7*(i+1)].values, axis=1) + 1 score = balanced_accuracy_score(y, y_pred) model_scores[cols[i*7]] = score text.append("{}\t{:<.5}".format(train_L2.columns[i*7], score)) print(*text[::-1], sep="\n") pd.Series(model_scores).plot(kind="barh") plt.savefig("model_summary.png") # + [markdown] _uuid="c3fd4028181ba8d2e302bbf57cd203e004fef5e7" # ## stacking with Logistic Regression # + [markdown] _uuid="a46e27fd2856f948dbb7bf9b0767e1f53b129b51" # ### nadare's simple stacking # + _uuid="b4093457aacc4a3ac1589b20e256435e7dc8bebc" score = [] for train, val in tqdm(StratifiedKFold(n_splits=10, random_state=2434, shuffle=True).split(X, y)): X_train = train_level2.iloc[train, 1:] X_val = train_level2.iloc[val, 1:] y_train = y[train] y_val = y[val] lr = LogisticRegression(n_jobs=1, class_weight=class_weight) lr.fit(X_train, y_train) y_pred = lr.predict(X_val) score.append(balanced_accuracy_score(y_val, y_pred)) #print(score[-1]) print(np.mean(score)) # + [markdown] _uuid="4a073df808a00458e3a6bdfbe74fdc68bcd1649b" # ### ykskks's simple stacking # + _uuid="f8823485458b15b4a030f0b6cb3e74597005139a" score = [] for train, val in tqdm(StratifiedKFold(n_splits=10, random_state=2434, shuffle=True).split(X, y)): X_train = x_train_L2.iloc[train, 1:] X_val = x_train_L2.iloc[val, 1:] y_train = y[train] y_val = y[val] lr = LogisticRegression(n_jobs=1, class_weight=class_weight) lr.fit(X_train, y_train) y_pred = lr.predict(X_val) score.append(balanced_accuracy_score(y_val, y_pred)) print(np.mean(score)) lr = LogisticRegression(n_jobs=1, class_weight=class_weight) lr.fit(x_train_L2, y) # + [markdown] _uuid="bdfd0730d524342031c365cd24a5934c94d199ae" # ### double simple stacking # + _uuid="b9520a86d6a7b24591596457b6530328baa9c3b2" score = [] for train, val in tqdm(StratifiedKFold(n_splits=10, random_state=2434, shuffle=True).split(X, y)): X_train = train_L2.iloc[train, 1:] X_val = train_L2.iloc[val, 1:] y_train = y[train] y_val = y[val] lr = 
LogisticRegression(n_jobs=1, class_weight=class_weight) lr.fit(X_train, y_train) y_pred = lr.predict(X_val) score.append(balanced_accuracy_score(y_val, y_pred)) print(np.mean(score)) # + _uuid="a6116b70210f4b0be69389d95af93e3a9172dd8e" # this is 0.83266 on public LB """ smpsb = pd.read_csv("../input/sample_submission.csv") lr = LogisticRegression(n_jobs=1, class_weight=class_weight) lr.fit(train_L2, y) smpsb["Cover_Type"] = lr.predict(test_L2) smpsb.to_csv("W_ensemble_LR.csv", index=False)""" # + [markdown] _uuid="9b2b9381ffef1bc1b8c25758ad17723a379b879e" # ## stacking with LightGBM # + _uuid="a72184de5e684ade61bb85d60c137f6994bab595" wtrain = train_L2.values.astype(np.float32) wtest = test_L2.values.astype(np.float32) y = pd.read_csv("../input/train.csv")["Cover_Type"].values smpsb = pd.read_csv("../input/sample_submission.csv") cols = train_L2.columns # + _uuid="042ee7b70f3ba6e537f5957582214d6dc2241254" # this is our final submission which is 0.84806 on Public LB gbm = lgb.LGBMClassifier(n_estimators=300, num_class=8, num_leaves=25, learning_rate=5, min_child_samples=20, bagging_fraction=.3, bagging_freq=1, reg_lambda = 10**4.5, reg_alpha = 1, feature_fraction=.2, num_boost_round=4000, max_depth=-1, class_weight=class_weight_lgbm, device="cpu", n_jobs=4, silent=-1, verbose=-1) gbm.fit(wtrain, y, verbose=-1) smpsb["Cover_Type"] = gbm.predict(wtest) smpsb.to_csv("final_submission.csv", index=False) # + _uuid="6b36ef78a85b570c684b9301a5deb79920f7caa9" plt.figure(figsize=(6, 12)) plt.barh(cols, gbm.feature_importances_) plt.savefig("feature_importances.png") # + _uuid="351fb3443797feddd01aa91cb8833852754e0bba" # bagging with k-fold scores = [] gbm = lgb.LGBMClassifier(n_estimators=300, num_class=8, num_leaves=25, learning_rate=5, min_child_samples=20, bagging_fraction=.3, bagging_freq=1, reg_lambda = 10**4.5, reg_alpha = 1, feature_fraction=.2, num_boost_round=8000, max_depth=-1, class_weight=class_weight_lgbm, device="cpu", n_jobs=-1, silent=-1, verbose=-1) proba = 
np.zeros((wtest.shape[0], 7)) for train, val in tqdm(StratifiedKFold(n_splits=5, shuffle=True, random_state=2434).split(wtrain, y)): X_train = wtrain[train] X_val = wtrain[val] y_train = y[train] y_val = y[val] gbm.fit(X_train, y_train, verbose=-1, eval_set=[(X_train, y_train), (X_val, y_val)], early_stopping_rounds=20) proba += gbm.predict_proba(wtest) / 10 y_pred = gbm.predict(X_val) scores.append(balanced_accuracy_score(y_val, y_pred)) print(np.mean(scores)) # + _uuid="9d36a5aa6bcd558bd50f184ec4f93c7973e0ba9a" smpsb["Cover_Type"] = np.argmax(proba, axis=1) + 1 smpsb.to_csv("final_submission_bagging.csv", index=False)
reference_guides/EDA, Feature engineering and Modeling (4th) FOREST COVER .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Automatic download from the SMN
# Running the code below:
# 1. Downloads the solar-radiation files from the Servicio Meteorologico Nacional for the selected dates.
# 2. Standardizes them and compiles them into a single table.
# 3. Exports the table as a CSV file.
#
# Source: https://www.smn.gob.ar/descarga-de-datos

# ### Radiation data for Buenos Aires and Ushuaia
# Data obtained by measuring solar radiation and its interaction with the atmosphere.

# Load libraries
import requests
import pandas as pd
from datetime import datetime, date, timedelta
from os import makedirs, rmdir, remove, path

# +
# Define the date range (year, month, day)
sdate = date(2018, 2, 1)    # start date
edate = date(2018, 2, 28)   # end date
delta = edate - sdate       # number of days to fetch (inclusive)
# -

# +
# Download the daily files.
# NOTE: renamed from `dir`/`date` — `dir` shadowed a builtin and `date`
# rebound the datetime.date class imported above.
data_dir = "radiacion"  # destination folder for the downloaded files
# makedirs(..., exist_ok=True) so re-running the notebook does not crash
# on an already-existing folder (plain mkdir raised FileExistsError).
makedirs(data_dir, exist_ok=True)

for i in range(delta.days + 1):
    day_str = str(sdate + timedelta(days=i)).replace('-', '')
    url = ('https://ssl.smn.gob.ar/dpd/descarga_opendata.php'
           '?file=radiacionsolar/radsolar' + day_str + '.txt')
    r = requests.get(url, allow_redirects=True)
    # The SMN endpoint answers 200 with this message when a day is missing.
    if 'El archivo no existe.' in str(r.content):
        print(day_str + " does not exist")
    else:
        with open('./' + data_dir + '/' + day_str + '.txt', 'wb') as f:
            f.write(r.content)
# -

# +
# Combine the daily files into one table.
# FIX: DataFrame.append was removed in pandas 2.0 — collect the per-day
# frames in a list and concatenate once (also O(n) instead of O(n^2)).
columns = ['Horario', 'Global_BsAs[W/m2]', 'Difusa_BsAs[W/m2]',
           'Global_Ush[W/m2]', 'Difusa_Ush[W/m2]']
frames = []
for i in range(delta.days + 1):
    day_str = str(sdate + timedelta(days=i)).replace('-', '')
    fname = './' + data_dir + '/' + day_str + '.txt'
    if path.isfile(fname):
        frames.append(pd.read_csv(fname, header=None, index_col=False,
                                  skiprows=1, names=columns))
    else:
        print(day_str + " does not exist")

# Handle the edge case of no files at all with an empty, well-formed table.
out = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame(columns=columns)

display(out.head())
display(out.tail())
# -

# +
# Standardization: parse the timestamp column.
df = out
df['Horario'] = pd.to_datetime(df['Horario'], format='%Y-%m-%d %H:%M:%S')
print(df.info())
# -

# +
# Export the table, then delete the intermediate files and the folder.
file = 'datosRadiacion.csv'  # destination file name
df.to_csv(file, index=False)

for i in range(delta.days + 1):
    day_str = str(sdate + timedelta(days=i)).replace('-', '')
    fname = './' + data_dir + '/' + day_str + '.txt'
    if path.isfile(fname):
        remove(fname)
    else:
        print(day_str + " does not exist")
rmdir(data_dir)
projects/automation/meteodata/SMNdatosradiacion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Import the necessary packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# ## Reading the data using pandas

data = pd.read_csv('Churn_Modelling.csv')
data.head(5)

len(data)

data.shape

# ## Scrubbing the data

data.isnull().values.any()

# +
# We have some missing values; explore which columns contain them.
data.isnull().any()
# Missing values appear in Gender, Age and EstimatedSalary.
# -

data[["EstimatedSalary", "Age"]].describe()

data.describe()

# HasCrCard only takes the values 0 and 1, so it should be treated as a category
data['HasCrCard'].value_counts()

# Number of missing values present
data.isnull().sum()

# Percentage of missing values present
round(data.isnull().sum() / len(data) * 100, 2)

# Check the datatype of the columns with missing values
data[["Gender", "Age", "EstimatedSalary"]].dtypes

# ### There are three ways to impute missing values:
# 1. Dropping the rows with missing values
# 2. Filling missing values with a test statistic
# 3. Predicting the missing values using an ML algorithm

# Fill the missing values with the mean of the column
mean_value = data['EstimatedSalary'].mean()
data['EstimatedSalary'] = data['EstimatedSalary'].fillna(mean_value)

data['Gender'].value_counts()

# +
# Gender is categorical, so fill missing entries with the most
# frequent value (the mode).
data['Gender'] = data['Gender'].fillna(data['Gender'].value_counts().idxmax())
# -

mode_value = data['Age'].mode()
data['Age'] = data['Age'].fillna(mode_value[0])

# +
# Check that no missing values remain
data.isnull().any()
# -

# ### Renaming the columns

# +
# Rename some of the columns to clearer names
data = data.rename(columns={
    'CredRate': 'CreditScore',
    'ActMem': 'IsActiveMember',
    'Prod Number': 'NumOfProducts',
    'Exited': 'Churn'
})
data.columns
# -

# ### Move the churn column to the extreme right and drop the customer ID

data.drop(labels=['CustomerId'], axis=1, inplace=True)

column_churn = data['Churn']
data.drop(labels=['Churn'], axis=1, inplace=True)
data.insert(len(data.columns), 'Churn', column_churn.values)
data.columns

# ### Changing the data type

# Convert these variables into categorical variables
data["Geography"] = data["Geography"].astype('category')
data["Gender"] = data["Gender"].astype('category')
data.dtypes

# # Exploring the data

# ## Statistical Overview

# Absolute counts of churned vs retained customers
data['Churn'].value_counts()

# Same counts expressed as percentages (normalize made explicit)
data['Churn'].value_counts(normalize=True) * 100

data.describe()

summary_churn = data.groupby('Churn')
# FIX: numeric_only=True — pandas >= 2.0 raises a TypeError when
# aggregating non-numeric/categorical columns (Geography, Gender, Surname).
summary_churn.mean(numeric_only=True)

summary_churn.median(numeric_only=True)

# FIX: numeric_only=True for the same reason as above; older pandas
# silently dropped non-numeric columns, newer versions raise.
corr = data.corr(numeric_only=True)
plt.figure(figsize=(15, 8))
sns.heatmap(corr, xticklabels=corr.columns.values,
            yticklabels=corr.columns.values, annot=True)

corr

# ## Visualization

# +
# NOTE: sns.distplot is deprecated since seaborn 0.11 (use histplot/displot);
# kept here to preserve the original figures exactly.
f, axes = plt.subplots(ncols=3, figsize=(15, 6))
sns.distplot(data.EstimatedSalary, kde=True, color="darkgreen",
             ax=axes[0]).set_title('EstimatedSalary')
axes[0].set_ylabel('No of Customers')
sns.distplot(data.Age, kde=True, color="darkblue",
             ax=axes[1]).set_title('Age')
axes[1].set_ylabel('No of Customers')
sns.distplot(data.Balance, kde=True, color="maroon",
             ax=axes[2]).set_title('Balance')
axes[2].set_ylabel('No of Customers')
7). Supervised Learning - Predicting Customer Churn/.ipynb_checkpoints/Bank Churn Predictions - Exercise 9-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Politician Activity on Wikipedia
#
# The parameters in the cell below can be adjusted to explore other
# politicians and time frames.
#
# ### How to explore other politicians?
# The ***politician_ID*** is an internal identifier that connects the
# different social media accounts. You can [use this other notebook](../Politicians.ipynb?autorun=true)
# to get the identifiers of other politicians.
#
# ***Alternatively***, you can directly use the
# [Politicians API](http://mediamonitoring.gesis.org/api/Politicians/swagger/),
# or access it with the [SMM Wrapper](https://pypi.org/project/smm-wrapper/).

# ## A. Set Up parameters

# Parameters:
politician_id = 1928
from_date = '2017-09-01'
to_date = '2018-12-31'
aggregation = 'week'

# ## B. Using APIs
# ### B.1 Using the SMM Politician API

# +
from smm_wrapper import SMMPoliticians

# create an instance of the smm wrapper
smm = SMMPoliticians()

# request the politician from the API
politician = smm.dv.get_one(politician_id)

# use the API to get the change objects (edits per period)
wiki_chobs = smm.dv.wikipedia(_id=politician_id, from_date=from_date,
                              to_date=to_date, aggregate_by=aggregation)
wiki_chobs = wiki_chobs.groupby('date').agg({'chobs': 'sum'}).reset_index()
# -

# ### B.2 Using the Wikiwho API

# +
from wikiwho_wrapper import WikiWho
import pandas as pd

# use wikiwho to extract conflicts and revisions for every page id
ww = WikiWho(lng='de')
edit_persistance_gen = (ww.dv.edit_persistence(page_id=wp_id,
                                               start=from_date, end=to_date)
                        for wp_id in politician['wp_ids'])
wiki_data = pd.concat(df for df in edit_persistance_gen if len(df) > 0)
wiki_data['undos'] = wiki_data['dels'] + wiki_data['reins']
wiki_data['date'] = pd.to_datetime(wiki_data['year_month'])
# FIX: select the columns with a list — indexing a GroupBy with a tuple
# ('conflict', 'elegibles', 'undos') was deprecated and is removed in
# pandas >= 2.0.
wiki_data = (wiki_data.groupby('date')[['conflict', 'elegibles', 'undos']]
             .sum().reset_index())
wiki_data['conflict_score'] = wiki_data['conflict'] / wiki_data['elegibles']
# 0/0 yields NaN, zeroed here. NOTE(review): nonzero/0 would yield inf,
# which fillna does not touch — presumably 'elegibles' is only 0 when
# 'conflict' is 0; confirm against the wikiwho data.
wiki_data.fillna(0, inplace=True)
# -

# ### B.3 Using the Wikimedia API

# +
import requests
import urllib.parse

# open a session
session = requests.Session()
session.headers.update({'User-Agent': 'mediamonitoring.gesis.org'})

# prepare the url template (daily per-article page views)
vurl = ("https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article"
        "/de.wikipedia.org/all-access/user/{wp_title}/daily/"
        f"{from_date.replace('-','')}/{to_date.replace('-','')}")

# use the wikimedia API to download the views for every page title
views = pd.concat([pd.DataFrame(session.get(url=vurl.format(
    wp_title=urllib.parse.quote(wp_title, safe=''))).json()['items'])
    for wp_title in politician['wp_titles']])
views['timestamp'] = pd.to_datetime(views['timestamp'], format='%Y%m%d%H')

# weekly or monthly aggregation of the data
if aggregation == 'week':
    views = (views.groupby([pd.Grouper(key='timestamp', freq='W-SUN')])['views']
             .sum().reset_index().sort_values('timestamp'))
    # shift the label from the week's Sunday end back to its Monday start
    views['timestamp'] = views['timestamp'] - pd.Timedelta(days=6)
elif aggregation == 'month':
    views = (views.groupby([pd.Grouper(key='timestamp', freq='MS')])['views']
             .sum().reset_index().sort_values('timestamp'))
# -

# ## C. Plotting
# ### C.1 Plot Wikipedia Activity

# +
import plotly
from plotly import graph_objs as go

plotly.offline.init_notebook_mode(connected=True)

plotly.offline.iplot({
    "data": [go.Scatter(x=views['timestamp'], y=views['views'],
                        name='Views', line_shape='spline'),
             go.Scatter(x=wiki_chobs['date'], y=wiki_chobs['chobs'],
                        name='Changes', yaxis='y2', line_shape='spline')],
    "layout": go.Layout(
        title='Wikipedia Activity',
        yaxis=dict(title='Views'),
        yaxis2=dict(title='Changes', overlaying='y', side='right'))
})
# -

# ### C.2 Plot Wikipedia Disagreement

plotly.offline.iplot({
    "data": [go.Scatter(x=wiki_data['date'], y=wiki_data['undos'],
                        name='Undos', line_shape='spline'),
             go.Scatter(x=wiki_data['date'], y=wiki_data['conflict_score'],
                        name='Conflict', line_shape='spline', yaxis='y2')],
    "layout": go.Layout(title='Wikipedia Disagreement',
                        yaxis=dict(title='Undos'),
                        yaxis2=dict(title='Conflict', overlaying='y', side='right'))
})
python/politician/wikipedia.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Solving Principal Component Analysis with Gradient Ascent
#
# Goal: find w that maximizes
#
# $$f(X) = \frac{1}{m}\sum_{i=1}^m(X_1^{(i)}w_1 + X_2^{(i)}w_2 + \ldots + X_n^{(i)}w_n)^2$$
#
# **To use gradient ascent, compute the gradient:**
#
# $$\nabla f = \begin{pmatrix}
# \frac{\partial f}{\partial w_1} \\\\
# \frac{\partial f}{\partial w_2} \\\\
# \vdots \\\\
# \frac{\partial f}{\partial w_n} \\\\
# \end{pmatrix} =
# \frac{2}{m} \begin{pmatrix}
# \sum_{i=1}^m(X_1^{(i)}w_1 + X_2^{(i)}w_2 + \ldots + X_n^{(i)}w_n)X_1^{(i)} \\\\
# \sum_{i=1}^m(X_1^{(i)}w_1 + X_2^{(i)}w_2 + \ldots + X_n^{(i)}w_n)X_2^{(i)} \\\\
# \vdots \\\\
# \sum_{i=1}^m(X_1^{(i)}w_1 + X_2^{(i)}w_2 + \ldots + X_n^{(i)}w_n)X_n^{(i)} \\\\
# \end{pmatrix} = \begin{pmatrix}
# \sum_{i=1}^m(X^{(i)}w)X_1^{(i)} \\\\
# \sum_{i=1}^m(X^{(i)}w)X_2^{(i)} \\\\
# \vdots \\\\
# \sum_{i=1}^m(X^{(i)}w)X_n^{(i)} \\\\
# \end{pmatrix}$$
#
# $$\nabla f = \frac{2}{m} \cdot X^T (Xw)$$

# ## Solving PCA with gradient ascent

import numpy as np
import matplotlib.pyplot as plt

# Generate a 2-D toy dataset with a strong linear trend plus noise.
X = np.empty((100, 2))
X[:, 0] = np.random.uniform(0., 100., size=100)
X[:, 1] = 0.75 * X[:, 0] + 3. + np.random.normal(0, 10., size=100)

plt.scatter(X[:, 0], X[:, 1])
plt.show()


# ### demean

def demean(X):
    """Center X column-wise (subtract each feature's mean)."""
    return X - np.mean(X, axis=0)


X_demean = demean(X)

plt.scatter(X_demean[:, 0], X_demean[:, 1])
plt.show()


def f(w, X):
    """Objective: mean squared projection length of X onto direction w."""
    return np.sum(X.dot(w) ** 2) / len(X)


def df_math(w, X):
    """Analytic gradient of f: (2/m) * X^T (X w)."""
    return X.T.dot(X.dot(w)) * 2. / len(X)


def df_debug(w, X, epsilon=0.0001):
    """Numeric (central-difference) gradient of f, for validating df_math."""
    res = np.empty(len(w))
    for i in range(len(w)):
        w_1 = w.copy()
        w_1[i] += epsilon
        w_2 = w.copy()
        w_2[i] -= epsilon
        res[i] = (f(w_1, X) - f(w_2, X)) / (2 * epsilon)
    return res


def direction(w):
    """Normalize w to a unit vector."""
    return w / np.linalg.norm(w)


def gradient_ascent(df, x, initial_w, eta, n_iters=1e4, epsilon=1e-8):
    """Maximize f over unit vectors w by gradient ascent.

    df: gradient function; x: demeaned data; initial_w: nonzero start;
    eta: step size. Stops after n_iters steps or when the objective
    improves by less than epsilon. Returns the unit direction found.
    """
    w = direction(initial_w)
    cur_iter = 0
    while cur_iter < n_iters:
        gradient = df(w, x)
        last_w = w
        w = w + eta * gradient
        w = direction(w)  # Note 1: renormalize to a unit direction every step
        if abs(f(w, x) - f(last_w, x)) < epsilon:
            break
        cur_iter += 1
    return w


initial_w = np.random.random(X.shape[1])  # Note 2: must not start from the zero vector
initial_w

eta = 0.001

# Note 3: do NOT standardize the data with StandardScaler here —
# it would equalize the variances that PCA is meant to find.

gradient_ascent(df_debug, X_demean, initial_w, eta)

gradient_ascent(df_math, X_demean, initial_w, eta)

w = gradient_ascent(df_math, X_demean, initial_w, eta)

plt.scatter(X_demean[:, 0], X_demean[:, 1])
plt.plot([0, w[0] * 30], [0, w[1] * 30], color='r')
plt.show()

# ### Computing the first n principal components
#
# After finding the first component, how do we find the next one?
# - Remove the data's projection onto the first component:
#
# $$X^{(i)} \cdot w = ||X_{project}^{(i)}||$$
#
# $$X_{project}^{(i)} = ||X_{project}^{(i)} \cdot w||$$
#
# $$X^{`(i)} = X^{(i)} - X_{project}^{(i)}$$
#
# - Then find the first component of the new data.

X2 = X_demean - X_demean.dot(w).reshape(-1, 1) * w

plt.scatter(X2[:, 0], X2[:, 1])
plt.show()

w2 = gradient_ascent(df_math, X2, initial_w, eta)
w2

# Successive components are orthogonal, so this should be ~0.
w.dot(w2)


def first_n_components(n, X, eta=0.01, n_iters=1e4, epsilon=1e-8):
    """Return the first n principal components of X as unit vectors.

    FIX: n_iters and epsilon are now forwarded to gradient_ascent —
    previously they were accepted but silently ignored.
    """
    X_pca = X.copy()
    X_pca = demean(X_pca)
    res = []
    for i in range(n):
        initial_w = np.random.random(X_pca.shape[1])
        w = gradient_ascent(df_math, X_pca, initial_w, eta, n_iters, epsilon)
        res.append(w)
        # Deflate: remove the component of the data along w before the
        # next iteration.
        X_pca = X_pca - X_pca.dot(w).reshape(-1, 1) * w
    return res


first_n_components(2, X)
PCAandGradientAscent/2.Use-Gradient-Ascent-To-Solve-PCA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf import os from easydict import EasyDict import numpy as np from dataset import CaptchData from model import Net, Model, MyEstimator from generate_captch import CaptchaGenerator import matplotlib.pyplot as plt # + code_folding=[] config = EasyDict({ #data 'batch_size':64, 'char_set':['digits','letters'], 'text_lengths':[4,5,6,7], 'image_shape':(64,256), #net 'regularaztion_rate':0.0001, 'num_hidden':128, 'num_classes':10+26, 'seq_len':16,#经过卷积层后,特征图的长度 计算得出 #model 'learning_rate':1e-3, 'input_tensor_shape':(-1,64,256,3), 'moving_average_decay':0.99, #estimator 'model_path':'/home/renhui/File/model/captcha_ocr/model2/', 'log_path':'/home/renhui/File/model/captcha_ocr/model2/log/', 'max_steps':30000, 'display_step':100, 'save_step':2000, 'eval_step':500, }) # - data = CaptchData(config) net = Net(config) model = Model(config,net) estimator = MyEstimator(model,config) estimator.train(data.train_input_fn) # ### 预测 def image_preprocess(image): image = image*(2./255)-1 image = np.reshape(image, [1,64,256,3]) return image generator = CaptchaGenerator(char_set=['digits','letters'], shape=(64,256)) image1, label1, text1 = generator.create_sample() image1 = image_preprocess(image1) image2, label2, text2 = generator.create_sample() image2 = image_preprocess(image2) images = np.concatenate([image1, image2]) print(text1) print(label1) print(text2) print(label2) estimator.prepare_inference(batch_size=2) pred, prob = estimator.inference(images) pred log_prob tf.sparse_to_dense(decoded[0][0], decoded[0][2], decoded[0][1]) pow(2,log_prob) # ?tf.sparse_tensor_to_dense
.ipynb_checkpoints/train_demo-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:evcp] *
#     language: python
#     name: conda-env-evcp-py
# ---

# Generate a family of cabled grids (plus their surrogate variants) over a
# sweep of line-current limits and generator-power factors, save each grid,
# its plots, and a set of charging scenarios under experiments/<name>/.

# +
import os
import sys

# Project root is assumed to be everything before the 'notebooks' directory.
path_to_this_notebook = os.path.abspath('.')
path_to_project = path_to_this_notebook[:path_to_this_notebook.find('note')]
sys.path.append(path_to_project)

from src.save_and_load_tools.save_load_utils import save_grid, save_scenarios, load_grid
from src.surrogate_grids._grid_parallel_nodes import create_grid_parallel_nodes
from src.surrogate_grids._grid_single_node import create_grid_single_node
from src.generate_grid_examples.cabled_grid import generate_cabled_grid
from src.scenario.scenario_generator import ScenarioGenerator
from src.plotting_tools.plot_grid import plot_grid

# NOTE(review): pandas and pickle are imported but unused in this notebook.
import pandas as pd
import pickle
import numpy as np

# +
# --- Default grid parameters (a small warm-up grid; overridden below) ---
n_loads_per_cables_list = [6, 6]
tree_cable_list = [True, True]
gen_inside_list = [False, False]
gen_end_list = [False, False]
dg_connections_list = []

v_min, v_max = 300, 400
# NOTE(review): load_p_min, gen_p_max and timesteps_hr are defined but never
# used below; kept for documentation of the intended ranges.
load_p_min, load_p_max = 0, 10000
line_i_max = 30
line_g = 15
gen_p_max = 0
# -0 is just 0; the sign only documents that these are minimum (negative) powers.
gen_center_p_min = -0
gen_end_p_min = -0
gen_inside_p_min = -0
gen_dg_p_min = -0

grid = generate_cabled_grid(n_loads_per_cables_list, tree_cable_list, gen_inside_list,
                            gen_end_list, dg_connections_list, v_min, v_max,
                            gen_center_p_min, gen_end_p_min, gen_inside_p_min,
                            gen_dg_p_min, load_p_max, line_i_max, line_g)

# Scenarios params
n_scenarios = 6
t_start_hr = 0
t_end_hr = 24
ptu_size_minutes = 30
ptu_size_hr = ptu_size_minutes / 60
timesteps_hr = np.arange(0, t_end_hr + ptu_size_hr, ptu_size_hr)

# Near-degenerate bounds (width ~0.01) effectively pin the sampled means/stds.
charging_time_mean_bounds = 3.75, 3.76
charging_time_std_bounds = 1, 1.01
per_hour_demand_mean_bounds = 8500, 8501
per_hour_demand_std_bounds = 900, 901
std_price_factor = 1e6
path_sessions = path_to_project + 'data/sessions_example.csv'
path_prices = path_to_project + 'data/power_price_example.csv'

scenario_generator = ScenarioGenerator(t_start_hr, t_end_hr, ptu_size_minutes,
                                       charging_time_mean_bounds, charging_time_std_bounds,
                                       per_hour_demand_mean_bounds, per_hour_demand_std_bounds,
                                       std_price_factor, path_sessions, path_prices)
# -

# Smaller single-cable sanity-check grid (reuses the remaining lists above).
n_loads_per_cables_list = [3,]
tree_cable_list = [True, ]
gen_end_list = [False]
grid = generate_cabled_grid(n_loads_per_cables_list, tree_cable_list, gen_inside_list,
                            gen_end_list, dg_connections_list, v_min, v_max,
                            gen_center_p_min, gen_end_p_min, gen_inside_p_min,
                            gen_dg_p_min, load_p_max, line_i_max, line_g)

# +
# Overwrite switches: when False, existing grids/scenarios/plots are reused.
overwrite_grid = False
overwrite_scenarios = False
overwrite_plots = False

experiment_folder = 'observability'
path_to_experiments = path_to_project + '/experiments/%s/' % experiment_folder
if not os.path.isdir(path_to_experiments):
    os.makedirs(path_to_experiments)
print(path_to_experiments)
print(os.listdir(path_to_experiments))

# +
# Specify parameters for the grids you want to create
loads_per_cable = 12
n_cables = 3
tree_cable = True
gen_inside = False
gen_end = False
# NOTE(review): the empty assignment is immediately overridden — the meshed
# variant (three dg connections) is the one actually used.
dg_connections_list = []
dg_connections_list = [[0, 7, 1, 7], [1, 9, 2, 9], [2, 11, 0, 11],]

# Build a descriptive experiment name from the chosen topology.
meshed_str = 'meshed' if len(dg_connections_list) else 'radial'
shape_str = 'tree-cables' if tree_cable else 'straight-cables'
gens_str = '_gens-inside' if gen_inside else ''
gens_str = (gens_str + '_gens-end') if gen_end else gens_str
experiment_name = 'CabledGrid%dx%d%s_%s%s' % (loads_per_cable, n_cables, meshed_str, shape_str, gens_str)

# The 4x4 sweep: line current limits x generation scaling factors.
line_i_max_array = np.linspace(20, 130, 4)
gen_p_factors_array = np.linspace(1, .4, 4)
print(experiment_name)

n_loads_per_cables_list = [loads_per_cable for _ in range(n_cables)]
tree_cable_list = [tree_cable for _ in range(len(n_loads_per_cables_list))]
gen_inside_list = [gen_inside for _ in range(len(n_loads_per_cables_list))]
gen_end_list = [gen_end for _ in range(len(n_loads_per_cables_list))]
n_gens = 1 + sum(gen_inside_list) + sum(gen_end_list) + len(dg_connections_list)
n_loads = sum(n_loads_per_cables_list)
# Total generation scaled by factor f, split evenly over the generators
# (negative sign: generation is modelled as negative minimum power).
gen_p_min_array = np.array([-int(f * n_loads * load_p_max / n_gens) for f in gen_p_factors_array])

print('Ps: ', gen_p_min_array.round())
print('Is: ', line_i_max_array.round())
print('N grids: ', len(line_i_max_array) * len(gen_p_factors_array))

# +
# Create and save the grids
for gen_p_min in gen_p_min_array:
    for line_i_max in line_i_max_array:
        # All generator types share the same minimum power in this sweep.
        gen_center_p_min = gen_p_min
        gen_end_p_min = gen_p_min
        gen_inside_p_min = gen_p_min
        gen_dg_p_min = gen_p_min
        grid = generate_cabled_grid(n_loads_per_cables_list, tree_cable_list, gen_inside_list,
                                    gen_end_list, dg_connections_list, v_min, v_max,
                                    gen_center_p_min, gen_end_p_min, gen_inside_p_min,
                                    gen_dg_p_min, load_p_max, line_i_max, line_g)
        # Surrogate variants of the same grid.
        grid_parallel, _ = create_grid_parallel_nodes(grid)
        grid_single_cable, _ = create_grid_single_node(grid)
        grid_name = grid.name
        print('Grid: ', grid_name)

        path_to_grid_folder = path_to_experiments + '/%s/%s/' % (experiment_name, grid_name)
        if not os.path.isdir(path_to_grid_folder):
            print('Created folder')
            os.makedirs(path_to_grid_folder)

        grid_already_exists = 'grid.pickle' in os.listdir(path_to_grid_folder)
        # Plot the grid and both surrogates (only for new grids, or on demand).
        if (not grid_already_exists) or overwrite_plots:
            plot_grid(grid, grid_name, bbox=(500, 500), margin=50, title_size=12,
                      save=True, path_to_figures=path_to_grid_folder)
            plot_grid(grid_parallel, grid_name + '_parallel', bbox=(500, 500), margin=50,
                      title_size=12, save=True, path_to_figures=path_to_grid_folder)
            plot_grid(grid_single_cable, grid_name + '_single-cable', bbox=(500, 500), margin=50,
                      title_size=12, save=True, path_to_figures=path_to_grid_folder)
            print('Overwrote plots!')

        if not grid_already_exists:
            save_grid(grid, experiment_name, grid_name, path_to_experiments)
            print('Saved grid!')
        elif (grid_already_exists and overwrite_grid):
            save_grid(grid, experiment_name, grid_name, path_to_experiments)
            print('Overwrote existing grid!')
        else:
            # Reuse the previously saved grid so scenarios match it.
            grid = load_grid(experiment_name, grid_name, path_to_experiments)
            print('Grid found, skipping!')

        scenarios_already_exist = ('scenarios_t=%d.pickle' % (ptu_size_minutes)
                                   in os.listdir(path_to_grid_folder))
        # NOTE(review): scenarios are generated even when they will be
        # discarded below — presumably to keep the RNG stream aligned; confirm.
        scenarios = scenario_generator.generate(grid.load_inds, n_scenarios, 0, [])
        if not scenarios_already_exist:
            save_scenarios(scenarios, scenario_generator, experiment_name, grid_name, path_to_experiments)
            print('Saved scenarios!')
        elif (scenarios_already_exist and overwrite_scenarios):
            save_scenarios(scenarios, scenario_generator, experiment_name, grid_name, path_to_experiments)
            print('Overwrote existing scenarios!')
        else:
            print('Scenarios found, skipping!')
        print()
notebooks/run_experiments/0.create_grids/RandomCabledGrid.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # STILL WORK IN PROGRESS 08/01/21#
#
# # Introduction
#
# This jupyter notebook creates additional figures from the data described in the review by Mandel & Broekgaarden (2021) on "Rates of Compact Object Coalescence". In order to produce the figures one needs to download the data that is available on https://zenodo.org/record/5072401 !Make sure to use the latest version of the dataset!
#
# To make the figures from the paper, put all the data (at least 26 files, unless you are only interested in reproducing one of the Compact Object flavors) in one directory that we will refer to in the remaining jupyter notebook as the *path_to_data_directory*. Enter the path to the directory below, and run the jupyter notebook cells.
#
# ### Reference
# If you use this data/code for publication, please cite both the paper: Mandel & Broekgaarden (2021) and the dataset on Zenodo through its doi https://zenodo.org/record/5072401 doi: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5072401.svg)](https://doi.org/10.5281/zenodo.5072401)
#
# For any questions or inquiry, please email one of or both the authors:
# <NAME>: <EMAIL>
# Floor Broekgaarden: <EMAIL>

# +
# Full path to the directory with all data files.
# Change this to your own path to the data directory before running.
path_to_data_directory = '/Users/floorbroekgaarden/Projects/GitHub/Rates_of_Compact_Object_Coalescence/Data_Mandel_and_Broekgaarden_2021/'

# +
# The following lines import the `default_scripts` file that contains some
# global code / settings used throughout this jupyter notebook. If for some
# reason the script does not import, copy-paste its code into a cell here.
# Not all libraries it loads are strictly needed.
import sys
sys.path.append('../')
from default_scripts import *

# Widen the notebook cells; comment these lines out if not wanted.
# Fixed: import display/HTML from IPython.display — the previous
# IPython.core.display location is deprecated (removed in IPython >= 8 docs).
from IPython.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# -
plottingCode/Extra_figures/Make_extra_figures_horizontal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ---
# title: "Cross entropy — Part 2: Convexity of the objective function"
# date: 2021-02-24
# categories: [statistics, entropy]
# tags: [statistics, cross entropy, softmax, convexity]
# preview_image: /enrightward.github.io/assets/img/cross-entropy/convex.png
# ---

# ![Desktop View](/assets/img/cross-entropy/convex.png)

# ## 1. Introduction
#
# In the [previous post](https://enrightward.github.io/enrightward.github.io/posts/cross-entropy-part-1/), I recalled the definition of the _cross entropy_ $H(p, q)$ of two discrete PDFs $p$ and $q$ over the same support $S = \{ x_{1}, \ldots, x_{n} \}$. It is a loose measure of similarity of $p$ and $q$, and so is used in machine learning to define objective functions for tasks where the goal is to learn a PDF $p$ implicit in training data by updating the internal parameters of a learnt PDF $q$. I also wrote down a proof that for fixed but arbitrary $p$, the function $q \mapsto H(p, q)$ obtains a global minimum at $q = p$.
#
# In this post, I will show that $q \mapsto H(p, q)$ is a convex function, provided we restrict $p$ and $q$ to the space of all PDFs such that no event in $S$ is impossible, i.e. we assume the $p(x_{i})$ and $q(x_{i})$ are never zero. This assumption is not very restrictive for machine learning purposes, where learnt zero probabilities are rare. It also allows us to use the _softmax_ parametrisation for $q$, explained below.

# ## 2. Definition of cross entropy
#
# Recall that the _cross entropy_ of a pair $(p, q)$ of discrete PDFs with the same support $S$ is defined to be:
#
# \begin{equation}
# H(p, q) := -\sum_{x \in S} p(x) \log(q(x)).
# \end{equation}

# ## 3. 
The softmax parametrisation of a discrete PDF # # For any collection $t_{1}, \ldots, t_{n}$ of real numbers, the $n$-tuple: # # \begin{equation} # q(t_{1}, \ldots, t_{n}) := \left( \frac{e^{t_{1}}}{Z}, \ldots, \frac{e^{t_{n}}}{Z} \right), # \end{equation} # # where $Z$ is the normalisation constant $\sum_{i=1}^{n} e^{t_{i}}$, is a PDF. Indeed, each entry of $q$ is positive-valued, since its numerator and denominator are sums of real-valued exponentials, and these entries sum to $1$, by construction of $Z$. Conversely, any PDF $q = (q_{1}, \ldots, q_{n})$ with all non-zero entries can be written in this form: Simply set $t_{i}$ equal to $\log(q_{i})$. We call this the _softmax parametrisation_ of $q$. # ## 4. Proof that $q \mapsto H(p, q)$ is convex # # We showed in the previous post that the function $q \mapsto H(p, q)$, with $p$ fixed and $q$ varying, has a global minimum at $q = p$. We now show that this is a convex function, assuming $p$ and $q$ have no zero entries. By the above assumption, we can write $q$ using the softmax parametrisation: # # \begin{equation} # q(x_{j}) = \frac{e^{t_{j}}}{Z}, # \end{equation} # # where $Z$ is the normalisation constant $\sum_{i=1}^{n} e^{t_{i}}$. # ### Step 1: Use the softmax parametrisation # # Fix $p$ and re-write $H(p, q)$ by replacing $q(x_{i})$ with $e^{t_{i}}/Z$: # # \begin{equation} # H(p, q) = -\sum_{i=1}^{n} p(x_{i}) \log(q(x_{i})) = -\sum_{i=1}^{n} p(x_{i}) \log \left( \frac{e^{t_{i}}}{Z} \right) = # \sum_{i=1}^{n} p(x_{i})(\log(Z) - t_{i}) = \log(Z) - \sum_{i=1}^{n} p(x_{i}) t_{i}. # \end{equation} # ### Step 2: Compute first derivative # # Now we find a local optimum for $H(p, q)$, regarded as a function of $q$ for fixed but arbitrary $p$. The first step is to solve $\partial H/\partial t_{j} = 0$. 
# To do this, observe that because $Z = \sum_{i=1}^{n} e^{t_{i}}$, we have $\partial Z/\partial t_{j} = e^{t_{j}}$, so that:
#
# \begin{equation}
# \frac{\partial \log(Z)}{\partial t_{j}} = \frac{e^{t_{j}}}{Z} = q(x_{j}),
# \end{equation}
#
# and hence:
#
# \begin{equation}
# \frac{\partial H(p, q)}{\partial t_{j}} = q(x_{j}) - p(x_{j}).
# \end{equation}
#
# This partial derivative is zero exactly when $q(x_{j}) = p(x_{j})$.

# ### Step 3: The Hessian is positive semidefinite
#
# The previous step implies that for fixed $p$, the quantity $H(p, q)$ is locally flat around $q = p$. To show this is a global minimum, we will show $H$ is "concave up" as a function in $q$. It suffices to show that the Hessian matrix:
#
# \begin{equation}
# \nabla^{2} H(p, q) = \left( \frac{\partial^{2} H(p, q)}{\partial t_{j} \partial t_{i}} \right)_{i, j=1}^{n}
# \end{equation}
#
# is positive semidefinite, i.e. satisfies $v^{t} \nabla^{2} H(p, q) v \ge 0$ for all $v \in \mathbb{R}^{n}$. First let's compute the entries of the Hessian. From the partial derivative calculation in the previous step, we have:
#
# \begin{equation}
# \frac{\partial^{2} H(p, q)}{\partial t_{j} \partial t_{i}} = \frac{\partial}{\partial t_{j}}(q(x_{i}) - p(x_{i})) =
# \frac{\partial q(x_{i})}{\partial t_{j}},
# \end{equation}
#
# because $p$ is constant. Now,
#
# \begin{equation}
# \frac{\partial q(x_{i})}{\partial t_{j}} = \frac{\partial}{\partial t_{j}} \left( \frac{e^{t_{i}}}{Z} \right) =
# \frac{Z \cdot \frac{\partial e^{t_{i}}}{\partial t_{j}} - e^{t_{i}} \cdot \frac{\partial Z}{\partial t_{j}}}{Z^{2}} =
# \frac{Z \cdot \frac{\partial e^{t_{i}}}{\partial t_{j}} - e^{t_{i} + t_{j}}}{Z^{2}},
# \end{equation}
#
# noting that $\frac{\partial Z}{\partial t_{j}} = e^{t_{j}}$ from Step 2. On the other hand, the quantity $\frac{\partial e^{t_{i}}}{\partial t_{j}}$ is equal either to zero, if $i$ and $j$ differ, or else $e^{t_{i}}$, if $i$ and $j$ are the same. It follows that:
#
# \begin{equation}
# \frac{\partial^{2} H(p, q)}{\partial t_{j} \partial t_{i}} = -\frac{e^{t_{i} + t_{j}}}{Z^{2}} = -q(x_{i})q(x_{j}),
# \end{equation}
#
# if $i \neq j$, and
#
# \begin{equation}
# \frac{\partial^{2} H(p, q)}{\partial t_{i}^{2}} = \frac{Z e^{t_{i}} - e^{2t_{i}}}{Z^{2}} = q(x_{i}) - q(x_{i})^{2}.
# \end{equation}
#
# The Hessian can thus be written:
#
# \begin{equation}
# \nabla^{2} H(p, q) = \textrm{diag}(Q) - QQ^{t},
# \end{equation}
#
# where $Q = (q(x_{1}), \ldots, q(x_{n}))$ is an $n$-dimensional column vector, and $\textrm{diag}(Q)$ is the $n \times n$ matrix whose diagonal is defined by $Q$, and whose off-diagonal entries are zero. Observe now that for $v \in \mathbb{R}^{n}$, using $v^{t} QQ^{t} v = (Q^{t} v)^{2}$, we have:
#
# \begin{equation}
# v^{t} \nabla^{2} H(p, q) v = v^{t} \, \textrm{diag}(Q) v - v^{t} QQ^{t} v =
# \sum_{i=1}^{n} v_{i}^{2} q(x_{i}) - \left( \sum_{i=1}^{n} v_{i} q(x_{i}) \right)^{2} =
# \mathbb{E}_{q}[v^{2}] - \mathbb{E}_{q}[v]^{2}.
# \end{equation}
#
# Here we regard $v$ as a random variable taking the value $v_{i}$ with probability $q(x_{i})$. The right-hand side is then the variance of $v$ under $q$, which is always non-negative, so $\nabla^{2} H(p, q)$ is positive semidefinite and $H(p, q)$ is a convex function of $(t_{1}, \ldots, t_{n})$. The variance is zero exactly when $v$ is constant, i.e. $v = c(1, \ldots, 1)$; these are the directions of the reparametrisations $t \mapsto t + c$, which leave $q$ — and hence $H(p, q)$ — unchanged. Along every direction that actually changes $q$, then, the Hessian is strictly positive, and the critical point $q = p$ found in Step 2 is a global minimum.

# ## 5. Roundup
#
# We introduced the softmax function, which can differentiably parametrise any discrete PDF with no zero probabilities. Then, using the softmax parametrisation, we showed that $q \mapsto H(p, q)$ is a convex function. This convexity is important because, as noted in the [previous post](https://enrightward.github.io/enrightward.github.io/posts/cross-entropy-part-1/), a common machine learning task is to update a parametrised PDF $q$ to more closely resemble an idealised PDF $p$ implicit in some data set — language modelling often uses this technique, for example. 
The above convexity result, combined with last post's proof that $q \mapsto H(p, q)$ achieves a minimum at $q = p$, implies this function can be optimised using a gradient descent algorithm.
notebooks/cross-entropy-part-2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # CHAPTER6: Data Loading, Storage, and File Formats
# Worked examples following chapter 6 of "Python for Data Analysis".

# ## 6.1 Reading and Writing Data in Text Format

# show the contents of ex1.csv
# !cat examples/ex1.csv

# +
# read the csv file and store it in a DataFrame:
# -

import pandas as pd

df = pd.read_csv('examples/ex1.csv')
df

# +
# load the data with read_table plus an explicit delimiter
# (comma here, since the file is csv)
# -

pd.read_table('examples/ex1.csv', sep=',')

# +
# read a csv file that has no header row
# -

# !cat examples/ex2.csv

pd.read_csv('examples/ex2.csv', header=None)

# +
# supply our own column names, since the file has no header
# -

pd.read_csv('examples/ex2.csv', names=['a','b','c','d','message'])

# !cat examples/ex2.csv

# You can either indicate you want the column at index 4 or named 'message' using
# the index_col argument
names = ['a','b','c','d','message']
pd.read_csv('examples/ex2.csv', names=names, index_col='message')

# !cat examples/csv_mindex.csv

# hierarchical (multi-level) index built from two columns
parsed = pd.read_csv('examples/csv_mindex.csv', index_col=['key1','key2'])
parsed

# In some cases, a table might not have a fixed delimiter, using whitespace or
# some other pattern to separate fields
list(open('examples/ex3.txt'))

result = pd.read_table('examples/ex3.txt', sep='\s+')
result

# +
# skip first, third, and fourth rows of the file with skiprows
# -

# !cat examples/ex4.csv

pd.read_csv('examples/ex4.csv', skiprows=[0, 2, 3])

# +
# Handling missing values
# -

# !cat examples/ex5.csv

result = pd.read_csv('examples/ex5.csv')
result

# +
# NA and NULL markers are read in as NaN
# -

pd.isnull(result)

result

result = pd.read_csv('examples/ex5.csv', na_values=['NULL'])
result

# +
# sentinels: per-column lists of values to treat as NA
# -

sentinels = {'message': ['foo','NA'], 'something': ['two']}
pd.read_csv('examples/ex5.csv', na_values=sentinels)

# +
# note how the words 'foo' and 'two' become NaN

# +
# READING TEXT FILES IN PIECES
# -

# limit the pandas display to only 10 rows
pd.options.display.max_rows = 10

result = pd.read_csv('examples/ex6.csv')
result

pd.options.display.max_rows = 6

result

pd.options.display.max_rows = 10

# +
# read a small number of rows
# -

pd.read_csv('examples/ex6.csv', nrows = 6)

# specify a chunksize as a number of rows <br>
# The parameter essentially means the number of rows to be read into a
# dataframe at any single time in order to fit into the local memory.
chunker = pd.read_csv('examples/ex6.csv', chunksize=1000)
chunker

type(chunker)

# aggregate the value counts of the 'key' column chunk by chunk
tot = pd.Series([])
for piece in chunker:
    tot = tot.add(piece['key'].value_counts(), fill_value=0)

type(tot)

tot = tot.sort_values(ascending=False)

tot[:10]

tot

# +
# WRITING DATA TO TEXT FORMAT
# -

# !cat examples/ex5.csv

data = pd.read_csv('examples/ex5.csv')
data

data.to_csv('examples/out.csv')

# !cat examples/out.csv

# +
# writing to sys.stdout so it prints the text result to the console
# -

import sys

data.to_csv(sys.stdout, sep='|')

# Missing values appear as empty strings in the output. You might want to
# denote them by some other sentinel value:
data.to_csv(sys.stdout, na_rep='NULL')

data.to_csv(sys.stdout, index=False, header=False)

# write only a subset of the columns, and in an order of your choosing:
data.to_csv(sys.stdout, index=False, columns=['a','b','c'])

# Series also has to_csv method:
dates = pd.date_range('1/1/2000', periods=7)
dates

import numpy as np

ts = pd.Series(np.arange(7), index=dates)
ts

ts.to_csv('examples/tseries.csv')

# !cat examples/tseries.csv

# +
# WORKING WITH DELIMITED FORMATS
# -

# !cat examples/ex7.csv

import csv

f = open('examples/ex7.csv')
reader = csv.reader(f)
reader

type(reader)

for line in reader:
    print(line)

with open('examples/ex7.csv') as f:
    lines = list(csv.reader(f))

lines

header, values = lines[0], lines[1:]

# create dict from list: column name -> tuple of column values
data_dict = {h: v for h, v in zip(header, zip(*values))}
data_dict

# custom csv dialect: semicolon-delimited, minimal quoting
class my_dialect(csv.Dialect):
    lineterminator = '\n'
    delimiter = ';'
    quotechar = '"'
    quoting = csv.QUOTE_MINIMAL

f = open('examples/ex7.csv')
reader = csv.reader(f, dialect=my_dialect)
reader

reader = csv.reader(f, delimiter='|')
reader

reader

# +
# write delimited files manually
# -

with open('mydata.csv', 'w') as f:
    writer = csv.writer(f, dialect=my_dialect)
    writer.writerow(('one','two','three'))
    writer.writerow(('1','2','3'))
    writer.writerow(('4','5','6'))
    writer.writerow(('7','8','9'))

# !cat examples/ex7.csv

# +
#JSON DATA
# -

import json

obj = """
{"name": "Wes",
 "places_lived": ["United States", "Spain", "Germany"],
 "pet": null,
 "siblings": [{"name": "Scott", "age": 30, "pets": ["Zeus","Zuko"]},
              {"name": "Ketie", "age": 38, "pets": ["Sixes", "Stache", "Cisco"]}]
}
"""
obj

# convert a json string to python form
result = json.loads(obj)
result

# +
# convert a python object to json
# -

asjson = json.dumps(result)
asjson

# +
# convert a JSON object or list of objects to a DataFrame
# -

result['siblings']

siblings = pd.DataFrame(result['siblings'], columns=['name','age'])
siblings

# +
# Convert JSON datasets in specific arrangements into a Series or DataFrame
# -

# !cat examples/examples.json

# !cat examples/example.json

data = pd.read_json('examples/example.json')
data

# +
# convert data from pandas to JSON
# -

print(data.to_json())

print(data.to_json(orient='records'))

# +
# XML AND HTML: WEB SCRAPING
# -

tables = pd.read_html('examples/fdic_failed_bank_list.html')
tables

len(tables)

failures = tables[0]
failures

failures.head()

# +
# cleaning and analysis, computing the number of bank failures by year
# -

close_timestamps = pd.to_datetime(failures['Closing Date'])
close_timestamps

close_timestamps.dt.year.value_counts()

# +
# Parsing XML With lxml.objectify

# +
# show an example of how to use lxml to parse data from a more general XML Format
# -

from lxml import objectify

path = 'examples/mta_perf/Performance_MNR.xml'
path

parsed = objectify.parse(open(path))
parsed

root = parsed.getroot()
root

# flatten each INDICATOR record into a dict, skipping bookkeeping fields
data = []
skip_fields = ['PARENT_SEQ', 'INDICATOR_SEQ','DESIRED_CHANGE','DECIMAL_PLACE']
for elt in root.INDICATOR:
    el_data = {}
    for child in elt.getchildren():
        if child.tag in skip_fields:
            continue
        el_data[child.tag] = child.pyval
    data.append(el_data)

perf = pd.DataFrame(data)
perf

perf.head()

from io import StringIO

tag = '<a href="http://www.google.com">Google</a>'

root = objectify.parse(StringIO(tag)).getroot()
root

root.get('href')

root.text

# ## 6.2 Binary Data Format

# +
# easiest ways to store data in binary format using Python's built-in pickle serialization
# -

# writes the data to disk in pickle format
frame = pd.read_csv('examples/ex1.csv')
frame

frame.to_pickle('examples/frame_pickle')

pd.read_pickle('examples/frame_pickle')

# +
# Using HDF5 Format
# -

frame = pd.DataFrame({'a': np.random.randn(100)})
frame

store = pd.HDFStore('mydata.h5')

frame['a']

type(frame)

type(frame['a'])

store['obj1'] = frame
store['obj1_col'] = frame['a']
store

store['obj1']

store['obj1_col']

# +
# HDFStore supports two storage schemas: 'fixed' and 'table'
# -

store.put('obj2', frame, format='table')

store['obj2']

type(store['obj2'])

# 'table' format is slower but supports query-style selection:
store.select('obj2', where=['index >= 10 and index <= 15'])

frame.to_hdf('mydata.h5', 'obj3', format='table')

# +
# READING MICROSOFT EXCEL FILES
# -

xlsx = pd.ExcelFile('examples/ex1.xlsx')

pd.read_excel(xlsx, 'Sheet1')

frame = pd.read_excel('examples/ex1.xlsx', 'Sheet1')
frame

# +
# write pandas data to excel format
# -

writer = pd.ExcelWriter('examples/ex2.xlsx')
frame.to_excel(writer, 'Sheet1')
writer.save()

frame.to_excel('examples/ex2.xlsx')

frame

pd.read_excel('examples/ex2.xlsx', 'Sheet1')

# ## 6.3 Interacting with Web APIs

import requests

url = 'https://api.github.com/repos/pandas-dev/pandas/issues'
resp = requests.get(url)
resp

# The Response object's json method will return a dictionary containing JSON
# parsed into native Python objects:
data = resp.json()
data

data[0]['title']

# We can pass data directly to DataFrame and extract fields of interest
issues = pd.DataFrame(data, columns=['number','title','labels','state'])
issues

# ## 6.4 Interacting with Databases

# create a sqlite database
import sqlite3

query = """
CREATE TABLE test(
a VARCHAR(20), b VARCHAR(20),
c REAL, d INTEGER
);
"""
con = sqlite3.connect('mydata.sqlite')
con.execute(query)
con.commit()

# +
# insert a few rows of data
# -

data = [('Atlanta', 'Georgia', 1.25, 6),
        ('Tallahassee','Florida', 2.6, 3),
        ('Secramento','California', 1.7, 5)
        ]
stmt = "INSERT INTO test VALUES(?, ?, ?, ?)"
con.executemany(stmt, data)
con.commit()

cursor = con.execute('select * from test')
cursor

rows = cursor.fetchall()
rows

cursor.description

pd.DataFrame(rows, columns=[x[0] for x in cursor.description])

# +
# connect to the same SQLite database with SQLAlchemy and read data from the table created before
# -

import sqlalchemy as sqla

db = sqla.create_engine('sqlite:///mydata.sqlite')
pd.read_sql('select * from test', db)
CHAPTER6 Data Loading Storage and File Formats/Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compute the (reduced) echelon form of a matrix by hand-rolled Gaussian
# elimination, then read off bases for its null space, row space and
# column space.
import numpy as np

mat = [[-3, 6, -1, 1, -7],
       [1, -2, 2, 3, -1],
       [2, -4, 5, 8, -4]]
mat = np.array(mat, dtype=np.double)
mat

m, n = mat.shape

# +
def count_zeros(arr):
    """Return, for each row of `arr`, the column index of its first non-zero
    entry (after rounding to 5 decimals to absorb float noise); a row of all
    zeros yields `arr.shape[1]`. Equivalently: the number of leading zeros."""
    zeros = []
    arr = np.round(arr, 5)
    for row in arr:
        for ind, element in enumerate(row):
            if element != 0:
                zeros.append(ind)
                break
            elif ind == arr.shape[1] - 1:
                # reached the end without finding a non-zero entry
                zeros.append(ind + 1)
                break
    return zeros

def sort_by_zeros(arr):
    """Sort rows by their number of leading zeros (ascending) and return
    (sorted leading-zero counts, row-permuted copy of `arr`)."""
    zeros = count_zeros(arr)
    arr_sorted = arr[np.argsort(zeros)]
    return np.sort(zeros), arr_sorted

def rr_down(sliced):
    """Eliminate column 0 of `sliced` below its top (pivot) entry, IN PLACE.
    The top row itself is not rescaled. Assumes sliced[0, 0] != 0."""
    m, n = sliced.shape
    # top row scaled so its leading entry is 1, as a 1 x n row
    row_zero = (sliced[0, :] / sliced[0, 0])[..., np.newaxis].T
    # subtract (leading entry of each lower row) * scaled top row
    coefs_mat = np.repeat(a=row_zero, repeats=m - 1, axis=0) * (-1 * sliced[1:, 0][:, np.newaxis])
    sliced[1:, :] += coefs_mat

def rr_up(sliced):
    """Normalise the LAST row of `sliced` by its leading entry, then
    eliminate column 0 above it, IN PLACE. Assumes sliced[-1, 0] != 0."""
    m, n = sliced.shape
    sliced[-1, :] /= sliced[-1, 0]
    row_last = sliced[-1, :][..., np.newaxis].T
    coefs_mat = np.repeat(a=row_last, repeats=m - 1, axis=0) * (-1 * sliced[:-1, 0][:, np.newaxis])
    sliced[:-1, :] += coefs_mat

def echelon_form(mat):
    """Return an (unreduced) echelon form of `mat` via forward elimination.
    Works on a copy; pivot rows are chosen by sorting on leading zeros.
    Note: `sliced` is a view, so rr_down mutates `mat`'s copy in place."""
    mat = mat.copy()
    for i in range(mat.shape[0] - 1):
        sliced = mat[i:, :]
        zeros, sliced = sort_by_zeros(sliced)
        pivot_position = zeros[0]
        rr_down(sliced[:, pivot_position:])
        mat[i:, :] = sliced
    return mat

def reduced_echelon_form(mat):
    """Back-substitute an echelon-form matrix into reduced echelon form.
    Assumes `mat` is already in echelon form (it is called on the output of
    echelon_form below) — pivot columns are read once from the input."""
    ef = mat.copy()
    zeros = count_zeros(ef)
    m, n = ef.shape
    for i in reversed(range(m)):
        sliced = ef[:i + 1, :]
        pivot_position = zeros[i]
        if pivot_position == n:
            # all-zero row: nothing to eliminate
            continue
        rr_up(sliced[:, pivot_position:])
        ef[:i + 1, :] = sliced
    return ef
# -

ef = echelon_form(mat)
print(np.round(ef, 3))

zeros = count_zeros(ef)
zeros

ref = reduced_echelon_form(ef)
print(np.round(ref, 3))

# pivot columns = leading-zero counts of non-zero rows
pivots = np.array(zeros)
pivots = pivots[pivots != n]
pivots

# NOTE(review): exploratory leftover — this masks ef's rows with a boolean
# array of length len(pivots), which only works when len(pivots) == m.
ef[pivots != n - 1]

# +
def get_null_space(ref, pivots):
    """Build a basis for the null space from the reduced echelon form `ref`:
    one column per free variable (n - rank of them).

    NOTE(review): `enumerate(ref_row[ref_row != 0])` discards the original
    column positions of the non-zero entries, and `ref_row_index + i` assumes
    they line up with consecutive free columns. This looks fragile when a zero
    entry sits between free columns — verify against a known example."""
    m, n = ref.shape
    null_space = np.zeros((n, n - pivots.shape[0]))
    for ref_row_index in range(pivots.shape[0]):
        pivot = pivots[ref_row_index]
        # entries of this pivot row to the right of its pivot
        ref_row = ref[ref_row_index, pivot+1:]
        for i, e in enumerate(ref_row[ref_row != 0]):
            null_space[pivot, ref_row_index + i] = -1 * e
    # set the free-variable coordinate of each basis vector to 1
    col = 0
    for row in [i for i in range(n) if i not in pivots.tolist()]:
        null_space[row, col] = 1
        col += 1
    return null_space

def get_row_space(ref, pivots):
    """Basis of the row space: the non-zero rows of `ref`, as columns."""
    return ref[:pivots.shape[0], :].T

def get_col_space(mat, pivots):
    """Basis of the column space: the pivot columns of the ORIGINAL matrix."""
    return mat[:, pivots]
# -

get_null_space(ref, pivots)

get_row_space(ref, pivots)

get_col_space(mat, pivots)
3/mat_bases.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/GerardoRojano/daa_2021_1/blob/master/2Octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="T2bC9pHVh6Yx" # ## Meteorología en México # En Sistema meteorológico nacional lleva el registro de la lluvias desde el año 1985 y lo pone a disposición de la población por medio de la pagina datos.gob.mx. # # En la siguiente liga se encuentran 2 archivos separados por comas CSV correspondientes a los registros de lluvias # mensuales y anuales de los años 2017 y 2018. En los columnas se encuentran 13, correspondientes al promedio mensual y el promedio anual. # En los renglones se encuentran 33, correspondientes a cada uno de los 32 estados y a nivel nacional. # # https://drive.google.com/file/d/1lamkxgq2AsXRu81Y4JTNXLVld4og7nxt/view?usp=sharing # # # ## Planteamiento del problema # Diseñar un algoritmo y programarlo para que: # 1. Solicite por teclado el año, el estado y el mes, en base a esa información: # - muestre en pantalla el promedio de ese mes en ese estado en el año seleccionado. # - muestre en pantalla el promedio anual del estado seleccionado. # - muestre la suma de los 12 meses de ese estado en el año seleccionado. # # 2. Busque el mes que mas llovió en todos los estados durante esos dos años. Imprimir año, estado y mes. # 3. Busque el mes que menos llovió en los dos. Imprimir año, estado y mes. 
# + id="uMlW1nIShnmb"
# Colab helper: prompt the user to upload the two precipitation CSV files.
from google.colab import files
files.upload()

# + id="qNpvVUg9s1PK"
import pandas as pd

# Column layout of the CONAGUA precipitation CSVs: January lives in the long
# header column and the remaining months spill into pandas' "Unnamed" columns.
MONTH_COLS = {
    "ENE": 'PRECIPITACIîN A NIVEL NACIONAL Y POR ENTIDAD FEDERATIVA',
    "FEB": 'Unnamed: 2',
    "MAR": 'Unnamed: 3',
    "ABR": 'Unnamed: 4',
    "MAY": 'Unnamed: 5',
    "JUN": 'Unnamed: 6',
    "JUL": 'Unnamed: 7',
    "AGO": 'Unnamed: 8',
    "SEP": 'Unnamed: 9',
    "OCT": 'Unnamed: 10',
    "NOV": 'Unnamed: 11',
    "DIC": 'Unnamed: 12',
}
ANNUAL_COL = 'Unnamed: 13'  # precomputed yearly total per state


def consulta_interactiva(df, year_col):
    """Interactively show one state's monthly value, yearly mean and yearly sum.

    df       -- precipitation table for one year
    year_col -- name of the column holding the state names ('2017' or '2018')

    This replaces the two near-identical copy-pasted branches of the original
    script (one per year) with a single parameterized routine.
    """
    print(df)
    print("que estado quieres ver ")
    estado = str(input().upper())
    e = df[df[year_col] == estado]
    print(e)
    # BUGFIX: the original prompt omitted NOV even though a NOV branch existed.
    print("Que mes buscas ENE, FEB, MAR, ABR, MAY, JUN, JUL, AGO, SEP, OCT, NOV, DIC ")
    mes = str(input().upper())
    if mes in MONTH_COLS:
        print(df[df[year_col] == estado][MONTH_COLS[mes]])
    # Mean over the twelve monthly columns for the chosen state
    # (the original assigned these to a..m and summed them by hand).
    meses = [float(df[df[year_col] == estado][col]) for col in MONTH_COLS.values()]
    promedio = sum(meses) / 12
    print("Promedio anual del estado seleccionado")
    print("{:.1f}".format(promedio))
    print("Suma de anual del estado")
    print(df[df[year_col] == estado][ANNUAL_COL])


datos = pd.read_csv('2017Precip.csv', encoding='latin')
datos2 = pd.read_csv('2018Precip.csv', encoding='latin')
print("En que año quieres buscar")
año = int(input())
if año == 2017:
    consulta_interactiva(datos, '2017')
if año == 2018:
    consulta_interactiva(datos2, '2018')

x = 0
contador = 0

# Driest state in January 2017: ascending sort, first row.
datos3 = datos.sort_values(by=MONTH_COLS["ENE"])
print(str(datos3.iloc[[0], [0]]))
# NOTE(review): this reads the UNSORTED frame — `datos3` was probably intended.
print(str(datos.iloc[[0], [1]]))
print("------------------------------------")

# Wettest state per month in 2017 (descending sort; the original printed the
# row at position 1 for each of ENE..NOV, repeated as 11 pasted blocks).
for col in list(MONTH_COLS.values())[:11]:      # ENE .. NOV
    datos = datos.sort_values(by=col, ascending=False)
    print(str(datos.iloc[[1], [0]]))
    print(str(datos.iloc[[1], [1]]))
print("-----------------------------------------------------")

# Driest state per month in 2018 (ascending sort, first row), ENE .. NOV.
for col in list(MONTH_COLS.values())[:11]:
    datos2 = datos2.sort_values(by=col)
    print(str(datos2.iloc[[0], [1]]))
2Octubre.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # The Harmonic Oscillator Strikes Back

# *Note:* Much of this is adapted/copied from https://flothesof.github.io/harmonic-oscillator-three-methods-solution.html

# This week we continue our adventures with the harmonic oscillator.
#
# ![1D Harmonic Oscillator](ho.png)

# The harmonic oscillator is a system that, when displaced from its equilibrium position, experiences a restoring force F proportional to the displacement x:
#
# $$F=-kx$$
#
# The potential energy of this system is
#
# $$V = {1 \over 2}k{x^2}$$

# These are sometimes rewritten as
#
# $$ F=- \omega_0^2 m x, \text{ } V(x) = {1 \over 2} m \omega_0^2 {x^2}$$
#
# Where $\omega_0 = \sqrt {{k \over m}} $

# If the equilibrium value of the harmonic oscillator is not zero, then
#
# $$ F=- \omega_0^2 m (x-x_{eq}), \text{ } V(x) = {1 \over 2} m \omega_0^2 (x-x_{eq})^2$$

# ## 1. Harmonic oscillator from last time (with some better defined conditions)

# Applying the harmonic oscillator force to Newton's second law leads to the following second order differential equation
#
# $$ F = m a $$
#
# $$ F= -m \omega_0^2 (x-x_{eq}) $$
#
# $$ a = - \omega_0^2 (x-x_{eq}) $$
#
# $$ x(t)'' = - \omega_0^2 (x-x_{eq}) $$

# The final expression can be rearranged into a second order homogeneous differential equation, and can be solved using the methods we used above

# This is already solved to remind you how we found these values

import sympy as sym
sym.init_printing()  # pretty-print sympy expressions in the notebook

# **Note** that this time we define some of the properties of the symbols. Namely, that the frequency is always positive and real and that the positions are always real

# Symbols with assumptions: omega_0 > 0 keeps the solutions and limits real.
omega0,t=sym.symbols("omega_0,t",positive=True,nonnegative=True,real=True)
xeq=sym.symbols("x_{eq}",real=True)
x=sym.Function("x",real=True)
x(t),omega0

# The equation of motion x'' + omega_0^2 (x - x_eq) = 0 as a sympy expression.
dfeq=sym.Derivative(x(t),t,2)+omega0**2*(x(t)-xeq)
dfeq

sol = sym.dsolve(dfeq)  # general solution with integration constants C1, C2
sol

# sol.args[0] is the left-hand side x(t); sol.args[1] is the right-hand side.
sol,sol.args[0],sol.args[1]

# **Note** this time we define the initial positions and velocities as real

x0,v0=sym.symbols("x_0,v_0",real=True)
# Initial conditions: x(0) = x_0 and x'(0) = v_0.
ics=[sym.Eq(sol.args[1].subs(t, 0), x0), sym.Eq(sol.args[1].diff(t).subs(t, 0), v0)]
ics

solved_ics=sym.solve(ics)  # solve the two conditions for C1 and C2
solved_ics

# ### 1.1 Equation of motion for $x(t)$

full_sol = sol.subs(solved_ics[0])  # particular solution with C1, C2 substituted
full_sol

# ### 1.2 Equation of motion for $p(t)$

m=sym.symbols("m",positive=True,nonnegative=True,real=True)
p=sym.Function("p")
# Momentum p(t) = m * dx/dt of the particular solution.
sym.Eq(p(t),m*sol.args[1].subs(solved_ics[0]).diff(t))

# ## 2. Time average values for a harmonic oscillator

# If we want to understand the average value of a time dependent observable, we need to solve the following integral
#
#
# $${\left\langle {A(t)} \right\rangle}_t = \begin{array}{*{20}{c}}
# {\lim }\\
# {\tau \to \infty}
# \end{array}\frac{1}{\tau }\int\limits_0^\tau  {A(t)dt} $$

# ### 2.1 Average position ${\left\langle {x} \right\rangle}_t$ for a harmonic oscillator

tau=sym.symbols("tau",nonnegative=True,real=True)
xfunc=full_sol.args[1]  # right-hand side of the particular solution x(t)
# Time average: (1/tau) * integral_0^tau x(t) dt, then tau -> infinity.
xavet=(xfunc.integrate((t,0,tau))/tau).limit(tau,sym.oo)
xavet

# The computer does not always make the best choices the first time. If you treat each sum individually this is not a hard limit to do by hand. The computer is not smart. We can help it by inserting an `expand()` function in the statement

xavet=(xfunc.integrate((t,0,tau))/tau).expand().limit(tau,sym.oo)
xavet

# ### 2.2 Exercise: Calculate the average momenta ${\left\langle {p} \right\rangle}_t$ for a harmonic oscillator

m=sym.symbols("m",positive=True,nonnegative=True,real=True)
p=sym.Function("p")
sym.Eq(p(t),m*sol.args[1].subs(solved_ics[0]).diff(t))

tau=sym.symbols("tau",nonnegative=True,real=True)
pfunc=sym.Eq(p(t),m*sol.args[1].subs(solved_ics[0]).diff(t)).args[1]  # rhs: p(t)
pavet=(pfunc.integrate((t,0,tau))/tau).limit(tau,sym.oo)
pavet

# ### 2.3 Exercise: Calculate the average kinetic energy of a harmonic oscillator

# Kinetic energy KE(t) = p(t)^2 / (2 m).
kefunct=((sym.Eq(p(t),m*sol.args[1].subs(solved_ics[0]).diff(t)).args[1])**2/(2*m))
kefunct

keavt=(kefunct.integrate((t,0,tau))/tau).expand().limit(tau,sym.oo)
keavt

# ## 3. Ensemble (Thermodynamic) Average values for a harmonic oscillator

# If we want to understand the thermodynamics ensemble average value of an observable, we need to solve the following integral.
#
#
# $${\left\langle {A(t)} \right\rangle}_{T} = \frac{\int{A e^{-\beta H}dqdp}}{\int{e^{-\beta H}dqdp} } $$
#
# You can think of this as a Temperature average instead of a time average.
#
# Here $\beta=\frac{1}{k_B T}$ and the classical Hamiltonian, $H$ is
#
# $$ H = \frac{p^2}{2 m} + V(q)$$
#
# **Note** that the factors of $1/h$ found in the classical partition function cancel out when calculating average values

# ### 3.1 Average position ${\left\langle {x} \right\rangle}_t$ for a harmonic oscillator

# For a harmonic oscillator with equilibrium value $x_{eq}$, the Hamiltonian is
# $$ H = \frac{p^2}{2 m} + \frac{1}{2} m \omega_0 (x-x_{eq})^2 $$

# First we will calculate the partition function $\int{e^{-\beta H}dqdp}$

k,T=sym.symbols("k,T",positive=True,nonnegative=True,real=True)
xT,pT=sym.symbols("x_T,p_T",real=True)  # phase-space integration variables
ham=sym.Rational(1,2)*(pT)**2/m + sym.Rational(1,2)*m*omega0**2*(xT-xeq)**2
beta=1/(k*T)
bolz=sym.exp(-beta*ham)  # Boltzmann weight e^(-beta H)
z=sym.integrate(bolz,(xT,-sym.oo,sym.oo),(pT,-sym.oo,sym.oo))  # partition function
z

# Then we can calculate the numerator $\int{A e^{-\beta H}dqdp}$
#

numx=sym.integrate(xT*bolz,(xT,-sym.oo,sym.oo),(pT,-sym.oo,sym.oo))
numx

# And now the average value

xaveT=numx/z
xaveT

# ### 3.2 Exercise: Calculate the average momenta ${\left\langle {p} \right\rangle}_t$ for a harmonic oscillator
#
# After calculating the value, explain why you think you got this number

k,T=sym.symbols("k,T",positive=True,nonnegative=True,real=True)
xT,pT=sym.symbols("x_T,p_T",real=True)
ham=sym.Rational(1,2)*(pT)**2/m + sym.Rational(1,2)*m*omega0**2*(xT-xeq)**2
beta=1/(k*T)
bolz=sym.exp(-beta*ham)
z=sym.integrate(bolz,(xT,-sym.oo,sym.oo),(pT,-sym.oo,sym.oo))
z

nump=sym.integrate(pT*bolz,(pT,-sym.oo,sym.oo),(xT,-sym.oo,sym.oo))
nump

paveT=nump/z
paveT

# +
#zero makes sense because the momenta is constantly canceling out. It has a Gaussian distribution and the mean value is zero.
# -

# ### 3.3 Exercise: Calculate the average kinetic energy
#
# The answer you get here is a well known result related to the energy equipartition theorem

k,T=sym.symbols("k,T",positive=True,nonnegative=True,real=True)
xT,pT=sym.symbols("x_T,p_T",real=True)
ham=sym.Rational(1,2)*(pT)**2/m + sym.Rational(1,2)*m*omega0**2*(xT-xeq)**2
beta=1/(k*T)
bolz=sym.exp(-beta*ham)
z=sym.integrate(bolz,(xT,-sym.oo,sym.oo),(pT,-sym.oo,sym.oo))
z

# Thermal average of p^2/(2m): the equipartition result.
keaveT=sym.integrate(pT**2/(2*m)*bolz,(xT,-sym.oo,sym.oo),(pT,-sym.oo,sym.oo))/z
keaveT

# # Back to the lecture

# ## 4. Exercise Verlet integrators

# In this exercise we will write a routine to solve for the equations of motion for a harmonic oscillator.
#
# Plot the positions and momenta (separate plots) of the harmonic oscillator as functions of time.
#
# Calculate trajectories using the following methods:
# 1. Exact solution
# 2. Simple taylor series expansion
# 3. Predictor-corrector method
# 4. Verlet algorithm
# 5. Leapfrog algorithm
# 6. Velocity Verlet algorithm

tau=sym.symbols("tau",nonnegative=True,real=True)
xfunc=full_sol.args[1]
xfunc

#1 Exact solution position
omega0,t=sym.symbols("omega_0,t",positive=True,nonnegative=True,real=True)
xeq=sym.symbols("x_{eq}",real=True)
x=sym.Function("x",real=True)
# Concrete parameters: x0 = 10, xeq = 0, v0 = 10, omega0 = 1.
full_sol1 = sym.simplify(full_sol.subs({x0:10, xeq:0 , v0:10, omega0:1}))
sym.plot(full_sol1.rhs,(t,-10,10))

#1 momenta
m=sym.symbols("m",positive=True,nonnegative=True,real=True)
p=sym.Function("p")
sym.Eq(p(t),m*sol.args[1].subs(solved_ics[0]).diff(t))
momentum=sym.Eq(p(t),m*sol.args[1].subs(solved_ics[0]).diff(t))
momentum1=sym.simplify(momentum.subs({x0:10, xeq:0, v0:10, omega0:1, m:1}))
sym.plot(momentum1.rhs,(t,-10,10))

#2 Simple Taylor Series expansion
import sympy as sym
import numpy as np
import matplotlib.pyplot as plt

# `t` here is the time step; the force is -x (omega0 = m = 1), so a = -x.
xt0=0
t=0.5
vt0=1
pos=[]
for i in range(0,100):
    vt=vt0+1/2*t**2*-xt0
    vt0=vt
    xt=xt0+vt0*t
    xt0=xt
    pos.append(xt)
# BUGFIX: the original plotted the undefined name `xlist` (NameError);
# the trajectory is accumulated in `pos`.
plt.plot(pos)
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.title('Taylor series approximation')

# +
#3 skip

# +
#4 Verlet algorithm
#r(t+dt)=2r(t)-r(t-dt)+dt**2*a
#v(t)=(r(t+dt)-r(t-dt))/2dt
import sympy as sym
import numpy as np
import matplotlib.pyplot as plt

# BUGFIX: removed the dangling statement `fig, (ax1, ax2)` — it referenced
# undefined names (NameError); presumably a leftover of an unused
# `plt.subplots(...)` call.
xt0=0
xt1=1
t=0.4
a=1
posit=[]
for i in range(0,100):
    # Verlet position update with force -x (omega0 = 1).
    xt2=2*xt1-xt0+t**2*(-(xt1))
    xt0=xt1
    xt1=xt2
    posit.append(xt2)
plt.plot(posit)
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.title('Verlet Algorithm-Position')
# -

xt0=0
xt2=1
t=2
vel=[]
for i in range(0,30):
    # BUGFIX: central difference is v(t) = (r(t+dt) - r(t-dt)) / (2*dt) as the
    # comment above states; the original `(xt2-xt0)/2*t` multiplied by t
    # instead of dividing by 2t due to operator precedence.
    # NOTE(review): feeding the velocity back in as the next "position"
    # (xt2 = vt) looks suspect — confirm against the intended exercise.
    vt=(xt2-xt0)/(2*t)
    xt0=xt2
    xt2=vt
    vel.append(vt)
plt.plot(vel)
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.title('Verlet Algorithm-Velocity')

#5 Leapfrog
import sympy as sym
import numpy as np
import matplotlib.pyplot as plt

xt0=1
vminushalft=0
t=0.2
posit=[]
for i in range(0,100):
    # Half-step velocity kick, then full-step position drift.
    vhalft=vminushalft+(t)*(-(xt0))
    vminushalft=vhalft
    xt1=xt0+(t)*(vhalft)
    xt0=xt1
    posit.append(xt1)
plt.plot(posit)
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.title('Leapfrog Alg Position')

xt0=1
t=0.3
vminushalft=1
vhalft=2
vel=[]
for i in range(0,100):
    # On-step velocity as the mean of the two surrounding half-step velocities.
    vt=(1/2)*((vhalft)+(vminushalft))
    vminushalft=vt
    vhalft=vminushalft+(t)*(-(xt0))
    vminushalft=vhalft
    xt1=xt0+(t)*(vhalft)
    xt0=xt1
    vel.append(vt)
plt.plot(vel)
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.title('Leapfrog Alg Velocity')

# +
#velocity verlet algorithm
import sympy as sym
sym.init_printing()

dt=0.01
x0=0
v0=1
pos=[]
for i in range(0,1000):
    # BUGFIX: velocity Verlet uses the average of the accelerations at t and
    # t+dt.  The original overwrote x0 before computing v1, so both
    # accelerations were evaluated at the same point (plain semi-implicit
    # Euler rather than velocity Verlet).
    a0=-x0                      # a(t)    = -x(t)   (omega0 = m = 1)
    x1=x0+v0*dt+1/2*a0*dt**2
    a1=-x1                      # a(t+dt)
    v1=v0+1/2*(a0+a1)*dt
    x0=x1
    v0=v1
    pos.append(x1)
plt.plot(pos)
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.title("Velocty Verlet Position")
# -

xt0=1
dt=2
vt0=1
vel=[]
for i in range(0,10):
    # Half-step velocity kick; NOTE(review): the position variable is then
    # overwritten with the velocity — confirm against the intended exercise.
    vtplushalfdt=vt0+1/2*dt*-xt0
    xt0=vtplushalfdt
    vel.append(vtplushalfdt)
plt.plot(vel)
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.title("Velocty Verlet Velocity")
harmonic_student.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Alpaca Japan

# ## Libraries

import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt

# ## Kmeans Function Creation

def kmeans_function(data, n_clusters, max_iter, seed = 2021):
    """Naive k-means clustering of 2-D points.

    data       -- sequence of (x, y) tuples
    n_clusters -- number of clusters to fit
    max_iter   -- maximum number of assignment/update iterations
    seed       -- RNG seed for the initial centroid sample (default 2021)

    Returns (cluster_ids, centroids): one integer label per input point and
    the final list of (x, y) centroid positions.  On convergence it also draws
    a scatter plot of the clustered points and centroids.
    """
    # Set seed to get the same outcome
    random.seed(seed)
    # BUGFIX: sample the initial centroids from the `data` parameter -- the
    # original read the module-level global `values`, so the function only
    # worked on that one data set.
    cent = random.sample(data, n_clusters)
    # Temporal table for the study
    temp = pd.DataFrame(data,columns = ['X', 'Y'])
    # Create column for the cluster_id
    temp['Cluster_id'] = ''
    # Number of iterations
    for j in range(max_iter):
        for i in range(len(data)):
            # BUGFIX: start from infinity instead of the magic number 100 so
            # the nearest-centroid assignment is correct for data on any scale.
            dist_0 = float('inf')
            # Assign the cluster_id
            for cen in range(n_clusters):
                # Euclidean distance
                dist = np.sqrt((data[i][0] - cent[cen][0])**2 + (data[i][1] - cent[cen][1])**2)
                # Comparation of distances
                if dist < dist_0:
                    dist_0 = dist
                    temp.loc[i, 'Cluster_id'] = cen
        # Save the centroids in order to check it later if it does not change
        cent_old = cent
        cent = []
        # Change position of the centroids
        for cat in range(n_clusters):
            df_cat = temp[temp['Cluster_id'] == cat].copy()
            x_cat = df_cat['X'].mean()
            y_cat = df_cat['Y'].mean()
            coord = (x_cat, y_cat)
            cent.append(coord)
        # Check if the centroids have not changed
        if cent == cent_old:
            # Show the number of iterations needed
            print('Number of iterations needed: {}'.format(j))
            # Visualization
            df_cent = pd.DataFrame(cent)
            plt.scatter(temp.iloc[:, 0], temp.iloc[:, 1], c = temp['Cluster_id'], cmap = 'viridis')
            plt.scatter(df_cent.iloc[:, 0], df_cent.iloc[:, 1], c = 'black', s = 200, alpha = 0.5, marker = 'x')
            plt.title('Kmeans plot (Number of clusters = {})'.format(n_clusters), size = 15)
            # Break the for loop
            break
    # Return the Cluster id and the position of the centroids
    return temp['Cluster_id'].to_list(),cent

# ## Model Testing

# Generate random numbers in the range [0.0, 1.0)
# Draw a reproducible toy sample of 100 (x, y) points in [0.0, 1.0).
random.seed(2021)
values = [(random.random(), random.random()) for _ in range(100)]

# Run the clustering on the sample: 3 clusters, at most 100 iterations.
cluster_id, centroids = kmeans_function(values, 3, 100)

# Peek at the first few labels and the final centroid positions.
cluster_id[:5], centroids
Kmeans_function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Object Orientation

import this

def a():
    pass

a

callable(a)

a()

type(_)  # `_` is the previous cell's result in a notebook (here: None from a())

class A(object):
    pass

a = A()  # `a` is an instance of the class A

type(a)

# ```java
#
# public class A {
#
#     private String name;
#
#     public A(String name){
#         this.name = name;
#     }
#
#     public String a(String a){}
#     public String a(int a){}
#
# }
# ```

class B(object):
    def __init__(self, v):
        # Runs on instantiation; stores `v` on the instance.
        print('The ctor of the class B has been invoked.')
        self.slot = v

    def yourself(self):
        # Returns the receiver itself.
        return self

B

dir(B)

b = B(v=4)  # I'm creating an instance of the class B, referencing it with `b`.

b.slot

b.yourself()

assert b == b.yourself()

callable(B)

# +
class Node(object):
    # Binary tree node; children are expected to answer visit_inorder too.
    def __init__(self, left, value, right):
        # the followings are all slots for each instance of this class.
        self.left = left
        self.value = value
        self.right = right

    def __repr__(self) -> str:
        return 'Node({0}, {1}, {2})'.format(repr(self.left), repr(self.value), repr(self.right))

    def __str__(self) -> str:
        return '({}) <- {} -> ({})'.format(self.left, self.value, self.right)

    def visit_inorder(self, f):
        # Left subtree, then this node's value, then the right subtree.
        assert callable(f) # `f` is a Callable object
        self.left.visit_inorder(f)
        f(self.value) #!
        self.right.visit_inorder(f)

class EmptyTree:
    # Null-object leaf: Node never has to check its children for None.
    def visit_inorder(self, f):
        'There is no value that should be passed to `f`'
        pass

    def __str__(self):
        return '•'

    def __repr__(self):
        return 'EmptyTree()'

empty_tree = EmptyTree() # we will use the lone instance of an empty tree.
# -

n1 = Node(left=empty_tree, value=3, right=empty_tree)

n1

callable(print)

n1.visit_inorder(f=print)

type(n1)

Node(None, 3, None)

repr(n1)

str(n1)

n2 = Node(left=n1, value=2, right=n1)

n2

print(str(n2))

str(Node(left=[], value=object(), right=4))

n2.visit_inorder(f=print)

n3 = Node(left=n2, value=0, right=n1)

s = []

n3.visit_inorder(f=lambda v: s.append(v))

s

def p(a):
    # Implicitly returns None; the expression result is discarded.
    print(a)
    a + '4'

def p_equiv(a):
    # Same as p, but the implicit return is written out.
    print(a)
    a + '4'
    return None

a = p('hello world')

a = p_equiv('hello world')

type(a)

b = lambda: print('hello world')

a = b()

type(a)

# ---

# # Back to the past...

class Node(object):
    def __init__(self, left, value, right):
        # the followings are all slots for each instance of this class.
        self.left = left
        self.value = value
        self.right = right

    def __repr__(self) -> str:
        return 'Node({0}, {1}, {2})'.format(repr(self.left), repr(self.value), repr(self.right))

    def __str__(self) -> str:
        return '({}) <- {} -> ({})'.format(self.left, self.value, self.right)

    def visit_inorder(self, f):
        assert callable(f) # `f` is a Callable object
        # This version guards with truthiness instead of a null-object child.
        if self.left:
            self.left.visit_inorder(f)
        f(self.value) #!
        if self.right:
            self.right.visit_inorder(f)

n1 = Node(left=None, value=3, right=None)

n1.visit_inorder(f=print)

n2 = Node(left=n1, value=2, right=n1)

n2

n2.visit_inorder(f=print)

# ---

# # Back to the future...

True, False

type(True)

type(False)

# +
class Node(object):
    def __init__(self, left, value, right):
        # the followings are all slots for each instance of this class.
        self.left = left
        self.value = value
        self.right = right

    def __repr__(self) -> str:
        return 'Node({0}, {1}, {2})'.format(repr(self.left), repr(self.value), repr(self.right))

    def __str__(self) -> str:
        return '({}) <- {} -> ({})'.format(self.left, self.value, self.right)

    def visit_inorder(self, f):
        assert callable(f) # `f` is a Callable object
        if self.left:
            self.left.visit_inorder(f)
        f(self.value) #!
        if self.right:
            self.right.visit_inorder(f)

    def __bool__(self):
        # Nodes are always truthy (and announce the check).
        print('Print from Node.__bool__')
        return True

class EmptyTree:
    def __str__(self):
        return '•'

    def __repr__(self):
        return 'EmptyTree()'

    def __bool__(self):
        # Empty trees are falsy, so `if self.left:` skips them.
        print('Print from EmptyTree.__bool__')
        return False

empty_tree = EmptyTree() # we will use the lone instance of an empty tree.
# -

n2 = Node(left=n1, value=2, right=n1)

n2.visit_inorder(f=print)

# ---

class F(object):
    # A callable accumulator that is also iterable.
    def __init__(self):
        self.accumulator = []

    def __call__(self, arg):
        self.accumulator.append(arg)

    def __iter__(self):
        return iter(self.accumulator)

    def __next__(self):
        # NOTE(review): `yield from` makes this a generator function, so
        # next(f) returns a fresh generator rather than the next element —
        # confirm this is the intended demonstration.
        yield from self.accumulator

f = F()

f.__call__(4)

f.accumulator

[f(i) for i in range(0, 100, 2)]

f.accumulator

g = F()

n1 = Node(left=empty_tree, value=3, right=empty_tree)

n2 = Node(left=n1, value=2, right=n1)

n2.visit_inorder(f=g)

g.accumulator

for i in g:
    print(i+1)

next(g)

next(_)  # notebook-only: `_` holds the previous cell's result
ipynbs/oo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Reference 1:
#
# ## [Unraveling the Mechanism of Photoinduced Charge Transfer in Carotenoid−Porphyrin−C60 Molecular Triad](https://pubs.acs.org/doi/10.1021/acs.jpclett.5b00074)

# - The molecular triad absorbs UV–visible light and produces a charge-separated state (CT2) in which an electron is transferred from the carotenoid (C) to C60, thus producing a large dipole moment of ~150 D.
#
#
# - Thermal fluctuations facilitate the transition between a linearly extended configuration, which is the most energetically stable one, and a bent configuration, which is nevertheless the dominant one at room temperature due to entropy.
#
#
# - The intramolecular triad conformation may greatly affect the CT kinetics
#
#
# - There are two possible mechanisms for the photoinduced charge transfer:
#     - In one mechanism, the charge separation follows a series of CT steps, where CT2, which is the final charge-separated state, is produced following coupling to CT1.
# - The second mechanism involves a single concerted step where the final CT2 state is directly coupled to the absorbing state of a localized π−π* excitation on the porphyrin chromophore # # # - **The triad spatial conformation strongly affects the process of charge separation of a molecular triad** # - The CT1 in the bent conformation **functions as a charge trap**, further hindering the generation of charge separation in CT2 # - The sequential mechanism is practically turned off in the bent conformation, the dominant mechanism for charge separation for the bent conformation lies in the concerted mechanism # - For the bent conformation, the charge transfer is attenuated due to a significantly reduced rate for the transfer process between the shorter and longer range CT states (CT1 ↔ CT2) # # # - It is encouraged to populate **linear conformations** over the bent ones for achieving efficient charge separation # # Reference 2 # ## [Linear-Response and Nonlinear-Response Formulations of the Instantaneous Marcus Theory for Nonequilibrium Photoinduced Charge Transfer](https://pubs.acs.org/doi/10.1021/acs.jctc.0c01250) # - MT vs. 
IMT # - Marcus theory (MT) # - Compute CT rate constants in a variety of complex systems in the condensed phase # - Expressed in terms of only three parameters # - **Cannot account for the effect caused by the nonequilibrium nature of the initial preparation** # - Instantaneous Marcus theory (IMT) # - Account for the effects due to the nonequilibrium initial preparation # - IMT expressions require nonequilibrium molecular dynamics (NEMD) simulations on the donor PES with initial nuclear conditions sampled on the equilibrated ground PES, which is computationally expensive # # # - NLR IMT & LR IMT: Compute IMT for nonequilibrium photoinduced CT with only equilibrium molecular dynamics (MD) simulations # - NLR IMT: **yields an excellent prediction** for the nonequilibrium IMT CT rate coefficients in all cases investigated, save 60% of computational cost # - LR IMT: predicts the **correct trends** of the CT rate coefficient but overestimate the transient IMT CT rate, save 80% of computational cost # # Reference 3 # ## [The effect of structural changes on charge transfer states in a light-harvesting carotenoid-diaryl-porphyrin-C60 molecular triad](https://aip.scitation.org/doi/10.1063/1.4876075) # - CPC60 triad molecule # - Consists of organic/inorganic pigments covalently linked to electron donor and/or acceptor moieties # - Retards charge recombination by the addition of a secondary donor (carotenoid) molecule which allowed for an increased separation between the particle and hole states # - The HOMO of the triad is on the carotenoid and the lowest three LUMOs are located on the fullerene # - The hybridization of the molecular orbitals belonging to different components is negligible such that the orbitals involved in charge-transfer excited state transitions are mostly localized on the parent components # # # - Conformational factors that may affect the CT performance # - Donor-acceptor (carotene-fullerene) distance (extent of folding) \-\-significant # - Torsions 
originated from the C60-porphyrin & the porphyrin-carotene linkage \-\-not significant # - The angle between the porphyrin macrocycle and its two meso-aryl groups \-\-not significant # # Reference 4 # ## [Photoinduced Charge Transfer Dynamics in the Carotenoid−Porphyrin−C60 Triad via the Linearized Semiclassical Nonequilibrium Fermi’s Golden Rule](https://pubs.acs.org/doi/10.1021/acs.jpcb.0c06306) # - Effects of nonequilibrium nature of the initial state on CT rates # - Bent structures # - Increase the ππ\* → CT1 transition rate by 40 times # - No significant effect on the overall ππ\* → CT2 transition rate # - Linear structures # - Have negligible effect on both of the steps # # - Solvation process # - Bent structures: THF molecules reorient so as to align their dipoles in a manner that leads to stabilization on the ππ\* PES, increasing the donor\-acceptor energy gap # - Linear structures: No change in THF molecules, no significant change in the energy gap # # # - Bent vs. linear # - The fact that both the timescale and kinetics of the ππ\* → CT1 transition are strongly conformation-dependent can be viewed as a structure-function relation, where the molecular structure (triad conformation) is seen to have a rather dramatic effect on the function (the CT rate).
projects/Gustave_Li/References/Reading_notes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Activation functions

# ## Imports

import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np

# ## Sigmoid

# A sigmoid function has been used as an activation function for neural networks for several decades, and only recently been partly replaced by ReLU. It is still used quite frequently though.

def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1.0/(1.0 + np.exp(-x))

x = np.linspace(-5.0, 5.0, 101)
_ = plt.plot(x, sigmoid(x))

# Note that the output of the sigmoid function is in the range $[0, 1]$.

# ## Hyperbolic tangent

# Whereas the output of the sigmoid function is always positive, the hyperbolic tangent is used when negative output values are required.

x = np.linspace(-5.0, 5.0, 101)
_ = plt.plot(x, np.tanh(x))

# ## ReLU versus SoftPlus

# An activation that is used quite often in the context of deep learning is ReLU (Rectified Linear Unit). It is an approximation for the SoftPlus function, and although it is not differentiable, it is far cheaper computationally.

def relu(x):
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, x)

def softplus(x):
    """Element-wise softplus: log(1 + exp(x)), a smooth approximation of ReLU."""
    return np.log(1.0 + np.exp(x))

x = np.linspace(-5.0, 5.0, 101)
plt.plot(x, relu(x), label='ReLU')
# BUGFIX: this curve is the softplus function; the legend previously
# mislabeled it as 'SoftMax'.
plt.plot(x, softplus(x), label='SoftPlus')
plt.legend(loc='upper left');

# ## SoftMax

# The SoftMax function is often used for an output layer that represents categorical data. It will relatively increase high values, decrease low values. More importantly, for categorical output represented by a one-hot encoding, it will normalize the outputs such that the sum is equal to 1, and they can be interpreted as the probability of the categories.

def softmax(x):
    """Normalize x to exp(x) / sum(exp(x)); the result sums to 1."""
    norm = np.sum(np.exp(x))
    return np.exp(x)/norm

x = np.random.uniform(low=-1.0, high=1.0, size=20)
plt.plot(x, softmax(x), 'o');

# The sum of the softmax values is indeed equal to 1.
np.sum(softmax(x))
hands-on/030_activation_functions_lazy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: mydlenv
#     language: python
#     name: mydlenv
# ---

# K-nearest-neighbours classification of an anonymised ("classified") data
# set: standardise the features, fit KNN, then pick K with an elbow plot of
# the test-set error rate.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# +
# Get the data
# Set index_col = 0 to use the first column as the index
# NOTE(review): machine-specific absolute path — adjust before running elsewhere.
df = pd.read_csv('/Users/rohithebbar/Desktop/Machine_learning_projects/k-nearest_neighbour/data/Classified Data', index_col=0)
# -

df.head()

df.shape

df.info()

df.describe()

# +
# Standardise the variable
from sklearn.preprocessing import StandardScaler
# -

scaler = StandardScaler()

# Fit the scaler on the features only (drop the label column).
scaler.fit(df.drop('TARGET CLASS',axis=1))

scaled_features = scaler.transform(df.drop('TARGET CLASS',axis = 1))

# Rebuild a DataFrame of the scaled features with the original column names.
df_feat = pd.DataFrame(scaled_features, columns = df.columns[:-1])
df_feat.head()

# +
# Train Test Split
from sklearn.model_selection import train_test_split
# -

# NOTE(review): no random_state, so the split (and every score below)
# changes between runs — confirm that is intended.
X_train, X_test, y_train, y_test = train_test_split(scaled_features, df['TARGET CLASS'],test_size=0.30)

print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)

# Using KNN
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors = 1)  # baseline: a single neighbour

knn.fit(X_train, y_train)

pred = knn.predict(X_test)

# +
# predictions and evaluations
from sklearn.metrics import classification_report, confusion_matrix
# -

print(confusion_matrix(y_test, pred))

print(classification_report(y_test, pred))

# +
# choosing K value
# Elbow method: refit for K = 1..39 and record the test-set error rate.
error_rate = []

for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors = i)
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    error_rate.append(np.mean(pred_i != y_test))
# -

plt.figure(figsize=(10, 6))
plt.plot(range(1, 40), error_rate, color='blue', linestyle='dashed', marker='o', markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')

# +
# FIRST A QUICK COMPARISON TO OUR ORIGINAL K=1
knn = KNeighborsClassifier(n_neighbors=1)

knn.fit(X_train,y_train)
pred = knn.predict(X_test)

print('WITH K=1')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))

# +
# NOW WITH K=7
knn = KNeighborsClassifier(n_neighbors=7)

knn.fit(X_train,y_train)
pred = knn.predict(X_test)

print('WITH K=7')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))

# +
# NOW WITH K=8
knn = KNeighborsClassifier(n_neighbors=8)

knn.fit(X_train,y_train)
pred = knn.predict(X_test)

print('WITH K=8')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))

# +
# NOW WITH K=26
knn = KNeighborsClassifier(n_neighbors=26)

knn.fit(X_train,y_train)
pred = knn.predict(X_test)

print('WITH K=26')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))
# -
k-nearest_neighbours/Day-12_KNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# XGBoost regression with a manual grid search repeated over 5 random
# train/CV/test splits, followed by refitting the chosen configuration on the
# full training data, writing a submission file, and SHAP feature analysis.
#
# NOTE(review): this notebook references `train_X`, `train_Y`, `test_X` and
# `df1`, which are not defined in this file — presumably produced by an
# earlier preprocessing notebook. Run that first.

# +
import numpy as np                      # FIX: used below but was never imported
import pandas as pd                     # FIX: used below but was never imported
import matplotlib.pyplot as plt         # FIX: plt.figure/plt.savefig used below
import xgboost
from sklearn.base import clone          # FIX: needed to fit independent models
from sklearn.model_selection import ParameterGrid
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline   # FIX: Pipeline was used but not imported
from sklearn.model_selection import train_test_split

# Columns to standardise (zero mean / unit variance); everything else is
# min-max scaled to [0, 1].
num_ftrs = ["v_0","v_1","v_2","v_3","v_4","v_5","v_6","v_7","v_8","v_9","v_10","v_11","v_12","v_13","v_14"]
max_ftrs = [i for i in train_X if i not in num_ftrs]

numeric_transformer = Pipeline(steps=[('scaler', StandardScaler())])
minmax_transformer = Pipeline(steps=[('scaler', MinMaxScaler())])

preprocessor = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, num_ftrs),
        ('max', minmax_transformer, max_ftrs)
    ])

best_models = []
test_scores = np.zeros(5)

# Template estimator; each grid point is fitted on an independent clone.
XGB = xgboost.XGBRegressor()

# find the best parameter set
param_grid = {"max_depth": [2,3,4,5,9]}
param_grid = {"learning_rate": [0.1],
              "n_estimators": [200],
              "gamma": [0],
              "seed": [0],
              "missing": [np.nan],
              "max_depth": [2,3,4,5,9],
              "colsample_bytree": [0.9],
              "subsample": [0.8,0.75]}
pg = ParameterGrid(param_grid)

train_R2score = np.zeros(len(pg))
val_R2score = np.zeros(len(pg))
grid = []

# create dataframes for train and val scores to store the R2 score
# from the loop for each rand state
train_R2_df = pd.DataFrame()
val_R2_df = pd.DataFrame()

for i in range(5):
    print('randoms state '+ str(i))
    # let's split to train, CV, and test
    X_other, X_test, y_other, y_test = train_test_split(train_X, train_Y, test_size=0.2, random_state=42*i)
    X_train, X_CV, y_train, y_CV = train_test_split(X_other, y_other, test_size=0.25, random_state=42*i)
    # fit_transform the training set
    X_prep = preprocessor.fit_transform(X_train)
    # little hacky, but collect feature names
    feature_names = preprocessor.transformers_[0][-1] + preprocessor.transformers_[1][-1]
    df_train = pd.DataFrame(data=X_prep, columns=feature_names)
    # transform the CV
    X_CV_prep = preprocessor.transform(X_CV)
    # transform the test
    X_test_prep = preprocessor.transform(X_test)

    for k in range(len(pg)):
        params = pg[k]
        # FIX: fit an independent clone per grid point. Previously the single
        # shared `XGB` object was refitted and appended each time, so every
        # entry of `grid` referenced the SAME (last-fitted) model and the
        # argmax selection below was meaningless.
        model = clone(XGB).set_params(**params)
        eval_set = [(X_CV_prep, y_CV)]
        model.fit(X=X_prep, y=y_train, early_stopping_rounds=10,
                  eval_set=eval_set, verbose=False)
        grid.append(model)
        y_CV_pred = model.predict(X_CV_prep, ntree_limit=model.best_ntree_limit)
        y_train_pred = model.predict(X_prep, ntree_limit=model.best_ntree_limit)
        train_R2score[k] = r2_score(y_train, y_train_pred)
        val_R2score[k] = r2_score(y_CV, y_CV_pred)

    # collect and save the best model
    # FIX: offset into `grid` by this random state's block; the bare
    # argmax index previously always pointed into random state 0's models.
    best = grid[i * len(pg) + np.argmax(val_R2score)]
    best_models.append(best)
    # calculate and save the test score using the best model's own
    # early-stopped tree count
    y_test_pred = best.predict(X_test_prep, ntree_limit=best.best_ntree_limit)
    test_scores[i] = r2_score(y_test, y_test_pred)
    print('the best max depth in the model parameters is:', pg[np.argmax(val_R2score)],
          'the matching test score is', test_scores[i])
    # get the val and train r2 scores into the dataframes created before,
    # for comparison
    train_R2_df[42*i] = train_R2score
    val_R2_df[42*i] = val_R2score
# -

# Test-set R2 scores recorded from a previous run of the loop above.
r2_score_xg = [0.9693566161830031,0.9676788956266471,0.9685968513122004,0.969983836267007,0.9677852076231475]
r2score_mean = np.mean(r2_score_xg)
r2score_mean

r2_score_std = ([0.9693566161830031,0.9676788956266471,0.9685968513122004,0.969983836267007,0.9677852076231475])
np.std(r2_score_std)

# from result above the best parameter is below
import xgboost as xgb
xgr = xgb.XGBRegressor(n_estimators=200, learning_rate=0.1, gamma=0, subsample=0.8,
                       colsample_bytree=0.9, max_depth=9)  #,objective ='reg:squarederror'
xgr.fit(train_X, train_Y)
pre = xgr.predict(test_X)
pre1 = xgr.predict(train_X)

# Target was log1p-transformed upstream (hence expm1 here) — TODO confirm
# against the preprocessing notebook.
sub_new = pd.concat([df1[df1['SaleID'].isnull()], pd.DataFrame(np.expm1(pre))], axis=1).rename({0: "price"}, axis=1)
sub_new[['SaleID', "price"]].to_csv(r'../results/submit.csv')

# FIX: r2_score expects (y_true, y_pred) — the arguments were swapped, which
# changes the result because R2 is not symmetric.
print('R2 score now:', r2_score(train_Y, pre1))

# SHAP analysis of the final model.
import shap
explainer = shap.TreeExplainer(xgr)
shap_values = explainer.shap_values(train_X)
shap.summary_plot(shap_values, train_X)
fig = shap.summary_plot(shap_values, train_X, show=False)
plt.savefig(r'../results/shap1.png', dpi=300, format="PNG")

# Gain-based global feature importances, as a sortable Series.
global_importance = pd.Series(xgr.feature_importances_, index=train_X.columns.tolist())
pd.set_option('display.max_rows', 1000)
global_importance.sort_values(ascending=True)

# +
fig = plt.figure(figsize=(20,20))
fig = global_importance.plot(kind='barh')
plt.savefig(r'../results/xgbr feature importance.png', dpi=300, format="PNG")
# -
src/XBGR model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from pyvista import set_plot_theme set_plot_theme('document') # Disabling Mesh Lighting {#disabling_mesh_lighting_example} # ======================= # # Disable mesh lighting. # # While plotters have a default set of lights and there are many options # for customizing lighting conditions in general, meshes have the option # to opt out of lighting altogether. Pass `lighting=False` to # `pyvista.Plotter.add_mesh`{.interpreted-text role="func"} to disable # lighting for the given mesh: # # + # sphinx_gallery_thumbnail_number = 1 import pyvista as pv from pyvista import examples horse = examples.download_horse().decimate(0.9) horse.rotate_z(-120) horse.points = (horse.points - horse.center) * 100 shifted = horse.copy() shifted.translate((0, 10, 0)) plotter = pv.Plotter() plotter.add_mesh(horse, color='brown') plotter.add_mesh(shifted, color='brown', show_edges=True, lighting=False) plotter.show() # - # Due to the obvious lack of depth detail this mostly makes sense for # meshes with non-trivial colors or textures. If it weren\'t for the edges # being drawn, the second mesh would be practically impossible to # understand even with the option to interactively explore the surface: # shifted.plot(color='brown', lighting=False) # For further examples about fine-tuning mesh properties that affect light # rendering, see the `ref_lighting_properties_example`{.interpreted-text # role="ref"} example. #
locale/examples/04-lights/mesh_lighting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The calculations of loss during the training of networks can use weighting
# to reinforce the learning of specific variables and features in data. In
# order to reinforce physically reasonable predictions, you can impose more
# strict penalties for predictions in physically invalid predictions. For
# example, a prediction of Power Conversion Efficiency (PCE) above the
# Shockley-Queisser predicted maximum PCE could increase exponentially,
# rather than linearly.
#
# The classes developed below are wrappers for PyTorch tensors loss
# functions, which additionally modify these classes using theoretically and
# empirically derived boundaries for network loss calculations.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

from sklearn.model_selection import train_test_split

import torch
import torch.nn as nn
import torch.nn.functional as F
# -

# Take in data as a dataframe for easy preprocessing
device_df = pd.read_excel('/Users/wesleytatum/Desktop/OPV_total_df.xlsx')
print(device_df.shape)
device_df.head()

# +
# Hyper parameters
num_epochs = 25
batch_size = int(len(device_df['PCE'])*0.8*0.9)  # 90% of x_train samples
learning_rate = 0.004

# Device configuration (GPU if available, otherwise CPU)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# +
# input features used to make prediction
X = device_df[['Anneal_time', 'Anneal_temp', 'MajorAL_avg', 'MajorAL_stdev',
               'MinorAL_avg', 'MinorAL_stdev', 'Ecc_avg', 'Ecc_stdev',
               'Orient_avg', 'Orient_stdev', 'Perim_avg', 'Perim_stdev',
               'GMM_label']]
# target features to be predicted
Y = device_df[['PCE']]

# split dataset into separate testing and training datasets
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, shuffle = True)

# convert pd.DataFrame -> np.ndarray -> torch.tensor
x_train_tensor = torch.tensor(x_train.values.astype(np.float32))
y_train_tensor = torch.tensor(y_train.values.astype(np.float32))
x_test_tensor = torch.tensor(x_test.values.astype(np.float32))
y_test_tensor = torch.tensor(y_test.values.astype(np.float32))

train_tensor = torch.utils.data.TensorDataset(x_train_tensor, y_train_tensor)
training_data_set = torch.utils.data.DataLoader(dataset = train_tensor,
                                                batch_size = batch_size,
                                                shuffle = True)

test_tensor = torch.utils.data.TensorDataset(x_test_tensor, y_test_tensor)
testing_data_set = torch.utils.data.DataLoader(dataset = test_tensor, shuffle = True)
# -

# define the neural network
class NN1(nn.Module):
    """Small fully-connected regressor for PCE prediction.

    Architecture: linear embedding -> two hidden linear layers -> a
    dropout/linear/Softplus branch -> single linear output neuron.

    Parameters
    ----------
    in_dims : int
        Number of input feature channels.
    out_dims : int
        Width of the embedding layer's output.
    """

    def __init__(self, in_dims, out_dims):
        super(NN1, self).__init__()

        # embedding layer
        self.em_layer = nn.Linear(in_dims, out_dims)

        # hidden layers
        self.h_layer1 = nn.Linear(out_dims, 32)
        self.h_layer2 = nn.Linear(32, 8)

        # output branch; Softplus keeps the branch activation non-negative
        self.PCE_branch = nn.Sequential(
            nn.Dropout(p = 0.3),
            nn.Linear(8, 32),
            nn.Linear(32, 64),
            nn.Linear(64, 16),
            nn.Dropout(p = 0.3),
            nn.Softplus()
        )
        self.outlayer = nn.Linear(16, 1)

    def forward(self, x):
        # data enters embedding layer
        out = self.em_layer(x)
        # embedded data is passed to hidden layers
        out = self.h_layer1(out)
        out = self.h_layer2(out)
        # embedded data is passed to output layer
        out = self.PCE_branch(out)
        out = self.outlayer(out)
        return out


def train_model(model, training_data_set, optimizer):
    """Run one training epoch; return the mean batch loss."""
    epoch_losses = []
    train_total = 0

    # switch model to training mode (enables dropout)
    model.train()

    criterion = ThresholdedMSELoss(lower = 0, upper = 6)
#     criterion = nn.MSELoss()

    for train_data, labels in training_data_set:
        train_data = train_data.to(device)
        labels = labels.to(device)

        model.zero_grad()  # zero out any gradients from prior loops

        out = model(train_data)  # gather model predictions for this loop

        # calculate batch loss and accuracy in the predictions
        batch_loss = criterion(out, labels)

        # backpropagate and take an optimizer step
        torch.autograd.backward(batch_loss)
        optimizer.step()

        # save loss and accuracy for this batch
        epoch_losses.append(batch_loss.item())
        train_total += 1

    # calculate and save total error and accuracy for this epoch of training
    epoch_loss = sum(epoch_losses)/train_total

    print(f'Epoch training loss: {epoch_loss}')

    return epoch_loss


def eval_model(model, testing_data_set):
    """Evaluate on the test loader; return the mean batch loss."""
    model.eval()

    criterion = ThresholdedMSELoss(lower = 0, upper = 6)
#     criterion = nn.MSELoss()

    # don't update nodes during evaluation b/c not training
    with torch.no_grad():
        epoch_losses = []
        test_total = 0

        for inputs, labels in testing_data_set:
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)

            # calculate loss per batch of testing data
            test_loss = criterion(outputs, labels)
            epoch_losses.append(test_loss.item())
            test_total += 1

        # Average all batches in Epoch
        test_epoch_loss = sum(epoch_losses)/test_total

        print(f"Epoch testing loss = {test_epoch_loss}")

    return test_epoch_loss


def model_validation(model, validation_data_set):
    """Return per-batch losses and MAPE accuracies on a validation loader."""
    model.eval()

    criterion = ThresholdedMSELoss(lower = 0, upper = 6)
#     criterion = nn.MSELoss()
    accuracy = MAPE()

    # don't update nodes during evaluation b/c not training
    with torch.no_grad():
        val_losses = []
        val_accuracies = []

        for inputs, labels in validation_data_set:
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)

            # calculate loss per batch of testing data
            val_loss = criterion(outputs, labels)
            val_losses.append(val_loss.item())

            val_acc = accuracy(outputs, labels)
            val_accuracies.append(val_acc.item())

    return val_losses, val_accuracies


class ThresholdedMSELoss(nn.Module):
    """
    This class contains a loss function that use a mean-squared-error loss
    for reasonable predictions and an exponential penalty for unreasonable
    predictions. They inherit from torch.nn.Module. For physically
    unreasonable conditions, prediction loss is more severely calculated.
    What qualifies as reasonable is based on empirically gathered datasets
    and literature reported boundaries of performance.

    For the following predictions that are improbable, the loss is penalized:
        - X < lower
        - X > upper
    """

    def __init__(self, lower, upper):
        super(ThresholdedMSELoss, self).__init__()
        self.lower = lower
        self.upper = upper

    def forward(self, predictions, labels):
        result_list = torch.zeros(predictions.size(0))
        element_count = 0

        for x, y in zip(predictions, labels):
            # Only torch.nn.Function() and torch.() functions are used so
            # that autograd can track the operations.
            if torch.le(x, torch.tensor([self.lower])) == torch.tensor([1]):
                # prediction at or below the lower physical bound:
                # squared error is squared again, i.e. error**4, so
                # out-of-range predictions are penalised more severely.
                error = torch.add(x, torch.neg(y))  # error = x + (-y)
                element_result = torch.pow(error, 2)
                # FIX: was torch.pow(element_result, 1), a no-op that made
                # this branch identical to plain MSE and defeated the
                # documented thresholded penalty.
                element_result = torch.pow(element_result, 2)

            elif torch.ge(x, torch.tensor([self.upper])) == torch.tensor([1]):
                # prediction at or above the upper physical bound: error**4
                error = torch.add(x, torch.neg(y))
                element_result = torch.pow(error, 2)
                # FIX: was torch.pow(element_result, 1) (no-op), see above.
                element_result = torch.pow(element_result, 2)

            # all other (physically reasonable) values of x: plain squared error
            else:
                error = torch.add(x, torch.neg(y))
                element_result = torch.pow(error, 2)

            result_list[element_count] = element_result
            element_count += 1

        # Average of all the (possibly penalised) squared errors
        result = result_list.mean()

        return result


class Accuracy(nn.Module):
    """
    Simple class to iterate through predictions and labels to determine
    overall accuracy of a model. A prediction counts as correct when its
    relative error is within `acc_thresh` of the label.
    """

    def __init__(self, acc_thresh = 0.1):
        super(Accuracy, self).__init__()
        self.acc_thresh = acc_thresh

    def forward(self, predictions, labels):
        element_count = 0
        correct = 0
        accuracy_list = []

        for x, y in zip(predictions, labels):
            # FIX: was torch.tensor(x-y), which re-wraps an existing tensor
            # (raises a UserWarning and detaches it); plain subtraction is
            # equivalent here.
            error = x - y

            # if precision <= accuracy threshold, count as correct
            if torch.le(torch.div(error, y), torch.tensor(self.acc_thresh)) == torch.tensor([1]):
                correct += 1
                element_count += 1
            else:
                element_count += 1

            # running accuracy after each element; the mean of these running
            # values is returned below
            accuracy = (correct/element_count) * 100
            accuracy_list.append(accuracy)

        acc_list = torch.tensor(accuracy_list)
        avg_acc = acc_list.mean()

        return avg_acc


class MAPE(nn.Module):
    """
    Simple class to iterate through pytorch tensors of predictions and
    ground-truths to calculate the Mean Absolute Percent Error (MAPE).
    """

    def __init__(self):
        super(MAPE, self).__init__()

    def forward(self, predictions, labels):
        absolute_percent_error_list = []
        count = 0

        for x, y in zip(predictions, labels):
            count += 1
            error = y - x
            ae = np.absolute(error)
            ape = ae/y
            absolute_percent_error_list.append(ape)

        mape = np.sum(absolute_percent_error_list) / count
        mape = mape * 100

        return mape


# +
# Instantiate our beautiful NN model
# takes in the morphology/processing features and predicts PCE
in_dims = int(x_train_tensor.size(1))  # number of x channels
out_dims = y_test.shape[1]             # number of predicted features

model = NN1(in_dims = in_dims, out_dims = out_dims).to(device)

# criterion = nn.MSELoss()
optimizer = torch.optim.Adam(params = model.parameters(), lr = learning_rate)

# +
# empty lists to hold loss per epoch
train_epoch_losses = []
test_epoch_losses = []

for epoch in range(num_epochs):
    train_epoch_loss = train_model(model = model,
                                   training_data_set = training_data_set,
                                   optimizer = optimizer)
    train_epoch_losses.append(train_epoch_loss)

    test_epoch_loss = eval_model(model = model,
                                 testing_data_set = testing_data_set)
    test_epoch_losses.append(test_epoch_loss)

# +
fig, ax = plt.subplots(figsize = (8,6))

epochs = np.arange(1, (num_epochs+1), 1)

plt.plot(epochs, train_epoch_losses, c = 'k', label = 'training error')
plt.plot(epochs, test_epoch_losses, c = 'r', label = 'testing error')
plt.legend(loc = 'upper right')
plt.title("Total Training & Testing Error")
ax.set_xlabel('Epoch')
ax.set_ylabel('Total MSE Loss')
plt.show()
# -

final_losses, final_accuracies = model_validation(model = model,
                                                  validation_data_set = testing_data_set)

# +
fig, ax = plt.subplots(figsize = (8,6))

samples = np.arange(0, len(final_accuracies), 1)

plt.plot(samples, final_losses, c = 'k', label = 'loss')
plt.plot(samples, final_accuracies, c = 'r', label = 'accuracy')
ax.set_xlabel('samples')
plt.legend(loc = 'upper left')
plt.show()
# -

final_accuracies

# +
model.eval()

predictions = []
targets = []

with torch.no_grad():
    for inputs, labels in testing_data_set:
        # FIX: .to() is not in-place; the results were previously discarded,
        # which would crash on a CUDA device.
        inputs = inputs.to(device)
        labels = labels.to(device)

        outputs = model(inputs)

        predictions.append(outputs.item())
        targets.append(labels.item())

# +
# Parity plot: predictions vs. ground truth with the y = x line.
xlin = ylin = np.arange(0, 100, 1)

fig, ax = plt.subplots(figsize = (8,6))
plt.scatter(predictions, targets)
plt.plot(xlin, ylin, c = 'k')
ax.set_xlim(0, 3)
ax.set_ylim(0, 3)
plt.show()
# -
ipynb/PhysicsInspiredLoss.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # Welcome to MESSAGEix
#
# ## Integrated Assessment Modeling for the 21st Century
#
# ### <NAME>, International Institute for Applied Systems Analysis
#
# #### Email: <EMAIL>, Github: @gidden
#
# #### https://iiasa.github.io/ene-present.github.io/cnrs-2018

# + [markdown] slideshow={"slide_type": "slide"}
# # Building a Simple Energy Model
#
# The goal of this tutorial is to build a simple energy model using
# `MESSAGEix` with minimal features that can be expanded in future tutorials.
#
# We will build the model component by component, focusing on both the
# **how** (code implementation) and **why** (mathematical formulation).

# + [markdown] slideshow={"slide_type": "slide"}
# # Follow Along at Home
#
# The full model documentation is available online at
# [https://messageix.iiasa.ac.at](https://messageix.iiasa.ac.at). Source code
# is at [https://github.com/iiasa/message_ix](https://github.com/iiasa/message_ix).
#
# <img src='assests/doc_page.png'>

# + [markdown] slideshow={"slide_type": "fragment"}
# And you can easily install `MESSAGEix` yourself and get all the tutorials:
#
# ```shell
# $ conda install -c conda-forge message-ix
# $ messageix-dl  # install all tutorials to your current directory
# ```

# + [markdown] slideshow={"slide_type": "slide"}
# # MESSAGEix
#
# <img src='assests/austria.png' width='700'>

# + [markdown] slideshow={"slide_type": "slide"}
# # MESSAGEix: the Model
#
# At it's core, an MI/LP optimization problem:
#
# <table><col width="200"><col width="400"><tr><td><p>$\min c^T x$<br>$s.t. Ax \leq b$</p></td><td><img src='assests/feasible.png'></td></tr></table>

# + [markdown] slideshow={"slide_type": "fragment"}
# - system of **constraints**
# - optimizing an **objective function**, nominally minimizing total system **cost**
# - decision VARIABLES ($x$) are capitalized
# - parameters ($A$, $b$) are lower case

# + [markdown] slideshow={"slide_type": "slide"}
# ## MESSAGEix: the Framework
#
# <img src='assests/message.png' width='700'>

# + [markdown] slideshow={"slide_type": "slide"}
# # A Brave New World: Westeros with Electricity!

# + [markdown] slideshow={"slide_type": "fragment"}
# <table align='center'><tr><td><img src='assests/westeros.jpg' width='150'></td><td><img src='assests/base_res.png'></td></tr></table>

# + [markdown] slideshow={"slide_type": "slide"}
# ## Setup
#
# First, we import all the packages we need.

# + slideshow={"slide_type": "fragment"}
import pandas as pd
import ixmp as ix
import message_ix

from message_ix.utils import make_df

# %matplotlib inline

# + [markdown] slideshow={"slide_type": "slide"}
# The `Platform` is your connection to a database which will automatically
# store all data for you

# + slideshow={"slide_type": "fragment"}
# In-memory/local HSQL database — no external DB server needed.
mp = ix.Platform(dbtype='HSQLDB')

# + [markdown] slideshow={"slide_type": "slide"}
# Once connected, we make our `Scenario` which we use to build our model.

# + slideshow={"slide_type": "fragment"}
scenario = message_ix.Scenario(mp, model='Westeros Electrified',
                               scen='baseline', version='new')

# + [markdown] slideshow={"slide_type": "slide"}
# ## Model Structure
#
# We start by defining basic characteristics of the model, including time,
# space, and the energy system structure.

# + [markdown] slideshow={"slide_type": "fragment"}
# The model horizon will span 3 decades. Let's assume that we're far in the
# future after the events of A Song of Ice and Fire (which occur ~300 years
# after Aegon the conqueror).
#
# | Math Notation | Model Meaning                |
# |---------------|------------------------------|
# | $y \in Y^H$   | time periods in history      |
# | $y \in Y^M$   | time periods in model horizon|

# + slideshow={"slide_type": "fragment"}
history = [690]
model_horizon = [700, 710, 720]
scenario.add_horizon({'year': history + model_horizon,
                      'firstmodelyear': model_horizon[0]})

# + [markdown] slideshow={"slide_type": "slide"}
# Our model will have a single `node`, i.e., its spatial dimension.
#
# | Math Notation | Model Meaning|
# |---------------|--------------|
# | $n \in N$     | node         |

# + slideshow={"slide_type": "fragment"}
country = 'Westeros'
scenario.add_spatial_sets({'country': country})

# + [markdown] slideshow={"slide_type": "slide"}
# And we fill in the energy system's `commodities`, `levels`, `technologies`,
# and `mode` (defining how certain technologies operate).
#
# | Math Notation | Model Meaning|
# |---------------|--------------|
# | $c \in C$     | commodity    |
# | $l \in L$     | level        |
# | $t \in T$     | technology   |
# | $m \in M$     | mode         |

# + slideshow={"slide_type": "fragment"}
scenario.add_set("commodity", ["electricity", "light"])
scenario.add_set("level", ["secondary", "final", "useful"])
scenario.add_set("technology", ['coal_ppl', 'wind_ppl', 'grid', 'bulb'])
scenario.add_set("mode", "standard")

# + [markdown] slideshow={"slide_type": "slide"}
# ## Supply and Demand (or Balancing Commodities)

# + [markdown] slideshow={"slide_type": "fragment"}
# The fundamental premise of the model is to satisfy demand. To first order,
# demand for services like electricity track with economic productivity
# (GDP). We define a GDP profile similar to first-world GDP growth from
# [1900-1930](https://en.wikipedia.org/wiki/List_of_regions_by_past_GDP):

# + slideshow={"slide_type": "fragment"}
gdp_profile = pd.Series([1., 1.5, 1.9], index=model_horizon)
gdp_profile.plot(title='Demand')

# + [markdown] slideshow={"slide_type": "slide"}
# The `COMMODITY_BALANCE` equation ensures that `demand` for each `commodity`
# is met at each `level` in the energy system.
#
# $\sum_{\substack{n^L,t,m \\ y^V \leq y}} output_{n^L,t,y^V,y,m,n,c,l} \cdot ACT_{n^L,t,y^V,y,m}$
# $- \sum_{\substack{n^L,t,m, \\ y^V \leq y}} input_{n^L,t,y^V,y,m,n,c,l} \cdot ACT_{n^L,t,m,y}$
# $\geq demand_{n,c,l,y} \quad \forall \ l \in L$
#
# The `COMMODITY_BALANCE` equation is formulated as an inequality implying
# that demand must be met, but supply of a commodity can exceed demand. The
# formulation implicitly assumes "zero cost of disposal", as is common in
# economics. This implementation simplifies calibration and is in line with
# conventions in energy systems modelling.

# + [markdown] slideshow={"slide_type": "slide"}
# First we establish demand. Let's assume
#
# - 40 million people in [300 AC](https://atlasoficeandfireblog.wordpress.com/2016/03/06/the-population-of-the-seven-kingdoms/)
# - similar population growth to Earth in the same time frame [(~factor of 12)](https://en.wikipedia.org/wiki/World_population_estimates)
# - a per capita demand for electricity of 1000 kWh
# - and 8760 hours in a year (of course!)
#
# Then we can add the demand parameter

# + [markdown] slideshow={"slide_type": "skip"}
# Note present day: [~17000 GWh in Austria](http://www.iea.org/statistics/statisticssearch/report/?country=AUSTRIA&product=electricityandheat&year=2015)
# with population [~9M](http://www.austria.org/population/) which is
# ~1890 kWh per capita

# + slideshow={"slide_type": "fragment"}
# people * growth factor * kWh/person, converted to GWa via 1e6 / 8760.
demand_baseyear = 40e6 * 12 * 1000 * 1e6 / 8760

light_demand = pd.DataFrame({
        'node': country,
        'commodity': 'light',
        'level': 'useful',
        'year': model_horizon,
        'time': 'year',
        'value': demand_baseyear * gdp_profile,
        'unit': 'GWa',
    })

# + slideshow={"slide_type": "slide"}
light_demand

# + slideshow={"slide_type": "fragment"}
scenario.add_par("demand", light_demand)

# + slideshow={"slide_type": "skip"}
#TODO Hide this one
# Base dictionaries re-used (via make_df) for every technology's
# input/output parameter rows below.
vintage_years, act_years = scenario.vintage_and_active_years()

base_input = {
    'node_loc': country,
    'year_vtg': vintage_years,
    'year_act': act_years,
    'mode': 'standard',
    'node_origin': country,
    'commodity': 'electricity',
    'time': 'year',
    'time_origin': 'year',
}

base_output = {
    'node_loc': country,
    'year_vtg': vintage_years,
    'year_act': act_years,
    'mode': 'standard',
    'node_dest': country,
    'time': 'year',
    'time_dest': 'year',
    'unit': '%',
}

# + [markdown] slideshow={"slide_type": "slide"}
# Working backwards along the Reference Energy System, we can add connections
# for the `bulb`

# + slideshow={"slide_type": "fragment"}
bulb_out = make_df(base_output, technology='bulb', commodity='light',
                   level='useful', value=1.0)
scenario.add_par('output', bulb_out)

bulb_in = make_df(base_input, technology='bulb', commodity='electricity',
                  level='final', value=1.0, unit='%')
scenario.add_par('input', bulb_in)

# + [markdown] slideshow={"slide_type": "slide"}
# Next, the `grid`, with loses of 13%

# + slideshow={"slide_type": "fragment"}
grid_efficiency = 0.87
grid_out = make_df(base_output, technology='grid', commodity='electricity',
                   level='final', value=grid_efficiency)
scenario.add_par('output', grid_out)

grid_in = make_df(base_input, technology='grid', commodity='electricity',
                  level='secondary', value=1.0, unit='%')
scenario.add_par('input', grid_in)

# + [markdown] slideshow={"slide_type": "slide"}
# And finally, our power plants. The model does not include the fossil
# resources used as `input` for coal plants; however, costs of coal
# extraction are included in the parameter $variable\_cost$.

# + slideshow={"slide_type": "fragment"}
coal_out = make_df(base_output, technology='coal_ppl',
                   commodity='electricity', level='secondary', value=1.)
scenario.add_par('output', coal_out)

wind_out = make_df(base_output, technology='wind_ppl',
                   commodity='electricity', level='secondary', value=1.)
scenario.add_par('output', wind_out)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Operational Constraints and Parameters

# + [markdown] slideshow={"slide_type": "fragment"}
# The model has a number of "reality" constraints, which relate built
# capacity to available power.
#
# The `CAPACITY_CONSTRAINT`
#
# $\sum_{m} ACT_{n,t,y^V,y,m,h}
#     \leq duration\_time_{h} \cdot capacity\_factor_{n,t,y^V,y,h} \cdot CAP_{n,t,y^V,y}
#     \quad t \ \in \ T^{INV}$

# + [markdown] slideshow={"slide_type": "slide"}
# This requires us to provide capacity factors

# + slideshow={"slide_type": "skip"}
#TODO: Hide this
base_capacity_factor = {
    'node_loc': country,
    'year_vtg': vintage_years,
    'year_act': act_years,
    'time': 'year',
    'unit': '%',
}

# + slideshow={"slide_type": "fragment"}
capacity_factor = {
    'coal_ppl': 0.85,
    'wind_ppl': 0.2,
    'bulb': 0.1,
}

for tec, val in capacity_factor.items():
    df = make_df(base_capacity_factor, technology=tec, value=val)
    scenario.add_par('capacity_factor', df)

# + [markdown] slideshow={"slide_type": "slide"}
# The model can further be provided `technical_lifetime`s in order to
# properly manage deployed capacity and related costs via the
# `CAPACITY_MAINTENENCE` constraint:
#
# $CAP_{n,t,y^V,y} \leq remaining\_capacity_{n,t,y^V,y} \cdot value \: \: \forall t \in T^{INV}$
#
# Where `value` can take different forms depending on what time period is
# considered:
#
# | Value | Condition |
# |---------------------------------------|----------------------------------------------------------------|
# | $\Delta_y historical\_new\_capacity_{n,t,y^V}$ | $y$ is first model period |
# | $\Delta_y CAP\_NEW_{n,t,y^V}$ | $y = y^V$ |
# | $CAP_{n,t,y^V,y-1}$ | if $y > y^V$ and $y - y^V < technical\_lifetime_{n,t,y^V}$ |

# + slideshow={"slide_type": "skip"}
base_technical_lifetime = {
    'node_loc': country,
    'year_vtg': model_horizon,
    'unit': 'y',
}

# + slideshow={"slide_type": "fragment"}
lifetime = {
    'coal_ppl': 20,
    'wind_ppl': 10,
    'bulb': 1,
}

for tec, val in lifetime.items():
    df = make_df(base_technical_lifetime, technology=tec, value=val)
    scenario.add_par('technical_lifetime', df)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Technological Diffusion and Contraction
#
# We know from historical precedent that energy systems can not be
# transformed instantaneously. Therefore, we use a family of constraints on
# **activity** (`ACT`) and **capacity** (`CAP`).

# + [markdown] slideshow={"slide_type": "fragment"}
# $\sum_{y^V \leq y,m} ACT_{n,t,y^V,y,m,h} \leq$
# $initial\_activity\_up_{n,t,y,h}
#     \cdot \frac{ \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|} - 1 }
#                { growth\_activity\_up_{n,t,y,h} }
#     + \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|}
#     \cdot \Big( \sum_{y^V \leq y-1,m} ACT_{n,t,y^V,y-1,m,h}
#     + \sum_{m} historical\_activity_{n,t,y-1,m,h}\Big)$

# + [markdown] slideshow={"slide_type": "slide"}
# This example limits the ability for technologies to **grow**. To do so, we
# need to provide `growth_activity_up` values for each technology that we
# want to model as being diffusion constrained. Here, we set this constraint
# at 5% per year.

# + slideshow={"slide_type": "skip"}
## TODO: hide this
base_growth = {
    'node_loc': country,
    'year_act': model_horizon,
    'time': 'year',
    'unit': '%',
}

# + slideshow={"slide_type": "fragment"}
growth_technologies = [
    "coal_ppl",
    "wind_ppl",
]

for tec in growth_technologies:
    df = make_df(base_growth, technology=tec, value=0.05)
    scenario.add_par('growth_activity_up', df)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Defining an Energy Mix
#
# To model the transition of an energy system, one must start with the
# existing system which are defined by the parameters `historical_activity`
# and `historical_capacity`. These parameters define the energy mix before
# the model horizon.
#
# We begin by defining a few key values:
#
# - how much useful energy was needed
# - how much final energy was generated
# - and the mix for different technologies

# + slideshow={"slide_type": "fragment"}
historic_demand = 0.85 * demand_baseyear
# Back out generation from demand using the grid losses defined above.
historic_generation = historic_demand / grid_efficiency
coal_fraction = 0.6

# + slideshow={"slide_type": "skip"}
# TODO: hide
base_capacity = {
    'node_loc': country,
    'year_vtg': history,
    'unit': 'GWa',
}

base_activity = {
    'node_loc': country,
    'year_act': history,
    'mode': 'standard',
    'time': 'year',
    'unit': 'GWa',
}

# + [markdown] slideshow={"slide_type": "slide"}
# Then, we can define the **activity** and **capacity** in the historic period

# + slideshow={"slide_type": "fragment"}
old_activity = {
    'coal_ppl': coal_fraction * historic_generation,
    'wind_ppl': (1 - coal_fraction) * historic_generation,
}

for tec, val in old_activity.items():
    df = make_df(base_activity, technology=tec, value=val)
    scenario.add_par('historical_activity', df)

# + slideshow={"slide_type": "fragment"}
# Convert historic activity to new-capacity equivalents, accounting for
# each technology's capacity factor (and coal's 20-year lifetime).
act_to_cap = {
    'coal_ppl': 1 / 10 / capacity_factor['coal_ppl'] / 2,  # 20 year lifetime
    'wind_ppl': 1 / 10 / capacity_factor['wind_ppl'],
}

for tec in act_to_cap:
    value = old_activity[tec] * act_to_cap[tec]
    df = make_df(base_capacity, technology=tec, value=value)
    scenario.add_par('historical_new_capacity', df)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Objective Function
#
# The objective function drives the purpose of the optimization. Do we wish
# to seek maximum utility of the social planner, minimize carbon emissions,
# or something else? Energy-system focused IAMs seek to minimize total
# discounted system cost over space and time.
#
# $\min \sum_{n,y \in Y^{M}} interestrate_{y} \cdot COST\_NODAL_{n,y}$

# + [markdown] slideshow={"slide_type": "fragment"}
# First, let's add the interest rate parameter.

# + slideshow={"slide_type": "fragment"}
rate = [0.05] * len(model_horizon)
unit = ['%'] * len(model_horizon)
scenario.add_par("interestrate", key=model_horizon, val=rate, unit=unit)

# + [markdown] slideshow={"slide_type": "fragment"}
# `COST_NODAL` is comprised of a variety of costs related to the use of
# different technologies.

# + [markdown] slideshow={"slide_type": "slide"}
# ### Investment Costs
#
# Capital, or investment, costs are invoked whenever a new plant or unit is
# built
#
# $inv\_cost_{n,t,y} \cdot construction\_time\_factor_{n,t,y} \cdot CAP\_NEW_{n,t,y}$

# + slideshow={"slide_type": "skip"}
# TODO hide this
base_inv_cost = {
    'node_loc': country,
    'year_vtg': model_horizon,
    'unit': 'USD/GWa',
}

# + slideshow={"slide_type": "fragment"}
# in $ / kW
costs = {
    'coal_ppl': 1500,
    'wind_ppl': 1100,
    'bulb': 5,
}

for tec, val in costs.items():
    # * 1e6 converts $/kW to USD/GWa units used by the model.
    df = make_df(base_inv_cost, technology=tec, value=val * 1e6)
    scenario.add_par('inv_cost', df)

# + [markdown] slideshow={"slide_type": "slide"}
# ### Fixed O&M Costs
#
# Fixed cost are only relevant as long as the capacity is active. This
# formulation allows to include the potential cost savings from early
# retirement of installed capacity.
#
# $\sum_{y^V \leq y} \ fix\_cost_{n,t,y^V,y} \cdot CAP_{n,t,y^V,y}$

# + slideshow={"slide_type": "skip"}
# TODO Hide
base_fix_cost = {
    'node_loc': country,
    'year_vtg': vintage_years,
    'year_act': act_years,
    'unit': 'USD/GWa',
}

# + slideshow={"slide_type": "fragment"}
# in $ / kW
costs = {
    'coal_ppl': 40,
    'wind_ppl': 30,
}

for tec, val in costs.items():
    df = make_df(base_fix_cost, technology=tec, value=val * 1e6)
    scenario.add_par('fix_cost', df)

# + [markdown] slideshow={"slide_type": "slide"}
# ### Variable O&M Costs
#
# Variable Operation and Maintence costs are associated with the costs of
# actively running the plant. Thus, they are not applied if a plant is on
# standby (i.e., constructed, but not currently in use).
#
# $\sum_{\substack{y^V \leq y \\ m,h}} \ var\_cost_{n,t,y^V,y,m,h} \cdot ACT_{n,t,y^V,y,m,h}$

# + slideshow={"slide_type": "skip"}
# TODO hide
base_var_cost = {
    'node_loc': country,
    'year_vtg': vintage_years,
    'year_act': act_years,
    'mode': 'standard',
    'time': 'year',
    'unit': 'USD/GWa',
}

# + slideshow={"slide_type": "fragment"}
# in $ / MWh
costs = {
    'coal_ppl': 24.4,
}

for tec, val in costs.items():
    # * 8760. * 1e3 converts $/MWh to USD/GWa.
    df = make_df(base_var_cost, technology=tec, value=val * 8760. * 1e3)
    scenario.add_par('var_cost', df)

# + [markdown] slideshow={"slide_type": "slide"}
# A full model will also have costs associated with
#
# - resource extraction: $\sum_{c,g} \ resource\_cost_{n,c,g,y} \cdot EXT_{n,c,g,y}$
# - emissions : $\sum_{e,t, \hat{e} \in E(e)} emission\_scaling_{e,\hat{e}} \cdot \ emission\_tax_{n,e,t,y} \cdot EMISS_{n,e,t,y}$
# - land use (emulator): $\sum_{s} land\_cost_{n,s,y} \cdot LAND_{n,s,y}$
# - exceedence on "hard" bounds of technology expansion and contraction: $\sum_{m,h} \ \Big( abs\_cost\_activity\_soft\_lo_{n,t,y,m,h} + level\_cost\_activity\_soft\_lo_{n,t,y,m,h} \cdot\ levelized\_cost_{n,t,y,m,h}\Big) \cdot ACT\_LO_{n,t,y,h}$

# + [markdown] slideshow={"slide_type": "slide"}
# ## Time to Solve the Model

# + slideshow={"slide_type": "fragment"}
scenario.commit(comment='basic model of Westerosi electrification')
scenario.set_as_default()

# + slideshow={"slide_type": "fragment"}
scenario.solve()

# + slideshow={"slide_type": "fragment"}
scenario.var('OBJ')['lvl']

# + [markdown] slideshow={"slide_type": "slide"}
# # Plotting Results

# + slideshow={"slide_type": "fragment"}
from tools import Plots
p = Plots(scenario, country, firstyear=model_horizon[0])

# + [markdown] slideshow={"slide_type": "slide"}
# # Activity
#
# How much energy is generated in each time period from the different energy
# supply technologies?

# + slideshow={"slide_type": "fragment"}
p.plot_activity(baseyear=True, subset=['coal_ppl', 'wind_ppl'])

# + [markdown] slideshow={"slide_type": "slide"}
# ## Capacity
#
# How many new plants are built?

# + slideshow={"slide_type": "fragment"}
p.plot_new_capacity(baseyear=True, subset=['coal_ppl', 'wind_ppl'])

# + [markdown] slideshow={"slide_type": "slide"}
# ## Electricity Price
#
# And how much does the electricity cost? These prices are given by the
# **dual variables** of the commodity balance constraint. Economists use the
# term **shadow prices** instead of dual variables.
They reflect the marginal price of electricity, taken from the most expensive producer. # # Note that prices are lower when the system does not depend on expensive technologies any longer because it sufficiently expanded capacity of cheap technologies. # + slideshow={"slide_type": "fragment"} p.plot_prices(subset=['light'], baseyear=True) # + [markdown] slideshow={"slide_type": "slide"} # # Fin # # With that, you have built and run your very first MESSAGEix model. Welcome to the community! # # Check us out on Github: https://github.com/iiasa/message_ix # # Get in touch with us online: https://groups.google.com/forum/message-ix # # And feel free to contact me with any further questions: <EMAIL> # + slideshow={"slide_type": "skip"} mp.close_db()
cnrs-2018/westeros.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
from scipy import stats
import scipy.io
from scipy.spatial.distance import pdist
from scipy.linalg import cholesky
import matlab.engine as engi
import matlab as mat
import math
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,roc_auc_score,recall_score,precision_score
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
import SMOTE
from sklearn.preprocessing import MinMaxScaler
import CFS
import platform
from os import listdir
from os.path import isfile, join
from glob import glob
from pathlib import Path
import sys
import os
import copy
import traceback
import matplotlib.pyplot as plt


# +
def load_data(project):
    """Load and join the static-code metrics (Understand) and the process
    metrics (Commit Guru) for *project*.

    Returns
    -------
    (X, y) : X is a DataFrame of features min-max scaled to [0, 1];
             y is the `Bugs` label column.
    """
    understand_path = 'data/understand_files/' + project + '_understand.csv'
    commit_guru_path = 'data/commit_guru/' + project + '.csv'
    understand_df = pd.read_csv(understand_path)
    # Columns that are entirely NaN carry no signal.
    understand_df = understand_df.dropna(axis=1, how='all')
    commit_guru_df = pd.read_csv(commit_guru_path)
    cols = understand_df.columns.tolist()
    # Drop commit metadata; only the numeric process metrics are features.
    commit_guru_df = commit_guru_df.drop(
        labels=['parent_hashes', 'author_name', 'author_email',
                'fileschanged', 'author_date', 'author_date_unix_timestamp',
                'commit_message', 'classification', 'fix', 'contains_bug',
                'fixes'],
        axis=1)
    # De-duplicate on the metric columns only (skip the leading
    # Kind/Name/commit_hash-style columns and the trailing label columns).
    understand_df = understand_df.drop_duplicates(cols[3:len(cols) - 2])
    df = understand_df.merge(commit_guru_df, on='commit_hash')
    # Rotate the first column to the end so the label sits last —
    # presumably `Bugs` is the first column after the merge (TODO confirm).
    cols = df.columns.tolist()
    cols = cols[1:] + [cols[0]]
    df = df[cols]
    df = df.drop(labels=['Kind', 'Name', 'commit_hash'], axis=1)
    df.dropna(inplace=True)
    df.reset_index(drop=True, inplace=True)
    y = df.Bugs
    X = df.drop('Bugs', axis=1)
    cols = X.columns
    scaler = MinMaxScaler()
    X = scaler.fit_transform(X)
    X = pd.DataFrame(X, columns=cols)
    return X, y


def apply_smote(df):
    """Oversample the minority class with the project-local SMOTE helper.

    Column names are re-assigned afterwards because the helper's output
    does not preserve them — TODO confirm against SMOTE.smote.
    """
    cols = df.columns
    smt = SMOTE.smote(df)
    df = smt.run()
    df.columns = cols
    return df


def apply_cfs(df):
    """Run Correlation-based Feature Selection (CFS) on *df*.

    Returns the reduced frame (selected features plus 'Bugs') and the
    selected column names.
    """
    y = df.Bugs.values
    X = df.drop(labels=['Bugs'], axis=1)
    X = X.values
    selected_cols = CFS.cfs(X, y)
    # FIX: the original wrote df.columns[[selected_cols]] — a nested list,
    # i.e. deprecated multi-dimensional indexing on a pandas Index.
    # selected_cols is already a sequence of positions.
    cols = df.columns[selected_cols].tolist()
    cols.append('Bugs')
    return df[cols], cols


# -

def run_self(project):
    """Within-project defect prediction for *project*.

    60/40 train/test split (fixed seed), SMOTE applied to the training
    partition only (so the test set stays untouched), logistic regression.
    Returns (recall, precision) on the held-out split.
    """
    X, y = load_data(project)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.40, random_state=18)
    df_smote = pd.concat([X_train, y_train], axis=1)
    df_smote = apply_smote(df_smote)
    y_train = df_smote.Bugs
    X_train = df_smote.drop('Bugs', axis=1)
    clf = LogisticRegression()
    clf.fit(X_train, y_train)
    predicted = clf.predict(X_test)
    print(classification_report(y_test, predicted))
    recall = recall_score(y_test, predicted, average='binary')
    precision = precision_score(y_test, predicted, average='binary')
    return recall, precision


# Run every project found in the Understand data directory and collect
# precision/recall for the summary box plots.
_dir = 'data/understand_files/'
projects = [f.split('_understand')[0] for f in listdir(_dir) if isfile(join(_dir, f))]

precision = []
recall = []
for project in projects:
    try:
        if project == '.DS_Store':  # macOS metadata file, not a project
            continue
        print("+++++++++++++++++ " + project + " +++++++++++++++++")
        r, p = run_self(project)
        recall.append(r)
        precision.append(p)
    except Exception as e:
        # Best-effort batch run: report the failure and move on.
        print(e)
        continue

fig = plt.figure(num=None, figsize=(12, 4), facecolor='w', edgecolor='k')
ax = fig.add_subplot(121)
ax.boxplot(precision)
ax.set_title('Precision', size=15)
ax = fig.add_subplot(122)
ax.boxplot(recall)
ax.set_title('Recall', size=15)
plt.boxplot(recall)
plt.ylabel('Recall', fontsize=18)
random_notebook_Experiments/Check Data.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 0.3.11
#     language: julia
#     name: julia-0.3
# ---

using PyPlot

include("sim.jl");     # include the simulator
include("floopMap.jl") # include mapping

nSeg=5    # number of segments
aStep=3   # angle step
aLimit=60 # angle limit
res=10    # resolution of segment
l=1/res   # set subsegment length so that segment length = 1

# +
### Begin continuous state space code
# s: 5 element column vector of joint angles
# a: 5 element column vector of action on each segment -> currently a[i] ∈ {-1, 0, 1}
# w: currently 9 element column vector of weights for value approximation

BasisSize = 9
global GoalPos, GoalThresh
GoalPos = [4.5,1]
GoalThresh = 0.3
goal=Zone(GoalPos', GoalThresh) # create zone object for goal
# -

drawZone(goal)

# Reward: constant payoff of 1000 inside the goal zone, zero elsewhere.
# (A smooth exponential alternative is kept below, commented out.)
function ContReward(s)
    eDec = 2 # exponential decay factor in the goal proximity
    EEpos = ccEnd(s)
    goalDist = norm(GoalPos - EEpos')
    (goalDist < GoalThresh) ? r = 1000 : r = 0
    #(goalDist < GoalThresh) ? r = 1000 : r = exp(-eDec*goalDist)
    # if within threshold of goal, return full reward. Otherwise, return exponential decay of distance to goal
    return r
end

# Transition model: action added directly to the joint angles.
function ContTrans(s,a) # currently deterministic, finite action space
    return s + a # can make this probabilistic by sampling from distribution
end

# Linear value-function approximation: dot product of weights and features.
function ValApprox(w, s)
    # features: EEpos, goalDist, s, const. (currently 10 elements) -> add ObstDist when obstacles added. And other bases?
    return sum(w.*BasisFun(s))
end

# +
# Feature vector: end-effector position, distance to goal, joint angles, bias.
function BasisFun(s)
    #EEpos = ccQuick(s, 1)[end,:]
    EEpos = ccEnd(s)
    goalDist = norm(GoalPos' - EEpos)
    return [EEpos'; goalDist; s'; 1]
end

# +
m = 1000            # number of state samples
w = zeros(BasisSize) # initialize weights
aSize = 243         # number of possible actions (3^5 per-joint moves)
kMax = 1            # number of samples of probabilistic transition - currently 1 because deterministic transition
γ = 0.95            # discount factor (multiplies the next-state value below)
y = zeros(m)
action = [0 0 0 0 0]
#println(size(w))
#println(size(action))

stateMat = zeros(m,5)
for i = 1:m # set up this way so can change state initialization based on trajectory following
    stateMat[i, :] = [rand(-90:90) rand(-90:90) rand(-90:90) rand(-90:90) rand(-90:90)]
end

# Fitted value iteration: regress Bellman-backup targets y onto the basis
# features A via least squares; printed norm tracks weight convergence.
for iters = 1:50
    tic()
    A = zeros(m,BasisSize)
    for i = 1:m
        q = zeros(aSize)
        state = stateMat[i,:]
        A[i,:] = BasisFun(state)
        for j = 1:aSize
            # decode flat action index j into five per-joint moves in {-1,0,1}
            action[1],action[2],action[3],action[4],action[5] = ind2sub((3,3,3,3,3),j)
            action -=2
            for k = 1:kMax
                #println(ContReward(state))
                #println(ValApprox(w,ContTrans(state,action)))
                #println(w)
                q[j] += (ContReward(state) + γ*ValApprox(w,ContTrans(state,action)))/kMax
                #println("ok")
            end
        end
        y[i] = maximum(q)
    end
    wp = (pinv(A)*y) # least-squares fit of the new weights
    println(norm(wp - w))
    w = wp
    toc()
end
# -

w

# Greedy rollout from a random start state using the learned weights.
s = [rand(-90:90) rand(-90:90) rand(-90:90) rand(-90:90) rand(-90:90)]
a = zeros(1,5)
nsteps=2000
traj = zeros(nsteps,5)
for i = 1:nsteps
    #println(norm(ccQuick(map(deg2rad, s), 1)[end,:] - [4.5 1]))
    traj[i,:] = s
    q = zeros(aSize)
    for j = 1:243
        action[1],action[2],action[3],action[4],action[5] = ind2sub((3,3,3,3,3),j)
        action -=2
        q[j] += (ContReward(s) + γ*ValApprox(w,ContTrans(s,action)))
    end
    #println(q)
    # pick the action with the highest one-step lookahead value
    a[1],a[2],a[3],a[4],a[5] = ind2sub((3,3,3,3,3),findmax(q)[2])
    a-= 2
    #println(a)
    s = ContTrans(s,a)
end

# +
# Draw the first and last arm configurations of the rollout.
(p,e)=ccArm2(traj[1, :])
drawArm(p, e)
(p,e)=ccArm2(traj[end, :])
drawArm(p, e)
drawZone(goal)
title("start and end")
xlabel("x")
ylabel("y")
xlim(-0, 7)
ylim(-3.5, 3.5)
# -

# Overlay a subsample (every nsteps/scale-th step) of intermediate
# configurations along the trajectory.
hold("on")
scale=10
for i=1:nsteps
    if i%(nsteps/scale)==0
        (p,e)=ccArm2(traj[i, :])
        drawArm(p, e)
        drawZone(goal)
        title("itr: $(i)")
        xlabel("x")
        ylabel("y")
        xlim(-0, 7)
        ylim(-3.5, 3.5)
    end
end

using PyPlot
plot(p[:,1], p[:,2], "k")
ContStateSpace.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.7 64-bit (''buddhalight'': conda)' # language: python # name: python3 # --- # # **Semantic Image Synthesis with Spatially-Adaptive Normalization** # # **Authors: <NAME>, <NAME>, <NAME>, <NAME>** # # **Original Paper**: https://arxiv.org/pdf/1903.07291.pdf # # **Official Github**: https://github.com/NVlabs/SPADE # # --- # # **Edited By <NAME> (Key Summary & Code Practice)** # # If you have any issues on this scripts, please PR to the repository below. # # **[Github: @JonyChoi - Computer Vision Paper Reviews]** https://github.com/jonychoi/Computer-Vision-Paper-Reviews # # Edited Jan 16 2022 # # --- # <img src="./imgs/figure1.png" /> # ### **1. Introduction** # <p> # Conditional image synthesis refers to the task of generating photorealistic images conditioning on certain input data. Seminal work computes the output image by # stitching pieces from a single image (e.g., Image Analogies [16]) or using an image collection [7, 14, 23, 30, 35]. # Recent methods directly learn the mapping using neural networks [3, 6, 22, 47, 48, 54, 55, 56]. The latter methods are # faster and require no external database of images. # </p> # <p> # We are interested in a specific form of conditional image synthesis, which is converting a semantic segmentation # mask to a photorealistic image. This form has a wide range # of applications such as content generation and image editing [6, 22, 48]. We refer to this form as semantic image # synthesis. In this paper, we show that the conventional network architecture [22, 48], which is built by stacking convolutional, normalization, and nonlinearity layers, is at best arXiv:1903.07291v2 [cs.CV] 5 Nov 2019 # suboptimal because their normalization layers tend to “wash # away” information contained in the input semantic masks. 
# To address the issue, we propose spatially-adaptive normalization, a conditional normalization layer that modulates the # activations using input semantic layouts through a spatiallyadaptive, learned transformation and can effectively propagate the semantic information throughout the network. # </p> # <p> # We conduct experiments on several challenging datasets # including the COCO-Stuff [4, 32], the ADE20K [58], and # the Cityscapes [9]. We show that with the help of our # spatially-adaptive normalization layer, a compact network # can synthesize significantly better results compared to several state-of-the-art methods. Additionally, an extensive ablation study demonstrates the effectiveness of the proposed # normalization layer against several variants for the semantic # image synthesis task. Finally, our method supports multimodal and style-guided image synthesis, enabling controllable, diverse outputs, as shown in Figure 1. Also, please # see our SIGGRAPH 2019 Real-Time Live demo and try our # online demo by yourself. # </p> # ### **2. Related Work** # <p> # <strong>Deep generative models</strong> can learn to synthesize images. # Recent methods include generative adversarial networks # (GANs) [13] and variational autoencoder (VAE) [28]. Our # work is built on GANs but aims for the conditional image # synthesis task. The GANs consist of a generator and a discriminator where the goal of the generator is to produce realistic images so that the discriminator cannot tell the synthesized images apart from the real ones. # </p> # <p> # <strong>Conditional image synthesis</strong> exists in many forms that differ in the type of input data. For example, class-conditional # models [3, 36, 37, 39, 41] learn to synthesize images given # category labels. Researchers have explored various models # for generating images based on text [18,44,52,55]. 
Another # widely-used form is image-to-image translation based on a # type of conditional GANs [20, 22, 24, 25, 33, 57, 59, 60], # where both input and output are images. Compared to # earlier non-parametric methods [7, 16, 23], learning-based # methods typically run faster during test time and produce # more realistic results. In this work, we focus on converting # segmentation masks to photorealistic images. We assume # the training dataset contains registered segmentation masks # and images. With the proposed spatially-adaptive normalization, our compact network achieves better results compared to leading methods. # </p> # <p> # <strong>Unconditional normalization layers</strong> have been an important component in modern deep networks and can be found # in various classifiers, including the Local Response Normalization in the AlexNet [29] and the Batch Normalization (BatchNorm) in the Inception-v2 network [21]. Other # popular normalization layers include the Instance Normalization (InstanceNorm) [46], the Layer Normalization [2], # the Group Normalization [50], and the Weight Normalization [45]. We label these normalization layers as unconditional as they do not depend on external data in contrast to # the conditional normalization layers discussed below. # </p> # <p> # <strong>Conditional normalization layers</strong> include the Conditional # Batch Normalization (Conditional BatchNorm) [11] and # Adaptive Instance Normalization (AdaIN) [19]. Both were # first used in the style transfer task and later adopted in various vision tasks [3, 8, 10, 20, 26, 36, 39, 42, 49, 54]. Different from the earlier normalization techniques, conditional normalization layers require external data and generally operate as follows. First, layer activations are normalized to zero mean and unit deviation. Then the normalized activations are denormalized by modulating the # activation using a learned affine transformation whose parameters are inferred from external data. 
For style transfer tasks [11, 19], the affine parameters are used to control # the global style of the output, and hence are uniform across # spatial coordinates. In contrast, our proposed normalization # layer applies a spatially-varying affine transformation, making it suitable for image synthesis from semantic masks. # Wang et al. proposed a closely related method for image # super-resolution [49]. Both methods are built on spatiallyadaptive modulation layers that condition on semantic inputs. While they aim to incorporate semantic information # into super-resolution, our goal is to design a generator for # style and semantics disentanglement. We focus on providing the semantic information in the context of modulating # normalized activations. We use semantic maps in different # scales, which enables coarse-to-fine generation. The reader # is encouraged to review their work for more details. # </p> # # ### **3. Semantic Image Synthesis** # <p> # Let m ∈ L # H×W be a semantic segmentation mask # where L is a set of integers denoting the semantic labels, # and H and W are the image height and width. Each entry # in m denotes the semantic label of a pixel. We aim to learn # a mapping function that can convert an input segmentation # mask m to a photorealistic image. # </p> # <p> # <table> # <tbody> # <tr> # <td> # <img src="./imgs/figure2.png" width="400" /> # </td> # </tr> # </tbody> # </table> # <strong>Spatially-adaptive denormalization.</strong> Let h # i denote the activations of the i-th layer of a deep convolutional network # for a batch of N samples. Let C # i be the number of channels in the layer. Let Hi # and Wi be the height and width # of the activation map in the layer. We propose a new conditional normalization method called the SPatially-Adaptive # (DE)normalization (1) # (SPADE). Similar to the Batch Normalization [21], the activation is normalized in the channelwise manner and then modulated with learned scale and # bias. 
Figure 2 illustrates the SPADE design. The activation value at site $(n \in N,\ c \in C^i,\ y \in H^i,\ x \in W^i)$ is
# </p>
# <table>
#     <tbody>
#         <tr>
#             <td>
#                 <img src="./imgs/equation1.png" width="400" />
#             </td>
#         </tr>
#     </tbody>
# </table>
# <p>
# where $h^i_{n,c,y,x}$ is the activation at the site before normalization, and $\mu^i_c$ and $\sigma^i_c$ are the mean and standard deviation of the activations in channel $c$:
# </p>
# <table>
#     <tbody>
#         <tr>
#             <td>
#                 <img src="./imgs/equation2.png" width="400" />
#             </td>
#             <td>
#                 <img src="./imgs/equation3.png" width="400" />
#             </td>
#         </tr>
#     </tbody>
# </table>
# <p>
# The variables $\gamma^i_{c,y,x}(\mathbf{m})$ and $\beta^i_{c,y,x}(\mathbf{m})$ in (1) are the learned modulation parameters of the normalization layer.
# In contrast to the BatchNorm [21], they depend on the input segmentation mask and vary with respect to the location $(y, x)$. We use the symbols $\gamma^i_{c,y,x}$ and $\beta^i_{c,y,x}$ to denote the functions that convert $\mathbf{m}$ to the scaling and bias values at the site $(c, y, x)$ in the $i$-th activation map. We implement the functions $\gamma^i_{c,y,x}$ and $\beta^i_{c,y,x}$ using a simple two-layer convolutional network, whose design is in the appendix.
# </p>
# <p>
# In fact, SPADE is related to, and is a generalization of, several existing normalization layers. First, replacing the segmentation mask $\mathbf{m}$ with the image class label and making the modulation parameters spatially invariant (i.e., $\gamma^i_{c,y_1,x_1} \equiv \gamma^i_{c,y_2,x_2}$ and $\beta^i_{c,y_1,x_1} \equiv \beta^i_{c,y_2,x_2}$ for any $y_1, y_2 \in \{1, 2, ..., H^i\}$ and $x_1, x_2 \in \{1, 2, ..., W^i\}$), we arrive at the form of the Conditional BatchNorm [11]. Indeed, for any spatially-invariant conditional data, our method reduces to the Conditional BatchNorm. Similarly, we can arrive at the AdaIN [19] by replacing $\mathbf{m}$ with a real image, making the modulation parameters spatially invariant, and setting $N = 1$.
As the modulation parameters are adaptive to # the input segmentation mask, the proposed SPADE is better # suited for semantic image synthesis. # </p> # <table> # <tbody> # <tr> # <td> # <img src="./imgs/figure3.png" width="400" /> # </td> # </tr> # </tbody> # </table> # <p> # <strong>SPADE generator.</strong> With the SPADE, there is no need to # feed the segmentation map to the first layer of the generator, since the learned modulation parameters have encoded # enough information about the label layout. Therefore, we # discard encoder part of the generator, which is commonly # used in recent architectures [22, 48]. This simplification results in a more lightweight network. Furthermore, similarly # to existing class-conditional generators [36,39,54], the new # generator can take a random vector as input, enabling a simple and natural way for multi-modal synthesis [20, 60]. # </p> # <p> # Figure 4 illustrates our generator architecture, which employs several ResNet blocks [15] with upsampling layers. # The modulation parameters of all the normalization layers # are learned using the SPADE. Since each residual block # operates at a different scale, we downsample the semantic # mask to match the spatial resolution. # </p> # <p> # We train the generator with the same multi-scale discriminator and loss function used in pix2pixHD [48] except that # we replace the least squared loss term [34] with the hinge # loss term [31,38,54]. We test several ResNet-based discriminators used in recent unconditional GANs [1, 36, 39] but # observe similar results at the cost of a higher GPU memory requirement. Adding the SPADE to the discriminator # also yields a similar performance. For the loss function, we # observe that removing any loss term in the pix2pixHD loss # function lead to degraded generation results. # </p> # <p> # <strong>Why does the SPADE work better?</strong> A short answer is that # it can better preserve semantic information against common # normalization layers. 
Specifically, while normalization layers such as the InstanceNorm [46] are essential pieces in # almost all the state-of-the-art conditional image synthesis # models [48], they tend to wash away semantic information # when applied to uniform or flat segmentation masks. # </p> # <img src="./imgs/figure4.png" /> # <p> # Let us consider a simple module that first applies convolution to a segmentation mask and then normalization. # Furthermore, let us assume that a segmentation mask with # a single label is given as input to the module (e.g., all the # pixels have the same label such as sky or grass). Under this # setting, the convolution outputs are again uniform, with different labels having different uniform values. Now, after we # apply InstanceNorm to the output, the normalized activation # will become all zeros no matter what the input semantic label is given. Therefore, semantic information is totally lost. # This limitation applies to a wide range of generator architectures, including pix2pixHD and its variant that concatenates the semantic mask at all intermediate layers, as long # as a network applies convolution and then normalization to # the semantic mask. In Figure 3, we empirically show this is # precisely the case for pix2pixHD. Because a segmentation # mask consists of a few uniform regions in general, the issue # of information loss emerges when applying normalization. # </p> # <p> # In contrast, the segmentation mask in the SPADE Generator is fed through spatially adaptive modulation without # normalization. Only activations from the previous layer are # normalized. Hence, the SPADE generator can better preserve semantic information. It enjoys the benefit of normalization without losing the semantic input information. # </p> # <p> # <strong>Multi-modal synthesis.</strong> By using a random vector as the # input of the generator, our architecture provides a simple # way for multi-modal synthesis [20, 60]. 
Namely, one can # attach an encoder that processes a real image into a random # vector, which will be then fed to the generator. The encoder # and generator form a VAE [28], in which the encoder tries # to capture the style of the image, while the generator combines the encoded style and the segmentation mask information via the SPADEs to reconstruct the original image. The # encoder also serves as a style guidance network at test time # to capture the style of target images, as used in Figure 1. # For training, we add a KL-Divergence loss term [28]. # </p> # # --- # (1) Conditional normalization [11, 19] uses external data to denormalize # the normalized activations; i.e., the denormalization part is conditional. # # ### **4. Experiments** # <p> # <strong>Implementation details.</strong> We apply the Spectral Norm [38] # to all the layers in both generator and discriminator. The learning rates for the generator and discriminator are # 0.0001 and 0.0004, respectively [17]. We use the ADAM # solver [27] with β1 = 0 and β2 = 0.999. All the experiments are conducted on an NVIDIA DGX1 with 8 32GB # V100 GPUs. We use synchronized BatchNorm, i.e., these # statistics are collected from all the GPUs. # </p> # <strong>Datasets.</strong> We conduct experiments on several datasets. # <ul> # <li>COCO-Stuff [4] is derived from the COCO dataset [32]. # It has 118, 000 training images and 5, 000 validation images captured from diverse scenes. It has 182 semantic # classes. Due to its vast diversity, existing image synthesis models perform poorly on this dataset.</li> # <li>ADE20K [58] consists of 20, 210 training and 2, 000 validation images. Similarly to the COCO, the dataset contains challenging scenes with 150 semantic classes.</li> # <li>ADE20K-outdoor is a subset of the ADE20K dataset that # only contains outdoor scenes, used in Qi et al. [43].</li> # <li>Cityscapes dataset [9] contains street scene images in # German cities. 
The training and validation set sizes are # 3, 000 and 500, respectively. Recent work has achieved # photorealistic semantic image synthesis results [43, 47] # on the Cityscapes dataset.</li> # <li>Flickr Landscapes. We collect 41, 000 photos from # Flickr and use 1, 000 samples for the validation set. To # avoid expensive manual annotation, we use a well-trained # DeepLabV2 [5] to compute input segmentation masks. # </li> # </ul> # <p> # We train the competing semantic image synthesis methods # on the same training set and report their results on the same # validation set for each dataset. # </p> # <img src="./imgs/figure5.png" /> # <img src="./imgs/figure6.png" /> # <img src="./imgs/table1.png" /> # <p> # Performance metrics. We adopt the evaluation protocol # from previous work [6, 48]. Specifically, we run a semantic segmentation model on the synthesized images and compare how well the predicted segmentation mask matches the # ground truth input. Intuitively, if the output images are realistic, a well-trained semantic segmentation model should # be able to predict the ground truth label. For measuring the # segmentation accuracy, we use both the mean Intersection-over-Union (mIoU) and the pixel accuracy (accu). We use # the state-of-the-art segmentation networks for each dataset: # DeepLabV2 [5, 40] for COCO-Stuff, UperNet101 [51] for # ADE20K, and DRN-D-105 [53] for Cityscapes. In addition to the mIoU and the accu segmentation performance # metrics, we use the Frechet Inception Distance (FID) [ ´ 17] # to measure the distance between the distribution of synthesized results and the distribution of real images. # </p> # <p> # <strong>Baselines.</strong> We compare our method with 3 leading semantic image synthesis models: the pix2pixHD model [48], # the cascaded refinement network (CRN) [6], and the semiparametric image synthesis method (SIMS) [43]. The # pix2pixHD is the current state-of-the-art GAN-based conditional image synthesis framework. 
The CRN uses a deep # network that repeatedly refines the output from low to high # resolution, while the SIMS takes a semi-parametric approach that composites real segments from a training set and # refines the boundaries. Both the CRN and SIMS are mainly # trained using image reconstruction loss. For a fair comparison, we train the CRN and pix2pixHD models using the # implementations provided by the authors. As image synthesis using the SIMS requires many queries to the training dataset, it is computationally prohibitive for a large dataset # such as the COCO-stuff and the full ADE20K. Therefore, # we use the results provided by the authors when available. # </p> # <img src="./imgs/figure7.png" /> # <p> # <strong>Quantitative comparisons.</strong> As shown in Table 1, our # method outperforms the current state-of-the-art methods by # a large margin in all the datasets. For the COCO-Stuff, our # method achieves an mIoU score of 35.2, which is about 1.5 # times better than the previous leading method. Our FID # is also 2.2 times better than the previous leading method. # We note that the SIMS model produces a lower FID score # but has poor segmentation performances on the Cityscapes # dataset. This is because the SIMS synthesizes an image by # first stitching image patches from the training dataset. As # using the real image patches, the resulting image distribution can better match the distribution of real images. However, because there is no guarantee that a perfect query (e.g., # a person in a particular pose) exists in the dataset, it tends # to copy objects that do not match the input segments. # </p> # <p> # <strong>Qualitative results.</strong> In Figures 5 and 6, we provide qualitative comparisons of the competing methods. We find that # our method produces results with much better visual quality # and fewer visible artifacts, especially for diverse scenes in # the COCO-Stuff and ADE20K dataset. 
When the training # dataset size is small, the SIMS model also renders images # with good visual quality. However, the depicted content # often deviates from the input segmentation mask (e.g., the # shape of the swimming pool in the second row of Figure 6). # </p> # <table> # <tbody> # <tr> # <td> # <img src="./imgs/table2.png" width="400" /> # </td> # </tr> # </tbody> # </table> # <p> # In Figures 7 and 8, we show more example results from # the Flickr Landscape and COCO-Stuff datasets. The proposed method can generate diverse scenes with high image # fidelity. More results are included in the appendix. # </p> # <p> # <strong>Human evaluation.</strong> We use the Amazon Mechanical Turk # (AMT) to compare the perceived visual fidelity of our # method against existing approaches. Specifically, we give # the AMT workers an input segmentation mask and two # synthesis outputs from different methods and ask them to # choose the output image that looks more like a corresponding image of the segmentation mask. The workers are given # unlimited time to make the selection. For each comparison, # we randomly generate 500 questions for each dataset, and # each question is answered by 5 different workers. For quality control, only workers with a lifetime task approval rate # greater than 98% can participate in our study. # </p> # <img src="./imgs/figure8.png" /> # <table> # <tbody> # <tr> # <td> # <img src="./imgs/table3.png" width="400" /> # </td> # </tr> # </tbody> # </table> # <p> # Table 2 shows the evaluation results. We find that users strongly favor our results on all the datasets, especially on # the challenging COCO-Stuff and ADE20K datasets. For the # Cityscapes, even when all the competing methods achieve # high image fidelity, users still prefer our results. 
# </p> # <p> # <strong>Effectiveness of the SPADE.</strong> For quantifying importance # of the SPADE, we introduce a strong baseline called # pix2pixHD++, which combines all the techniques we find # useful for enhancing the performance of pix2pixHD except # the SPADE. We also train models that receive the segmentation mask input at all the intermediate layers via feature # concatenation in the channel direction, which is termed as # pix2pixHD++ w/ Concat. Finally, the model that combines the strong baseline with the SPADE is denoted as # pix2pixHD++ w/ SPADE. # </p> # <table> # <tbody> # <tr> # <td> # <img src="./imgs/table4.png" width="400" /> # </td> # </tr> # </tbody> # </table> # <p> # As shown in Table 3, the architectures with the proposed # SPADE consistently outperforms its counterparts, in both # the decoder-style architecture described in Figure 4 and # more traditional encoder-decoder architecture used in the # pix2pixHD. We also find that concatenating segmentation # masks at all intermediate layers, a reasonable alternative # to the SPADE, does not achieve the same performance as # SPADE. Furthermore, the decoder-style SPADE generator # works better than the strong baselines even with a smaller # number of parameters. # </p> # <img src="./imgs/figure9.png" /> # <p> # <strong>Variations of SPADE generator.</strong> Table 4 reports the performance of several variations of our generator. First, we # compare two types of input to the generator where one is the # random noise while the other is the downsampled segmentation map. We find that both of the variants render similar # performance and conclude that the modulation by SPADE # alone provides sufficient signal about the input mask. Second, we vary the type of parameter-free normalization layers before applying the modulation parameters. We observe # that the SPADE works reliably across different normalization methods. 
Next, we vary the convolutional kernel size # acting on the label map, and find that kernel size of 1x1 # hurts performance, likely because it prohibits utilizing the # context of the label. Lastly, we modify the capacity of the # generator by changing the number of convolutional filters. # We present more variations and ablations in the appendix. # </p> # <p> # <strong>Multi-modal synthesis.</strong> In Figure 9, we show the multimodal image synthesis results on the Flickr Landscape # dataset. For the same input segmentation mask, we sample different noise inputs to achieve different outputs. More # results are included in the appendix. # </p> # <p> # <strong>Semantic manipulation and guided image synthesis.</strong> In # Figure 1, we show an application where a user draws different segmentation masks, and our model renders the corresponding landscape images. Moreover, our model allows # users to choose an external style image to control the global # appearances of the output image. We achieve it by replacing the input noise with the embedding vector of the style # image computed by the image encoder. # </p> # ### **5. Conclusion** # <p> # We have proposed the spatially-adaptive normalization, # which utilizes the input semantic layout while performing # the affine transformation in the normalization layers. The # proposed normalization leads to the first semantic image # synthesis model that can produce photorealistic outputs for # diverse scenes including indoor, outdoor, landscape, and # street scenes. We further demonstrate its application for # multi-modal synthesis and guided image synthesis. # </p> # <p> # <strong>Acknowledgments.</strong> We thank <NAME>, Bryan # Catanzaro, <NAME>, and <NAME> for insightful advice. We thank <NAME>, <NAME>, and Brad # Nemire for their help in constructing the demo apps. Taesung Park contributed to the work during his internship at # NVIDIA. His Ph.D. is supported by a Samsung Scholarship. 
# </p> # ### **References** # # [1] <NAME>, <NAME>, and <NAME>. Wasserstein generative adversarial networks. In International Conference on # Machine Learning (ICML), 2017. 3 # # [2] <NAME>, <NAME>, and <NAME>. Layer normalization. # arXiv preprint arXiv:1607.06450, 2016. 2 # # [3] <NAME>, <NAME>, and <NAME>. Large scale gan # training for high fidelity natural image synthesis. In International Conference on Learning Representations (ICLR), # 2019. 1, 2 # # [4] <NAME>, <NAME>, and <NAME>. Coco-stuff: Thing and # stuff classes in context. In IEEE Conference on Computer # Vision and Pattern Recognition (CVPR), 2018. 2, 4 # # [5] <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. Deeplab: Semantic image segmentation with # deep convolutional nets, atrous convolution, and fully connected crfs. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 40(4):834–848, 2018. 4, 5 # # [6] <NAME> and <NAME>. Photographic image synthesis with # cascaded refinement networks. In IEEE International Conference on Computer Vision (ICCV), 2017. 1, 4, 5, 13, 14, # 15, 16, 17, 18 # # [7] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. # Sketch2photo: internet image montage. ACM Transactions # on Graphics (TOG), 28(5):124, 2009. 1, 2 # # [8] <NAME>, <NAME>, <NAME>, and <NAME>. On self modulation for generative adversarial networks. In International # Conference on Learning Representations, 2019. 2 # # [9] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, and <NAME>. The # cityscapes dataset for semantic urban scene understanding. # In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 2, 4 # # [10] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # and <NAME>. Modulating early visual processing # by language. In Advances in Neural Information Processing Systems, 2017. 2 # # [11] <NAME>, <NAME>, and <NAME>. A learned representation for artistic style. In International Conference on # Learning Representations (ICLR), 2016. 
2, 3 # # [12] <NAME> and <NAME>. Understanding the difficulty of # training deep feedforward neural networks. In Proceedings # of the thirteenth international conference on artificial intelligence and statistics, pages 249–256, 2010. 12, 13 # # [13] <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, and <NAME>. Generative adversarial nets. In Advances in Neural Information # Processing Systems, 2014. 2 # # [14] <NAME> and <NAME>. Scene completion using millions of # photographs. In ACM SIGGRAPH, 2007. 1 # # [15] <NAME>, <NAME>, <NAME>, and <NAME>. Deep residual learning # for image recognition. In IEEE Conference on Computer # Vision and Pattern Recognition (CVPR), 2016. 3 # # [16] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. # Salesin. Image analogies. 2001. 1, 2 # # [17] <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. GANs trained by a two time-scale update rule # converge to a local Nash equilibrium. In Advances in Neural # Information Processing Systems, 2017. 4, 5, 13 # # [18] <NAME>, <NAME>, <NAME>, and <NAME>. Inferring semantic layout for hierarchical text-to-image synthesis. In IEEE # Conference on Computer Vision and Pattern Recognition # (CVPR), 2018. 2 # # [19] <NAME> and <NAME>. Arbitrary style transfer in realtime with adaptive instance normalization. In IEEE International Conference on Computer Vision (ICCV), 2017. 2, # 3 # # [20] <NAME>, <NAME>, <NAME>, and <NAME>. Multimodal # unsupervised image-to-image translation. European Conference on Computer Vision (ECCV), 2018. 2, 3, 4 # # [21] <NAME> and <NAME>. Batch normalization: Accelerating # deep network training by reducing internal covariate shift. # In International Conference on Machine Learning (ICML), # 2015. 2, 3 # # [22] <NAME>, <NAME>, <NAME>, and <NAME>. Image-toimage translation with conditional adversarial networks. In # IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2, 3, 11, 12 # # [23] M. Johnson, <NAME>, <NAME>, <NAME>, # <NAME>, and <NAME>. 
Semantic photo synthesis. In # Computer Graphics Forum, volume 25, pages 407–413, # 2006. 1, 2 # # [24] <NAME>, <NAME>, <NAME>, and <NAME>. Learning # to generate images of outdoor scenes from attributes and semantic layouts. arXiv preprint arXiv:1612.00215, 2016. 2 # # [25] <NAME>, <NAME>, <NAME>, and <NAME>. Manipulating attributes of natural scenes via hallucination. arXiv # preprint arXiv:1808.07413, 2018. 2 # # [26] <NAME>, <NAME>, and <NAME>. A style-based generator # architecture for generative adversarial networks. In IEEE # Conference on Computer Vision and Pattern Recognition # (CVPR), 2019. 2 # # [27] <NAME> and <NAME>. Adam: A method for stochastic # optimization. In International Conference on Learning Representations (ICLR), 2015. 4 # # [28] <NAME> and <NAME>. Auto-encoding variational # bayes. In International Conference on Learning Representations (ICLR), 2014. 2, 4, 11, 12 # # [29] <NAME>, <NAME>, and <NAME>. Imagenet # classification with deep convolutional neural networks. In # Advances in Neural Information Processing Systems, 2012. # 2 # # [30] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # and <NAME>minisi. Photo clip art. In ACM transactions on # graphics (TOG), volume 26, page 3. ACM, 2007. 1 # # [31] <NAME> and <NAME>. Geometric gan. arXiv preprint # arXiv:1705.02894, 2017. 3, 11 # # [32] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Microsoft coco: Com- ´ # mon objects in context. In European Conference on Computer Vision (ECCV), 2014. 2, 4 # # [33] <NAME>, <NAME>, and <NAME>. Unsupervised image-toimage translation networks. In Advances in Neural Information Processing Systems, 2017. 2 # # [34] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Least squares generative adversarial networks. In IEEE # International Conference on Computer Vision (ICCV), 2017. # 3, 11 # # [35] <NAME>, <NAME> and <NAME>. Photosketch: A sketch based image query and compositing system. In ACM SIGGRAPH 2009 Talk Program, 2009. 
1 # # [36] <NAME>, <NAME>, and <NAME>. Which training # methods for gans do actually converge? In International # Conference on Machine Learning (ICML), 2018. 2, 3, 11 # # [37] <NAME> and <NAME>. Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784, 2014. 2 # # [38] <NAME>, <NAME>, <NAME>, and <NAME>. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations (ICLR), # 2018. 3, 4, 11 # # [39] <NAME> and <NAME>. cGANs with projection discriminator. In International Conference on Learning Representations (ICLR), 2018. 2, 3, 11 # # [40] <NAME>. Deeplab-pytorch. https://github. # com/kazuto1011/deeplab-pytorch, 2018. 5 # # [41] <NAME>, <NAME>, and <NAME>. Conditional image synthesis with auxiliary classifier GANs. In International Conference on Machine Learning (ICML), 2017. 2 # # [42] <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. Learning visual reasoning without strong # priors. In International Conference on Machine Learning # (ICML), 2017. 2 # # [43] <NAME>, <NAME>, <NAME>, and <NAME>. Semi-parametric image synthesis. In IEEE Conference on Computer Vision and # Pattern Recognition (CVPR), 2018. 4, 5, 13, 17, 18 # # [44] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. Generative adversarial text to image synthesis. In International Conference on Machine Learning (ICML), 2016. # 2 # # [45] <NAME> and <NAME>. Weight normalization: A # simple reparameterization to accelerate training of deep neural networks. In Advances in Neural Information Processing # Systems, 2016. 2 # # [46] <NAME>, <NAME>, and <NAME>. Instance normalization: The missing ingredient for fast stylization. arxiv # 2016. arXiv preprint arXiv:1607.08022, 2016. 2, 3 # # [47] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # and <NAME>. Video-to-video synthesis. In Advances in # Neural Information Processing Systems, 2018. 1, 4 # # [48] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. 
High-resolution image synthesis and semantic # manipulation with conditional gans. In IEEE Conference on # Computer Vision and Pattern Recognition (CVPR), 2018. 1, # 3, 4, 5, 7, 11, 12, 13, 14, 15, 16, 17, 18 # # [49] <NAME>, <NAME>, <NAME>, and <NAME>. Recovering realistic texture in image super-resolution by deep spatial # feature transform. In Proceedings of the IEEE Conference on # Computer Vision and Pattern Recognition, pages 606–615, # 2018. 2 # # [50] <NAME> and <NAME>. Group normalization. In European Conference on Computer Vision (ECCV), 2018. 2 # # [51] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Unified perceptual parsing for scene understanding. In European Conference on Computer Vision (ECCV), 2018. 5 # # [52] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. Attngan: Fine-grained text to image generation with # attentional generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), # 2018. 2 # # [53] <NAME>, <NAME>, and <NAME>. Dilated residual networks. In IEEE Conference on Computer Vision and Pattern # Recognition (CVPR), 2017. 5 # # [54] <NAME>, <NAME>, <NAME>, and <NAME>. Selfattention generative adversarial networks. In International # Conference on Machine Learning (ICML), 2019. 1, 2, 3, 11 # # [55] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. Stackgan: Text to photo-realistic image synthesis with stacked generative adversarial networks. In IEEE # International Conference on Computer Vision (ICCV), 2017. # 1, 2 # # [56] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # and <NAME>. Stackgan++: Realistic image synthesis # with stacked generative adversarial networks. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), # 2018. 1 # # [57] <NAME>, <NAME>, <NAME>, and <NAME>. Image generation # from layout. In IEEE Conference on Computer Vision and # Pattern Recognition (CVPR), 2019. 2 # # [58] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and # <NAME>. 
Scene parsing through ade20k dataset. In # IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2, 4 # # [59] <NAME>, <NAME>, <NAME>, and <NAME>. Unpaired imageto-image translation using cycle-consistent adversarial networks. In IEEE International Conference on Computer Vision (ICCV), 2017. 2 # # [60] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, and <NAME>. Toward multimodal image-toimage translation. In Advances in Neural Information Processing Systems, 2017. 2, 3, 4 # # ### **A. Additional Implementation Details** # <p> # <strong>Generator.</strong> The architecture of the generator consists of a # series of the proposed SPADE ResBlks with nearest neighbor upsampling. We train our network using 8 GPUs simultaneously and use the synchronized version of the BatchNorm. We apply the Spectral Norm [38] to all the convolutional layers in the generator. The architectures of the proposed SPADE and SPADE ResBlk are given in Figure 10 # and Figure 11, respectively. The architecture of the generator is shown in Figure 12. # </p> # <p> # <strong>Discriminator.</strong> The architecture of the discriminator follows the one used in the pix2pixHD method [48], which # uses a multi-scale design with the InstanceNorm (IN). The # only difference is that we apply the Spectral Norm to all the convolutional layers of the discriminator. The details of the # discriminator architecture is shown in Figure 13. # <table> # <tbody> # <tr> # <td> # <img src="./imgs/figure10.png" width="500" /> # </td> # <td> # <img src="./imgs/figure11.png" width="500" /> # </td> # <td> # <img src="./imgs/figure12.png" width="500" /> # </td> # </tr> # </tbody> # </table> # </p> # <p> # <strong>Image Encoder.</strong> The image encoder consists of 6 stride-2 # convolutional layers followed by two linear layers to produce the mean and variance of the output distribution as # shown in Figure 14. 
# </p> # <strong>Learning objective.</strong> We use the learning objective function # in the pix2pixHD work [48] except that we replace its LSGAN loss [34] term with the Hinge loss term [31, 38, 54]. # We use the same weighting among the loss terms in the objective function as that in the pix2pixHD work. # </p> # When training the proposed framework with the image # encoder for multi-modal synthesis and style-guided image # synthesis, we include a KL Divergence loss: # </p> # <table> # <tbody> # <tr> # <td> # <img src="./imgs/equation4.png" width="400" /> # </td> # </tr> # </tbody> # </table> # <p> # where the prior distribution p(z) is a standard Gaussian distribution and the variational distribution q is fully determined by a mean vector and a variance vector [28]. We # use the reparamterization trick [28] for back-propagating # the gradient from the generator to the image encoder. The # weight for the KL Divergence loss is 0.05. # </p> # <p> # In Figure 15, we overview the training data flow. The # image encoder encodes a real image to a mean vector and # a variance vector. They are used to compute the noise input to the generator via the reparameterization trick [28]. # The generator also takes the segmentation mask of the input image as input with the proposed SPADE ResBlks. The discriminator takes concatenation of the segmentation mask and the output image from the generator as input and aims # to classify that as fake. # <table> # <tbody> # <tr> # <td> # <img src="./imgs/figure13.png" width="500" /> # </td> # <td> # <img src="./imgs/figure14.png" width="500" /> # </td> # </tr> # </tbody> # </table> # <strong>Training details.</strong> We perform 200 epochs of training on the # Cityscapes and ADE20K datasets, 100 epochs of training # on the COCO-Stuff dataset, and 50 epochs of training on the # Flickr Landscapes dataset. The image sizes are 256 × 256, # except the Cityscapes at 512 × 256. 
We linearly decay the # learning rate to 0 from epoch 100 to 200 for the Cityscapes # and ADE20K datasets. The batch size is 32. We initialize # the network weights using thes Glorot initialization [12]. # <table> # <tbody> # <tr> # <td> # <img src="./imgs/figure15.png" width="500" /> # </td> # </tr> # </tbody> # </table> # ### **B. Additional Ablation Study** # # <table> # <tbody> # <tr> # <td> # <img src="./imgs/table5.png" width="500" /> # </td> # </tr> # </tbody> # </table> # <p> # Table 5 provides additional ablation study results analyzing the contribution of individual components in the proposed method. We first find that both of the perceptual loss # and GAN feature matching loss inherited from the learning objective function of the pix2pixHD [48] are important. Removing any of them leads to a performance drop. # We also find that increasing the depth of the discriminator # by inserting one more convolutional layer to the top of the # pix2pixHD discriminator does not improve the results. # </p> # <p> # In Table 5, we also analyze the effectiveness of each # component used in our strong baseline, the pix2pixHD++ # method, derived from the pix2pixHD method. We # found that the Spectral Norm, synchronized BatchNorm, # TTUR [17], and the hinge loss objective all contribute to # the performance boost. Adding the SPADE to the strong # baseline further improves the performance. Note that the # pix2pixHD++ w/o Sync BatchNorm and w/o Spectral Norm # still differs from the pix2pixHD in that it uses the hinge loss # objective, TTUR, a large batch size, and the Glorot initialization [12]. # </p> # ### **C. Additional Results** # <p> # In Figure 16, 17, and 18, we show additional synthesis results from the proposed method on the COCO-Stuff # and ADE20K datasets with comparisons to those from the # CRN [6] and pix2pixHD [48] methods. 
# </p> # <p> # In Figure 19 and 20, we show additional synthesis results from the proposed method on the ADE20K-outdoor # and Cityscapes datasets with comparison to those from the # CRN [6], SIMS [43], and pix2pixHD [48] methods. # </p> # <p> # In Figure 21, we show additional multi-modal synthesis # results from the proposed method. As sampling different z # from a standard multivariate Gaussian distribution, we synthesize images of diverse appearances. # </p> # <p> # In the accompanying video, we demonstrate our semantic image synthesis interface. We show how a user can create photorealistic landscape images by painting semantic # labels on a canvas. We also show how a user can synthesize images of diverse appearances for the same semantic # segmentation mask as well as transfer the appearance of a # provided style image to the synthesized one. # </p> # <img src="./imgs/figure16.png" /> # <img src="./imgs/figure17.png" /> # <img src="./imgs/figure18.png" /> # <img src="./imgs/figure19.png" /> # <img src="./imgs/figure20.png" /> # <img src="./imgs/figure21.png" /> #
Image to Image Translation/Style Transfer/Semantic Image Synthesis with Spatially-Adaptive Normalization/Semantic-Image-Synthesis_Review.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/microprediction/optimizer-notebooks/blob/main/bayesian_optimization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="54DpAhQ1i2rf"
# !pip install bayesian-optimization
# !pip install humpday

# + id="PX5u9DLxkU97"
from humpday.objectives.classic import shekel_on_cube
from bayes_opt import BayesianOptimization

# + id="Ru6VKFYQknMr"
def bayesopt_cube(objective, n_trials, n_dim, with_count):
    """Minimize ``objective`` over the unit cube [0, 1]^n_dim via Bayesian optimization.

    Parameters
    ----------
    objective : callable taking a list of ``n_dim`` floats in [0, 1], returning a float
        The function to minimize.
    n_trials : int
        Total objective evaluations requested (5 random warm-up points, the rest guided).
    n_dim : int
        Dimensionality of the search cube.
    with_count : bool
        When True, also return the number of objective evaluations actually made.

    Returns
    -------
    (best_val, best_x) or (best_val, best_x, feval_count)
    """
    # Closure-local counter instead of a module-level global: keeps the function
    # reentrant and avoids leaking mutable state into the module namespace.
    feval_count = 0

    # bayes_opt names each coordinate; use u0..u{n_dim-1} on the unit interval.
    pbounds = {'u' + str(i): (0., 1.) for i in range(n_dim)}

    def _neg_objective(**kwargs) -> float:
        # bayes_opt maximizes, so negate the objective to minimize it.
        nonlocal feval_count
        feval_count += 1
        u = [kwargs['u' + str(i)] for i in range(n_dim)]
        return -objective(u)

    optimizer = BayesianOptimization(
        f=_neg_objective,
        pbounds=pbounds,
        verbose=0,
        random_state=1,
    )
    optimizer.maximize(
        init_points=5,
        n_iter=n_trials - 5,
    )
    # Undo the negation and unpack the best coordinates in index order.
    best_val = -optimizer.max['target']
    best_x = [optimizer.max['params']['u' + str(i)] for i in range(n_dim)]
    return (best_val, best_x, feval_count) if with_count else (best_val, best_x)


# + colab={"base_uri": "https://localhost:8080/"} id="jyEmLE2QnKE7" outputId="2868ba84-3a69-4870-8a9f-ddd824ee78df"
bayesopt_cube(objective=shekel_on_cube, n_trials=50, n_dim=5, with_count=True)
bayesian_optimization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="A1o3eopSJd0P"
# # Overview of Ontario's Covid-19 Spread
# > Insights into the Covid-19 spread in Ontario, Canada.
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [jupyter]
# - image: images/ontario_map.png

# + id="kl3afzQUJd0V" colab={"base_uri": "https://localhost:8080/"} outputId="e04306c2-0dbc-4c92-95fc-8b6023179932"
#hide
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
import altair as alt
from IPython.display import HTML, display
import pytz
import ipywidgets as widgets
from datetime import datetime, timedelta, timezone
print('Libraries successfully imported.')

# + id="GQkoatELJd0Y" colab={"base_uri": "https://localhost:8080/"} outputId="274a8eff-4739-4189-ebbb-9794a692789e"
#hide
# #%%time
# Three Ontario open-data feeds: confirmed positive cases (per-case rows),
# province-wide testing totals, and daily per-PHU changes.
url = 'https://data.ontario.ca/dataset/f4112442-bdc8-45d2-be3c-12efae72fb27/resource/455fd63b-603d-4608-8216-7d8647f43350/download/conposcovidloc.csv'
url_2 = 'https://data.ontario.ca/dataset/f4f86e54-872d-43f8-8a86-3892fd3cb5e6/resource/ed270bb8-340b-41f9-a7c6-e8ef587e6d11/download/covidtesting.csv'
url_3 = 'https://data.ontario.ca/dataset/f4f86e54-872d-43f8-8a86-3892fd3cb5e6/resource/8a88fe6d-d8fb-41a3-9d04-f0550a44999f/download/daily_change_in_cases_by_phu.csv'
#geo = gpd.read_file('https://data.ontario.ca/dataset/f4112442-bdc8-45d2-be3c-12efae72fb27/resource/4f39b02b-47fe-4e66-95b6-e6da879c6910/download/conposcovidloc.geojson')
src_conpos = pd.read_csv(url, index_col=0, parse_dates=['Accurate_Episode_Date', 'Case_Reported_Date', 'Test_Reported_Date', 'Specimen_Date']).reset_index()
src_testing = pd.read_csv(url_2, index_col=0, parse_dates=['Reported Date']).reset_index()
src_daily = pd.read_csv(url_3, index_col=0, parse_dates=['Date']).reset_index()
# duplicate entry for December 5th, 2020 causing error
src_testing.drop_duplicates(inplace=True)
print('Source files successfully loaded.')

# + id="RUXw_HmxJd0Z" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="aed5b2b1-32b1-40b2-b805-09e71537cdf6"
#hide
last_date = src_testing['Reported Date'].max()
init_date = src_conpos['Accurate_Episode_Date'].min()
display(HTML("Ontario data set last updated on: " + last_date.strftime("%x")))

# + id="T0Sa7wOsJd0a" colab={"base_uri": "https://localhost:8080/", "height": 209} outputId="b7154714-1672-43e8-e542-c7d623857445"
#hide_input
src_testing[src_testing['Confirmed Negative'] > 0].tail() # no confirmed negatives after March 29th 2020
# latest day's totals vs the day before, for the headline deltas
testing_latest = src_testing[src_testing['Reported Date'] == src_testing['Reported Date'].max()]
# get the previous day results
testing_delta1 = src_testing[src_testing['Reported Date'] == src_testing['Reported Date'].max() - timedelta(days=1)]
# get the daily percentage +/-
dailypct = src_testing.join(src_testing[['Total Cases', 'Resolved', 'Confirmed Positive', 'Deaths']].pct_change().add_suffix('_pct'))
dailypct = dailypct[dailypct['Reported Date'] == dailypct['Reported Date'].max()]

# apply a percentage format; conventional formatting method throws an error
def percentage_delta(raw):
    """Format a pct-change Series as a signed percentage string (e.g. '+1.23%').

    Relies on str()-printing the mapped Series and taking the second whitespace
    token — assumes `raw` holds a single row — TODO confirm against callers.
    """
    # pctfmt = raw.map('{:+.2%}'.format)
    # split = str(pctfmt).split()
    formatted_pct = str(raw.map('{:+.2%}'.format)).split()[1]
    return formatted_pct

# Headline "number plate": confirmed / resolved / active / deaths with
# day-over-day deltas, built as one concatenated HTML string.
# original background colour code: #504e4e, secondary: #585858
display(HTML(
    "<div id='number-plate' style = 'background-color: #ececec; padding: 30px; text-align: center; marginTop: 1%; marginBottom: 1%;'>" +
    "<p style='color: black; font-size: 32px'> Ontario Covid-19 Cases to Date</p>"
    "<div id='confirmed' style = 'display: inline-block'> <span style='color: black; font-size: 23px;'> Confirmed: " + str('{:,.0f}'.format(int(testing_latest['Total Cases']))) + "</span>" +
    "<br> <span style='color: black; font-size:15px; '> " + str('{0:+,d}'.format(int(testing_latest['Total Cases']) - (int(testing_delta1['Total Cases'])))) + ' (' + percentage_delta(dailypct['Total Cases_pct']) + ')' + "</span>" + "</div>" +
    "<div id='resolved' style = 'display: inline-block'> <span style='color: #228b22; font-size: 23px; margin-left: 20px;'> Resolved: " + str('{:,.0f}'.format(int(testing_latest['Resolved']))) + "</span>" +
    "<br> <span style='color: black; font-size:15px; '>" + str('{0:+,d}'.format(int(testing_latest['Resolved']) - (int(testing_delta1['Resolved'])))) + ' (' + percentage_delta(dailypct['Resolved_pct']) + ')' + "</span>" + "</div>" +
    "<div id='active' style = 'display: inline-block'> <span style='color: orange; font-size: 23px; margin-left: 20px;'> Active: " + str('{:,.0f}'.format(int(testing_latest['Confirmed Positive']))) +"</span>" +
    "<br> <span style='color: black; font-size:15px; '>" + str('{0:+,d}'.format(int(testing_latest['Confirmed Positive']) - (int(testing_delta1['Confirmed Positive'])))) + ' (' + percentage_delta(dailypct['Confirmed Positive_pct']) + ')' + "</span>" + "</div>" +
    "<div id='deaths' style = 'display: inline-block'> <span style='color: red; font-size: 23px; margin-left: 20px;'> Deaths: " + str('{:,.0f}'.format(int(testing_latest['Deaths']))) + "</span>" +
    "<br> <span style='color: black; font-size:15px; '>" + str('{0:+,d}'.format(int(testing_latest['Deaths']) - (int(testing_delta1['Deaths'])))) + ' (' + percentage_delta(dailypct['Deaths_pct']) + ')' + "</span>" + "</div>" +
    "<br><div id='rates' style = 'display: block; marginTop: 1%;'> " +
    "<div id='deathrate' style = 'display: inline-block'> <span style='color: black;'> Current Death Rate: " + "</span>" + "<span style = 'color: red'>" + str((testing_latest['Deaths'] / testing_latest['Total Cases']).map('{:.2%}'.format)).split()[1] + "</span>" + "</div>" +
    "<div id='survivalrate' style = 'display:inline-block'> <span style='color: black; margin-left: 20px;'> Current Survival Rate: " + "</span>" + "<span style = 'color: #228b22'>" + str(((testing_latest['Total Cases'] - testing_latest['Deaths']) / testing_latest['Total Cases']).map('{:.2%}'.format)).split()[1] + "</span>" + "</div>" +
    "</div>" +
    "<span style='color: black; font-size: 11px;'> Ontario data last updated: " + str(testing_latest['Reported Date'].max().strftime('%Y-%m-%d')) + ", Refreshed on: " + datetime.now(pytz.timezone('America/New_York') ).strftime("%Y-%m-%d at %H:%M") + "</span>" +
    "</div>"
))

# + id="51i4OSwLJd0c" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="55265d70-ab77-4fd4-8d61-bd72b6692f00"
#hide
src_conpos.head()

# + id="E0gdvoKgJd0e" colab={"base_uri": "https://localhost:8080/"} outputId="e3b77ae0-d5df-4fdd-fea8-63f45583b77e"
#hide
src_conpos['Case_AcquisitionInfo'].unique()
src_conpos['Outcome1'].unique()

# + id="SsYKtaFxJd0f" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="298089b6-3b9c-4f6f-f4c2-018e94c98794"
#hide
# per-day, per-PHU case counts (row count of per-case records)
src_conpos.groupby(["Accurate_Episode_Date","Reporting_PHU", "Reporting_PHU_City","Reporting_PHU_Latitude", "Reporting_PHU_Longitude"]) \
    .count()["Row_ID"] \
    .reset_index() \
    .rename(columns={"Row_ID" : "Cases", "Reporting_PHU_Latitude": "Latitude", "Reporting_PHU_Longitude": "Longitude"}) \
    .sort_values(by = ["Reporting_PHU", "Accurate_Episode_Date"]).tail()

# + id="nEot2RaJJd0g" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="b652c4c0-00db-4a01-86e1-74ddddd3d2da"
#hide
# create dataframe of all active cases in Ontario by region since Covid-19 inception
# Note: Brampton and Caledon not included in PHU Cities; use Region instead
active = src_conpos[src_conpos['Outcome1'] == "Not Resolved"]
active = active.groupby(["Reporting_PHU", "Reporting_PHU_City","Reporting_PHU_Latitude", "Reporting_PHU_Longitude"]) \
    .count()["Row_ID"] \
    .reset_index() \
    .rename(columns={"Row_ID" : "Cases", "Reporting_PHU_Latitude": "Latitude", "Reporting_PHU_Longitude": "Longitude"}) \
    .sort_values(by = ["Reporting_PHU"])
active["Symptom_Onset"] = "Anytime"
active.head()

# + id="YsGDVISXJd0i" colab={"base_uri": "https://localhost:8080/", "height": 415} outputId="9bfa1a49-942e-4562-96ca-750e83e8850d"
#hide
# create dataframes for last 14, 7 and 5 day time periods
def _active_cases_since(conpos, days, label):
    """Count active ("Not Resolved") cases per Public Health Unit.

    Parameters
    ----------
    conpos : pandas.DataFrame
        Confirmed-positive case rows (one row per case; ``Row_ID`` is used
        purely as the column to count).
    days : int or None
        If given, keep only cases whose ``Accurate_Episode_Date`` falls
        within the last ``days`` days; if None, keep all active cases.
    label : str
        Value written to the ``Symptom_Onset`` column so the individual
        time windows remain distinguishable after concatenation.

    Returns
    -------
    pandas.DataFrame
        Columns: Reporting_PHU, Reporting_PHU_City, Latitude, Longitude,
        Cases, Symptom_Onset — sorted by Reporting_PHU.
    """
    mask = conpos['Outcome1'] == "Not Resolved"
    if days is not None:
        mask &= conpos['Accurate_Episode_Date'] >= datetime.now() - timedelta(days=days)
    cases = (conpos[mask]
             .groupby(["Reporting_PHU", "Reporting_PHU_City",
                       "Reporting_PHU_Latitude", "Reporting_PHU_Longitude"])
             .count()["Row_ID"]
             .reset_index()
             .rename(columns={"Row_ID": "Cases",
                              "Reporting_PHU_Latitude": "Latitude",
                              "Reporting_PHU_Longitude": "Longitude"})
             .sort_values(by=["Reporting_PHU"]))
    cases["Symptom_Onset"] = label
    return cases


# 14 / 7 / 5 day deltas (previously three copy-pasted pipelines)
active14 = _active_cases_since(src_conpos, 14, "Last 14 days")
active7 = _active_cases_since(src_conpos, 7, "Last 7 days")
active5 = _active_cases_since(src_conpos, 5, "Last 5 days")

# stack all time windows into one frame
# pd.concat replaces DataFrame.append, which is deprecated (removed in pandas 2.0)
df_active = pd.concat([active, active14, active7, active5])
df_active

# + [markdown] id="wIE5dAgGJd0j"
# ## Geographical Spread of Cases

# + id="SfhWtW1zJd0l"
colab={"base_uri": "https://localhost:8080/", "height": 817} outputId="f885b338-7097-4f44-e5fd-bf2fea4e9534"
#hide_input
# Map of currently active cases: one bubble per Public Health Unit,
# bubble size = case count, colour = symptom-onset window from df_active.
activemap = px.scatter_mapbox(df_active, lat="Latitude", lon="Longitude", color="Symptom_Onset",
                              color_discrete_sequence=["maroon", "darkorange", "yellow", "grey"],
                              size="Cases", hover_name="Reporting_PHU", size_max=28, zoom=5.4,
                              center=dict(lat=45, lon=-79.4),  # roughly centres the view on southern Ontario
                              height=800,
                              #width=900,
                              labels={"Symptom_Onset" : "Symptom Onset"},
                              # tracking currently active cases based on PHU
                              title="Currently Active (Not Recovered) Cases per Public Health Jurisdiction",
                              #mapbox_style="open-street-map"
                              )
# open-street-map requires no Mapbox access token
activemap.update_layout(mapbox_style="open-street-map")
#activemap.show()
#HTML(activemap.to_html())

# + [markdown] id="P_CSAJaqJd0r"
# ## Timeline of Cases

# + id="FzfbpdPpJd0s" colab={"base_uri": "https://localhost:8080/", "height": 430} outputId="fb81f0ae-d800-4d96-8017-99799a704325"
#hide
src_testing.tail()

# + id="6g53gqQ4Jd0s" colab={"base_uri": "https://localhost:8080/", "height": 713} outputId="95ba8fe9-2194-4140-b0b2-71b7b87c0f7f"
#hide
# Drop columns not used by the timeline charts and zero-fill missing values.
df_testing = src_testing.drop(['Confirmed Negative', 'Presumptive Negative', 'Presumptive Positive', 'Total patients approved for testing as of Reporting Date', 'Under Investigation'], axis=1).fillna(0)
# Daily deltas from the cumulative counters.
# NOTE(review): df_testing was already fillna(0) above, so the extra
# .fillna(0) before .diff() looks redundant — confirm before removing.
df_testing['New Cases'] = df_testing['Total Cases'].fillna(0).diff()
df_testing['New Deaths'] = df_testing['Deaths'].fillna(0).diff()
df_testing['Completed Tests Cumulative'] = df_testing['Total tests completed in the last day'].cumsum()
# rolling averages
numdays = 7
df_testing['New Cases 5 Day Average'] = round(df_testing['New Cases'].rolling(5).mean())
df_testing['New Cases 7 Day Average'] = round(df_testing['New Cases'].rolling(numdays).mean())
df_testing['New Tests 7 Day Average'] = round(df_testing['Total tests completed in the last day'].rolling(numdays).mean())
# % positivity = 7-day average of new cases over 7-day average of tests.
df_testing['7 Day Avg Percent Positivity'] = round((df_testing['New Cases 7 Day Average'] / df_testing['New Tests 7 Day Average']) *100, 2)
df_testing

# +
# id="jf65OSRMJd0t" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="aa200738-f03c-4519-bf98-454181804fbd"
#hide_input
# report daily case load and % positivity
# Bars = daily new cases, bar colour = 7-day % positivity (capped at 8%).
fig_conf = px.bar(df_testing, x='Reported Date', y='New Cases', color='7 Day Avg Percent Positivity',
                  range_color=[0,8],
                  title='Ontario Confirmed Cases with % Positivity Rate and 7-Day Rolling Average',
                  labels={'7 Day Avg Percent Positivity': '% Positivity Rate'}
                  )
fig_conf.update_layout(bargap=0)
fig_conf.update_layout(coloraxis_colorbar=dict(yanchor="top", y=1, x=1, ticks="outside", ticksuffix=""))
# Overlay the 7-day rolling average of new cases as a green line.
fig_conf.add_trace(go.Scatter(
    x=df_testing['Reported Date'],
    y=df_testing['New Cases 7 Day Average'],
    mode='lines',
    name='7d Avg',
    line=dict(color="#057D41", dash="solid")
))
# Horizontal legend above the plot area.
fig_conf.update_layout(legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1,))

# + id="_4tdVl-2Jd0u" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="cde80579-07f2-4c97-9881-1b31b9b76bc9"
#hide_input
# report tests completed
# testing data does not exist prior to April 15, 2020
# Keep only rows with a real (non-null, non-zero) daily test count.
df_test = df_testing[(df_testing["Total tests completed in the last day"].notnull()) & (df_testing["Total tests completed in the last day"] != 0)]
# Same layout as fig_conf, but for tests completed per day.
fig_test = px.bar(df_test, x='Reported Date', y='Total tests completed in the last day', template='seaborn',
                  color='7 Day Avg Percent Positivity', range_color=[0,8],
                  title='Ontario Testing with % Positivity Rate and 7-Day Rolling Average',
                  labels={'7 Day Avg Percent Positivity': '% Positivity Rate', 'Total tests completed in the last day': 'Tests Completed'},
                  )
fig_test.update_layout(bargap=0)
fig_test.update_layout(coloraxis_colorbar=dict(yanchor="top", y=1, x=1, ticks="outside", ticksuffix=""))
# Overlay the 7-day rolling average of tests as a blue line.
fig_test.add_trace(go.Scatter(
    x=df_test['Reported Date'],
    y=df_test['New Tests 7 Day Average'],
    mode='lines',
    name='7d Avg',
    line=dict(color="#1E90FF", dash="solid")
))
fig_test.update_layout(legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1,))

# + id="E-JfL2LMJd0u" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="031ce9fe-57eb-4cd7-f506-8839ea4d8556"
#hide_input
# Footnote shown under the charts (rendered HTML, right-aligned).
display(HTML("<div style='text-Align: right; margin-right: 10%;'> <span style='text-align: right'> **Note that testing data is not available prior to April 15, 2020. </span> </div>"))
_notebooks/2020_12_20_Ontario_Overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda-env-EvEn-py # language: python # name: conda-env-even-py # --- import sys if '..' not in sys.path: sys.path.append('..') data_path = '../output/guessing_names.csv' # %matplotlib inline import os from glob import glob import re import pandas as pd from pyhocon import ConfigFactory from cort.core.corpora import Corpus import urllib from IPython.core.display import display, HTML from mturk.score_submissions import evaluate_submissions, adhoc_fix from mturk.gformtools import visualize, extract_errors, unpack_json, chains_str_from_events from collections import Counter from tqdm import tqdm anns_paths = ['../data/annotations/Coref-annotation.csv', '../data/annotations/Coref-annotation-too-long.csv'] anns_gform = pd.concat([pd.read_csv(adhoc_fix(p)) for p in anns_paths], sort=False) anns_gform = anns_gform.drop_duplicates(['Document', 'Username'], keep='last') anns_chains = anns_gform.Annotations.apply(unpack_json) anns = pd.concat([anns_gform, anns_chains], axis=1) # there's no difference between using auto or gold because humans don't see our syntactic and semantic annotations # some *.auto_conll files are missing so I replace them with the *.gold_conll equivalence anns['conll_file'] = anns.conll_file.str.replace('auto_conll', 'gold_conll') root_dir = '..' 
def iterate_masked_name_mentions(df):
    """Yield masked-name mentions that occur at least twice in a document.

    Only documents whose CoNLL path lies under a ``no-external`` or
    ``no-internal`` directory are scanned.  A masked name looks like
    ``_token_`` (possibly several, space-separated, e.g. ``_John_ _Smith_``).

    Parameters: df — annotation rows with 'conll_file', 'Document', 'Username'.
    Yields dicts with keys: mention, conll_path, Document, Username.
    """
    for _, row in tqdm(list(df.iterrows())):
        conll_path = os.path.join(root_dir, row['conll_file'])
        # FIX: original pattern was r'/no-(?:external|internal/)' — the slash
        # was inside the alternation, so "no-external" matched without a
        # trailing separator.  The directory slash belongs outside the group.
        if re.search(r'/no-(?:external|internal)/', conll_path):
            with open(conll_path) as f:
                corpus = Corpus.from_file('', f)
            doc, = corpus.documents  # each file holds exactly one document
            mention_strs = Counter(' '.join(doc.tokens[m.span.begin:m.span.end + 1])
                                   for m in doc.annotated_mentions)
            # Require at least two occurrences so the question is answerable.
            name_mention_strs = [s for s, cnt in mention_strs.items()
                                 if re.match(r'_\w+_(?: _\w+_)*$', s) and cnt >= 2]
            for s in name_mention_strs:
                yield {'mention': s, 'conll_path': conll_path,
                       'Document': row['Document'], 'Username': row['Username']}


df = pd.DataFrame(iterate_masked_name_mentions(anns))


# +
def format_url(row):
    """Return an HTML <li> link to the Google Form prefilled with the row's
    Document and mention (both URL-quoted)."""
    prefilled_form_url_template = 'https://docs.google.com/forms/d/e/1FAIpQLSfk04beuK-ZwD9j2twk4hIZDNy-UPxpKX5jXaPQRj1iGTqDmg/viewform?usp=pp_url&entry.366340186=%s&entry.1671245065=%s'
    # Inlined the original inner lambda, whose `row` parameter shadowed this
    # function's own argument for no benefit.
    url = prefilled_form_url_template % (urllib.parse.quote(row['Document']),
                                         urllib.parse.quote(row['mention']))
    return '<li><a href="%s">%s: %s</a></li>' % (url, row['Document'], row['mention'])


df['prefilled_form'] = df.apply(format_url, axis=1)
# -

df.sample(10)

# Refuse to overwrite an existing question set.
assert not os.path.exists(data_path)
df.to_csv(data_path, index=False)

df = pd.read_csv(data_path)

# Sanity check: document range covered per annotator.
df.groupby('Username').agg({'Document': [min, max]}).drop_duplicates(keep='last')


# ### Student 1
def format_url_list(df, username):
    """Display the user's prefilled-form links as a shuffled HTML ordered
    list (fixed seed, so the order is reproducible)."""
    data = df[df.Username.str.contains(username)].sample(frac=1, random_state=348292)
    s = ['<ol>'] + list(data.prefilled_form) + ['</ol>']
    display(HTML('\n'.join(s)))


format_url_list(df, 'student1')

# ### Student 2
format_url_list(df, 'student2')

# ### Student 3
format_url_list(df, 'student3')
notebooks/generate-named-entity-questions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # [[source]](../api/alibi.explainers.cfproto.rst) # # Counterfactuals Guided by Prototypes # ## Overview # This method is based on the [Interpretable Counterfactual Explanations Guided by Prototypes](https://arxiv.org/abs/1907.02584) paper which proposes a fast, model agnostic method to find interpretable counterfactual explanations for classifier predictions by using class prototypes. # # Humans often think about how they can alter the outcome of a situation. *What do I need to change for the bank to approve my loan?* is a common example. This form of counterfactual reasoning comes natural to us and explains how to arrive at a desired outcome in an interpretable manner. Moreover, examples of counterfactual instances resulting in a different outcome can give powerful insights of what is important to the the underlying decision process. This makes it a compelling method to explain predictions of machine learning models. In the context of predictive models, a counterfactual instance describes the necessary change in input features of a test instance that alter the prediction to a predefined output (e.g. a prediction class). The counterfactual is found by iteratively perturbing the input features of the test instance during an optimization process until the desired output is achieved. # # A high quality counterfactual instance $x_{cf}$ should have the following desirable properties: # # * The model prediction on $x_{cf}$ needs to be close to the predefined output. # # * The perturbation $\delta$ changing the original instance $x_{0}$ into $x_{cf} = x_{0} + \delta$ should be sparse. # # * The counterfactual $x_{cf}$ should be interpretable. 
This implies that $x_{cf}$ needs to lie close to both the overall and counterfactual class specific data distribution. # # * The counterfactual $x_{cf}$ needs to be found fast enough so it can be used in a real life setting. # We can obtain those properties by incorporating additional loss terms in the objective function that is optimized using gradient descent. A basic loss function for a counterfactual can look like this: # # $$ # Loss = cL_{pred} + \beta L_{1} + L_{2} # $$ # # The first loss term, $cL_{pred}$, encourages the perturbed instance to predict another class than the original instance. The $\beta$$L_{1}$ + $L_{2}$ terms act as an elastic net regularizer and introduce sparsity by penalizing the size of the difference between the counterfactual and the perturbed instance. While we can obtain sparse counterfactuals using this objective function, these are often not very interpretable because the training data distribution is not taken into account, and the perturbations are not necessarily meaningful. # # The [Contrastive Explanation Method (CEM)](./CEM.ipynb) uses an [autoencoder](https://en.wikipedia.org/wiki/Autoencoder) which is trained to reconstruct instances of the training set. We can then add the $L_{2}$ reconstruction error of the perturbed instance as a loss term to keep the counterfactual close to the training data distribution. The loss function becomes: # # $$ # Loss = cL_{pred} + \beta L_{1} + L_{2} + L_{AE} # $$ # # The $L_{AE}$ does however not necessarily lead to interpretable solutions or speed up the counterfactual search. The lack of interpretability occurs because the overall data distribution is followed, but not the class specific one. That's where the prototype loss term $L_{proto}$ comes in. To define the prototype for each prediction class, we can use the encoder part of the previously mentioned autoencoder. We also need the training data or at least a representative sample. We use the model to make predictions on this data set. 
For each predicted class, we encode the instances belonging to that class. The class prototype is simply the average of the k closest encodings in that class to the encoding of the instance that we want to explain. When we want to generate a counterfactual, we first find the nearest prototype other than the one for the predicted class on the original instance. The $L_{proto}$ loss term tries to minimize the $L_{2}$ distance between the counterfactual and the nearest prototype. As a result, the perturbations are guided to the closest prototype, speeding up the counterfactual search and making the perturbations more meaningful as they move towards a typical in-distribution instance. If we do not have a trained encoder available, we can build class representations using [k-d trees](https://en.wikipedia.org/wiki/K-d_tree) for each class. The prototype is then the *k* nearest instance from a k-d tree other than the tree which represents the predicted class on the original instance. The loss function now looks as follows: # # $$ # Loss = cL_{pred} + \beta L_{1} + L_{2} + L_{AE} + L_{proto} # $$ # # The method allows us to select specific prototype classes to guide the counterfactual to. For example, in MNIST the closest prototype to a 9 could be a 4. However, we can specify that we want to move towards the 7 prototype and avoid 4. # # In order to help interpretability, we can also add a trust score constraint on the proposed counterfactual. The trust score is defined as the ratio of the distance between the encoded counterfactual and the prototype of the class predicted on the original instance, and the distance between the encoded counterfactual and the prototype of the class predicted for the counterfactual instance. Intuitively, a high trust score implies that the counterfactual is far from the originally predicted class compared to the counterfactual class. For more info on trust scores, please check out the [documentation](./TrustScores.ipynb). 
# # Because of the $L_{proto}$ term, we can actually remove the prediction loss term and still obtain an interpretable counterfactual. This is especially relevant for fully black box models. When we provide the counterfactual search method with a Keras or TensorFlow model, it is incorporated in the TensorFlow graph and evaluated using automatic differentiation. However, if we only have access to the model's prediction function, the gradient updates are numerical and typically require a large number of prediction calls because of $L_{pred}$. These prediction calls can slow the search down significantly and become a bottleneck. We can represent the gradient of the loss term as follows: # # $$ # \frac{\partial L_{pred}}{\partial x} = \frac{\partial L_{pred}}{\partial p} \frac{\partial p}{\partial x} # $$ # # where $p$ is the prediction function and $x$ the input features to optimize. For a 28 by 28 MNIST image, the $^{\delta p}/_{\delta x}$ term alone would require a prediction call with batch size 28x28x2 = 1568. By using the prototypes to guide the search however, we can remove the prediction loss term and only make a single prediction at the end of each gradient update to check whether the predicted class on the proposed counterfactual is different from the original class. # # ## Categorical Variables # # It is crucial for many machine learning applications to deal with both continuous numerical and categorical data. Explanation methods which rely on perturbations or sampling of the input features need to make sure those perturbations are meaningful and capture the underlying structure of the data. If not done properly, the perturbed or sampled instances are possibly out of distribution compared to the training data and result in misleading explanations. The perturbation or sampling process becomes tricky for categorical features. 
For instance random perturbations across possible categories or enforcing a ranking between categories based on frequency of occurrence in the training data do not capture this structure. # # Our method first computes the pairwise distances between categories of a categorical variable based on either the model predictions (MVDM) or the context provided by the other variables in the dataset (ABDM). For MVDM, we use the difference between the conditional model prediction probabilities of each category. This method is based on the *Modified Value Difference Metric* (MVDM) by [Cost et al (1993)](https://link.springer.com/article/10.1023/A:1022664626993). ABDM stands for *Association-Based Distance Metric*, a categorical distance measure introduced by [Le et al (2005)](http://www.jaist.ac.jp/~bao/papers/N26.pdf). ABDM infers context from the presence of other variables in the data and computes a dissimilarity measure based on the Kullback-Leibler divergence. Both methods can also be combined as *ABDM-MVDM*. We can then apply multidimensional scaling to project the pairwise distances into Euclidean space. More details will be provided in a forthcoming paper. # # The different use cases are highlighted in the example notebooks linked at the bottom of the page. # ## Usage # ### Initialization # # The counterfactuals guided by prototypes method works on fully black-box models. This means that they can work with arbitrary functions that take arrays and return arrays. However, if the user has access to a full TensorFlow (TF) or Keras model, this can be passed in as well to take advantage of the automatic differentiation in TF to speed up the search. This section describes the initialization for a TF/Keras model. Please see the [numerical gradients](#Numerical-Gradients) section for black box models. 
# # We first load our MNIST classifier and the (optional) autoencoder and encoder: # # ```python # cnn = load_model('mnist_cnn.h5') # ae = load_model('mnist_ae.h5') # enc = load_model('mnist_enc.h5') # ``` # # We can now initialize the counterfactual: # # ```python # shape = (1,) + x_train.shape[1:] # cf = CounterfactualProto(cnn, shape, kappa=0., beta=.1, gamma=100., theta=100., # ae_model=ae, enc_model=enc, max_iterations=500, # feature_range=(-.5, .5), c_init=1., c_steps=5, # learning_rate_init=1e-2, clip=(-1000., 1000.), write_dir='./cf') # ``` # # Besides passing the predictive, and (optional) autoencoder and models, we set a number of **hyperparameters** … # # ... **general**: # # * `shape`: shape of the instance to be explained, starting with batch dimension. Currently only single explanations are supported, so the batch dimension should be equal to 1. # # * `feature_range`: global or feature-wise min and max values for the perturbed instance. # # * `write_dir`: write directory for Tensorboard logging of the loss terms. It can be helpful when tuning the hyperparameters for your use case. It makes it easy to verify that e.g. not 1 loss term dominates the optimization, that the number of iterations is OK etc. You can access Tensorboard by running `tensorboard --logdir {write_dir}` in the terminal. The figure below for example shows the loss to be optimized over different $c$ iterations. It is clear that within each iteration, the number of `max_iterations` steps is too high and we can speed up the search. # # ![cfplossopt](cfplossopt.png) # # ... related to the **optimizer**: # # * `max_iterations`: number of loss optimization steps for each value of *c*; the multiplier of the first loss term. # # * `learning_rate_init`: initial learning rate, follows polynomial decay. # # * `clip`: min and max gradient values. # # ... 
related to the **objective function**: # # * `c_init` and `c_steps`: the multiplier $c$ of the first loss term is updated for `c_steps` iterations, starting at `c_init`. The first loss term encourages the perturbed instance to be predicted as a different class than the original instance. If we find a candidate counterfactual for the current value of $c$, we reduce the value of $c$ for the next optimization cycle to put more emphasis on the other loss terms and improve the solution. If we cannot find a solution, $c$ is increased to put more weight on the prediction class restrictions of the counterfactual. # # * `kappa`: the first term in the loss function is defined by a difference between the predicted probabilities for the perturbed instance of the original class and the max of the other classes. $\kappa \geq 0$ defines a cap for this difference, limiting its impact on the overall loss to be optimized. Similar to CEM, we set $\kappa$ to 0 in the examples. # # * `beta`: $\beta$ is the $L_{1}$ loss term multiplier. A higher value for $\beta$ means more weight on the sparsity restrictions of the perturbations. $\beta$ equal to 0.1 works well for the example datasets. # # * `gamma`: multiplier for the optional $L_{2}$ reconstruction error. A higher value for $\gamma$ means more emphasis on the reconstruction error penalty defined by the autoencoder. A value of 100 is reasonable for the examples. # # * `theta`: multiplier for the $L_{proto}$ loss term. A higher $\theta$ means more emphasis on the gradients guiding the counterfactual towards the nearest class prototype. A value of 100 worked well for the examples. # # When the dataset contains categorical variables, we need to additionally pass the following arguments: # # * `cat_vars`: if the categorical variables have ordinal encodings, this is a dictionary with as keys the categorical columns and values the number of categories for the categorical variable in the dataset. 
If one-hot encoding is applied to the data, then the keys of the `cat_vars` dictionary represent the column where each categorical variable starts while the values still return the number of categories. # # * `ohe`: a flag (True or False) whether the categories are one-hot encoded. # # It is also important to remember that the perturbations are applied in the numerical feature space, after the categorical variables have been transformed into numerical features. This has to be reflected by the dimension and values of `feature_range`. Imagine for example that we have a dataset with 10 columns. Two of the features are categorical and one-hot encoded. They can both take 3 values each. As a result, the number of columns in the dataset is reduced to 6 when we transform those categorical features to numerical features. As a result, the `feature_range` needs to contain the upper and lower ranges for 6 features. # # While the default values for the loss term coefficients worked well for the simple examples provided in the notebooks, it is recommended to test their robustness for your own applications. # <div class="alert alert-warning"> # Warning # # Once a `CounterfactualProto` instance is initialized, the parameters of it are frozen even if creating a new instance. This is due to TensorFlow behaviour which holds on to some global state. In order to change parameters of the explainer in the same session (e.g. for explaining different models), you will need to reset the TensorFlow graph manually: # # ```python # import tensorflow as tf # tf.keras.backend.clear_session() # ``` # You may need to reload your model after this. Then you can create a new `CounterfactualProto` instance with new parameters. 
# # </div> # ### Fit # # If we use an encoder to find the class prototypes, we need an additional `fit` step on the training data: # # ```python # cf.fit(x_train) # ``` # # We also need the `fit` step if the data contains categorical features so we can compute the numerical transformations. In practice, most of these optional arguments do not need to be changed. # # ```python # cf.fit(x_train, d_type='abdm', w=None, disc_perc=[25, 50, 75], standardize_cat_vars=False, # smooth=1., center=True, update_feature_range=True) # ``` # # * `d_type`: the distance metric used to compute the pairwise distances between the categories of each categorical variable. As discussed in the introduction, the options are `"abdm"`, `"mvdm"` or `"abdm-mvdm"`. # # * `w`: if the combined metric `"abdm-mvdm"` is used, `w` is the weight (between 0 and 1) given to `abdm`. # # * `disc_perc`: for `abdm`, we infer context from the other features. If there are continuous numerical features present, these are binned according to the quartiles in `disc_perc` before computing the similarity metric. # # * `standardize_car_vars`: whether to return the standardized values for the numerical distances of each categorical feature. # # * `smooth`: if the difference in the distances between the categorical variables is too large, then a lower value of the `smooth` argument (0, 1) can smoothen out this difference. This would only be relevant if one categorical variable has significantly larger differences between its categories than others. As a result, the counterfactual search process will likely leave that categorical variable unchanged. # # * `center`: whether to center the numerical distances of the categorical variables between the min and max feature ranges. # # * `update_feature_range`: whether to update the `feature_range` parameter for the categorical variables based on the min and max values it computed in the `fit` step. 
# ### Explanation # # We can now explain the instance: # # ```python # explanation = cf.explain(X, Y=None, target_class=None, k=20, k_type='mean', # threshold=0., verbose=True, print_every=100, log_every=100) # ``` # # * `X`: original instance # * `Y`: one-hot-encoding of class label for `X`, inferred from the prediction on `X` if *None*. # * `target_class`: classes considered for the nearest class prototype. Either a list with class indices or *None*. # * `k`: number of nearest instances used to define the prototype for a class. Defaults to using all instances belonging to the class. # * `k_type`: use either the average encoding of the `k` nearest instances in a class as the class prototype (`k_type`='mean') or the k-nearest encoding in the class (`k_type`='point'). This parameter is only relevant if an encoder is used to define the prototypes. # * `threshold`: threshold level for the ratio between the distance of the counterfactual to the prototype of the predicted class for the original instance over the distance to the prototype of the predicted class for the counterfactual. If the trust score is below the threshold, the proposed counterfactual does not meet the requirements and is rejected. # * `verbose`: if *True*, print progress of counterfactual search every `print_every` steps. # * `log_every`: if `write_dir` for Tensorboard is specified, then log losses every `log_every` steps. # # The ```explain``` method returns an `Explanation` object with the following attributes: # # * *cf*: a dictionary with the overall best counterfactual found. 
*explanation['cf']* has the following *key: value* pairs: # # # * *X*: the counterfactual instance # # * *class*: predicted class for the counterfactual # # * *proba*: predicted class probabilities for the counterfactual # # * *grads_graph*: gradient values computed from the TF graph with respect to the input features at the counterfactual # # * *grads_num*: numerical gradient values with respect to the input features at the counterfactual # # # * *orig_class*: predicted class for original instance # # * *orig_proba*: predicted class probabilities for original instance # # * *all*: a dictionary with the iterations as keys and for each iteration a list with counterfactuals found in that iteration as values. So for instance, during the first iteration, *explanation['all'][0]*, initially we typically find fairly noisy counterfactuals that improve over the course of the iteration. The counterfactuals for the subsequent iterations then need to be *better* (sparser) than the previous best counterfactual. So over the next few iterations, we probably find less but *better* solutions. # ### Numerical Gradients # So far, the whole optimization problem could be defined within the TF graph, making automatic differentiation possible. It is however possible that we do not have access to the model architecture and weights, and are only provided with a ```predict``` function returning probabilities for each class. 
The counterfactual can then be initialized in the same way: # # ```python # # define model # cnn = load_model('mnist_cnn.h5') # predict_fn = lambda x: cnn.predict(x) # ae = load_model('mnist_ae.h5') # enc = load_model('mnist_enc.h5') # # # initialize explainer # shape = (1,) + x_train.shape[1:] # cf = CounterfactualProto(predict_fn, shape, gamma=100., theta=100., # ae_model=ae, enc_model=enc, max_iterations=500, # feature_range=(-.5, .5), c_init=1., c_steps=4, # eps=(1e-2, 1e-2), update_num_grad=100) # ``` # # In this case, we need to evaluate the gradients of the loss function with respect to the input features numerically: # # $$ # \frac{\partial L_{pred}}{\partial x} = \frac{\partial L_{pred}}{\partial p} \frac{\partial p}{\partial x} # $$ # # where $L_{pred}$ is the loss term related to the prediction function, $p$ is the prediction function and $x$ are the input features to optimize. There are now 2 additional hyperparameters to consider: # # * `eps`: a tuple to define the perturbation size used to compute the numerical gradients. `eps[0]` and `eps[1]` are used respectively for $^{\delta L_{pred}}/_{\delta p}$ and $^{\delta p}/_{\delta x}$. `eps[0]` and `eps[1]` can be a combination of float values or numpy arrays. For `eps[0]`, the array dimension should be *(1 x nb of prediction categories)* and for `eps[1]` it should be *(1 x nb of features)*. For the Iris dataset, `eps` could look as follows: # # ```python # eps0 = np.array([[1e-2, 1e-2, 1e-2]]) # 3 prediction categories, equivalent to 1e-2 # eps1 = np.array([[1e-2, 1e-2, 1e-2, 1e-2]]) # 4 features, also equivalent to 1e-2 # eps = (eps0, eps1) # ``` # # - `update_num_grad`: for complex models with a high number of parameters and a high dimensional feature space (e.g. Inception on ImageNet), evaluating numerical gradients can be expensive as they involve prediction calls for each perturbed instance. 
The `update_num_grad` parameter allows you to set a batch size on which to evaluate the numerical gradients, reducing the number of prediction calls required. # # We can also remove the prediction loss term by setting `c_init` to 0 and only run 1 `c_steps`, and still obtain an interpretable counterfactual. This dramatically speeds up the counterfactual search (e.g. by 100x in the MNIST example notebook): # # ```python # cf = CounterfactualProto(predict_fn, shape, gamma=100., theta=100., # ae_model=ae, enc_model=enc, max_iterations=500, # feature_range=(-.5, .5), c_init=0., c_steps=1) # ``` # ### k-d trees # So far, we assumed that we have a trained encoder available to find the nearest class prototype. This is however not a hard requirement. As mentioned in the *Overview* section, we can use k-d trees to build class representations, find prototypes by querying the trees for each class and return the k nearest class instance as the closest prototype. We can run the counterfactual as follows: # # ```python # cf = CounterfactualProto(cnn, shape, use_kdtree=True, theta=10., feature_range=(-.5, .5)) # cf.fit(x_train, trustscore_kwargs=None) # explanation = cf.explain(X, k=2) # ``` # # * `trustscore_kwargs`: keyword arguments for the trust score object used to define the k-d trees for each class. Please check the trust scores [documentation](./TrustScores.ipynb) for more info. # ## Examples # [Counterfactuals guided by prototypes on MNIST](../examples/cfproto_mnist.nblink) # # [Counterfactuals guided by prototypes on Boston housing dataset](../examples/cfproto_housing.nblink) # # [Counterfactual explanations with one-hot encoded categorical variables](../examples/cfproto_cat_adult_ohe.nblink) # # [Counterfactual explanations with ordinally encoded categorical variables](../examples/cfproto_cat_adult_ord.nblink)
doc/source/methods/CFProto.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# GitHub login automation with Selenium: open the login page, wait for it to
# load, then fill in the credential fields.

from selenium import webdriver

# Path to the local chromedriver binary.
browser = webdriver.Chrome('C:/Develops/chromedriver.exe')
browser.get('https://www.github.com/login')

# # id : input#login_field, pw : input#password, button : input[type="submit"]

# +
import time

# Give the login page time to finish loading before locating elements.
time.sleep(5)

# ID
# FIX: find_element (singular) — find_elements returns a list, which has no
# send_keys() method, so the original line raised AttributeError.
browser.find_element_by_css_selector('input#login_field').send_keys('<EMAIL>')

# PASSWORD
# FIX: target the password field's own selector ('input#password'); the
# original re-used 'input#login_field' and had an unfinished send_keys(#) call.
browser.find_element_by_css_selector('input#password').send_keys('<PASSWORD>')

# Submit the login form using the button selector noted above.
browser.find_element_by_css_selector('input[type="submit"]').click()
# -
github_login_webscraping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Problem Statement # # You will have to use PINNs to solve the fluid flow for the given geometry and flow parameters. # # A 2D chip is placed inside a 2D channel. The flow enters inlet at $u=1.5\text{ m/s}$ and exits through the outlet which is a $0 Pa$. All the other walls are treated as no-slip. The kinematic viscosity $(\nu)$ for the flow is $0.02 \text{ }m^2/s$ and the density $(\rho)$ is $1 \text{ }kg/m^3$. The problem is shown in the figure below. # # <img src="chip_2d_geom.png" alt="Drawing" style="width: 800px;"/> # # ## Challenge # # The main challenge in this problem is to correctly formulate the problem using PINNs. In order to achieve that, you will have to complete the following parts successfully: # 1. Define the correct geometry for the problem # 2. Set-up the correct boundary conditions and equations # 3. Create the neural network and solve the problem # # A successful completion of the problem should result in distribution of flow variables as shown below. Also, you should aim to achieve a relative $L_2$ error of less than 0.2 for all the variables w.r.t the given OpenFOAM solution. # # <img src="challenge_results.png" alt="Drawing" style="width: 650px;"/> # In this template, we will have give you a skeleton code which you will fill in to define and solve the problem. # # **Note: You need to edit the `chip_2d_template.py` script that is placed in the ../source_code/chip_2d/ directory.** # # From the top menu, click on File, and Open `chip_2d_template.py` from the current directory at `../source_code/chip_2d` directory. Remember to SAVE your code after changes, before running below cells. # # # Let us start with importing the required packages. 
Pay attention to the various modules and packages that are being imported, especially the equations, geometry and architectures. # ```python # from sympy import Symbol # import numpy as np # import tensorflow as tf # from simnet.solver import Solver # from simnet.dataset import TrainDomain, ValidationDomain # from simnet.data import Validation # from simnet.sympy_utils.geometry_2d import Rectangle, Line, Channel2D # from simnet.sympy_utils.functions import parabola # from simnet.csv_utils.csv_rw import csv_to_dict # from simnet.PDES.navier_stokes import IntegralContinuity, NavierStokes # from simnet.controller import SimNetController # from simnet.architecture import FourierNetArch # ``` # Now, define the simulation parameters and generate the geometry. You will be using the 2D geometry modules for this example. Please fill in the appropriate values for each geometry. Remember, `Channel2D` and `Rectangle` are defined by its two endpoints. The difference between a channel and rectangle in SimNet is that, the channel geometry does not have bounding curves in the x-direction. This is helpful in getting a signed distance field that it infinite in x-direction. This can be important when the signed distance field is used as a wall distance in some of the turbulence models (Refer *SimNet User Guide Chapter 3* for more details). 
Hence we will create the inlet and outlet using `Line` geometry (*Please note that this is a 2d line, as opposed to the* `Line1D` *that was used in the diffusion tutorial*) # ```python # # simulation params # channel_length = (-2.5, 2.5) # channel_width = (-0.5, 0.5) # chip_pos = -1.0 # chip_height = 0.6 # chip_width = 1.0 # inlet_vel = 1.5 # ``` # ```python # #TODO: Replace x1, y1, x2, y2, and X's with appropriate values # # # define geometry # # define channel # channel = Channel2D((x1, y1), (x2, y2)) # # define inlet and outlet # inlet = Line((x1, y1), (x1, y2), normal= X) # outlet = Line((x1, y1), (x1, y2), normal= X) # # define the chip # rec = Rectangle((x1, y1), (x2, y2)) # # create a geometry for higher sampling of point cloud near the fin # flow_rec = Rectangle((chip_pos-0.25, channel_width[0]), # (chip_pos+chip_width+0.25, channel_width[1])) # # fluid area # geo = channel - rec # geo_hr = geo & flow_rec # geo_lr = geo - flow_rec # ``` # The current problem is a channel flow with an incompressible fluid. In such cases, the mass flow rate through each cross-section of the channel and in turn the volumetric flow is constant. This can be used as an additional constraint in the problem which we will help us in improving the speed of convergence. # # Wherever, possible, using such additional knowledge about the problem can help in better and faster solutions. More examples of this can be found in the *SimNet User Guide Chapter 6*. # ```python # # Optional integral continuity planes to speed up convergence # x_pos = Symbol('x_pos') # integral_line = Line((x_pos, channel_width[0]), # (x_pos, channel_width[1]), # 1) # x_pos_range = {x_pos: lambda batch_size: np.full((batch_size, 1), np.random.uniform(channel_length[0], channel_length[1]))} # ``` # Now you will use the created geometry to define the training data for the problem. Implement the required boundary conditions and equations in the `Chip2DTrain` class below. 
Remember that you will have to create the training data points both for the boundary condition and to reduce the equation residuals. You can refer to the `NavierStokes` class from the PDEs module to check how the equations are defined and the keys required for each equation. For ease of access, we show the equations below. # ```python # # Equation definitions of the NavierStokes class # # # Note, this block is only for reference. Do not include this in your final script # # These equations are already present in the NavierStokes class definition # # # set equations # self.equations = Variables() # self.equations['continuity'] = rho.diff(t) + (rho*u).diff(x) + (rho*v).diff(y) + (rho*w).diff(z) # self.equations['momentum_x'] = ((rho*u).diff(t) # + (u*((rho*u).diff(x)) + v*((rho*u).diff(y)) + w*((rho*u).diff(z)) + rho*u*(curl)) # + p.diff(x) # - (-2/3*mu*(curl)).diff(x) # - (mu*u.diff(x)).diff(x) # - (mu*u.diff(y)).diff(y) # - (mu*u.diff(z)).diff(z) # - (mu*(curl).diff(x))) # self.equations['momentum_y'] = ((rho*v).diff(t) # + (u*((rho*v).diff(x)) + v*((rho*v).diff(y)) + w*((rho*v).diff(z)) + rho*v*(curl)) # + p.diff(y) # - (-2/3*mu*(curl)).diff(y) # - (mu*v.diff(x)).diff(x) # - (mu*v.diff(y)).diff(y) # - (mu*v.diff(z)).diff(z) # - (mu*(curl).diff(y))) # self.equations['momentum_z'] = ((rho*w).diff(t) # + (u*((rho*w).diff(x)) + v*((rho*w).diff(y)) + w*((rho*w).diff(z)) + rho*w*(curl)) # + p.diff(z) # - (-2/3*mu*(curl)).diff(z) # - (mu*w.diff(x)).diff(x) # - (mu*w.diff(y)).diff(y) # - (mu*w.diff(z)).diff(z) # - (mu*(curl).diff(z))) # # if self.dim == 2: # self.equations.pop('momentum_z') # ``` # Now use this understanding to define the problem. An example of the inlet boundary condition is shown. Also, the integral continuity constraint is already coded up for you. 
# ```python # #TODO: Replace all the placeholders with appropriate values # # # define sympy variables to parametrize domain curves # x, y = Symbol('x'), Symbol('y') # # class Chip2DTrain(TrainDomain): # def __init__(self, **config): # super(Chip2DTrain, self).__init__() # # # inlet # inlet_parabola = parabola(y, channel_width[0], channel_width[1], inlet_vel) # inlet_bc = inlet.boundary_bc(outvar_sympy={'u': inlet_parabola, 'v': 0}, # batch_size_per_area=64) # self.add(inlet_bc, name="Inlet") # # # outlet # outlet_bc = outlet.boundary_bc(outvar_sympy={placeholder}, # batch_size_per_area=placeholder) # self.add(outlet_bc, name="Outlet") # # # noslip # noslip = geo.boundary_bc(outvar_sympy={placeholder}, # batch_size_per_area=placeholder) # self.add(noslip, name="ChipNS") # # # interior lr # interior_lr = geo_lr.interior_bc(outvar_sympy={placeholder}, # bounds={placeholder}, # lambda_sympy={placeholder}, # batch_size_per_area=placeholder) # self.add(interior_lr, name="InteriorLR") # # # interior hr # interior_hr = geo_hr.interior_bc(outvar_sympy=placeholder, # bounds=placeholder, # lambda_sympy=placeholder, # batch_size_per_area=placeholder) # self.add(interior_hr, name="InteriorHR") # # # # integral continuity # for i in range(4): # IC = integral_line.boundary_bc(outvar_sympy={'integral_continuity': 1.0}, # batch_size_per_area=512, # lambda_sympy={'lambda_integral_continuity': 1.0}, # criteria=geo.sdf>0, # param_ranges=x_pos_range, # fixed_var=False) # self.add(IC, name="IntegralContinuity_"+str(i)) # ``` # Now, add validation data to the problem. The examples folder contains a `openfoam` directory that contains the solution of same problem using OpenFOAM solver. The CSV file is read in and converted to a dictionary for you. Now, you will have to complete the definition of `Chip2DVal` class below. # ```python # #TODO: Set the appropriate normalization for the validation data # # The validation data has domain extents of (0,0) to (5,1). 
Normalize this based on your definition of the domain # # # validation data # mapping = {'Points:0': 'x', 'Points:1': 'y', # 'U:0': 'u', 'U:1': 'v', 'p': 'p'} # openfoam_var = csv_to_dict('openfoam/2D_chip_fluid0.csv', mapping) # openfoam_var['x'] -= 2.5 #TODO Samle normalization of position. Edit based on your geometry definition # openfoam_var['y'] -= 0.5 # openfoam_invar_numpy = {key: value for key, value in openfoam_var.items() if key in ['x', 'y']} # openfoam_outvar_numpy = {key: value for key, value in openfoam_var.items() if key in ['u', 'v', 'p']} # # class Chip2DVal(ValidationDomain): # def __init__(self, **config): # super(Chip2DVal, self).__init__() # val = Validation.from_numpy(placeholder) # self.add(val, name='Val') # ``` # Now, complete the last part of the code by creating the `ChipSolver` to solve the problem. You will make use of an advanced architecture called the `FourierNetArch` in this problem. This architecture will help to reach the convergence faster. More details about this architecture can be found in *SimNet User Guide Section 1.6.1.1*. The important parameters of the neural network are defined for you. Feel free to tweak them and observe its behavior on the results and speed of convergence. # ```python # #TODO: Replace all the placeholders with appropriate values # class ChipSolver(Solver): # train_domain = placeholder # val_domain = placeholder # arch = FourierNetArch # # def __init__(self, **config): # super(ChipSolver, self).__init__(**config) # # self.frequencies = ('axis,diagonal', [i/5. 
for i in range(25)]) # # self.equations = (placeholder) # flow_net = self.arch.make_node(name='flow_net', # inputs=[placeholder], # outputs=[placeholder]) # self.nets = [flow_net] # # @classmethod # def update_defaults(cls, defaults): # defaults.update({ # 'network_dir': './network_checkpoint_chip_2d', # 'rec_results': True, # 'rec_results_freq': 5000, # 'max_steps': 10000, # 'decay_steps': 100, # 'xla': True # }) # if __name__ == '__main__': # ctr = SimNetController(ChipSolver) # ctr.run() # ``` # Once you have completed all the sections of the code, you can export the notebook as a python script and then execute it as we have seen in the earlier tutorials. Record your relative L2 errors with respect to the OpenFOAM solution and try to achieve errors for all the variables lower than 0.2. Also try to visualize your results using contour plots by reading in the `.npz` files created in the network checkpoint. # # # # Licensing # This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0)
hpc_ai/PINN/English/python/jupyter_notebook/chip_2d/Challenge_1_template.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center><img src="../../Picture Data/logo.png" alt="Header" style="width: 800px;"/></center>

# @Copyright (C): 2010-2019, Shenzhen Yahboom Tech
# @Author: Malloy.Yuan
# @Date: 2019-07-17 10:10:02
# @LastEditors: Malloy.Yuan
# @LastEditTime: 2019-09-17 17:54:19

# ### Creating a robot instance
# We use the Robot object to call our already packaged motor drive library to drive motor.

from jetbot import Robot
import time

robot = Robot()

# ### GPIO port initialization configuration
# Import GPIO objects from RPi.GPIO, initialize pin objects
# Set the GPIO port mode of the limit switch to BCM mode and set it to input mode.

import RPi.GPIO as GPIO

# BCM pin numbers of the camera-lift limit switches.
# NOTE(review): the poll loop treats a 0 reading as "switch triggered",
# i.e. the switches appear to be active-low — confirm against the wiring.
up_limit_pin = 17
down_limit_pin = 18

GPIO.setmode(GPIO.BCM)
GPIO.setup(up_limit_pin, GPIO.IN)
GPIO.setup(down_limit_pin, GPIO.IN)

# ### Import python thread library, initialize PTZ lift variable
# Create a vertical_motors_action pan/tilt global variable and import the thread package to monitor the pan/tilt status

import threading

# PTZ motion state shared with the monitor thread:
#   0 = stopped, 1 = rising, 2 = descending.
global vertical_motors_action
vertical_motors_action = 0

# ### PTZ lifting upper and lower limit detection method

def limit_detect():
    # Background poller: twice a second, check the limit switch that matches
    # the current direction of travel and stop the lift motors when it fires.
    global vertical_motors_action
    while 1:
        if vertical_motors_action == 1:
            # Rising: a 0 reading means the upper limit switch is pressed.
            if GPIO.input(up_limit_pin) == 0:
                robot.vertical_motors_stop()
                vertical_motors_action = 0
                print('云台到顶')  # message means "PTZ reached the top"
        elif vertical_motors_action == 2:
            # Descending: a 0 reading means the lower limit switch is pressed.
            if GPIO.input(down_limit_pin) == 0:
                robot.vertical_motors_stop()
                vertical_motors_action = 0
                print('云台到底')  # message means "PTZ reached the bottom"
        time.sleep(0.5)

# ### Create and open the thread that monitors the upper and lower limits of the PTZ
# NOTE(review): the thread is non-daemon (the setDaemon line is commented
# out), so the kernel will not exit while this loop runs — confirm intended.

thread1 = threading.Thread(target=limit_detect)
# thread1.setDaemon(True)
thread1.start()

# ### Method of PTZ rise
# Start rising only if the upper limit switch is not already pressed.

vertical_motors_action = 1
if(GPIO.input(up_limit_pin)):
    robot.up(1)
    print('cameraup')
else:
    print('Top')
    robot.vertical_motors_stop()
    vertical_motors_action = 0

# ### Method of PTZ decline
# Start descending only if the lower limit switch is not already pressed.

vertical_motors_action = 2
if(GPIO.input(down_limit_pin)):
    robot.down(1)
    print('cameradown')
else:
    print('Bottom')
    robot.vertical_motors_stop()
    vertical_motors_action = 0

# ### Method of PTZ stop
# Manual stop: halt the motors and reset the shared state flag.

robot.vertical_motors_stop()
vertical_motors_action = 0
print('camerastop')
notebooks/4.Using of PCA9685 driver/4.Control camera U-L by limit switch/Control camera U-L by limit switch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.12 (''kitti'': conda)'
#     language: python
#     name: python3
# ---

# +
import os

# Write the KITTI-style ImageSets index: one zero-padded six-digit sample
# name per line (each followed by a trailing space, matching the original
# output format), for samples 000000 through 503062.
sample_list_path = '/multiview/3d-count/obj_detection/normalized_data/ImageSets/train.txt'
with open(sample_list_path, 'w') as index_file:
    index_file.writelines(f"{idx:06d} \n" for idx in range(503063))
# -
tools/fill_sample_name_files.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:geoproject]
#     language: python
#     name: conda-env-geoproject-py
# ---

# +
import geopandas as gpd
import pandas as pd
from Utility import string2list
from varname import nameof
import geopandas as gpd
from shapely.geometry import LineString
from Utility import draw_several_dataset

#standard packages
import re
import pandas as pd
import numpy as np

from bokeh.plotting import figure, output_file, show #plot things
from bokeh.io import output_notebook, show
output_notebook()
from bokeh.models import GMapOptions,HoverTool,ColumnDataSource
from bokeh.plotting import gmap
import bokeh.palettes as bp
from bokeh.layouts import row
from prettytable import PrettyTable
from matplotlib import pyplot as plt
import seaborn as sns
from IPython.core.display import HTML


def draw_several_dataset(df1, df2=None, needdf2=False):
    """Plot pipe segments on a Google map: df1 in white, optionally df2 in yellow.

    Both frames must carry 'long', 'lat' and 'capacity' columns, where
    'long'/'lat' hold per-segment coordinate lists.

    NOTE: this redefines the ``draw_several_dataset`` imported from Utility
    above; the local definition wins.
    """
    map_options = GMapOptions(lat=51.10, lng=12, map_type="roadmap", zoom=6)
    # google api key
    api_key = "<KEY>"
    p = gmap(api_key, map_options, title="EMAP")
    TOOLTIPS = [('capacity', '@long')]
    source = ColumnDataSource(df1[['long', 'lat', 'capacity']])
    p.multi_line('long', 'lat', color='white', line_width=2, source=source)
    p.add_tools(HoverTool(tooltips=TOOLTIPS, mode='vline'))
    if needdf2:
        # FIX: build the second data source only when it is used; the
        # original built it unconditionally, crashing on the documented
        # df2=None default.
        source2 = ColumnDataSource(df2[['long', 'lat', 'capacity']])
        p.multi_line('long', 'lat', color='yellow', line_width=1, source=source2)
        print('df1 white, df2 yellow')
    show(p)


def preprocessing(df, dst_name):
    """Unpack raw pipe-segment columns, zero out uncertain capacities, parse
    country codes, and save a simplified GeoJSON copy.

    Returns the enriched dataframe.
    """
    df = unpack(df)
    # only keep certain capacity
    df['capacity'] = df.apply(lambda x: 0 if x['uncertain'] > 0 else x['capacity'], axis=1)
    # parsing country code
    df.country_code = df.country_code.apply(string2list)
    df['from'] = df.country_code.str[0]
    df['to'] = df.country_code.str[1]
    # save a simple json version, capacity remove all estimated value, only keep certain value
    df_geo = df[['long', 'lat', 'capacity', 'name']].copy()
    df_geo['geometry'] = df_geo.apply(lambda x: LineString(zip(x['long'], x['lat'])), axis=1)
    gdf = gpd.GeoDataFrame(df_geo, crs='EPSG:4326')
    gdf[['capacity', 'name', 'geometry']].to_file(dst_name + 'new_dataset_pipe.geojson')
    print(df[['long', 'lat', 'capacity']].head())
    return df


def unpack(df):
    """Expand the stringified 'param'/'uncertainty' columns into flat columns.

    Capacity is converted from M m^3/day to GWh/day (* 35.8 / 3.6).
    """
    df['capacity'] = pd.DataFrame(df.param.apply(string2list).to_list())['max_cap_M_m3_per_d'] * 35.8 / 3.6  # gwh/day
    df['uncertain'] = pd.DataFrame(df.uncertainty.apply(string2list).to_list())['max_cap_M_m3_per_d']
    df['direction'] = pd.DataFrame(df.param.apply(string2list).to_list())['is_bothDirection']
    df.lat = df.lat.apply(string2list)
    df.long = df.long.apply(string2list)
    return df


def select(a, b, df=None):
    """Return segments connecting countries ``a`` and ``b`` (either direction).

    FIX: the original default ``df=IGGIELGN`` is evaluated at definition
    time, raising NameError because IGGIELGN is only assigned in a later
    cell (and it would freeze the default to that one object).  A None
    sentinel defers the lookup to call time with identical call-site
    behaviour.
    """
    if df is None:
        df = IGGIELGN
    return df[((df['from'] == a) & (df['to'] == b)) | ((df['from'] == b) & (df['to'] == a))][['from', 'to', 'capacity', 'uncertain', 'lat', 'long']]
# -

IGGIELGN = pd.read_csv('data_IGGIELGN/IGGIELGN_PipeSegments.csv', sep=';')
IGGIELGN = preprocessing(IGGIELGN, nameof(IGGIELGN))

INET = pd.read_csv('new data/Data//INET_PipeSegments.csv', sep=';')
INET = preprocessing(INET, nameof(INET))

INET[INET.name.isin(['MEGAL_Nord_13', 'MEGAL_Nord_Loop_13'])]

draw_several_dataset(IGGIELGN, INET, True)

part = select('DE', 'FR')
print(part)
draw_several_dataset(part, part)
#real tech 608 Gwh/d

# +
part = select('DE', 'NL')
print(part)
draw_several_dataset(part, part)
#43.4 - 298.1 gwh/d

# +
part = select('CH', 'DE')
print(part)
draw_several_dataset(part, part)
#317 172 8.8 (DE-CH)

# +
part = select('CH', 'FR')
print(part)
draw_several_dataset(part, part)
#37.4 223 100
# -

part = select('CH', 'IT')
print(part)
draw_several_dataset(part, part)
#635 428

entsog_dataset = pd.read_csv('data/entsog_2019_dataset.csv')
entsog_dataset
gas_network_exploring/Exploring New Data 2021.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .rs
//       format_name: light
//       format_version: '1.5'
//     jupytext_version: 1.14.4
//   kernelspec:
//     display_name: Java
//     language: java
//     name: java
// ---

// # G2Engine Reference

// ## Prepare Environment

import com.senzing.g2.engine.G2Engine;
import com.senzing.g2.engine.G2JNI;
import com.senzing.g2.engine.Result;

// ### Helper class for Json Rendering

// %%loadFromPOM
<dependency>
    <groupId>org.glassfish</groupId>
    <artifactId>javax.json</artifactId>
    <version>1.1.4</version>
</dependency>

import javax.json.*;
import static java.util.Collections.*;
import static javax.json.stream.JsonGenerator.PRETTY_PRINTING;

// +
// Small helper around javax.json for parsing JSON text and rendering
// JsonValue trees back to text, with or without pretty-printing.
public class JsonUtil {
    // Writer factories are stateless and thread-safe, so build them once.
    private static final JsonWriterFactory PRETTY_FACTORY
        = Json.createWriterFactory(singletonMap(PRETTY_PRINTING, true));

    private static final JsonWriterFactory UGLY_FACTORY
        = Json.createWriterFactory(emptyMap());

    // Convenience overload: pretty-print by default.
    public static String toJsonText(JsonValue val) {
        return toJsonText(val, true);
    }

    // Serialize a JsonValue to text, pretty-printed when requested.
    public static String toJsonText(JsonValue val, boolean prettyPrint) {
        JsonWriterFactory factory = (prettyPrint)
            ? PRETTY_FACTORY : UGLY_FACTORY;
        StringWriter sw = new StringWriter();
        JsonWriter writer = factory.createWriter(sw);
        writer.write(val);
        sw.flush();
        return sw.toString();
    }

    // Parse a JSON object from text; returns null for null input.
    public static JsonObject parseJsonObject(String jsonText) {
        if (jsonText == null) return null;
        StringReader sr = new StringReader(jsonText);
        JsonReader jsonReader = Json.createReader(sr);
        return jsonReader.readObject();
    }

    // Parse a JSON array from text; returns null for null input.
    public static JsonArray parseJsonArray(String jsonText) {
        if (jsonText == null) return null;
        StringReader sr = new StringReader(jsonText);
        JsonReader jsonReader = Json.createReader(sr);
        return jsonReader.readArray();
    }
}
// -

import java.util.UUID;

// Render any JSON-bearing object as a collapsible tree in the notebook:
// emits a uniquely-id'd <div> and a JS snippet that loads renderjson and
// fills the div.  The UUID keeps multiple renders in one notebook distinct.
public static void RenderJSON(Object obj){
    String str = obj.toString();
    JsonObject json = JsonUtil.parseJsonObject(str);
    // NOTE(review): Config is computed but never used — the JS below embeds
    // `json` directly; confirm whether this line can be dropped.
    String Config = JsonUtil.toJsonText(json, false);
    UUID id = UUID.randomUUID();
    String uuid = id.toString();
    String div = "<div id=\""+ uuid +"\" style=\"height:100%; width:100%; background-color: LightCyan\"></div>";
    display(div, "text/html");
    String jav = "require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {document.getElementById(\'"+ uuid +"\').appendChild(renderjson("+json+"))});";
    display(jav, "application/javascript");
}

// ### Initialize Senzing configuration
//
// Using environment variables and default values, create `senzingConfigJson`.
// This value is used when instantiating Senzing objects.

// +
// Get variables used in constructing Senzing Engine configuration.
// Each path falls back to the standard Senzing install location when the
// corresponding environment variable is unset.
String configPath = System.getenv("SENZING_ETC_DIR");
if (configPath == null) { configPath = "/etc/opt/senzing"; }

String supportPath = System.getenv("SENZING_DATA_VERSION_DIR");
if (supportPath == null) { supportPath = "/opt/senzing/data"; }

String g2Path = System.getenv("SENZING_G2_DIR");
if (g2Path == null) { g2Path = "/opt/senzing/g2"; }

String resourcePath = g2Path + "/resources";

String sqlConnection = System.getenv("SENZING_SQL_CONNECTION");
if (sqlConnection == null) { sqlConnection = "sqlite3://na:na@/var/opt/senzing/sqlite/G2C.db"; }

// Construct the JSON string used for Senzing Engine configuration.
String senzingConfigJson = "{"
   + "\"PIPELINE\": {"
   +   "\"CONFIGPATH\": \"" + configPath + "\","
   +   "\"SUPPORTPATH\": \"" + supportPath + "\","
   +   "\"RESOURCEPATH\": \"" + resourcePath + "\""
   + "},"
   + "\"SQL\": {"
   +   "\"CONNECTION\": \"" + sqlConnection + "\""
   + "}}";

RenderJSON(senzingConfigJson);
// -

// ## G2Engine
//
// To start using Senzing G2Engine, create and initialize an instance.
// This should be done once per process.
// The `initV2()` method accepts the following parameters:
//
// - **moduleName:** A short name given to this instance of the G2Engine
//   object.
// - **senzingConfigJson:** A JSON string containing configuration parameters.
// - **verboseLogging:** A boolean which enables diagnostic logging.
// - **configId:** (optional) The identifier value for the engine configuration
//   can be returned here.
//
// Calling this function will return "0" upon success.

// +
G2Engine g2engine = new G2JNI();

String moduleName = "ExampleG2Engine";
boolean verboseLogging = true;

int return_code = g2engine.initV2(moduleName, senzingConfigJson, verboseLogging);
System.out.print(return_code);
// -

// ### initWithConfigIDV2
//
// Alternatively `initWithConfigIDV2()` can be used to specify a configuration.

// ### reinitV2
//
// The `reinitV2()` function may be used to reinitialize the engine with a configuration

// ### primeEngine
//
// The `primeEngine()` method may optionally be called to pre-initialize some of the heavier weight internal resources of the G2 engine.

// Non-zero return codes signal failure; getLastException() holds the detail.
int return_code= g2engine.primeEngine();
if(return_code!=0)
    System.out.print(g2engine.getLastException());
System.out.print(return_code)

// ### getActiveConfigID
//
// Call `getActiveConfigID()` to return an identifier for the loaded
// Senzing engine configuration.
// The call will assign a long integer to a user-designated variable
// -- the function itself will return "0" upon success.
// The `getActiveConfigID()` method accepts one parameter as input:
//
// - **configID:** The identifier value for the engine configuration.
//   The result of function call is returned here

// +
Result<Long> configID = new Result<Long>();
g2engine.getActiveConfigID(configID);
System.out.print(configID.getValue());
// -

// ### exportConfig
//
// Call `exportConfig()` to retrieve your Senzing engine's configuration.
// The call will assign a JSON document to a user-designated buffer,
// containing all relevant configuration information
// -- the function itself will return "0" upon success.
// The exportConfig function accepts the following parameters as input:
//
// - **response:** The memory buffer to retrieve the JSON
//   configuration document
// - **configID:** The identifier value for the engine configuration
//   can be returned here.

// +
StringBuffer response = new StringBuffer();
g2engine.exportConfig(response, configID);
System.out.print("Config ID: "+configID.getValue());
// -

// ### stats
//
// Call `stats()` to retrieve workload statistics for the current process.
// These statistics will automatically reset after retrieval.
//
// - **response:** A memory buffer for returning the response
//   document. If an error occurred, an error response is stored here.
String response = g2engine.stats();
RenderJSON(response);

// ### getRepositoryLastModifiedTime
//
// Call `getRepositoryLastModifiedTime()` to obtain the last modified time of
// the Senzing repository, measured in the number of seconds between the last
// modified time and January 1, 1970 12:00am GMT (epoch time).
// The call will assign a long integer to a user-designated buffer
// -- the function itself will return "0" upon success.
// The getRepositoryLastModifiedTime() method accepts one parameter as input:
//
// - **lastModifiedTime:** The last modified time. The result of function
//   call is returned here

// +
Result<Long> lastModifiedTime = new Result<Long>();
g2engine.getRepositoryLastModifiedTime(lastModifiedTime);
System.out.print(lastModifiedTime.getValue());
// -

// ## Insert

// ### addRecord
//
// Once the Senzing engine is initialized, use addRecord() to load a record into the Senzing repository -- addRecord() can be called as many times as desired and from multiple threads at the same time. The addRecord() function returns "0" upon success, and accepts four parameters as input:
//
// - **dataSourceCode:** The name of the data source the record is associated with. This value is configurable to the system
// - **recordID:** The record ID, used to identify distinct records
// - **jsonData:** A JSON document with the attribute data for the record
// - **loadID:** The observation load ID for the record; value can be null and will default to data_source
// - **responseBuffer:** A memory buffer for returning the response
//   document. If an error occurred, an error response is stored here.

// +
String dataSourceCode = "TEST";
String recordID = "1";
String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"JANE\", \"NAME_LAST\": \"SMITH\", \"ADDR_TYPE\": \"HOME\", \"ADDR_LINE1\": \"653 STATE ROUTE 7\", \"ADDR_CITY\": \"FRESNO\", \"ADDR_STATE\": \"CA\", \"ADDR_POSTAL_CODE\": \"55073-1234\"}";
String loadID = null;

/** addRecord */
int return_code= g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID);

/** addRecordWithReturnedRecordID */
// Variant that lets the engine generate and return the record ID.
StringBuffer recordID2 = new StringBuffer();
int ret2 = g2engine.addRecordWithReturnedRecordID(dataSourceCode, recordID2, jsonData, loadID);
System.out.print("New Record ID: " + recordID2.toString());

/** addRecordWithInfo */
// Variant that also returns an "info" document describing affected entities.
StringBuffer responseBuffer = new StringBuffer();
int flags = 0;
int ret3 = g2engine.addRecordWithInfo(dataSourceCode, recordID, jsonData, loadID, flags, responseBuffer);
RenderJSON(responseBuffer.toString());
// -

// Look up the entity resolved from the record above and pull its numeric
// entity ID out of the JSON response for use in later cells.
String dataSourceCode = "TEST";
String recordID = "1";
StringBuffer response = new StringBuffer();
g2engine.getEntityByRecordIDV2(dataSourceCode, recordID, flags, response);
JsonObject jsonObject = JsonUtil.parseJsonObject(response.toString());
long entityID1 = jsonObject.getJsonObject("RESOLVED_ENTITY").getJsonNumber("ENTITY_ID").longValue();

// ## Search

// ### Record search

// #### getRecordV2
//
// Use `getRecord()` to retrieve a single record from the data repository; the record is assigned in JSON form to a user-designated buffer, and the function itself returns "0" upon success. Once the Senzing engine is initialized, `getRecord()` can be called as many times as desired and from multiple threads at the same time. The `getRecord()` function accepts the following parameters as input:
//
// - **dataSourceCode:** The name of the data source the record is associated with. This value is configurable to the system
// - **recordID:** The record ID for a particular data record
// - **flags:** Control flags for specifying what data about the
//   entity to retrieve.
// - **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here. // + int flags = 0; String dataSourceCode = "TEST"; String recordID = "1"; StringBuffer response = new StringBuffer(); int return_code= g2engine.getRecordV2(dataSourceCode, recordID, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response) // - // ### Entity search // #### getEntityByRecordIDV2 // // Use `getEntityByRecordIDV2()` to retrieve entity data based on the record ID of a particular data record. This function accepts the following parameters as input: // // - **dataSourceCode:** The name of the data source the record is associated with. This value is configurable to the system // - **recordID:** The record ID for a particular data record // - **flags:** Control flags for specifying what data about the // entity to retrieve. // - **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here. // + String dataSourceCode = "TEST"; String recordID = "1"; StringBuffer response = new StringBuffer(); int return_code = g2engine.getEntityByRecordIDV2(dataSourceCode, recordID, flags, response); int start = response.indexOf("\"ENTITY_ID\":")+12; int end = response.indexOf(","); long entityID=Integer.parseInt(response.substring(start, end)); if(return_code!=0) RenderJSON(g2engine.getLastException()); else RenderJSON(response); // - // #### getEntityByEntityIDV2 // // Entity searching is a key component for interactive use of Entity Resolution // intelligence. // The core Senzing engine provides real-time search capabilities that are // easily accessed via the Senzing API. // Senzing offers methods for entity searching, all of which can be called as // many times // as desired and from multiple threads at the same time (and all of which // return "0" upon success). 
// // Use `getEntityByEntityIDV2()` to retrieve entity data based on the ID of a // resolved identity. // This function accepts the following parameters as input: // // - **entityID:** The numeric ID of a resolved entity // - **flags:** Control flags for specifying what data about the // entity to retrieve. // - **response:** A memory buffer for returning the response // document; if an error occurred, an error response is stored here. // + response = new StringBuffer(); int return_code = g2engine.getEntityByEntityIDV2(entityID, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### searchByAttributes // // Use `searchByAttributesV2()` to retrieve entity data based on a user-specified set of entity attributes. This function accepts the following parameters as input: // // - **jsonData:** A JSON document with the attribute data to search for // - **flags:** Control flags for specifying what data about the // entity to retrieve. // - **response:** A memory buffer for returning the response // document; if an error occurred, an error response is stored here. // + String dataSourceCode = "TEST"; String recordID = "1"; int flags = 0; response = new StringBuffer(); int return_code = g2engine.searchByAttributesV2(jsonData, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### Finding Paths // The `FindPathByEntityID()` and `FindPathByRecordID()` functions can be used to find single relationship paths between two entities. Paths are found using known relationships with other entities. // // Entities can be searched for by either Entity ID or by Record ID, depending on which function is chosen. 
// // These functions have the following parameters: // // - **entityID1:** The entity ID for the starting entity of the search path // - **entityID2:** The entity ID for the ending entity of the search path // - **dataSourceCode1:** The data source for the starting entity of the search path // - **recordID1:** The record ID for the starting entity of the search path // - **dataSourceCode2:** The data source for the ending entity of the search path // - **recordID2:** The record ID for the ending entity of the search path // - **maxDegree:** The number of relationship degrees to search // First you will need to create some records so that you have some that you can compare. Can you see what is the same between this record and the previous one? // + String dataSourceCode = "TEST"; String recordID = "2"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"JOHN\", \"NAME_LAST\": \"SMITH\", \"ADDR_TYPE\": \"HOME\", \"ADDR_LINE1\": \"753 STATE ROUTE 8\", \"ADDR_CITY\": \"FRESNO\", \"ADDR_STATE\": \"CA\", \"ADDR_POSTAL_CODE\": \"55073-1234\"}"; String loadID = null; int return_code = g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); StringBuffer recordID2 = new StringBuffer(); int return_code = g2engine.addRecordWithReturnedRecordID(dataSourceCode, recordID2, jsonData, loadID); System.out.print("New Record ID: " + recordID2.toString()); if(return_code!=0) System.out.print(g2engine.getLastException()); // - String dataSourceCode = "TEST"; String recordID = "2"; StringBuffer response = new StringBuffer(); g2engine.getEntityByRecordIDV2(dataSourceCode, recordID, flags, response); JsonObject jsonObject = JsonUtil.parseJsonObject(response.toString()); long entityID2 = jsonObject.getJsonObject("RESOLVED_ENTITY").getJsonNumber("ENTITY_ID").longValue(); // #### FindPathByEntityIDV2 // + long entityID1 = entityID; int maxDegree = 3; StringBuffer response = new StringBuffer(); int 
ret_code = g2engine.findPathByEntityID(entityID2,maxDegree,flags,response); if(ret_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // #### FindPathByRecordID() // + String dataSourceCode1 = new String("TEST"); String recordID1 = new String("1"); String dataSourceCode2 = new String("TEST"); String recordID2 = new String("2"); ret_code = g2engine.findPathByRecordID(dataSourceCode1,recordID1,dataSourceCode2,recordID2,maxDegree,response); if(ret_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### Finding Paths with Exclusions // The `FindPathExcludingByEntityID()` and `FindPathExcludingByRecordID()` functions can be used to find single relationship paths between two entities. Paths are found using known relationships with other entities. In addition, it will find paths that exclude certain entities from being on the path. // // Entities can be searched for by either Entity ID or by Record ID, depending on which function is chosen. Additionally, entities to be excluded can also be specified by either Entity ID or by Record ID. // // When excluding entities, the user may choose to either (a) strictly exclude the entities, or (b) prefer to exclude the entities, but still include them if no other path is found. By default, entities will be strictly excluded. A "preferred exclude" may be done by specifying the G2_FIND_PATH_PREFER_EXCLUDE control flag. 
// // These functions have the following parameters: // // - **entityID1:** The entity ID for the starting entity of the search path // - **entityID2:** The entity ID for the ending entity of the search path // - **dataSourceCode1:** The data source for the starting entity of the search path // - **recordID1:** The record ID for the starting entity of the search path // - **dataSourceCode2:** The data source for the ending entity of the search path // - **recordID2:** The record ID for the ending entity of the search path // - **maxDegree:** The number of relationship degrees to search // - **excludedEntities:** Entities that should be avoided on the path (JSON document) // - **flags:** Operational flags // + String dataSourceCode = "TEST"; String recordID = "3"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"SAM\", \"NAME_LAST\": \"MILLER\", \"ADDR_TYPE\": \"HOME\", \"ADDR_LINE1\": \"753 STATE ROUTE 8\", \"ADDR_CITY\": \"FRESNO\", \"ADDR_STATE\": \"CA\", \"ADDR_POSTAL_CODE\": \"55073-1234\", \"SSN_NUMBER\": \"111-11-1111\"}"; String loadID = null; int return_code= g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); StringBuffer recordID2 = new StringBuffer(); int return_code2 = g2engine.addRecordWithReturnedRecordID(dataSourceCode, recordID2, jsonData, loadID); System.out.print("New Record ID: " + recordID2.toString()); if(return_code2!=0) System.out.print(g2engine.getLastException()); // - // #### FindPathExcludingByEntityID() // + int maxDegree = 4; String excludedEntities = new String("{\"ENTITIES\":[{\"ENTITY_ID\":\"1\"}]}"); int flags = G2Engine.G2_EXPORT_DEFAULT_FLAGS; StringBuffer response = new StringBuffer(); int ret_code = g2engine.findPathExcludingByEntityID(entityID1,entityID2,maxDegree,excludedEntities,flags,response); if(ret_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // #### FindPathExcludingByRecordID() // + 
String dataSourceCode1 = new String("TEST"); String recordID1 = new String("2"); String dataSourceCode2 = new String("TEST"); String recordID2 = new String("3"); String excludedRecords = new String("{\"RECORDS\":[{\"RECORD_ID\":\"1\",\"DATA_SOURCE\":\"TEST\"}]}"); return_code = g2engine.findPathExcludingByRecordID(dataSourceCode1,recordID1,dataSourceCode2,recordID2,maxDegree,excludedRecords,flags,response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### Finding Paths with Required Sources // The `FindPathIncludingSourceByEntityID()` and `FindPathIncludingSourceByRecordID()` functions can be used to find single relationship paths between two entities. In addition, one of the entities along the path must include a specified data source. // // Entities can be searched for by either Entity ID or by Record ID, depending on which function is chosen. The required data source or sources are specified by a json document list. // // Specific entities may also be excluded, using the same methodology as the `FindPathExcludingByEntityID()` and `FindPathExcludingByRecordID()` functions use. 
// // These functions have the following parameters: // // - **entityID1:** The entity ID for the starting entity of the search path // - **entityID2:** The entity ID for the ending entity of the search path // - **dataSourceCode1:** The data source for the starting entity of the search path // - **recordID1:** The record ID for the starting entity of the search path // - **dataSourceCode2:** The data source for the ending entity of the search path // - **recordID2:** The record ID for the ending entity of the search path // - **maxDegree:** The number of relationship degrees to search // - **excludedEntities:** Entities that should be avoided on the path (JSON document) // - **requiredDsrcs:** Data sources that must be included on the path (JSON document) // - **flags:** Operational flags // + int maxDegree = 4; String excludedEntities = new String("{\"ENTITIES\":[{\"ENTITY_ID\":\"1\"}]}"); String requiredDsrcs = new String("{\"DATA_SOURCES\":[\"TEST\"]}"); int flags = 0; StringBuffer response = new StringBuffer(); int ret_code = g2engine.findPathIncludingSourceByEntityID(entityID1,entityID2,maxDegree,excludedEntities,requiredDsrcs,flags,response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); String dataSourceCode1 = new String("TEST"); String recordID1 = new String("2"); String dataSourceCode2 = new String("TEST"); String recordID2 = new String("3"); String excludedRecords = new String("{\"RECORDS\":[{\"RECORD_ID\":\"1\",\"DATA_SOURCE\":\"TEST\"}]}"); return_code = g2engine.findPathIncludingSourceByRecordID(dataSourceCode1,recordID1,dataSourceCode2,recordID2,maxDegree,excludedRecords,requiredDsrcs,flags,response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### Finding Networks // // The `findNetworkByEntityID()` and `findNetworkByRecordID()` functions // can be used to find all entities surrounding a requested set of entities. 
// This includes the requested entities, paths between them, and relations to // other nearby entities. // // Entities can be searched for by either Entity ID or by Record ID, // depending on which function is chosen. // // These functions have the following parameters: // // - **entityList:** A list of entities, specified by Entity ID // (JSON document) // - **recordList:** A list of entities, specified by Record ID // (JSON document) // - **maxDegree:** The maximum number of degrees in paths between search // entities // - **buildoutDegree:** The number of degrees of relationships to show around // each search entity // - **maxEntities:** The maximum number of entities to return in the // discovered network // - **flags:** Control flags for specifying what data about the // entity to retrieve. // - **response:** A memory buffer for returning the response // document; if an error occurred, an error response is stored here. // // They also have various arguments used to return response documents. // // The functions return a JSON document that identifies the path between // each set of search entities (if the path exists), and the information on the // entities in question (search entities, path entities, and build-out entities). 
// #### findNetworkByEntityID // + StringBuffer response = new StringBuffer(); int maxDegree = 2; int buildoutDegree = 1; int maxEntities = 12; String entityList = "{\"ENTITIES\": [{\"ENTITY_ID\":"+entityID+"}, {\"ENTITY_ID\":"+entityID2+"}]}"; int return_code = g2engine.findNetworkByEntityIDV2(entityList, maxDegree, buildoutDegree, maxEntities, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // #### findNetworkByRecordIDV2 // + StringBuffer response = new StringBuffer(); int maxDegree = 2; int buildoutDegree = 1; int maxEntities = 12; String recordList = "{\"RECORDS\": [{\"RECORD_ID\": \""+recordID1+"\", \"DATA_SOURCE\": \""+dataSourceCode1+"\" }, {\"RECORD_ID\": \""+recordID2+"\", \"DATA_SOURCE\": \""+dataSourceCode2+"\" }]}"; int return_code = g2engine.findNetworkByRecordIDV2(recordList, maxDegree, buildoutDegree, maxEntities, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ## Connection details // The `whyEntities()`, `whyRecords()`, `whyEntityByRecordID()`, and `whyEntityByEntityID()` functions can be used // to determine why records belong to their resolved entities. // These functions will compare the record data within an entity against the // rest of the entity data, and show why they are connected. // This is calculated based on the features that record data represents. // // Records can be chosen by either Record ID or by Entity ID, // depending on which function is chosen. // If a single record ID is used, // then comparison results for that single record will be generated, as part of // its entity. // If an Entity ID is used, // then comparison results will be generated for every record within that // entity. 
// // These functions have the following parameters: // // - **entityID:** The entity ID for the entity to be analyzed // - **datasourceCode:** The data source for the record to be analyzed // - **recordID:** The record ID for the record to be analyzed // - **flags:** Control flags for outputting entities // - **response:** A memory buffer for returning the response // document; if an error occurred, an error response is stored here. // // // They also have various arguments used to return response documents. // // The functions return a JSON document that gives the results of the record // analysis. // The document contains a section called "WHY_RESULTS", // which shows how specific records relate to the rest of the entity. // It has a "WHY_KEY", which is similar to a match key, in defining the relevant // connected data. // It shows candidate keys for features that initially cause the records // to be analyzed for a relationship, // plus a series of feature scores that show how similar the feature data was. // // The response document also contains a separate ENTITIES section, // with the full information about the resolved entity. // (Note: When working with this entity data, // Senzing recommends using the flags `G2_ENTITY_OPTION_INCLUDE_INTERNAL_FEATURES` // and `G2_ENTITY_OPTION_INCLUDE_FEATURE_STATS`. // This will provide detailed feature data that is not included by default, // but is useful for understanding the WHY_RESULTS data.) // // The functions `whyEntitiesV2()`, `whyRecordsV2()`, `whyEntityByRecordIDV2()`, and `whyEntityByEntityIDV2()` are // enhanced versions of `whyEntities()`, `whyRecords()`, `whyEntityByRecordID()`, and `whyEntityByEntityID()` // that also allow you to use control flags. // The `whyEntities()`, `whyRecords()`, `whyEntityByRecordID()`, and `whyEntityByEntityID()` functions work in the // same way, but use the default flag value `G2_WHY_ENTITY_DEFAULT_FLAGS`. 
// ### whyEntityByEntityID // + /** define input variables */ long entityID = 1; /** buffer response variables */ StringBuffer response = new StringBuffer(); /** find the why-information */ int return_code = g2engine.whyEntityByEntityID(entityID1,response); /** print the results */ if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### whyEntityByEntityIDV2 // + /** define input variables */ long entityID = 1; /** buffer response variables */ StringBuffer response = new StringBuffer(); /** find the why-information */ int return_code = g2engine.whyEntityByEntityIDV2(entityID1,flags,response); /** print the results */ if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### whyEntityByRecordID // + /** find the why-information */ int return_code = g2engine.whyEntityByRecordID(dataSourceCode1,recordID1,response); /** print the results */ if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### whyEntityByRecordIDV2 // + /** find the why-information */ int return_code = g2engine.whyEntityByRecordIDV2(dataSourceCode,recordID,flags,response); /** print the results */ if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ## Replace // ### Replace the record // Use the `replaceRecord()` function to update or replace a record in the data repository (if record doesn't exist, a new record is added to the data repository. Like the above functions, `replaceRecord()` returns "0" upon success, and it can be called as many times as desired and from multiple threads at the same time. The `replaceRecord()` function accepts four parameters as input: // // - **dataSourceCode:** The name of the data source the record is associated with. 
This value is configurable to the system // - **recordID:** The record ID, used to identify distinct records // - **jsonData:** A JSON document with the attribute data for the record // - **loadID:** The observation load ID for the record; value can be null and will default to dataSourceCode // + String dataSourceCode = "TEST"; String recordID = "1"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"JANE\", \"NAME_LAST\": \"ADAMS\", \"ADDR_TYPE\": \"HOME\", \"ADDR_LINE1\": \"653 STATE ROUTE 7\", \"ADDR_CITY\": \"FRESNO\", \"ADDR_STATE\": \"CA\", \"ADDR_POSTAL_CODE\": \"55073-1234\"}"; String loadID = null; int return_code= g2engine.replaceRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); else System.out.print(return_code) // - // Do ```getRecord()``` again to see the changes // + String dataSourceCode = "TEST"; String recordID = "1"; StringBuffer response = new StringBuffer(); g2engine.getRecord(dataSourceCode, recordID, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### replaceRecordWithInfo // + String dataSourceCode = "TEST"; String recordID = "1"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"JANE\", \"NAME_LAST\": \"ADAMS\", \"ADDR_TYPE\": \"HOME\", \"ADDR_LINE1\": \"653 STATE ROUTE 7\", \"ADDR_CITY\": \"FRESNO\", \"ADDR_STATE\": \"CA\", \"ADDR_POSTAL_CODE\": \"55073-1234\"}"; String loadID = null; StringBuffer response = new StringBuffer(); int return_code= g2engine.replaceRecordWithInfo(dataSourceCode, recordID, jsonData, loadID, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response) // - // ## Re-evaluate // ### reevaluateRecord // + int return_code = g2engine.reevaluateRecord(dataSourceCode1, recordID1, flags); if(return_code!=0) System.out.print(g2engine.getLastException()); else System.out.print(return_code); // - // ### 
ReevaluateRecordWithInfo // + StringBuffer response = new StringBuffer(); int return_code = g2engine.reevaluateRecordWithInfo(dataSourceCode1, recordID1, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // ### ReevaluateEntity // Find an entity String dataSourceCode = "TEST"; String recordID = "2"; StringBuffer response = new StringBuffer(); g2engine.getEntityByRecordIDV2(dataSourceCode, recordID, flags, response); JsonObject jsonObject = JsonUtil.parseJsonObject(response.toString()); long entityID = jsonObject.getJsonObject("RESOLVED_ENTITY").getJsonNumber("ENTITY_ID").longValue(); // Re-evaluate the entity. // + int return_code = g2engine.reevaluateEntity(entityID, flags); if(return_code!=0) System.out.print(g2engine.getLastException()); else System.out.print(return_code); // - // ### reevaluateEntityWithInfo // + StringBuffer response = new StringBuffer(); return_code = g2engine.reevaluateEntityWithInfo(entityID, flags, response); if(return_code!=0) System.out.print(return_code); else RenderJSON(response); // - // ## Reporting // ### Export JSON Entity Report // // There are three steps to exporting resolved entity data from the G2Engine object in JSON format. First, use the `exportJSONEntityReport()` method to generate a long integer, referred to here as an 'exportHandle'. The `exportJSONEntityReport()` method accepts one parameter as input: // // - **flags**: An integer specifying which entity details should be included in the export. See the "Entity Export Flags" section for further details. // // Second, use the fetchNext() method to read the exportHandle and export a row of JSON output containing the entity data for a single entity. Note that successive calls of fetchNext() will export successive rows of entity data. 
The fetchNext() method accepts the following parameters as input: // // - **exportHandle:** A long integer from which resolved entity data may be read and exported // - **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here. // + int flags = g2engine.G2_EXPORT_INCLUDE_ALL_ENTITIES; long exportHandle = g2engine.exportJSONEntityReport(flags); String response = g2engine.fetchNext(exportHandle); RenderJSON(response); g2engine.closeExport(exportHandle); // - // ### Export CSV Entity Report // // There are three steps to exporting resolved entity data from the G2Engine object in CSV format. First, use the `exportCSVEntityReportV2()` method to generate a long integer, referred to here as an 'exportHandle'. The `exportCSVEntityReportV2()` method accepts one parameter as input: // // - **csvColumnList:** A String containing a comma-separated list of column names for the CSV // export. // - **flags:** An integer specifying which entity details should be included in the export. See the "Entity Export Flags" section in the link below for further details. // // Second, use the `fetchNext()` method to read the exportHandle and export a row of CSV output containing the entity data for a single entity. Note that the first call of `fetchNext()` may yield a header row, and that successive calls of `fetchNext()` will export successive rows of entity data. 
The `fetchNext()` method accepts the following parameters as input: // // - **exportHandle:** A long integer from which resolved entity data may be read and exported // - **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here // // + int flags = g2engine.G2_EXPORT_INCLUDE_ALL_ENTITIES; String headers = "RESOLVED_ENTITY_ID,RESOLVED_ENTITY_NAME,RELATED_ENTITY_ID,MATCH_LEVEL,MATCH_KEY,IS_DISCLOSED,IS_AMBIGUOUS,DATA_SOURCE,RECORD_ID,JSON_DATA,LAST_SEEN_DT,NAME_DATA,ATTRIBUTE_DATA,IDENTIFIER_DATA,ADDRESS_DATA,PHONE_DATA,RELATIONSHIP_DATA,ENTITY_DATA,OTHER_DATA"; long exportHandle = g2engine.exportCSVEntityReportV2(headers, flags); String response = g2engine.fetchNext(exportHandle); while (response != null) { System.out.print(response); response = g2engine.fetchNext(exportHandle); } g2engine.closeExport(exportHandle); // - // ## Redo Processing // Redo records are automatically created by Senzing when certain conditions occur where it believes more processing may be needed. 
Some examples: // * A value becomes generic and previous decisions may need to be revisited // * Clean up after some record deletes // * Detected related entities were being changed at the same time // * A table inconsistency exists, potentially after a non-graceful shutdown // First we will need to have a total of 6 data sources so let's add 4 more // + String dataSourceCode = "TEST"; String recordID = "4"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"JANE\", \"NAME_LAST\": \"ADAMS\", \"SSN_NUMBER\": \"111-11-1111\"}"; String loadID = null; int return_code= g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); System.out.print(return_code); String dataSourceCode = "TEST"; String recordID = "5"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"LILY\", \"NAME_LAST\": \"OWENS\", \"SSN_NUMBER\": \"111-11-1111\"}"; String loadID = null; int return_code= g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); System.out.print(return_code); String dataSourceCode = "TEST"; String recordID = "6"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"AUGUST\", \"NAME_LAST\": \"Bauler\", \"SSN_NUMBER\": \"111-11-1111\"}"; String loadID = null; int return_code= g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); System.out.print(return_code); String dataSourceCode = "TEST"; String recordID = "7"; String jsonData = "{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"JACK\", \"NAME_LAST\": \"MILLER\", \"SSN_NUMBER\": \"111-11-1111\"}"; String loadID = null; int return_code= g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); System.out.print(return_code); String dataSourceCode = "TEST"; String recordID = "8"; String jsonData = 
"{\"NAME_TYPE\": \"PRIMARY\", \"NAME_FIRST\": \"LOGAN\", \"NAME_LAST\": \"WILLIAMS\", \"SSN_NUMBER\": \"111-11-1111\"}"; String loadID = null; int return_code= g2engine.addRecord(dataSourceCode, recordID, jsonData, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); System.out.print(return_code); // - // ### Counting the number of redos // This returns the number of redos within the processed records that are awaiting processing. long response = g2engine.countRedoRecords(); if(response<0) System.out.print(g2engine.getLastException()); // ### Getting a redo record // Gets a redo record so that it can be processed StringBuffer response = new StringBuffer(); g2engine.getRedoRecord(response); System.out.print(response); StringBuffer response_string = new StringBuffer(); int response = g2engine.getRedoRecord(response_string); if(response==0 && response_string.length()>0) response = g2engine.process(response_string.toString()); if(response!=0) System.out.print(g2engine.getLastException()); System.out.print(response); // ## Delete // ### Deleting Records // use `deleteRecord()` to remove a record from the data repository (returns "0" upon success); `deleteRecord()` can be called as many times as desired and from multiple threads at the same time. The `deleteRecord()` function accepts three parameters as input: // // - **dataSourceCode:** The name of the data source the record is associated with. 
This value is configurable to the system // - **recordID:** The record ID, used to identify distinct records // - **loadID:** The observation load ID for the record; value can be null and will default to dataSourceCode // + String dataSourceCode = "TEST"; String recordID = "1"; String loadID = null; /** deleteRecord */ int return_code= g2engine.deleteRecord(dataSourceCode, recordID, loadID); if(return_code!=0) System.out.print(g2engine.getLastException()); else System.out.print(return_code); // - // ### deleteRecordWithInfo // + String recordID = "2"; String dataSourceCode = "TEST"; /** deleteRecordWithInfo */ StringBuffer response = new StringBuffer(); int flags = 0; int return_code = g2engine.deleteRecordWithInfo(dataSourceCode, recordID, loadID, flags, response); if(return_code!=0) System.out.print(g2engine.getLastException()); else RenderJSON(response); // - // Attempt to get the record again. It should error and give an output similar to "Unknown record" // + StringBuffer response = new StringBuffer(); int return_code = g2engine.getRecord(dataSourceCode, recordID, response); if(return_code!=0) System.out.print(g2engine.getLastException()); // - // ## Cleanup // ### Purge Repository // To purge the G2 repository, use the aptly named `purgeRepository()` method. This will remove every record in your current repository. int return_code= g2engine.purgeRepository(); if(return_code!=0) System.out.print(g2engine.getLastException()); else System.out.print(return_code) // ### destroy // Once all searching is done in a process call `destroy()` to uninitialize Senzing and clean up resources. You should always do this once at the end of each process. // + int return_code = g2engine.destroy(); if(return_code!=0) System.out.print(g2engine.getLastException()); else System.out.print(return_code);
notebooks/java/senzing-G2Engine-sdk-api-specification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Python Basic Programming Assignment_11

# #### 1. Write a Python program to find words which are greater than given length k?

# +
# Answer:
given_length = 8


def words_grtr_than_given_length(l, k=given_length):
    """Return the lower-cased words from *l* that are longer than *k*.

    Arguments:
        l: iterable of strings to filter
        k: length threshold; defaults to 8 (the assignment's given length)
           instead of silently reading a module-level global
    """
    return [word.lower() for word in l if len(word) > k]


l = ["car", "dog", "upendra", "kumar", "icecream", "football",
     "corona-virus", "hospital", "bilateraltalk", "sunriseapartment",
     "youarewelcome"]
words_grtr_than_given_length(l)
# -

# #### 2. Write a Python program for removing i-th character from a string?

# +
# Answer:
def remove_ith_char(word, index):
    """Return *word* with only the character at position *index* removed.

    Arguments
        word: string to remove i-th character from
        index: index number in int

    Bug fix: the original removed EVERY occurrence of word[index]
    (e.g. "banana", 1 -> "bnn"); slicing removes exactly one character.
    """
    return word[:index] + word[index + 1:]


remove_ith_char("upendra", 3)


# +
# Another approach: loop over positions and skip only the i-th one.
def remove_ith_char2(word, index):
    """Return *word* without its *index*-th character (loop-based variant)."""
    new_word = ""
    for pos, char in enumerate(word):
        if pos == index:
            # skip only the i-th position, not every equal character
            continue
        new_word = new_word + char
    return new_word


remove_ith_char2("upendra", 4)
# -

# #### 3. Write a Python program to split and join a string?

# +
# Answer:
# python program to split and join a string
string = "Data science is a most demanding technology now a days."


def split_join(string):
    """Split *string* on spaces and join it back; return (word_list, joined)."""
    word_list = string.split(" ")
    new_string = " ".join(word_list)
    return word_list, new_string


split_join(string)
# -

# #### 4. Write a Python to check if a given string is binary string or not?

# +
# Answer:
# Python program to check if a string is binary or not
def check_binary(string):
    """Print whether *string* consists solely of the characters '0' and '1'."""
    chars = set(string)
    # non-empty and containing nothing besides '0'/'1' counts as binary
    # (an empty string is reported as not binary, as before)
    if chars and chars <= {'0', '1'}:
        print("Yes, Given String is Binary String")
    else:
        print("No, Given String is Not Binary String")


string = "101010000111"
# function calling
check_binary(string)
# -

# #### 5. Write a Python program to find uncommon words from two Strings?

# +
# Answer:
# Python program to find a list of uncommon words
def UncommonWords(A, B):
    """Return the words that appear exactly once across sentences *A* and *B*."""
    # count will contain all the word counts
    count = {}
    for word in A.split():
        count[word] = count.get(word, 0) + 1
    for word in B.split():
        count[word] = count.get(word, 0) + 1
    # return required list of words
    return [word for word in count if count[word] == 1]


A = "My name is <NAME>"
B = "My name is <NAME>"
print(UncommonWords(A, B))
# -

# #### 6. Write a Python to find all duplicate characters in string?

# +
# Answer:
string = "UuPeNnDdra"


def duplicate_char(string):
    """Report the set of characters occurring more than once (case-insensitive)."""
    string = string.lower()
    count = {}
    for ch in string:
        count[ch] = count.get(ch, 0) + 1
    duplicate_characters = {ch for ch in string if count[ch] > 1}
    if not duplicate_characters:
        return "No duplicate characters present"
    return f"Duplicate characters present in this string are: {duplicate_characters}"


# calling function
duplicate_char(string)
# -

# #### 7. Write a Python Program to check if a string contains any special character?

# +
# Answer:
# program to check if string contains any special character
def string_special_char_check(string):
    """Return a message saying whether *string* holds any special character."""
    special_charecter = "!@#$%^&*()<>/|[]{}~?\\|"
    if any(ch in special_charecter for ch in string):
        return "String contains special character"
    return "String does not contains any special character"


# +
string = "iNe#uron!"
# calling function
string_special_char_check(string)

# +
string = "iNeuron"
# calling function
string_special_char_check(string)
# -
Python Basic Programming/Python Basic Programming Assignment_11-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="hJyXk97ZnTSF" # # Week 1 Assignment: Neural Style Transfer # # Welcome to the first programming assignment of this course! Here, you will be implementing neural style transfer using the [Inception](https://arxiv.org/abs/1512.00567v3) model as your feature extractor. This is very similar to the Neural Style Transfer ungraded lab so if you get stuck, remember to review the said notebook for tips. # + [markdown] id="QKa5uifDKII3" # ***Important:*** *This colab notebook has read-only access so you won't be able to save your changes. If you want to save your work periodically, please click `File -> Save a Copy in Drive` to create a copy in your account, then work from there.* # + [markdown] id="eqxUicSPUOP6" # ## Imports # + id="NyftRTSMuwue" try: # # %tensorflow_version only exists in Colab. # %tensorflow_version 2.x except Exception: pass import tensorflow as tf import matplotlib.pyplot as plt import numpy as np from keras import backend as K from imageio import mimsave from IPython.display import display as display_fn from IPython.display import Image, clear_output # + [markdown] id="5Rd1FKJ9KOr5" # ## Utilities # # As before, we've provided some utility functions below to help in loading, visualizing, and preprocessing the images. 
# + id="qCMK4u6poA1k" def tensor_to_image(tensor): '''converts a tensor to an image''' tensor_shape = tf.shape(tensor) number_elem_shape = tf.shape(tensor_shape) if number_elem_shape > 3: assert tensor_shape[0] == 1 tensor = tensor[0] return tf.keras.preprocessing.image.array_to_img(tensor) def load_img(path_to_img): '''loads an image as a tensor and scales it to 512 pixels''' max_dim = 512 image = tf.io.read_file(path_to_img) image = tf.image.decode_jpeg(image) image = tf.image.convert_image_dtype(image, tf.float32) shape = tf.shape(image)[:-1] shape = tf.cast(tf.shape(image)[:-1], tf.float32) long_dim = max(shape) scale = max_dim / long_dim new_shape = tf.cast(shape * scale, tf.int32) image = tf.image.resize(image, new_shape) image = image[tf.newaxis, :] image = tf.image.convert_image_dtype(image, tf.uint8) return image def load_images(content_path, style_path): '''loads the content and path images as tensors''' content_image = load_img("{}".format(content_path)) style_image = load_img("{}".format(style_path)) return content_image, style_image def imshow(image, title=None): '''displays an image with a corresponding title''' if len(image.shape) > 3: image = tf.squeeze(image, axis=0) plt.imshow(image) if title: plt.title(title) def show_images_with_objects(images, titles=[]): '''displays a row of images with corresponding titles''' if len(images) != len(titles): return plt.figure(figsize=(20, 12)) for idx, (image, title) in enumerate(zip(images, titles)): plt.subplot(1, len(images), idx + 1) plt.xticks([]) plt.yticks([]) imshow(image, title) def clip_image_values(image, min_value=0.0, max_value=255.0): '''clips the image pixel values by the given min and max''' return tf.clip_by_value(image, clip_value_min=min_value, clip_value_max=max_value) def preprocess_image(image): '''preprocesses a given image to use with Inception model''' image = tf.cast(image, dtype=tf.float32) image = (image / 127.5) - 1.0 return image # + [markdown] id="0U9It5Ii2Oof" # ## Download 
Images # + [markdown] id="oeXebYusyHwC" # You will fetch the two images you will use for the content and style image. # + id="wqc0OJHwyFAk" content_path = tf.keras.utils.get_file('content_image.jpg','https://storage.googleapis.com/laurencemoroney-blog.appspot.com/MLColabImages/dog1.jpg') style_path = tf.keras.utils.get_file('style_image.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg') # + id="vE2TBEBntSjm" # display the content and style image content_image, style_image = load_images(content_path, style_path) show_images_with_objects([content_image, style_image], titles=[f'content image: {content_path}', f'style image: {style_path}']) # + [markdown] id="Jt3i3RRrJiOX" # ## Build the feature extractor # + [markdown] id="vwvREWQ1nTSV" # Next, you will inspect the layers of the Inception model. # + id="psmTncz8nTSV" # clear session to make layer naming consistent when re-running this cell K.clear_session() # download the inception model and inspect the layers tmp_inception = tf.keras.applications.InceptionV3() tmp_inception.summary() # delete temporary model del tmp_inception # + [markdown] id="Yk6qYGEynTSW" # As you can see, it's a very deep network and compared to VGG-19, it's harder to choose which layers to choose to extract features from. # # - Notice that the Conv2D layers are named from `conv2d`, `conv2d_1` ... `conv2d_93`, for a total of 94 conv2d layers. # - So the second conv2D layer is named `conv2d_1`. # - For the purpose of grading, please choose the following # - For the content layer: choose the Conv2D layer indexed at `88`. # - For the style layers, please choose the first `five` conv2D layers near the input end of the model. # - Note the numbering as mentioned in these instructions. 
# + [markdown] id="Wt-tASys0eJv"
# Choose intermediate layers from the network to represent the style and content of the image:
#

# + id="ArfX_6iA0WAX"
### START CODE HERE ###
# choose the content layer and put in a list
content_layers = ['conv2d_88']

# choose the five style layers of interest
style_layers = ['conv2d', 'conv2d_1', 'conv2d_2', 'conv2d_3', 'conv2d_4']

# combine the content and style layers into one list
# (style layers first — later code relies on this ordering when slicing
# the model outputs into style vs. content features)
content_and_style_layers = style_layers + content_layers
### END CODE HERE ###

# count the number of content layers and style layers.
# you will use these counts later in the assignment
NUM_CONTENT_LAYERS = len(content_layers)
NUM_STYLE_LAYERS = len(style_layers)

# + [markdown] id="MGo9tQtlTtfQ"
# You can now setup your model to output the selected layers.
#

# + id="nfec6MuMAbPx"
def inception_model(layer_names):
    """ Creates a inception model that returns a list of intermediate output values.

    args:
      layer_names: a list of strings, representing the names of the desired content and style layers

    returns:
      A model that takes the regular inception v3 input and outputs just the content and style layers.
    """
    ### START CODE HERE ###
    # Load InceptionV3 with the imagenet weights and **without** the fully-connected layer at the top of the network
    inception = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet')

    # Freeze the weights of the model's layers (make them not trainable)
    inception.trainable = False

    # Create a list of layer objects that are specified by layer_names
    output_layers = [inception.get_layer(name).output for name in layer_names]

    # Create the model that outputs the content and style layers
    model = tf.keras.Model(inputs=inception.input, outputs=output_layers)

    # return the model
    return model
    ### END CODE HERE ###

# + [markdown] id="IwJJkA0enTSX"
# Create an instance of the content and style model using the function that you just defined

# + id="6AYqTPpOnTSX"
K.clear_session()

### START CODE HERE ###
inception = inception_model(content_and_style_layers)
### END CODE HERE ###

# + [markdown] id="jbaIvZf5wWn_"
# ## Calculate style loss
#
# The style loss is the average of the squared differences between the features and targets.

# + id="nv8hZU0oKIm_"
def get_style_loss(features, targets):
    """Expects two images of dimension h, w, c

    Args:
      features: tensor with shape: (height, width, channels)
      targets: tensor with shape: (height, width, channels)

    Returns:
      style loss (scalar)
    """
    ### START CODE HERE ###
    # Calculate the style loss: mean squared difference over all elements
    style_loss = tf.reduce_mean(tf.square(features - targets))
    ### END CODE HERE ###
    return style_loss

# + [markdown] id="QDDiPF6YnTSY"
# ## Calculate content loss
#
# Calculate the sum of the squared error between the features and targets, then multiply by a scaling factor (0.5).
# + id="et8M1lOgKL8o"
def get_content_loss(features, targets):
    """Expects two images of dimension h, w, c

    Args:
      features: tensor with shape: (height, width, channels)
      targets: tensor with shape: (height, width, channels)

    Returns:
      content loss (scalar)
    """
    # half of the summed squared error between the two feature maps
    squared_error = tf.square(features - targets)
    content_loss = 0.5 * tf.reduce_sum(squared_error)

    return content_loss

# + [markdown] id="2ygKYFw1nTSY"
# ## Calculate the gram matrix
#
# Use `tf.linalg.einsum` to calculate the gram matrix for an input tensor.
# - In addition, calculate the scaling factor `num_locations` and divide the gram matrix calculation by `num_locations`.
#
# $$ \text{num locations} = height \times width $$

# + id="HAy1iGPdoEpZ"
def gram_matrix(input_tensor):
    """ Calculates the gram matrix and divides by the number of locations

    Args:
      input_tensor: tensor of shape (batch, height, width, channels)

    Returns:
      scaled_gram: gram matrix divided by the number of locations
    """
    # channel-by-channel correlations, summed over every spatial location
    gram = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)

    # normalize by the spatial size (height * width) of the feature map
    tensor_dims = tf.shape(input_tensor)
    num_locations = tf.cast(tensor_dims[1] * tensor_dims[2], tf.float32)

    return gram / num_locations

# + [markdown] id="3TYRWE0JnTSZ"
# ## Get the style image features
#
# Given the style image as input, you'll get the style features of the inception model that you just created using `inception_model()`.
# - You'll first preprocess the image using the given `preprocess_image` function.
# - You'll then get the outputs of the model.
# - From the outputs, just get the style feature layers and not the content feature layer.
# # You can run the following code to check the order of the layers in your inception model: # + id="YkVlPUMWnTSZ" tmp_layer_list = [layer.output for layer in inception.layers] tmp_layer_list # + [markdown] id="D3IATpyxnTSZ" # - For each style layer, calculate the gram matrix. Store these results in a list and return it. # + id="YzTK5qzG_MKh" def get_style_image_features(image): """ Get the style image features Args: image: an input image Returns: gram_style_features: the style features as gram matrices """ ### START CODE HERE ### # preprocess the image using the given preprocessing function preprocessed_style_image = preprocess_image(image) # get the outputs from the inception model that you created using inception_model() outputs = inception(preprocessed_style_image) # Get just the style feature layers (exclude the content layer) style_outputs = outputs[:NUM_STYLE_LAYERS] # for each style layer, calculate the gram matrix for that layer and store these results in a list gram_style_features = [gram_matrix(style_layer) for style_layer in style_outputs] ### END CODE HERE ### return gram_style_features # + [markdown] id="No7Yox0bnTSa" # ## Get content image features # # You will get the content features of the content image. # - You can follow a similar process as you did with `get_style_image_features`. # - For the content image, you will not calculate the gram matrix of these style features. 
# + id="Y7rq02U9_a6L"
def get_content_image_features(image):
    """ Get the content image features

    Args:
      image: an input image

    Returns:
      content_outputs: the content features of the image
    """
    ### START CODE HERE ###
    # preprocess the image
    preprocessed_content_image = preprocess_image(image)

    # get the outputs from the inception model
    outputs = inception(preprocessed_content_image)

    # get the content layer of the outputs
    # (a list slice, so tf.add_n over the result still works downstream)
    content_outputs = outputs[NUM_STYLE_LAYERS:]
    ### END CODE HERE ###

    return content_outputs

# + [markdown] id="p5gcMSfLnTSa"
# ## Calculate the total loss
#
# Please define the total loss using the helper functions you just defined. As a refresher, the total loss is given by $L_{total} = \beta L_{style} + \alpha L_{content}$, where $\beta$ and $\alpha$ are the style and content weights, respectively.
#

# + id="q20XhIHnotQA"
def get_style_content_loss(style_targets, style_outputs, content_targets,
                           content_outputs, style_weight, content_weight):
    """ Combine the style and content loss

    Args:
      style_targets: style features of the style image
      style_outputs: style features of the generated image
      content_targets: content features of the content image
      content_outputs: content features of the generated image
      style_weight: weight given to the style loss
      content_weight: weight given to the content loss

    Returns:
      total_loss: the combined style and content loss
    """
    # Sum of the style losses
    style_loss = tf.add_n([get_style_loss(style_output, style_target)
                           for style_output, style_target in zip(style_outputs, style_targets)])

    # Sum up the content losses
    content_loss = tf.add_n([get_content_loss(content_output, content_target)
                             for content_output, content_target in zip(content_outputs, content_targets)])

    ### START CODE HERE ###
    # scale the style loss by multiplying by the style weight and dividing by the number of style layers
    style_loss = style_loss * style_weight / NUM_STYLE_LAYERS

    # scale the content loss by multiplying by the content weight and dividing by the number of content layers
    content_loss = content_loss * content_weight / NUM_CONTENT_LAYERS

    # sum up the style and content losses
    total_loss = style_loss + content_loss

    ### END CODE HERE ###
    # return the total loss
    return total_loss

# + [markdown] id="W6lE_zt8nTSb"
# ## Calculate gradients
#
# Please use `tf.GradientTape()` to get the gradients of the loss with respect to the input image. Take note that you will *not* need a regularization parameter in this exercise so we only provided the style and content weights as arguments.

# + id="mp2g2tI58RI0"
def calculate_gradients(image, style_targets, content_targets,
                        style_weight, content_weight):
    """ Calculate the gradients of the loss with respect to the generated image

    Args:
      image: generated image
      style_targets: style features of the style image
      content_targets: content features of the content image
      style_weight: weight given to the style loss
      content_weight: weight given to the content loss

    Returns:
      gradients: gradients of the loss with respect to the input image
    """
    ### START CODE HERE ###
    with tf.GradientTape() as tape:
        # get the style image features
        style_features = get_style_image_features(image)

        # get the content image features
        content_features = get_content_image_features(image)

        # get the style and content loss
        loss = get_style_content_loss(style_targets, style_features, content_targets,
                                      content_features, style_weight, content_weight)

    # calculate gradients of loss with respect to the image
    gradients = tape.gradient(loss, image)
    ### END CODE HERE ###

    return gradients

# + [markdown] id="I4drTvUNnTSb"
# ## Update the image with the style
#
# Please define the helper function to apply the gradients to the generated/stylized image.
# + id="e-MPRxuGp-5A"
def update_image_with_style(image, style_targets, content_targets, style_weight,
                            content_weight, optimizer):
    """Perform one optimization step on the generated image (in place).

    Args:
      image: generated image (a tf.Variable, updated and clipped in place)
      style_targets: style features of the style image
      content_targets: content features of the content image
      style_weight: weight given to the style loss
      content_weight: weight given to the content loss
      optimizer: optimizer for updating the input image
    """
    ### START CODE HERE ###
    # Calculate gradients using the function that you just defined.
    gradients = calculate_gradients(image, style_targets, content_targets,
                                    style_weight, content_weight)

    # apply the gradients to the given image
    optimizer.apply_gradients([(gradients, image)])
    ### END CODE HERE ###

    # Clip the image using the given clip_image_values() function
    # so pixels stay in the displayable [0, 255] range after the update
    image.assign(clip_image_values(image, min_value=0.0, max_value=255.0))

# + [markdown] id="foTOpNNw2Wp2"
# ## Generate the stylized image
#
# Please complete the function below to implement neural style transfer between your content and style images.

# + id="U0Btr_j9M1gu"
def fit_style_transfer(style_image, content_image, style_weight=1e-2, content_weight=1e-4,
                       optimizer='adam', epochs=1, steps_per_epoch=1):
    """ Performs neural style transfer.

    Args:
      style_image: image to get style features from
      content_image: image to stylize
      style_weight: weight given to the style loss
      content_weight: weight given to the content loss
      optimizer: optimizer for updating the input image
      epochs: number of epochs
      steps_per_epoch: steps per epoch

    Returns:
      generated_image: generated image at final epoch
      images: collection of generated images per epoch
    """

    images = []
    step = 0

    # get the style image features
    style_targets = get_style_image_features(style_image)

    # get the content image features
    content_targets = get_content_image_features(content_image)

    # initialize the generated image for updates
    generated_image = tf.cast(content_image, dtype=tf.float32)
    generated_image = tf.Variable(generated_image)

    # collect the image updates starting from the content image
    images.append(content_image)

    for n in range(epochs):
        for m in range(steps_per_epoch):
            step += 1

            ### START CODE HERE ###
            # Update the image with the style using the function that you defined
            update_image_with_style(generated_image, style_targets, content_targets,
                                    style_weight, content_weight, optimizer)
            ### END CODE HERE

            print(".", end='')

            if (m + 1) % 10 == 0:
                # FIX: snapshot the current pixels with tf.identity().
                # Appending `generated_image` directly would store the
                # tf.Variable itself, which update_image_with_style mutates
                # in place via assign() — every "intermediate" entry in
                # `images` would then alias the final result.
                images.append(tf.identity(generated_image))

        # display the current stylized image
        clear_output(wait=True)
        display_image = tensor_to_image(generated_image)
        display_fn(display_image)

        # append to the image collection for visualization later
        # (same aliasing fix as above)
        images.append(tf.identity(generated_image))
        print("Train step: {}".format(step))

    # convert to uint8 (expected dtype for images with pixels in the range [0,255])
    generated_image = tf.cast(generated_image, dtype=tf.uint8)

    return generated_image, images

# + [markdown] id="rFNfb_bpY6Qe"
# With all the helper functions defined, you can now run the main loop and generate the stylized image. This will take a few minutes to run.
# + id="MtUgMzp8tHs6" # PLEASE DO NOT CHANGE THE SETTINGS HERE # define style and content weight style_weight = 1 content_weight = 1e-32 # define optimizer. learning rate decreases per epoch. adam = tf.optimizers.Adam( tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=80.0, decay_steps=100, decay_rate=0.80 ) ) # start the neural style transfer stylized_image, display_images = fit_style_transfer(style_image=style_image, content_image=content_image, style_weight=style_weight, content_weight=content_weight, optimizer=adam, epochs=10, steps_per_epoch=100) # + [markdown] id="TlittQSqo-b_" # When the loop completes, please right click the image you generated and download it for grading in the classroom. # # **Congratulations! You just completed the assignment on Neural Style Transfer!** #
Development/StyleTransfer/Copy_of_C4W1_Assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: demix
#     language: python
#     name: demix
# ---

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

from lr.train import train_lr
from lr.hyperparameters import BEST_HPS
import pandas as pd
from tqdm.auto import tqdm
# FIX: `score` is called below (on wiki_text / random_webtext) before the
# Gradio cell that originally imported it — executing the notebook top to
# bottom raised a NameError. Import it up front instead.
from data.score import score

# ## Download training data
# The training data consists of about 600M whitespace tokens; 300M tokens from Common Crawl, 100M tokens from OpenWebText, Wikipedia, and Books3 corpus each. The test/dev data are 10M tokens each.

# +
# !mkdir -p filter_data
# !curl -Lo filter_data/test.jsonl http://arkdata.cs.washington.edu/sg01/gpt3-filter/test.jsonl
# !curl -Lo filter_data/dev.jsonl http://arkdata.cs.washington.edu/sg01/gpt3-filter/dev.jsonl
# !curl -Lo filter_data/train.jsonl http://arkdata.cs.washington.edu/sg01/gpt3-filter/train.jsonl
# -

# ## Read data

train = pd.read_json('filter_data/train.jsonl', lines=True)
dev = pd.read_json('filter_data/dev.jsonl', lines=True)
test = pd.read_json('filter_data/test.jsonl', lines=True)

tqdm.pandas()

# whitespace-token counts per label in the dev split
dev.loc[dev.label == 0].text.progress_apply(lambda x: len(x.split())).sum()

dev.loc[dev.label == 1].text.progress_apply(lambda x: len(x.split())).sum()

# ## Train Classifier
# (this will take about 10-15 minutes)

clf, vectorizer, results = train_lr(train, dev, test, BEST_HPS)

results

# score a high-quality reference text (Wikipedia featured article excerpt)
wiki_text = """
Blakeney Chapel is a ruined building on the Norfolk coast of England. Even though named as such, it was probably not a chapel, and is not in the adjoining village of Blakeney, but rather in the parish of Cley next the Sea. The building stood on a raised mound or "eye" on the seaward end of the coastal marshes, less than 200 m (220 yd) from the sea and just to the north of the current channel of the River Glaven where it turns to run parallel to the shoreline.
It consisted of two rectangular rooms of unequal size, and appears to be intact in a 1586 map, but is shown as ruins in later charts. Only the foundations and part of a wall still remain. Three archaeological investigations between 1998 and 2005 provided more detail of the construction, and showed two distinct periods of active use. Although it is described as a chapel on several maps, there is no documentary or archaeological evidence to suggest that it had any religious function. A small hearth, probably used for smelting iron, is the only evidence of a specific activity on the site.
Much of the structural material was long ago carried off for reuse in buildings in Cley and Blakeney. The surviving ruins are protected as a scheduled monument and Grade II listed building because of their historical importance, but there is no active management. The ever-present threat from the encroaching sea is likely to accelerate following a realignment of the Glaven's course through the marshes, and lead to the loss of the ruins.
"""

score(wiki_text, clf, vectorizer)

# score an informal web-text sample for comparison
random_webtext = """
As someone who comes from a country with a lot of mountains and hills, I would highly recommend going trekking to places accessible only by foot. It's really nice to see the untouched,peaceful nature existing there, without humans to ruin it.
"""

score(random_webtext, clf, vectorizer)[0][0]

# +
import gradio as gr


def start(text):
    """Gradio handler: return the GPT-3 filter quality score for `text`."""
    k = round(score(text, clf, vectorizer)[0][1], 2)
    return {"GPT-3 Filter Quality Score": k }


# -

face = gr.Interface(fn=start, inputs="text", outputs="key_values")
face.launch(share=True)
Filter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import pandas as pd

data = pd.read_csv("https://short.upm.es/dyjzp")

data.info()

data.head()

data.describe()

import seaborn as sns

sns.countplot(x='class', data=data)

from sklearn.preprocessing import LabelEncoder

# encode the target labels as integers
encoder = LabelEncoder()
data['class'] = encoder.fit_transform(data['class'])

data.head()

# one-hot encode every categorical feature column (all except the target)
column_names = data.columns.tolist()
column_names.remove('class')
data = pd.get_dummies(data=data, columns=column_names, drop_first=False)

data.head()

data_features = data.drop(['class'], axis=1)
data_classes = data['class']

data_features.head()

from sklearn.decomposition import PCA

# 2-component projection, just for visualisation
pca_2 = PCA(n_components=2)
data_2 = pd.DataFrame(pca_2.fit_transform(data_features), columns=['pca_1', 'pca_2'])
data_2['class'] = data['class'].tolist()

sns.scatterplot(data=data_2, x="pca_1", y="pca_2", hue="class", style="class")

from sklearn.preprocessing import LabelEncoder, StandardScaler

scaler = StandardScaler()
# FIX: keep the scaled values in a separate variable instead of rebinding
# data_features. StandardScaler.fit_transform returns a NumPy ndarray, and
# the original code later called data_features.columns / data_features.T.values
# on it, which raised AttributeError once data_features was no longer a
# DataFrame.
data_features_scaled = scaler.fit_transform(data_features)
print(data_features_scaled)

from sklearn.decomposition import PCA

pca = PCA()
pca.fit_transform(data_features_scaled)
pca_variance = pca.explained_variance_ratio_
print(pca_variance)

import matplotlib.pyplot as plt

plt.bar(range(len(pca_variance)), pca_variance, label="individual variance")
plt.legend()
plt.ylabel("Variance Ratio")
plt.xlabel("Principal Components")
plt.show()

# keep the first 40 principal components for modelling
pca_40 = PCA(n_components=40)
pca_40.fit(data_features_scaled)
data_features_40 = pd.DataFrame(pca_40.transform(data_features_scaled))

data_features_40.head()

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(data_features_40, data_classes, test_size=0.3, random_state=15)
print("done!")

# +
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix

LR_model = LogisticRegression()
LR_model.fit(X_train, y_train)
LR_y_pred = LR_model.predict(X_test)
accuracy = accuracy_score(y_test, LR_y_pred) * 100
print("Accuracy Score: ", "{0:.2f}".format(accuracy))
sns.heatmap(pd.DataFrame(confusion_matrix(y_test, LR_y_pred)), annot=True, fmt="g", cmap='viridis')

# +
from sklearn.naive_bayes import GaussianNB

GB_model = GaussianNB()
GB_model.fit(X_train, y_train)
GB_y_pred = GB_model.predict(X_test)
accuracy = accuracy_score(y_test, GB_y_pred) * 100
print("Accuracy Score: ", "{0:.2f}".format(accuracy))
sns.heatmap(pd.DataFrame(confusion_matrix(y_test, GB_y_pred)), annot=True, fmt="g", cmap='viridis')

# +
from sklearn.ensemble import RandomForestClassifier

RF_model = RandomForestClassifier(n_estimators=10)
RF_model.fit(X_train, y_train)
RF_y_pred = RF_model.predict(X_test)
accuracy = accuracy_score(y_test, RF_y_pred) * 100
print("Accuracy Score: ", "{0:.2f}".format(accuracy))
sns.heatmap(pd.DataFrame(confusion_matrix(y_test, RF_y_pred)), annot=True, fmt="g", cmap='viridis')

# +
from sklearn import svm

SVM_model = svm.LinearSVC()
SVM_model.fit(X_train, y_train)
SVM_y_pred = SVM_model.predict(X_test)
accuracy = accuracy_score(y_test, SVM_y_pred) * 100
print("Accuracy Score: ", "{0:.2f}".format(accuracy))
sns.heatmap(pd.DataFrame(confusion_matrix(y_test, SVM_y_pred)), annot=True, fmt="g", cmap='viridis')

# +
from sklearn.neighbors import KNeighborsClassifier

knn_model = KNeighborsClassifier()
knn_model.fit(X_train, y_train)
knn_y_pred = knn_model.predict(X_test)
accuracy = accuracy_score(y_test, knn_y_pred) * 100
print("Accuracy Score: ", "{0:.2f}".format(accuracy))
sns.heatmap(pd.DataFrame(confusion_matrix(y_test, knn_y_pred)), annot=True, fmt="g", cmap='viridis')
# -

import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage
from matplotlib import pyplot as plt

# dendrogram over the original one-hot feature columns
# (data_features is still a DataFrame here thanks to the fix above)
leaf_labels = list(data_features.columns.values)
data_features_t = data_features.T.values  # Transpose values
linkage_data = linkage(data_features_t, method='ward', metric='euclidean')

plt.figure(figsize=(20, 16))
plt.xlabel('Feature')
plt.ylabel('Relevance')
dendrogram(linkage_data, leaf_rotation=90., leaf_font_size=10., labels=leaf_labels)
plt.savefig('dendrogram_mushrooms_original.png')

import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage
from matplotlib import pyplot as plt

# same dendrogram, but on the 40 PCA components
leaf_labels = list(data_features_40.columns.values)
data_features_t = data_features_40.T.values  # Transpose values
linkage_data = linkage(data_features_t, method='ward', metric='euclidean')

plt.figure(figsize=(20, 16))
plt.xlabel('Feature')
plt.ylabel('Relevance')
dendrogram(linkage_data, leaf_rotation=90., leaf_font_size=10., labels=leaf_labels)
plt.savefig('dendrogram_mushrooms_pca.png')

from sklearn.cluster import KMeans
from collections import Counter

# cluster the standardized features
kmeans = KMeans(n_clusters=8)
kmeans = kmeans.fit(data_features_scaled)
labels = kmeans.predict(data_features_scaled)
centroids = kmeans.cluster_centers_
Counter(labels)

from sklearn.metrics import silhouette_score

sil = silhouette_score(data_features_scaled, labels)
print(sil)

data_2['kmeans'] = labels
sns.scatterplot(data=data_2, x="pca_1", y="pca_2", hue="kmeans", palette="pastel")

print(centroids[0])

round_to_tenths = [round(num, 1) for num in centroids[0]]
print(round_to_tenths)

# list the named feature columns with a non-trivial weight in the first
# centroid (centroid columns align with the one-hot DataFrame columns)
idx = 0
for value in centroids[0]:
    round_value = round(value, 1)
    if (round_value > 0.0):
        feature = data_features.columns[idx]
        print(feature, value)
    idx += 1
dimensionality_reduction/Mushroom-Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark3 # language: '' # name: pyspark3kernel # --- # + [markdown] cell_status={"execute_time": {"duration": 147.34814453125, "end_time": 1584417525073.1}} editable=true deletable=true # # Workload Insights Notebook # # This notebook analyzes the denormalized representation of query workload. This denormalized dataset contains the information from applications, queries, plans, and runtime metrics. The plans were also annotated with signatures to identify subexpressions. We hope that the analysis in the notebook helps you in making data-driven decisions. # # # ## Features # 1. <a href=#sample>Sample Rows</a> # 2. <a href=#qcount>Query and Operator Counts</a> # 3. <a href=#opfreq>Operator Frequencies</a> # 4. <a href=#cquery>Overlapping Queries</a> # 5. <a href=#csubop>Common Subexpressions Per Operator</a> # 6. <a href=#selviews>Selected Views</a> # 7. <a href=#ss>SparkCruise Savings</a> # 8. <a href=#filterstat>Filter Selectivity</a> # 9. <a href=#exstat>Exchange Operator Statistics</a> # 10. 
# <a href=#recjobs>Recurring Jobs</a>

# + [markdown] editable=true deletable=true
# ## Setup

# + cell_status={"execute_time": {"duration": 2432.85107421875, "end_time": 1593665556870.339}} editable=true deletable=true
# Load the denormalized logical/physical plan dumps and register them as temp
# views so the analysis cells below can query them with Spark SQL.
# NOTE(review): `.option("schema", ...)` is not the standard way to supply a
# schema to the CSV reader (that is `.schema(...)`) — confirm whether the
# option is honored here or whether the schema is being inferred instead.
logicalExps = spark.read.format("csv").option("sep", "|").option("schema","AppID: string (nullable = true), ClusterName: string (nullable = true), Subscription: string (nullable = true), QueryID: integer (nullable = true), AppQueryID: string (nullable = true), OperatorName: string (nullable = true), TreeLevel: integer (nullable = true), ChildCount: integer (nullable = true), StrictSignature: integer (nullable = true), NonStrictSignature: integer (nullable = true), Parameters: string (nullable = true)").option("header", "true").load("/peregrine/views/logical_ir.csv")
logicalExps.createOrReplaceTempView("LogicalExps")

physicalExps = spark.read.format("csv").option("sep", "|").option("schema","AppID: string (nullable = true), ClusterName: string (nullable = true), Subscription: string (nullable = true), QueryID: integer (nullable = true), AppQueryID: string (nullable = true), OperatorName: string (nullable = true), TreeLevel: integer (nullable = true), ChildCount: integer (nullable = true), StrictSignature: integer (nullable = true), NonStrictSignature: integer (nullable = true), Parameters: string (nullable = true)").option("header", "true").load("/peregrine/views/physical_ir.csv")
physicalExps.createOrReplaceTempView("PhysicalExps")

# non-leaf logical operators only (ChildCount > 0)
analysisExps = spark.sql("SELECT * FROM LogicalExps WHERE ChildCount > 0")
analysisExps.createOrReplaceTempView("AnalysisExps")

# subexpressions (strict signatures) that occur in more than one query
repeatSubexps = spark.sql("SELECT StrictSignature FROM AnalysisExps GROUP BY StrictSignature HAVING COUNT(DISTINCT AppQueryID) > 1")
repeatSubexps.createOrReplaceTempView("RepeatSubexps")

# + [markdown] editable=true deletable=true
# ## Sample Rows <a name='sample' />
# Example of records in intermediate representation.
# # + diagram={"chartConfig": {"category": "bar", "aggByBackend": false, "values": [], "yLabel": "", "keys": [], "xLabel": "", "aggregation": "SUM"}, "activateDiagramType": 1, "isSql": false, "isSummary": false, "aggData": {}, "previewData": {"filter": null}} cell_status={"execute_time": {"duration": 874.85400390625, "end_time": 1593665558182.483}} editable=true deletable=true language="sql" # SELECT AppID, AppName, AppStartTime, QueryID, QueryWallClockTime, OperatorName, LogicalName, StrictSignature, PRowCount, PExclusiveTime # FROM PhysicalExps # WHERE LENGTH(StrictSignature) > 0 AND PExclusiveTime > 0 # ORDER BY rand() # LIMIT 10 # + [markdown] editable=true deletable=true # ## Query and Operator Count <a name='qcount' /> # # Number of queries and operators in workload. # + diagram={"chartConfig": {"category": "bar", "aggByBackend": false, "values": ["DistinctOperators"], "yLabel": "DistinctOperators", "keys": [], "xLabel": "", "aggregation": "SUM"}, "activateDiagramType": 1, "isSql": false, "isSummary": false, "aggData": {"DistinctOperators": {"": 13}}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 375.075927734375, "end_time": 1593665558565.013}} deletable=true editable=true language="sql" # SELECT COUNT(DISTINCT AppQueryID) AS QueryCount, COUNT(*) AS OperatorCount, COUNT(DISTINCT OperatorName) AS DistinctOperators # FROM PhysicalExps # + [markdown] editable=true deletable=true # ## Operator Frequency <a name='opfreq' /> # Frequency of logical and physical operators in workload. 
# # + diagram={"chartConfig": {"category": "pie", "aggByBackend": false, "keys": ["OperatorName"], "isValid": true, "yLabel": "Frequency", "aggregation": "SUM", "values": ["Frequency"], "series": null, "xLabel": "OperatorName", "inValidMsg": null}, "activateDiagramType": 2, "isSql": false, "isSummary": false, "aggData": {"Frequency": {"Sort": 90, "Project": 1589, "Join": 735, "Union": 34, "LocalRelation": 20, "Filter": 933, "LocalLimit": 85, "Window": 30, "LogicalRelation": 868, "Aggregate": 233, "SetDatabaseCommand": 103, "GlobalLimit": 85, "Expand": 10}}, "previewData": {"filter": null}} cell_status={"execute_time": {"duration": 2305.572021484375, "end_time": 1593664721691.412}} editable=true deletable=true language="sql" # SELECT OperatorName, COUNT(*) AS Frequency # FROM LogicalExps # GROUP BY OperatorName # ORDER BY Frequency DESC # + [markdown] editable=true deletable=true # Frequency of physical operators in workload. # # + diagram={"chartConfig": {"category": "pie", "aggByBackend": false, "keys": ["OperatorName"], "isValid": true, "aggregation": "SUM", "values": ["Frequency"], "yLabel": "Frequency", "xLabel": "OperatorName", "inValidMsg": null}, "activateDiagramType": 2, "isSql": false, "isSummary": false, "aggData": {"Frequency": {"FileSourceScanExec": 605, "LocalTableScanExec": 20, "FilterExec": 648, "ProjectExec": 1256, "UnionExec": 33, "ShuffledHashJoinExec": 123, "SortExec": 196, "ShuffleExchangeExec": 581, "WholeStageCodegenExec": 1265, "BroadcastExchangeExec": 323, "ExpandExec": 10, "SortMergeJoinExec": 90, "BroadcastNestedLoopJoinExec": 19, "CollectLimitExec": 5, "BroadcastHashJoinExec": 443, "ReusedExchangeExec": 202, "HashAggregateExec": 451, "InputAdapter": 1158, "TakeOrderedAndProjectExec": 80, "ExecutedCommandExec": 103, "WindowExec": 30}}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 2439.66796875, "end_time": 1593664725353.642}} deletable=true editable=true language="sql" # SELECT 
OperatorName, COUNT(*) AS Frequency # FROM PhysicalExps # GROUP BY OperatorName # ORDER BY Frequency DESC # + [markdown] editable=true deletable=true # ### Overlapping queries <a name='cquery' /> # Queries with overlapping computations. # # + diagram={"chartConfig": {"category": "line", "aggByBackend": false, "keys": ["QueryCount", "QueriesWithOneOverlap", "QueriesWithTwoOverlaps"], "isValid": false, "aggregation": "SUM", "values": [], "yLabel": "", "xLabel": "QueryCount,QueriesWithOneOverlap,QueriesWithTwoOverlaps", "inValidMsg": "At least one value column is required!"}, "activateDiagramType": 1, "isSql": false, "isSummary": false, "aggData": {}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 760.51904296875, "end_time": 1593664726270.094}} deletable=true editable=true queryCountQuery = """ SELECT COUNT(DISTINCT AppQueryID) AS QueryCount FROM AnalysisExps""" queryCount = spark.sql(queryCountQuery) queryCount.createOrReplaceTempView("QueryCount") queriesWithRepeatQuery = """ SELECT COUNT(DISTINCT AppQueryID) AS QueriesWithOneOverlap FROM AnalysisExps WHERE StrictSignature IN ( SELECT StrictSignature FROM RepeatSubexps )""" queriesWithRepeat = spark.sql(queriesWithRepeatQuery) queriesWithRepeat.createOrReplaceTempView("QueriesWithRepeat") queriesWithTwoRepeatsQuery = """ SELECT COUNT(*) AS QueriesWithTwoOverlaps FROM ( SELECT AppQueryID, COUNT(*) AS Repeats FROM AnalysisExps WHERE StrictSignature IN ( SELECT StrictSignature FROM RepeatSubexps ) GROUP BY AppQueryID HAVING Repeats > 1 ORDER BY Repeats DESC)""" queriesWithTwoRepeats = spark.sql(queriesWithTwoRepeatsQuery) queriesWithTwoRepeats.createOrReplaceTempView("QueriesWithTwoRepeats") # + cell_status={"execute_time": {"duration": 166.10205078125, "end_time": 1593664726440.694}} language="sql" # SELECT *, CAST((QueriesWithOneOverlap/QueryCount)*100 AS Decimal(38,2)) AS OverlapPercent # FROM QueryCount AS R1, QueriesWithRepeat AS R2, QueriesWithTwoRepeats AS R3 # + 
[markdown] editable=true deletable=true # ## Overlapping Computations <a name='csubop' /> # # Overlapping computations per operator. # # + diagram={"chartConfig": {"category": "bar", "aggByBackend": false, "keys": ["OperatorName"], "isValid": true, "aggregation": "SUM", "values": ["Total", "Repeats"], "yLabel": "Total,Repeats", "xLabel": "OperatorName", "inValidMsg": null}, "activateDiagramType": 2, "isSql": false, "isSummary": false, "aggData": {"Repeats": {"Aggregate": 26, "Filter": 513, "Project": 393, "Join": 48}, "Total": {"Aggregate": 233, "Filter": 933, "Project": 1589, "Join": 735}}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 1294.239990234375, "end_time": 1593665528156.392}} deletable=true editable=true opFreqQuery = """ SELECT OperatorName, COUNT(*) AS Total FROM AnalysisExps GROUP BY OperatorName ORDER BY Total DESC""" opFreq = spark.sql(opFreqQuery) opFreq.createOrReplaceTempView("OpFreq") opRepeatSubexpQuery = """ SELECT OperatorName, COUNT(*) AS Repeats FROM AnalysisExps WHERE StrictSignature IN ( SELECT StrictSignature FROM RepeatSubexps ) GROUP BY OperatorName ORDER BY Repeats DESC""" opRepeatSubexp = spark.sql(opRepeatSubexpQuery) opRepeatSubexp.createOrReplaceTempView("OpRepeatSubexp") opDistinctRepeatQuery = """ SELECT OperatorName, COUNT(*) AS DistinctRepeats FROM ( SELECT DISTINCT OperatorName, StrictSignature FROM AnalysisExps WHERE StrictSignature IN ( SELECT StrictSignature FROM RepeatSubexps )) GROUP BY OperatorName ORDER BY DistinctRepeats DESC""" opDistinctRepeat = spark.sql(opDistinctRepeatQuery) opDistinctRepeat.createOrReplaceTempView("OpDistinctRepeat") # + cell_status={"execute_time": {"duration": 769.869873046875, "end_time": 1593665529433.968}} language="sql" # SELECT R1.OperatorName, Total, Repeats, DistinctRepeats, CAST(Repeats/DistinctRepeats AS Decimal(38,2)) AS AvgRepFrequency, CAST((Repeats/Total)*100 AS Decimal(38,2)) AS RepeatPercent # FROM OpFreq AS R1, OpRepeatSubexp AS 
R2, OpDistinctRepeat AS R3 # WHERE R1.OperatorName = R2.OperatorName AND R2.OperatorName = R3.OperatorName # ORDER BY RepeatPercent DESC # + [markdown] editable=true deletable=true # ## Selected Views <a name='selviews' /> # Per operator summary of selected views. # + cell_status={"execute_time": {"duration": 1319.8837890625, "end_time": 1593665531816.984}} selViews = spark.read.format("csv").option("sep", "|").option("schema","AppID: string (nullable = true), ClusterName: string (nullable = true), Subscription: string (nullable = true), QueryID: integer (nullable = true), AppQueryID: string (nullable = true), OperatorName: string (nullable = true), TreeLevel: integer (nullable = true), ChildCount: integer (nullable = true), StrictSignature: integer (nullable = true), NonStrictSignature: integer (nullable = true), Parameters: string (nullable = true)").option("header", "true").load("/peregrine/views/views.csv") selViews.createOrReplaceTempView("SelViews") views = spark.sql("SELECT DISTINCT StrictSignature FROM SelViews") views.createOrReplaceTempView("Views") # + diagram={"chartConfig": {"category": "bar", "aggByBackend": false, "values": ["ViewCount"], "yLabel": "ViewCount", "keys": ["LogicalName"], "xLabel": "LogicalName", "aggregation": "SUM"}, "activateDiagramType": 2, "isSql": false, "isSummary": false, "aggData": {"ViewCount": {"Aggregate": 4, "Filter": 18, "Project": 8, "Join": 12}}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 1294.304931640625, "end_time": 1593665534662.815}} deletable=true editable=true distinctViewsQuery = """ SELECT P.LogicalName AS LogicalName, COUNT(DISTINCT P.StrictSignature) AS ViewCount FROM Views V, PhysicalExps P WHERE V.StrictSignature = P.StrictSignature GROUP BY P.LogicalName ORDER BY ViewCount DESC""" distinctViews = spark.sql(distinctViewsQuery) distinctViews.createOrReplaceTempView("DistinctViews") viewSubexprsOpsQuery = """ SELECT P.LogicalName AS LogicalName, COUNT(*) AS 
ViewRepeats FROM Views V, PhysicalExps P WHERE V.StrictSignature = P.StrictSignature GROUP BY P.LogicalName ORDER BY ViewRepeats DESC""" viewSubexprsOps = spark.sql(viewSubexprsOpsQuery) viewSubexprsOps.createOrReplaceTempView("ViewSubexprsOps") # + cell_status={"execute_time": {"duration": 1329.829833984375, "end_time": 1593665537600.952}} language="sql" # SELECT X.LogicalName, ViewCount, ViewRepeats # FROM DistinctViews AS X, ViewSubexprsOps AS Y # WHERE X.LogicalName = Y.LogicalName # + [markdown] editable=true deletable=true # ## SparkCruise Savings <a name='ss' /> # Potential savings per view. # # + diagram={"chartConfig": {"category": "bar", "aggByBackend": false, "values": ["AvgSerialTime"], "yLabel": "AvgSerialTime", "keys": ["AvgRowCount"], "xLabel": "AvgRowCount", "aggregation": "SUM"}, "activateDiagramType": 1, "isSql": false, "isSummary": false, "aggData": {"AvgSerialTime": {"506811.728": 84115.256}}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 858.774169921875, "end_time": 1593665538514.189}} deletable=true editable=true language="sql" # SELECT LogicalName, NumOccurrences, AvgSerialTime_ms, AvgRowCount, AvgRowLength_bytes # FROM ( # SELECT P.LogicalName AS LogicalName, P.StrictSignature AS Id, COUNT(*) AS NumOccurrences, AVG(PSerialTime) AS AvgSerialTime_ms, AVG(PRowCount) AS AvgRowCount, AVG(AvgRowLength) AS AvgRowLength_bytes # FROM Views V, PhysicalExps P # WHERE V.StrictSignature = P.StrictSignature # GROUP BY P.LogicalName, P.StrictSignature) # + [markdown] editable=true deletable=true # ## Filter Selectivity <a name='filterstat' /> # Selectivity of filters. 
# # + cell_status={"execute_time": {"duration": 2330.943115234375, "end_time": 1593665542369.796}} editable=true deletable=true magic_args="-o filterSel" language="sql" # # SELECT F.AppQueryID, F.OperatorName AS FilterOp, F.RowCount AS PassCount, T.OperatorName AS ScanOp, T.RowCount AS TotalCount, CAST(F.RowCount/T.RowCount AS Decimal(38, 5)) AS FilterSel # FROM PhysicalExps F, PhysicalExps T # WHERE F.AppQueryID = T.AppQueryID AND # F.OperatorName = 'FilterExec' AND # T.ParentID = F.OperatorID AND # T.RowCount > 0 AND # T.ChildCount = 0 # ORDER BY FilterSel # + cell_status={"execute_time": {"duration": 888.823974609375, "end_time": 1593665543298.182}} # %%local # %matplotlib inline import matplotlib.pyplot as plt # CDF filterSel['pdf'] = filterSel['FilterSel']/sum(filterSel['FilterSel']) filterSel['ecdf'] = (filterSel['pdf'].cumsum()) ax = filterSel.plot(x = 'FilterSel', y = 'ecdf', grid = True) ax.set_xlabel("Filter Selectivity") ax.set_ylabel("CDF") ax.set_xlim(0, 1) ax.set_ylim(0, 1) # + [markdown] editable=true deletable=true # ## Exchange <a name='exstat' /> # How many rows are shuffled in real-world Spark workloads? # + diagram={"chartConfig": {"category": "bar", "aggByBackend": false, "keys": ["RowCountBucket"], "isValid": true, "aggregation": "SUM", "values": ["Count"], "yLabel": "Count", "xLabel": "RowCountBucket", "inValidMsg": null}, "activateDiagramType": 2, "isSql": false, "isSummary": false, "aggData": {"Count": {"3. 1k-100k": 81, "6. 100M-1B": 1, "5. 1M-100M": 169, "2. 1-1k": 73, "4. 100k-1M": 80}}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 3367.154052734375, "end_time": 1593665547942.685}} deletable=true editable=true def getBucket(rowCount): rows = int(rowCount) if rows <=1: return "a. 0-1" elif (rows > 1 and rows <= 1e3): return "b. 1-1k" elif (rows > 1e3 and rows <= 1e5): return "c. 1k-100k" elif (rows > 1e5 and rows <= 1e6): return "d. 100k-1M" elif (rows > 1e6 and rows <= 1e8): return "e. 
1M-100M" elif (rows > 1e8 and rows <= 1e9): return "f. 100M-1B" else: return "g. >1B" spark.udf.register("getBucket", getBucket) exBucketQuery = """ SELECT Bucket AS RowCountBucket, CAST(AVG(MB) AS Decimal(38,2)) AS AvgDataSizeInMB, CAST(MAX(MB) AS Decimal(38,2)) AS MaxDataSizeInMB, COUNT(*) AS Count FROM ( SELECT getBucket(PRowCount) AS Bucket, Bytes/(1024.0*1024) AS MB FROM PhysicalExps WHERE PRowCount > 0 AND Bytes > 0 AND OperatorName LIKE '%ShuffleExchangeExec%') GROUP BY Bucket ORDER BY RowCountBucket """ exBucket = spark.sql(exBucketQuery) exBucket.show(100, False) exBucket.createOrReplaceTempView("ExBucket") # + cell_status={"execute_time": {"duration": 830.60888671875, "end_time": 1593665548808.813}} magic_args="-o exBuckets" language="sql" # SELECT * # FROM ExBucket # + cell_status={"execute_time": {"duration": 395.66796875, "end_time": 1593665549330.06}} # %%local # %matplotlib inline import matplotlib.pyplot as plt exBuckets['RCBucketLabels'] = exBuckets['RowCountBucket'].str[3:] ax = exBuckets.plot.bar(x='RCBucketLabels', y='Count', rot=0) ax.set_xlabel("Number of rows in exchange") ax.set_ylabel("Frequency") # + [markdown] editable=true deletable=true # ## Recurring jobs <a name='recjobs' /> # Recurring jobs share same non-strict signature at the root level. To capture temporal patterns, we can take the intersection of the result set for a few consecutive days and then split the intesection set in hourly, daily repeat patterns based on the AppSubmitTime value. 
# + diagram={"chartConfig": {"category": "bar", "aggByBackend": false, "values": ["Count"], "yLabel": "Count", "keys": ["Subscription"], "xLabel": "Subscription", "aggregation": "SUM"}, "activateDiagramType": 1, "isSql": false, "isSummary": false, "aggData": {"Count": {"test": 2}}, "previewData": {"filter": null}} inputCollapsed=true cell_status={"execute_time": {"duration": 2447.49609375, "end_time": 1593665552970.298}} deletable=true editable=true language="sql" # SELECT Subscription, NonStrictSignature, COUNT(*) AS Count # FROM AnalysisExps # WHERE TreeLevel = 0 AND LENGTH(NonStrictSignature) > 0 # GROUP BY Subscription, NonStrictSignature # HAVING COUNT(*) > 1 # ORDER BY Count DESC # -
SparkCruise/WorkloadInsights_HDI_v0.4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tweet Preprocessing

# #### Basic text-preprocessing pipeline (in no particular order):
# - Detect and translate tweets to English
# - Tokenization
# - Stopword removal & Lemmatization
# - Remove URLs and reserved words (RTs)
# - Lowercasing
# - Remove # and @ symbols but keep values
# - Spell Checker
# - Remove punctuation (possibly, although useful for tweet fragmentation)

# +
import os
import pandas as pd
import numpy as np
import preprocessor as prep
from os.path import join
from sqlite3 import connect
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
from googletrans import Translator
from nltk.corpus import stopwords
from nltk.util import ngrams
from spellchecker import SpellChecker
import nltk
import string

# Show all rows/columns when rendering frames in the notebook.
pd.options.display.max_columns = None
pd.options.display.max_rows = None
# -

# ### Getting Data (tweets)

# +
# Resolve data directories relative to the notebook's parent directory.
project_dir = join(os.getcwd(), os.pardir)
raw_dir = join(project_dir, 'data', 'raw')
interim_dir = join(project_dir, 'data', 'interim')

# %config InlineBackend.figure_format = 'svg'
# -

# Each line of the file is one tweet JSON object (lines=True).
raw_fname = 'data_pull_sample.json'
df = pd.read_json(join(raw_dir, raw_fname), lines=True)

# +
# Split the nested `user` objects out into their own frame.
df_users = pd.DataFrame(df['user'].tolist())

# Drop columns not needed downstream.
# (The original listed 'quoted_status_id' twice; listing it once is equivalent.)
df_tweets = df.drop(columns=[
    'id', 'in_reply_to_status_id', 'in_reply_to_user_id', 'user',
    'coordinates', 'place', 'quoted_status_id', 'favorited', 'retweeted',
    'retweeted_status', 'matching_rules', 'geo', 'filter_level',
    'display_text_range', 'contributors', 'quoted_status',
    'quoted_status_permalink', 'in_reply_to_screen_name', 'text',
    'extended_tweet', 'truncated', 'entities', 'extended_entities'
])

df_tweets['user_id_str'] = df['user'].apply(lambda x: x['id_str'])

# Truncated tweets carry their full text under extended_tweet.full_text.
df_tweets['full_text'] = df.apply(
    lambda row: row['text'] if not row['truncated'] else row['extended_tweet']['full_text'],
    axis=1
)

def get_retweet_id(row):
    """Return (is_retweet, original_tweet_id_str) for one tweet row.

    `retweeted_status` is a dict only when the tweet is a retweet;
    otherwise pandas fills the cell with NaN, hence the type check.
    """
    # isinstance() is the idiomatic type check (was: type(...) == dict).
    if isinstance(row['retweeted_status'], dict):
        return True, row['retweeted_status']['id_str']
    else:
        return False, np.nan

# Unzip the (flag, id) pairs into two columns.
df_tweets['is_retweet'], df_tweets['original_tweet_id_str'] = zip(*df.apply(get_retweet_id, axis=1))
df_tweets['is_reply'] = ~df['in_reply_to_status_id'].isna()
df_tweets.drop_duplicates(subset='id_str', inplace=True)
# A tweet is "original" iff it is neither a reply, a retweet, nor a quote.
df_tweets.loc[:,'is_original'] = ~df_tweets[['is_reply', 'is_retweet', 'is_quote_status']].sum(1).astype(bool)
# -

print(df_tweets.shape)
# Keep only non-retweets for the rest of the pipeline.
df_tweets = df_tweets[df_tweets['is_retweet']==False]
df_tweets.shape

df_tweets.head()

# ### Translate Tweets

def translate_tweet(text, lang):
    """Translate `text` to English via googletrans.

    NOTE(review): the `lang` argument is accepted but never used —
    googletrans auto-detects the source language. Kept for caller
    compatibility; consider passing src=lang if detection misfires.
    """
    trans = Translator()
    return trans.translate(text).text

# +
# # %%time
# temp_df = df_tweets
# for i in temp_df.index:
#     if temp_df['lang'][i]!='en':
#         temp_df.loc[i,'full_text_processed'] = translate_tweet(temp_df['full_text'][i], temp_df['lang'][i])
# temp_df.head()

# +
# # %%time
# temp_df = df_tweets
# for row in temp_df.itertuples():
#     if row.lang != 'en':
#         df_tweets.at[row.Index,'full_text_processed'] = translate_tweet(row.full_text, row.lang)
# temp_df.head()

# +
# # %%time
# temp_df = df_tweets
# def translate_func(x, text, lang,col):
#     if x[lang] != 'en':
#         x[col]= translate_tweet(x[text], x[lang])
#     else:
#         x[col]=x[text]
#     return x
# temp_df.apply(lambda x: translate_func(x, 'full_text', 'lang','full_text_processed'),axis=1)
# temp_df.head()

# +
# %%time
def translate_func(x, text, lang):
    """Return the English text for row `x`: translate non-English rows,
    pass English rows through unchanged.

    `text` and `lang` are the *column names* to read from the row.
    """
    if x[lang] != 'en':
        process = translate_tweet(x[text], x[lang])
    else:
        process = x[text]
    return process

df_tweets['full_text_processed'] = df_tweets.apply(lambda x: translate_func(x, 'full_text', 'lang'),axis=1)
df_tweets.head()
# -

# ### Removing URLs and Reserved Words (RTs)

# +
from preprocessor import api

# Ensure string dtype before running the tweet-preprocessor cleaner.
df_tweets['full_text_processed'] = df_tweets['full_text_processed'].astype(str)
# Strip only URLs and reserved words (RT, FAV); keep hashtags/mentions.
api.set_options('urls','reserved_words')
# -
df_tweets['full_text_processed'] = df_tweets['full_text_processed'].apply(lambda x: api.clean(x)) # ### Lowercasing & Punctuation Removal df_tweets['full_text_processed'] = df_tweets['full_text_processed'].apply(lambda x: x.lower()) def remove_punct(text): table = str.maketrans('','',string.punctuation) return text.translate(table) df_tweets['full_text_processed'] = df_tweets['full_text_processed'].apply(lambda x: remove_punct(x)) # ### Lemmatization & Stopword removal lemmatizer = nltk.stem.WordNetLemmatizer() df_tweets['full_text_processed'] = df_tweets['full_text_processed'].apply(lambda x: ' '.join([lemmatizer.lemmatize(w) for w in x.split()])) stop_words = set(stopwords.words('english')) df_tweets['full_text_processed'] = df_tweets['full_text_processed'].apply(lambda x: ' '.join([word for word in x.split() if word not in stop_words])) # ### Spell Checker # + spell = SpellChecker() def correct_spellings(text): corrected_text = [] misspelled_words = spell.unknown(text.split()) for word in text.split(): if word in misspelled_words: corrected_text.append(spell.correction(word)) else: corrected_text.append(word) return " ".join(corrected_text) # + # %%time ##Taking too much time to execute # temp_df = df_tweets # df_tweets['full_text_processed'] = df_tweets['full_text_processed'].apply(lambda x: correct_spellings(x)) # + import nltk from nltk.sentiment.vader import SentimentIntensityAnalyzer nltk.download('vader_lexicon') sid = SentimentIntensityAnalyzer() print(sid.polarity_scores('i love vader')) # + def create_sentiment(x,text): return sid.polarity_scores(text)['compound'] df_tweets['sentiment'] = df_tweets.apply(lambda x: create_sentiment(x, x['full_text_processed']),axis=1) df_tweets.head() # - # ### Loading into Database from src.data._load_es import load_es from src.data._transform import merge_dataframes df_merge = merge_dataframes(df_users,df_tweets) df_merge.head() load_es(df_merge)
notebooks/1.6-jr-tweet-preprocessing-extension.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     formats: jl:light,ipynb
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.4.0
#     language: julia
#     name: julia-1.4
# ---

# +
using Gen
using Statistics

include("DistributionsBacked.jl")

using AdvancedHMC

# Gen-compatible wrappers around Distributions.jl primitives
# (provided by DistributionsBacked.jl; both parameters differentiable).
const my_normal = DistributionsBacked{Float64}((mu, sigma) -> Distributions.Normal(mu, sigma), [true, true], true)
const my_unif = DistributionsBacked{Float64}((lo, hi) -> Distributions.Uniform(lo, hi), [true, true], true)
;

# +
@gen function corbiv_model() #correlated bivariate normal
    ρ ~ my_unif(-1., 1.)
    x ~ my_normal(0., 1.)
    y ~ my_normal(ρ*x, sqrt(1. - ρ^2))
end

@gen function cormiv_model() #correlated bivariate normal
    ρ ~ my_unif(0., 1.) #correlation between x and y
    ρ2 ~ my_unif(0., ρ) #correlation between z and (x or y)
    zy_cond_zx = (1-ρ2^2)*ρ2
    x ~ my_normal(0., 1.)
    y ~ my_normal(ρ*x, sqrt(1. - ρ^2))
    z ~ my_normal(ρ2*x + zy_cond_zx*y, sqrt(1. - ρ2^2 - zy_cond_zx^2))
end
;

# +
# Build a choicemap fixing the correlation ρ for the bivariate model.
function constrainρ(ρ::Float64)
    constraints = Gen.choicemap()
    constraints[:ρ] = ρ
    constraints
end

# Build a choicemap fixing ρ, ρ2 and z for the trivariate model.
function constrainρ2(ρ::Float64, ρ2, z)
    constraints = Gen.choicemap()
    constraints[:ρ] = ρ
    constraints[:ρ2] = ρ2
    constraints[:z] = z
    constraints
end

# Run `num_iters` steps of `update` on the bivariate model with ρ fixed;
# returns a num_iters×2 matrix of (x, y) samples.
function mcmc_inference(ρ, num_iters, update, selection)
    observation = constrainρ(ρ)
    (trace, _) = generate(corbiv_model, (), observation)
    samples = Array{Float64}(undef,num_iters,2)
    for i=1:num_iters
        trace = update(trace, selection)
        ch = get_choices(trace)
        samples[i,1] = ch[:x]
        samples[i,2] = ch[:y]
    end
    samples
end

# Same driver for the trivariate model; note `update` here takes only the trace.
function mcmc_m_inference(ρ, ρ2, z, num_iters, update)
    observation = constrainρ2(ρ, ρ2, z)
    (trace, _) = generate(cormiv_model, (), observation)
    samples = Array{Float64}(undef,num_iters,2)
    for i=1:num_iters
        trace = update(trace)
        ch = get_choices(trace)
        samples[i,1] = ch[:x]
        samples[i,2] = ch[:y]
    end
    samples
end

# One joint Metropolis-Hastings move over (x, y).
function block_mh(tr, selection)
    (tr, _) = mh(tr, select(:x, :y))
    tr
end

# One Gen built-in HMC move over (x, y).
function simple_hmc(tr, selection)
    (tr, _) = hmc(tr, select(:x, :y))
    tr
end
;
# -

# NOTE(review): `show` shadows Base.show in notebook scope — harmless here.
iters = 10_000
show = 5
ρ = -.5
samps = mcmc_inference(ρ, iters, block_mh, select(:x,:y))
samps[(iters-show+1):iters,:]

iters = 100
show = 5
ρ = .8
# NOTE(review): mcmc_inference takes 4 positional arguments; the `selection`
# argument is missing here, so this cell errors as written — likely stale.
samps = mcmc_inference(ρ, iters, simple_hmc)
samps[(iters-show+1):iters,:]

println(mean(samps))
println(cor(samps[:,1],samps[:,2]))

# Disable AdvancedHMC's NUTS logging

# +
using Logging
using LoggingExtras

# Drop AdvancedHMC progress messages ("sampling steps"/"adapation steps" —
# the misspelling matches the library's actual message text).
function ignore_sampling_filter(log_args)
    !(occursin("sampling steps",log_args.message) || occursin("adapation steps",log_args.message))
end

logger = ActiveFilteredLogger(ignore_sampling_filter, global_logger())

if !(@isdefined old_logger) #do this only once
    old_logger = global_logger(logger)
end
# -

# Custom NUTS kernel bridging Gen traces and AdvancedHMC: extracts the
# selected choices as a flat vector, samples with NUTS, and writes the
# last sample back into the trace.
function my_nuts(trace, selection, n_postadapt_steps = 2, n_adapts = 1, initial_ϵ_reduce_fac = 10)
    n_NUTS_steps = n_postadapt_steps + n_adapts
    filtered_choices = get_selected(get_choices(trace), selection)
    cur_xy = to_array(filtered_choices, Float64)
    dimension = length(cur_xy)
    metric = DiagEuclideanMetric(dimension)
    retval_grad = nothing #accepts_output_grad(get_gen_fn(trace)) ? zero(get_retval(trace)) : nothing

    # Rebuild a trace with the selected choices replaced by `val`.
    function update_xy(val)
        extra_constraints = from_array(filtered_choices, val)
        update(trace, (), (NoChange(),), extra_constraints)
    end

    # Log-probability (up to a constant) of the proposed values.
    function val_to_lp_plus_c(val)
        (new_trace, weight, discard, retdiff) = update_xy(val)
        weight
    end

    # Log-probability plus gradient w.r.t. the selected choices (:x, :y).
    function val_to_grad(val)
        (new_trace, weight, discard, retdiff) = update_xy(val)
        (retval_grad_out, values_trie, gradient_trie) = choice_gradients(new_trace, selection, retval_grad)
        grad = [gradient_trie[:x], gradient_trie[:y]]
        (weight, grad)
    end

    # Define a Hamiltonian system, using metric defined globally above
    hamiltonian = Hamiltonian(metric, val_to_lp_plus_c, val_to_grad)

    # Define a leapfrog solver, with initial step size chosen heuristically
    initial_ϵ = find_good_stepsize(hamiltonian, cur_xy) ./ initial_ϵ_reduce_fac
    integrator = Leapfrog(initial_ϵ)

    # Define an HMC sampler, with the following components
    #   - multinomial sampling scheme,
    #   - generalised No-U-Turn criteria, and
    #   - windowed adaption for step-size and diagonal mass matrix
    proposal = NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator)
    adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), StepSizeAdaptor(0.8, integrator))

    # Run the sampler to draw samples from the specified Gaussian, where
    #   - `samples` will store the samples
    #   - `stats` will store diagnostic statistics for each sample
    samples, stats = sample(hamiltonian, proposal, cur_xy, n_NUTS_steps, adaptor, n_adapts; progress=false)
    #println(samples[3])
    (new_trace, weight, discard, retdiff) = update_xy(samples[n_NUTS_steps])
    new_trace
end

# +
iters = 200
show = 5
ρ = .99
samps = mcmc_inference(ρ, iters, my_nuts, select(:x,:y))
samps[(iters-show+1):iters,:]
# -

println(cor(samps[1:iters-1,1],samps[2:iters,1])) #serial correlation; lower is better
println(ρ^4) #for comparison, gibbs would be ρ² for each step; ρ⁴ for two steps positive
examples/stan/bugsv1/bivariateNuts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/alliwene/toyota-model-classifier/blob/main/gather_data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="zomTIMNpPtS5" outputId="6ef4b49e-4c06-4b46-c40e-f7b34f93927f" # Mount Google Drive from google.colab import drive # import drive from google colab import os, random, sys ROOT = "/content/drive" # default location for the drive drive.mount(ROOT, force_remount=True) # + colab={"base_uri": "https://localhost:8080/"} id="6xbHEWEyQCTJ" outputId="c045892f-00fc-4a7c-9009-aa65316aead3" # set file path path = "/content/drive/My Drive/toyota_data/train" os.chdir(path) # !ls # + id="Y9UvtfJsQRWx" # # !pip install -Uqq fastbook --quiet # + id="j_-ce2ZXQCHV" colab={"base_uri": "https://localhost:8080/"} outputId="e0efafd6-4d58-4146-c915-ff22dab725a6" import fastbook fastbook.setup_book() # + id="bHWCZBV65eHX" from fastbook import * from fastai.vision.widgets import * from pathlib import Path # + id="iAXa4V0w7eEz" from PIL import Image # + [markdown] id="kXRLuo9ahB_I" # ### Gather data using Bing API # + id="dPosWvDGagcc" key = os.environ.get('AZURE_SEARCH_KEY', 'X') # + id="-rxTCYn1an0c" brands = ['Sienna','Tacoma','Tundra', '4Runner', 'Highlander', 'Land cruiser', 'RAV4', 'Sequoia', 'Venza', '86', 'Avalon', 'Camry', 'Corolla', 'Mirai', 'Prius', 'Supra', 'Yaris'] path = Path(path) # + id="iG4vcyqwanjg" for b in brands: dest = (path/b) dest.mkdir(exist_ok=True) results = search_images_bing(key, f'toyota {b}') download_images(dest, urls=results.attrgot('contentUrl')) # + colab={"base_uri": "https://localhost:8080/"} id="GaWg7L1UgesS" 
outputId="2fa33e1e-0f4f-4ad6-c748-107712241789" # assign all saved images fns fns = get_image_files(path) fns # + [markdown] id="pwJyW7Z3hfgd" # ### Clean data # + colab={"base_uri": "https://localhost:8080/"} id="gbej9SYSkCr0" outputId="291c9575-a7ee-44f2-d01a-220552fad78e" # get corrupted images failed = verify_images(fns) failed # + id="rkeYf7figeWs" # delete corrupted images failed.map(Path.unlink); # + [markdown] id="B9-rb87uhLHa" # Remove files that are not images # + colab={"base_uri": "https://localhost:8080/"} id="ApzTzAwVY7ek" outputId="0ddcdc3f-a0dd-4780-e31a-225ed58516e5" drop = [] for folder in os.listdir(path): for f in os.listdir(folder): if not f.endswith(('.jpg', '.png', '.jpeg')): drop.append(os.path.join(path, folder, f)) os.remove(os.path.join(path, folder, f)) print(drop) print(len(drop)) # + [markdown] id="8G08fEsrjF_R" # Convert png to RGB jpg # + id="bKwg394xZ6WC" c = 1 for folder in os.listdir(path): for f in os.listdir(folder): if f.endswith(('.png')): im = Image.open(os.path.join(path, folder, f)) name = 'img'+str(c)+'.jpg' rgb_im = im.convert('RGB') rgb_im.save(os.path.join(path, folder, name)) c+=1 print('.') os.remove(os.path.join(path, folder, f)) # + [markdown] id="YMRPQERC8l-U" # Check for palette images. Convert palette image to RGB and delete palette image. # # # + id="3zOajHbAY7Db" colab={"base_uri": "https://localhost:8080/"} outputId="2df04d6b-8f13-401d-d636-d76785578849" c = 0 for folder in os.listdir(path): for f in os.listdir(folder): if f.endswith(('.jpg', '.jpeg')): im = Image.open(os.path.join(path, folder, f)) if im.mode == "P": print(os.path.join(path, folder, f)) name = 'P00000'+str(c)+'.jpg' rgb_im = im.convert('RGB') rgb_im.save(os.path.join(path, folder, name)) c += 1 print(os.path.join(path, folder, name)) os.remove(os.path.join(path, folder, f)) # + id="QRh07G_RdIt2"
gather_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:i2i_p0_env] # language: python # name: conda-env-i2i_p0_env-py # --- # + #set plotting to inline # %matplotlib inline #import relevant packages import numpy as np import scipy as sc import matplotlib.pyplot as plt #set plot options to get nicer figures plt.rcParams['figure.figsize'] = [12, 4] plt.rcParams['figure.dpi'] = 100 # -
empty_notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#import argparse
import numpy as np
import cv2
#from scipy.misc import imresize
#from moviepy.editor import VideoFileClip
#from IPython.display import HTML
from keras.models import load_model
#from PIL import Image

#argparser = argparse.ArgumentParser(
#description='test FCN8 network for taillights detection')

#argparser.add_argument(
#'-i',
#'--image',
#help='path to image file')

def auto_canny(image, sigma=0.33):
    """Canny edge detection with thresholds derived from the median intensity.

    `sigma` widens/narrows the band around the median; 0.33 is a common default.
    """
    # compute the median of the single channel pixel intensities
    v = np.median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    # return the edged image
    return edged

# Load Keras model
#model = load_model('full_CNN_model.h5')

# Class to average lanes with
#class Lanes():
#def __init__(self):
#self.recent_fit = []
#self.avg_fit = []

def taillight_detect(image):
    """
    Takes in a road image, re-sizes for the model,
    predicts the lane to be drawn from the model in G color,
    recreates an RGB image of a lane and merges with the
    original road image.
    """
    # NOTE(review): the model is reloaded from disk on every call —
    # expensive; consider loading once at module level.
    model = load_model('full_CNN_model.h5')
    #image1=image
    #image1=np.array(image1)
    #objects=np.squeeze(image,2)
    #rows,cols=objects.shape
    rows, cols,_ = image.shape
    #cols, rows = image.size
    #cols=160
    #rows=80
    # Get image ready for feeding into model: the network expects 160x80 input.
    small_img = cv2.resize(image, (160, 80))
    #img_y_cr_cb = cv2.cvtColor(small_img, cv2.COLOR_BGR2YCrCb)
    #y, cr, cb = cv2.split(img_y_cr_cb)
    # Applying equalize Hist operation on Y channel.
    #y_eq = cv2.equalizeHist(y)
    #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    #y_eq = clahe.apply(y)
    #img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb))
    #small_img = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)
    #small_img = imresize(image, (80, 160, 3))
    small_img = np.array(small_img)
    # Add a batch dimension for model.predict.
    small_img = small_img[None,:,:,:]

    # Make prediction with neural network (un-normalize value by multiplying by 255)
    prediction = model.predict(small_img)[0] * 255

    #new_image = imresize(prediction, (rows, cols, 3))
    # Upscale the predicted mask back to the original image size.
    mask = cv2.resize(prediction, (cols, rows))

    # CLAHE-equalize the luma channel before edge detection.
    img_y_cr_cb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    y, cr, cb = cv2.split(img_y_cr_cb)
    # Applying equalize Hist operation on Y channel.
    #y_eq = cv2.equalizeHist(y)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    y_eq = clahe.apply(y)
    img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb))
    image_he = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)
    gray = cv2.cvtColor(image_he, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    auto = auto_canny(blurred)
    # Keep only edge pixels where the network mask is confident (>100).
    for i in range(rows):
        for j in range(cols):
            if auto[i,j] >0 and mask[i,j]>100:
                auto[i,j]=255
            else:
                auto[i,j]=0
    cv2.imshow('histogram equalisation', auto)
    cv2.waitKey(0)
    #h, w = edges.shape[:2]
    # Flood toward the edge map from each of the four sides: a pixel is
    # marked 255 until the first edge pixel is met along that direction.
    filled_from_bottom = np.zeros((rows, cols))
    for col in range(cols):
        for row in reversed(range(rows)):
            if auto[row][col] < 255:
                filled_from_bottom[row][col] = 255
            else:
                break
    filled_from_top = np.zeros((rows, cols))
    for col in range(cols):
        for row in range(rows):
            if auto[row][col] < 255:
                filled_from_top[row][col] = 255
            else:
                break
    filled_from_left = np.zeros((rows, cols))
    for row in range(rows):
        for col in range(cols):
            if auto[row][col] < 255:
                filled_from_left[row][col] = 255
            else:
                break
    filled_from_right = np.zeros((rows, cols))
    for row in range(rows):
        for col in reversed(range(cols)):
            if auto[row][col] < 255:
                filled_from_right[row][col] = 255
            else:
                break
    # A pixel enclosed by edges from all four directions becomes foreground.
    for i in range(rows):
        for j in range(cols):
            if filled_from_bottom[i,j] ==0 and filled_from_top[i,j]==0 and filled_from_right[i,j] ==0 and filled_from_left[i,j]==0:
                auto[i,j]=255
            else:
                auto[i,j]=0
    # NOTE(review): `opening` and `closing` are computed but never used.
    kernel = np.ones((5,5),np.uint8)
    opening = cv2.morphologyEx(auto, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(auto, cv2.MORPH_CLOSE, kernel)

    mask = np.expand_dims(mask, 2)
    mask = np.repeat(mask, 3, axis=2) # give the mask the same shape as your image
    colors = {"red": [0.0,1.0,1.0], "blue": [0.,0.,0.1]} # a dictionary for your colors, experiment with the values
    colored_mask = np.multiply(mask, colors["red"]) # broadcast multiplication (thanks to the multiplication by 0, you'll end up with values different from 0 only on the relevant channels and the right regions)
    # NOTE(review): this overlay is dead code — the function returns `auto` below.
    image = image+colored_mask # element-wise sum (sinc img and mask have the same shape)
    #return image.astype(float) / 255
    #return new_image
    return auto

#lanes = Lanes()

# Where to save the output video
#vid_output = 'proj_reg_vid.mp4'

# Location of the input video
#clip1 = VideoFileClip("project_video.mp4")

#vid_clip = clip1.fl_image(road_lines)
#vid_clip.write_videofile(vid_output, audio=False)

#def _main_(args):
#image_path = args.image
#im = cv2.imread("ft.png")
#detected=taillight_detect(im)
#cv2.imwrite('detected.jpg',detected)

image = cv2.imread("ft1.png")
x=taillight_detect(image)
cv2.imshow('histogram equalisation', x)
cv2.waitKey(0)

#img_y_cr_cb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
#y, cr, cb = cv2.split(img_y_cr_cb)
# Applying equalize Hist operation on Y channel.
#y_eq = cv2.equalizeHist(y) #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) #y_eq = clahe.apply(y) #img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb)) #image = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR) #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blurred = cv2.GaussianBlur(gray, (3, 3), 0) # apply Canny edge detection using a wide threshold, tight # threshold, and automatically determined threshold #wide = cv2.Canny(blurred, 10, 200) #tight = cv2.Canny(blurred, 225, 250) #auto = auto_canny(blurred) # show the images #cv2.imshow("Original", image) #cv2.imshow("Edges", np.hstack([wide, tight, auto])) #cv2.waitKey(0) #rows,cols = auto.shape #for i in range(rows): #x = [] #for j in range(cols): #k = gray[i,j] #print(k) #x.append(auto[i,j]) #print(x) #cv2.imshow('histogram equalisation', detected) #cv2.waitKey(0) #if __name__ == '__main__': #args = argparser.parse_args() #_main_(args) # -
.ipynb_checkpoints/approach for taillight detection v1-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # scraping Game of thrones Seasons Dataset

# importing all the necessary library
import pandas as pd
import requests
from bs4 import BeautifulSoup

# Fetch the Wikipedia page that lists every Game of Thrones episode.
url = "https://en.wikipedia.org/wiki/List_of_Game_of_Thrones_episodes"
response = requests.get(url)
response.status_code

html_data = BeautifulSoup(response.text,'html.parser')
# print(html_data)

# Season titles come from the "Main article: ..." hatnote above each episode table.
seasons_name_lst = html_data.find_all('div',{'class':'hatnote navigation-not-searchable'})
seasons_lst = [div.a.text for div in seasons_name_lst]
seasons_lst

# One episode table per season, plus a trailing table we do not want.
table_lst = html_data.find_all('table',{'class':'wikitable plainrowheaders wikiepisodetable'})
len(table_lst)
print(table_lst[-1].prettify())

# deleting the last index table because it is not necessary
del table_lst[-1]
print(len(table_lst))

# Column headers, skipping the first column: in the body rows that first column
# is a <th scope="row"> cell, so it never appears among the <td> cells below.
header_html_lst = table_lst[0].find_all('th',{'scope':'col'})[1:]
header_html_lst
header_lst = [th.text for th in header_html_lst]
header_lst

# Preview: data rows of the first season's table.
table_rows_lst = table_lst[0].find_all('tr',{'class':'vevent'})
len(table_rows_lst)

table_rows = []
for row_html in table_rows_lst:
    cells = row_html.find_all('td')
    table_rows.append([cell.text for cell in cells])
table_rows

# ## Actual code of for all the seasons

# Write one CSV per season, named after the season title.
for season, table in zip(seasons_lst, table_lst):
    table_rows = []
    for row_html in table.find_all('tr', {'class': 'vevent'}):
        cells = row_html.find_all('td')
        table_rows.append([cell.text for cell in cells])
    # Build the frame directly from the row lists instead of six hand-written
    # per-column comprehensions; truncating each row to the header width keeps
    # the columns aligned even if a row carries an extra trailing cell.
    dataframe = pd.DataFrame(
        [row[:len(header_lst)] for row in table_rows],
        columns=header_lst,
    )
    dataframe.to_csv(season + '.csv', index=False)

# ### Now reading the data of every seasons

df = pd.read_csv('Game of Thrones (season 1).csv')
df
# FIX: set_index returns a new frame — assign it back; previously the result
# was discarded and the call was a no-op.
df = df.set_index('No. inseason')
Scraping Game of Thrones episode Dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Testing notebook import algorithm as a1 import zdt3 import cf6_4d import numpy as np import matplotlib.pyplot as plt import parsing as p zdt = zdt3.ZDT3() cf6 = cf6_4d.CF6() # # Testing # ## ZDT3 Problem unconstrained # ### Base implementation vs NSGAII # Budget of 4000 evaluations. # * Population: 40 # * Generations: 100 # * Neighbourhood: 20 moec = a1.MOEC(40,100,20,0.5,0.5,20,"DE","zdt3_p40_g100.out") frente = moec.run() # Diversity preservation (number of unique individuals from a population of 40) len(np.unique(frente, axis=0)) res_1 = np.array([zdt.func(c)[0] for c in frente]) res_2 = np.array([zdt.func(c)[1] for c in frente]) nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p40_g100.out") plt.plot(res_1,res_2,'bo') plt.plot(nsga_x,nsga_y,'ro') plt.xlabel("Objective 1") plt.ylabel("Objective 2") plt.show() # ### Base implementation vs NSGAII # Budget of 3960 evaluations. # * Population: 60 # * Generations: 66 # * Neighbourhood: 20 moec_2 = a1.MOEC(60,66,50,0.5,0.5,20,"DE","zdt3_p60_g66.out") frente_2 = moec_2.run() # Diversity preservation (number of unique individuals from a population of 50) len(np.unique(frente_2, axis=0)) res_3 = np.array([zdt.func(c)[0] for c in frente_2]) res_4 = np.array([zdt.func(c)[1] for c in frente_2]) nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p60_g66.out") plt.plot(res_3,res_4,'bo') plt.plot(nsga_x,nsga_y,'ro') plt.xlabel("Objective 1") plt.ylabel("Objective 2") plt.show() # ### Base implementation vs NSGAII # Budget of 4000 evaluations. 
# * Population: 80
# * Generations: 50
# * Neighbourhood: 50

# BUGFIX: the output file previously reused "zdt3_p60_g66.out", clobbering the
# p60/g66 run's results — name it after this configuration instead.
moec_3 = a1.MOEC(80,50,50,0.5,0.5,20,"DE","zdt3_p80_g50.out")
frente_3 = moec_3.run()

# Diversity preservation (number of unique individuals in the final front)
len(np.unique(frente_3, axis=0))

res_5 = np.array([zdt.func(c)[0] for c in frente_3])
res_6 = np.array([zdt.func(c)[1] for c in frente_3])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p80_g50.out")
plt.plot(res_5,res_6,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
# FIX: labels were set but never rendered — show the legend like the other cells.
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### Base implementation vs NSGAII
# Budget of 4000 evaluations.
# * Population: 100
# * Generations: 40
# * Neighbourhood: 50

moec_4 = a1.MOEC(100,40,50,0.5,0.5,20,"DE","zdt3_p100_g40.out")
frente_4 = moec_4.run()

# Diversity preservation (number of unique individuals in the final front)
len(np.unique(frente_4, axis=0))

nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p100_g40.out")
res_7 = np.array([zdt.func(c)[0] for c in frente_4])
res_8 = np.array([zdt.func(c)[1] for c in frente_4])
plt.plot(res_7,res_8,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
# FIX: labels were set but the legend call was missing here.
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### Base implementation vs NSGAII
# Budget of 4000 evaluations.
# * Population: 160
# * Generations: 25
# * Neighbourhood: 100

moec_5 = a1.MOEC(160,25,100,0.5,0.5,20,"DE","zdt3_p160_g25.out")
frente_5 = moec_5.run()

# Diversity preservation (number of unique individuals in the final front)
len(np.unique(frente_5, axis=0))

nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p160_g25.out")
res_9 = np.array([zdt.func(c)[0] for c in frente_5])
res_10 = np.array([zdt.func(c)[1] for c in frente_5])
plt.plot(res_9,res_10,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGA")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### Base implementation vs NSGAII
# Budget of 10000 evaluations.
# * Population: 100 # * Generations: 100 # * Neighbourhood: 60 moec_6 = a1.MOEC(100,100,30,0.5,0.5,20,"DE","zdt3_p100_g100.out") frente_6 = moec_6.run() # Diversity preservation (number of unique individuals from a population of 50) len(np.unique(frente_6, axis=0)) nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p100_g100.out") res_11 = np.array([zdt.func(c)[0] for c in frente_6]) res_12 = np.array([zdt.func(c)[1] for c in frente_6]) plt.plot(res_11,res_12,'bo',label="MOEC") plt.plot(nsga_x,nsga_y,'ro',label="NSGAII") plt.xlabel("Objective 1") plt.ylabel("Objective 2") plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # ### Base implementation vs NSGAII # Budget of 9960 evaluations. # * Population: 60 # * Generations: 166 # * Neighbourhood: 30 moec_7 = a1.MOEC(60,166,55,0.5,0.7,20,"AHX","zdt3_p60_g166.out") frente_7 = moec_7.run() # Diversity preservation (number of unique individuals from a population of 50) len(np.unique(frente_7, axis=0)) nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p60_g166.out") res_13 = np.array([zdt.func(c)[0] for c in frente_7]) res_14 = np.array([zdt.func(c)[1] for c in frente_7]) plt.plot(res_13,res_14,'bo',label="MOEC") plt.plot(nsga_x,nsga_y,'ro',label="NSGAII") plt.xlabel("Objective 1") plt.ylabel("Objective 2") plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # ### Base implementation vs NSGAII # Budget of 9920 evaluations. 
# * Population: 160 # * Generations: 62 # * Neighbourhood: 100 moec_8 = a1.MOEC(160,62,100,0.5,0.5,20,"DE","zdt3_p160_g62.out") frente_8 = moec_8.run() # Diversity preservation (number of unique individuals from a population of 50) len(np.unique(frente_8, axis=0)) nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p160_g62.out") res_15 = np.array([zdt.func(c)[0] for c in frente_8]) res_16 = np.array([zdt.func(c)[1] for c in frente_8]) plt.plot(res_15,res_16,'bo',label="MOEC") plt.plot(nsga_x,nsga_y,'ro',label="NSGAII") plt.xlabel("Objective 1") plt.ylabel("Objective 2") plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # ### Base implementation vs NSGAII # Budget of 9920 evaluations. # * Population: 200 # * Generations: 50 # * Neighbourhood: 100 moec_9 = a1.MOEC(200,50,100,0.5,0.5,20,"DE","zdt3_p200_g50.out") frente_9 = moec_9.run() # Diversity preservation (number of unique individuals from a population of 50) len(np.unique(frente_9, axis=0)) nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p200_g50.out") res_17 = np.array([zdt.func(c)[0] for c in frente_9]) res_18 = np.array([zdt.func(c)[1] for c in frente_9]) plt.plot(res_17,res_18,'bo',label="MOEC") plt.plot(nsga_x,nsga_y,'ro',label="NSGAII") plt.xlabel("Objective 1") plt.ylabel("Objective 2") plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # ### Base implementation vs NSGAII # Budget of 9920 evaluations. 
# * Population: 400
# * Generations: 25
# * Neighbourhood: 100  (NOTE(review): the call below passes 150 — comment and code disagree)

moec_10 = a1.MOEC(400,25,150,0.5,0.5,20,"DE","zdt3_p400_g25.out")
frente_10 = moec_10.run()

# Diversity preservation (number of unique individuals from a population of 50)
len(np.unique(frente_10, axis=0))

nsga_x,nsga_y = p.read_plotout("NSGAII_outs/ZDT3/plot_zdt3_p400_g25.out")
res_19 = np.array([zdt.func(c)[0] for c in frente_10])
res_20 = np.array([zdt.func(c)[1] for c in frente_10])
plt.plot(res_19,res_20,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ## CF6 Problem with constraints

# ### CF6 with separation for constraint handling
# * Population 40
# * Generations 100
# * Constraint weights: 100,100

cf6_1 = a1.MOEC(40,100,10,0.5,0.5,20,"DE","cf6_p40_g100.out",const_mode="separation",problem=cf6_4d.CF6(),weights=[100,100])
fc_1 = cf6_1.run()

# Diversity preservation (number of unique individuals from a population of 50)
len(np.unique(fc_1, axis=0))

rc_1 = np.array([cf6.func(c)[0] for c in fc_1])
rc_2 = np.array([cf6.func(c)[1] for c in fc_1])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p40_g100.out")
plt.plot(rc_1,rc_2,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# (Translated from Spanish.) Failed attempt at implementing objective separation.
# The idea is a function that returns the sum of the constraint violations when it
# is greater than zero, and the Tchebycheff function otherwise, so that these
# functions, applied separately to each candidate solution, can be compared.
# The problem is that one solution may have a smaller constraint violation than
# the other's Tchebycheff value, so the first is chosen even though under the
# objective-separation heuristic the second should win.

# Constraint violations of the cf6_1 front.
con_1 = np.array([cf6.const(c)[0] for c in fc_1])
con_2 = np.array([cf6.const(c)[1] for c in fc_1])
plt.plot(con_1,con_2,'ro')
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 60
# * Generations 66
# * Constraint weights: 100,100  (NOTE(review): the call passes weights=[1,1])

cf6_2 = a1.MOEC(60,66,20,0.5,0.5,20,"DE","cf6_p60_g66.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[1,1])
fc_2 = cf6_2.run()

# Diversity preservation (number of unique individuals from a population of 50)
len(np.unique(fc_2, axis=0))

rc_3 = np.array([cf6.func(c)[0] for c in fc_2])
rc_4 = np.array([cf6.func(c)[1] for c in fc_2])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p60_g66.out")
plt.plot(rc_3,rc_4,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# Constraint violations of the cf6_2 front.
con_3 = np.array([cf6.const(c)[0] for c in fc_2])
con_4 = np.array([cf6.const(c)[1] for c in fc_2])
plt.plot(con_3,con_4,'ro')
plt.xlabel("Constraint 1")
plt.ylabel("Constraint 2")
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 80
# * Generations 50
# * Constraint weights: 100,100  (NOTE(review): the call passes weights=[1,1])

cf6_3 = a1.MOEC(80,50,50,0.5,0.5,20,"DE","cf6_p80_g50.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[1,1])
fc_3 = cf6_3.run()

# Diversity preservation (number of unique individuals from a population of 50)
len(np.unique(fc_3, axis=0))

rc_5 = np.array([cf6.func(c)[0] for c in fc_3])
rc_6 = np.array([cf6.func(c)[1] for c in fc_3])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p80_g50.out")
plt.plot(rc_5,rc_6,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# Constraint violations of the cf6_3 front (penalty handling, pop 80 / gen 50).
con_5 = np.array([cf6.const(c)[0] for c in fc_3])
con_6 = np.array([cf6.const(c)[1] for c in fc_3])
# BUGFIX: this cell previously re-plotted con_3/con_4 (the cf6_2 front) — plot
# the violations just computed for fc_3 instead.
plt.plot(con_5,con_6,'ro')
plt.xlabel("Constraint 1")
plt.ylabel("Constraint 2")
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 100
# * Generations 40
# * Neighbourhood 60
# * Constraint weights: 1,1 (as passed below)

cf6_4 = a1.MOEC(100,40,60,0.5,0.5,20,"DE","cf6_p100_g40.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[1,1])
fc_4 = cf6_4.run()

# Diversity preservation (number of unique individuals in the final front)
len(np.unique(fc_4, axis=0))

rc_7 = np.array([cf6.func(c)[0] for c in fc_4])
rc_8 = np.array([cf6.func(c)[1] for c in fc_4])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p100_g40.out")
plt.plot(rc_7,rc_8,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 160
# * Generations 25
# * Neighbourhood 100
# * Constraint weights: 1,1 (as passed below)

cf6_5 = a1.MOEC(160,25,100,0.5,0.5,20,"DE","cf6_p160_g25.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[1,1])
fc_5 = cf6_5.run()

# Diversity preservation (number of unique individuals in the final front)
len(np.unique(fc_5, axis=0))

rc_9 = np.array([cf6.func(c)[0] for c in fc_5])
rc_10 = np.array([cf6.func(c)[1] for c in fc_5])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p160_g25.out")
plt.plot(rc_9,rc_10,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# Constraint violations of the cf6_5 front (penalty handling, pop 160 / gen 25).
con_7 = np.array([cf6.const(c)[0] for c in fc_5])
con_8 = np.array([cf6.const(c)[1] for c in fc_5])
# BUGFIX: this cell previously re-plotted con_3/con_4 (the cf6_2 front) — plot
# the violations just computed for fc_5 instead.
plt.plot(con_7,con_8,'ro')
plt.xlabel("Constraint 1")
plt.ylabel("Constraint 2")
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 100
# * Generations 100
# * Neighbourhood 60
# * Constraint weights: 100,100

cf6_6 = a1.MOEC(100,100,60,0.5,0.5,20,"DE","cf6_p100_g100.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[100,100])
fc_6 = cf6_6.run()

# Diversity preservation (number of unique individuals in the final front)
len(np.unique(fc_6, axis=0))

rc_11 = np.array([cf6.func(c)[0] for c in fc_6])
rc_12 = np.array([cf6.func(c)[1] for c in fc_6])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p100_g100.out")
plt.plot(rc_11,rc_12,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 60
# * Generations 166
# * Neighbourhood 40 (comment previously said 100; the call passes 40)
# * Constraint weights: 100,100

cf6_7 = a1.MOEC(60,166,40,0.5,0.5,20,"DE","cf6_p60_g166.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[100,100])
fc_7 = cf6_7.run()

# Diversity preservation (number of unique individuals in the final front)
len(np.unique(fc_7, axis=0))

rc_13 = np.array([cf6.func(c)[0] for c in fc_7])
rc_14 = np.array([cf6.func(c)[1] for c in fc_7])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p60_g166.out")
plt.plot(rc_13,rc_14,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 160
# * Generations 62
# * Neighbourhood 100
# * Constraint weights: 100,100

cf6_8 = a1.MOEC(160,62,100,0.5,0.5,20,"DE","cf6_p160_g62.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[100,100])
fc_8 = cf6_8.run()

# Diversity preservation (number of unique individuals from a population of 50)
len(np.unique(fc_8, axis=0))

rc_15 = np.array([cf6.func(c)[0] for c in fc_8])
rc_16 = np.array([cf6.func(c)[1] for c in fc_8])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p160_g62.out")
plt.plot(rc_15,rc_16,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 200
# * Generations 50
# * Neighbourhood 100
# * Constraint weights: 100,100

cf6_9 = a1.MOEC(200,50,100,0.5,0.5,20,"DE","cf6_p200_g50.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[100,100])
fc_9 = cf6_9.run()

len(np.unique(fc_9, axis=0))

rc_17 = np.array([cf6.func(c)[0] for c in fc_9])
rc_18 = np.array([cf6.func(c)[1] for c in fc_9])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p200_g50.out")
plt.plot(rc_17,rc_18,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# Constraint violations of the cf6_9 front.
con_9 = np.array([cf6.const(c)[0] for c in fc_9])
con_10 = np.array([cf6.const(c)[1] for c in fc_9])
plt.plot(con_9,con_10,'ro')
plt.xlabel("Constraint 1")
plt.ylabel("Constraint 2")
plt.show()

# ### CF6 with penalty for constraint handling
# * Population 400
# * Generations 25
# * Neighbourhood 150
# * Constraint weights: 100,100

cf6_0 = a1.MOEC(400,25,150,0.5,0.5,20,"DE","cf6_p400_g25.out",const_mode="penalty",problem=cf6_4d.CF6(),weights=[100,100])
fc_0 = cf6_0.run()

len(np.unique(fc_0, axis=0))

rc_19 = np.array([cf6.func(c)[0] for c in fc_0])
rc_20 = np.array([cf6.func(c)[1] for c in fc_0])
nsga_x,nsga_y = p.read_plotout("NSGAII_outs/CF6/plot_cf6_p400_g25.out")
plt.plot(rc_19,rc_20,'bo',label="MOEC")
plt.plot(nsga_x,nsga_y,'ro',label="NSGAII")
plt.xlabel("Objective 1")
plt.ylabel("Objective 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()

# ### CF6 without constraint handling
# * Population 80
# * Generations 50
# * Constraint weights: 100,100  (NOTE(review): no const_mode/weights are passed below)

m_cf6_2 = a1.MOEC(80,50,20,0.5,0.5,20,problem=cf6_4d.CF6())
f_cf6_2 = m_cf6_2.run()

res_1 = np.array([cf6.func(c)[0] for c in f_cf6_2])
res_2 = np.array([cf6.func(c)[1] for c in f_cf6_2])
# NOTE(review): nsga_x/nsga_y still hold the p400/g25 front loaded above —
# confirm that is the intended comparison for this p80/g50 run.
plt.plot(res_1,res_2,'bo')
plt.plot(nsga_x,nsga_y,'rx')
plt.show()

# Constraint violations of the unconstrained run's front.
con_1 = np.array([cf6.const(c)[0] for c in f_cf6_2])
con_2 = np.array([cf6.const(c)[1] for c in f_cf6_2])
plt.plot(con_1,con_2,'ro')
plt.show()
Tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1><center> IBM® LinuxONE Data and AI: Credit Default Risk Analysis using scikit-learn </center></h1> # Financial organizations around the world face the constant challenge to accurately predict the risk of credit default. This covers all aspects of financial lending, including personal, corporate and public finance. To enhance and automate this process, great interest has been placed on the usage of data science and machine learning to predict the customers most likely to default. These lending processes mainly run on an institution's core business systems, with IBM Z and LinuxONE providing the industry standard platform for security, resiliency and scalability. With Anaconda on IBM Z and LinuxONE a customer can run their machine learning model co-located to their key processes, without the need to move data off the platform, ensuring security and data currency. To that end, we have created an example credit default model leveraging python and conda with Linux on Z. # # In this example we will train a Logistic Regression model on customers' credit history dataset using scikit-learn. # # NOTE: This notebook is a based on the original notebook available at https://github.com/ibmsoe/snap-ml/blob/master/notebooks/credit-default-prediction-example.ipynb # + #Installation instructions for Anaconda on Linux on Z and LinuxONE can be found here: https://docs.anaconda.com/anaconda/install/linux-s390x/ # - # ### Download dataset that has the customer information # # The dataset contains 10 million transactions with 19 variables. # # You can comment out the downloading code below if running the same wget command more than once. 
# +
# Download dataset csv file
# #!wget -O credit_customer_history.csv -nc https://ibm.box.com/shared/static/c84jns0hy2ty05t3c3a9c17ca1mxpe6s.csv
# -

# !uname -om

# ### Install the necessary conda packages

# +
# #!conda install -y scikit-learn pandas matplotlib seaborn
# -

# ### Import statements for data science and related packages

# +
from __future__ import print_function
import numpy as np                     # multi-dimensional arrays, matrices, linear algebra and others
import pandas as pd                    # data manipulation and analysis
pd.options.display.max_columns = 999   # show every column when previewing wide frames
import matplotlib.pyplot as plt        # static, animated, and interactive data visualizations
# %matplotlib inline
import sklearn                         # Data preparation, train and score
import seaborn as sns                  # Data visualization library for statistical graphics plotting
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder, normalize
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score
from scipy.stats import chi2_contingency,ttest_ind
from sklearn.utils import shuffle
import time
import warnings
# NOTE(review): this silences ALL warnings, including sklearn convergence warnings.
warnings.filterwarnings('ignore')
# -

# + language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines) {
#     return false;
# }
# -

# ## Exploratory Data Analysis (EDA)
#
# As part of the EDA we will conduct the following exploration below:
# 1) We would like to see if there are any null values present in the dataset to ensure no imputation.
# 2) Our target variable is IS_DEFAULT, and we would like to understand its distribution.
# 3) Then we will look into the distribution of a few select features and their relationship to the target variable.
#
# Let's take a quick look at the dataset

# +
cust_pd_full = pd.read_csv('credit_customer_history.csv')

#Taking only 1 Million records for EDA.
rows=1000000
cust_pd = cust_pd_full.head(rows)

print("There are " + str(len(cust_pd_full)) + " observations in the customer history dataset.")
print("There are " + str(len(cust_pd_full.columns)) + " variables in the dataset.")

cust_pd.head()
# -

#the datatype of the features
cust_pd.info()

# Let us see if there are any missing values in the dataset
cust_pd.isnull().sum()

# Since there are no missing values in the dataset, let's move forward with dataset visualization.

# ### Distribution of the Target variable (IS_DEFAULT)

# Create visuals to understand the percentage of accounts in default.

#Pie chart to show the percentage of accounts in default
cust_pd.IS_DEFAULT.value_counts().plot(kind='pie',autopct='%1.0f%%').legend(bbox_to_anchor=(1.2, 0.6))

# In the dataset 70% of accounts are in good standing, while 30% are in default.

# ### Default by Credit Program (TRANSACTION_CATEGORY)
#
# As part of the EDA, we will examine the types of credit programs in the dataset, and conduct a comparative study of their individual default rates.

# Pie chart to show the percentage of accounts by Credit Program (TRANSACTION_CATEGORY)
cust_pd.TRANSACTION_CATEGORY.value_counts().plot(kind='pie',autopct='%1.0f%%').legend(bbox_to_anchor=(1.2, 0.7))

# The top three credit programs with most customers are Furniture (57%), Education (21%) and Electronics (20%).
# + pixiedust={"displayParams": {"aggregation": "COUNT", "chartsize": "100", "handlerId": "pieChart", "keyFields": "IS_DEFAULT", "rowCount": "1000"}} #Create a bar chart to show the default rates by Credit Program (TRANSACTION_CATEGORY) defaults_by_credit_program = cust_pd.groupby(['TRANSACTION_CATEGORY','IS_DEFAULT']).size() percentages = defaults_by_credit_program.groupby(level=0).apply(lambda x:100 * x / float(x.sum())) percentages.unstack().plot(kind='bar',stacked=True,color=['blue','red'],grid=False).legend(bbox_to_anchor=(1.2, 0.5)) # - # The top three credit programs with the highest default rate are New Car(89.7%), Used Car(60.6%) and Retraining(58.5%). However, all undeclared credit programs by the lendees belong to `OTHER` category in the dataset. # # ### Default by State Border # # In case IS_STATE_BORDER is set to YES, accounts/businesses are expanded to multiple states. It also signifies the scale of the business, assuming that mulit-state businesses are larger. # Pie chart to show percentage of businesses in single vs multiple states cust_pd.IS_STATE_BORDER.value_counts().plot(kind='pie',autopct='%1.0f%%').legend(bbox_to_anchor=(1.2, 0.5)) #Bar chart to compare default rates based on if an account expands across state borders defaults_by_xborder = cust_pd.groupby(['IS_STATE_BORDER','IS_DEFAULT']).size() percentages = defaults_by_xborder.groupby(level=0).apply(lambda x:100 * x / float(x.sum())) percentages.unstack().plot(kind='bar',stacked=True, color=['blue','red'], grid=False).legend(bbox_to_anchor=(1.2, 0.5)) # Fromt the visual above, we can infer that the larger scale businesses have a lower chance of default. # ### Will a customer still default with a co-applicant? 
# Examination on the effects on default of having a co-applicant #Bar chart looking at the default rates when having and not having a co-applicant defaults_by_co_applicant = cust_pd.groupby(['HAS_CO_APPLICANT','IS_DEFAULT']).size() percentages = defaults_by_co_applicant.groupby(level=0).apply(lambda x:100 * x / float(x.sum())) percentages.unstack().plot(kind='bar',stacked=True, color=['blue','red'], grid=False).legend(bbox_to_anchor=(1.2, 0.5)) # From bar chart above we can see that HAS_CO_APPLICANT feature has negligible impact on the default rate. # ### Does credit history play a role in the default rate? # # Examination into the impact of a customer's credit history on the default rate. # Bar chart of the types of credit history for each account, based on prior payment history cust_pd.CREDIT_HISTORY.value_counts().plot(kind='bar', title='CREDIT_HISTORY') # Stack bar chart showing the current default percentages for accounts based on their past payment history defaults_by_history = cust_pd.groupby(['CREDIT_HISTORY','IS_DEFAULT']).size() percentages = defaults_by_history.groupby(level=0).apply(lambda x:100 * x / float(x.sum())) percentages.unstack().plot(kind='bar',stacked=True,color=['blue','red'],grid=False).legend(bbox_to_anchor=(1.2, 0.5)) # From the visual above we observe that lendees with prior payment problems have a higher likelihood of default, as well as those who are labeled critical accounts. # ### Numerical features - EMI_TENURE, TRANSACTION_AMOUNT, NUMBER_CREDITS, RFM_SCORE # # The dataset has more categorical features than numerical ones overall as seen above. Below we will visualize the numerical variables and find out the meaning and relationships of the features, such as - EMI_TENURE, TRANSACTION_AMOUNT, NUMBER_CREDITS, RFM_SCORE, with the Target Variable (IS_DEFAULT). 
# Create a pair plot of numerical variables with the default variable sns.pairplot(cust_pd, hue="IS_DEFAULT") # * `pairplot` is a useful method to identify trends for further analysis. It appears that higher EMI_TENURE slightly increases the chances of default. EMI stands for equated monthy installement. # ## Data preparation # # In this section, we will get the data ready for training and evaluating the model by transforming features using popular techniques such as normalization, one-hot encoding, label encoding etc. # Quick preview of the dataset cust_pd = cust_pd_full cust_pd.head() # ### Split dataframe into Features and Labels # Now we divide the cust_pd dataframe into cust_pd_X (features) and cust_pd_Y (labels) # + # Spit the dataset into features and labels #Labels cust_pd_Y = cust_pd[['IS_DEFAULT']] #Features cust_pd_X = cust_pd.drop(['IS_DEFAULT'],axis=1) #See the shape of the Features and Labels print('cust_pd_X.shape=', cust_pd_X.shape, 'cust_pd_Y.shape=', cust_pd_Y.shape) # - # ### Transform Label # # Since the labels will be used during the training of the model, we need to transform categorical labels into numerical labels. # Preview the labels cust_pd_Y.head() #`LabelEncoder` transformer will be used to encode the target labels below. le = LabelEncoder() cust_pd_Y['IS_DEFAULT'] = le.fit_transform(cust_pd_Y['IS_DEFAULT']) cust_pd_Y.head() # ### Transform Features # Transform the features to be used in the model training. # EMI_TENURE, NUMBER_CREDITS, TRANSACTION_AMOUNT, CREDIT_HISTORY_ALL CREDITS PAID BACK, CREDIT_HISTORY_CRITICAL ACCOUNT, CREDIT_HISTORY_DELAY IN PAST, CREDIT_HISTORY_EXISTING CREDITS PAID BACK, CREDIT_HISTORY_NONE TAKEN etc are some of the features which are being used here for the training and inferencing. # View of the features print('features df shape = ', cust_pd_X.shape) cust_pd_X.head() # ### One-hot encoding for categorical columns # Categorical variables will be represented as binary vectors using One-hot encoding. 
# + # One-hot encoding categoricalColumns = ['CREDIT_HISTORY', 'TRANSACTION_CATEGORY', 'ACCOUNT_TYPE', 'ACCOUNT_AGE', 'STATE', 'IS_URBAN', 'IS_STATE_BORDER', 'HAS_CO_APPLICANT', 'HAS_GUARANTOR', 'OWN_REAL_ESTATE', 'OTHER_INSTALMENT_PLAN', 'OWN_RESIDENCE', 'RFM_SCORE', 'OWN_CAR', 'SHIP_INTERNATIONAL'] cust_pd_X = pd.get_dummies(cust_pd_X, columns=categoricalColumns) cust_pd_X.head() # - # ### Normalize Features # # Feature scaling is the one of the essential steps to build the best performing machine learning models. Here we are scaling the features using the normalization technique. Normalization helps to scale the features between 0 to 1. # + # Normalize the variables min_max_scaler = MinMaxScaler() features = min_max_scaler.fit_transform(cust_pd_X) features = normalize(features, axis=1, norm='l1') cust_pd_X = pd.DataFrame(features,columns=cust_pd_X.columns) cust_pd_X.head() # - # ### Split training dataset # # We are splitting the dataset with 70:30 ratio for training and evaluating the model. # + # Split the dataset label = cust_pd_Y.values features = cust_pd_X.values label = np.reshape(label,(-1,1)) X_train,X_test,y_train,y_test = \ train_test_split(features, label, test_size=0.3, random_state=42, stratify=label) print('X_train.shape=', X_train.shape, 'Y_train.shape=', y_train.shape) print('X_test.shape=', X_test.shape, 'Y_test.shape=', y_test.shape) # - # ### Train using sklearn # Now we will train our model using the `LogisticRegression` model for the classification problem. # Create a new Logistic regression classifier object using LogisticRegression() from sklearn from sklearn.linear_model import LogisticRegression sklearn_lr = LogisticRegression() # Training the LogisticRegression model sklearn_lr.fit(X_train, y_train) # ### Inferencing and Evaluating the model # For evaluating the model, we have chosen `accuracy_score` metric here. 
# Inference and check the model accuracy
sklearn_prediction = sklearn_lr.predict(X_test)
sklearn_accuracy = accuracy_score(y_test, sklearn_prediction)
print('sklearn ml accuracy score = ', sklearn_accuracy)

# Since the model achieves a reasonable accuracy score, it can be deployed in a production like or test environment for further validation.

# ## Conclusion:
#
# Building a machine learning pipeline on IBM Z and LinuxONE is a seamless experience with the availability of the conda packages required for data visualizations, data preparation, training, evaluating the model and inferencing. To leverage Anaconda packages further on IBM Z and LinuxONE the installation guide can be found [here](https://docs.anaconda.com/anaconda/install/linux-s390x/)

# &copy; Copyright IBM Corporation 2018, 2021
CreditDefaultPredictionSklearn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Probability
# Many of the problems we try to solve using statistics are to do with *probability*. For example, what's the probable salary for a graduate who scored a given score in their final exam at school? Or, what's the likely height of a child given the height of his or her parents?
#
# It therefore makes sense to learn some basic principles of probability as we study statistics.
#
# ## Probability Basics
# Let's start with some basic definitions and principles.
# - An ***experiment*** or ***trial*** is an action with an uncertain outcome, such as tossing a coin.
# - A ***sample space*** is the set of all possible outcomes of an experiment. In a coin toss, there's a set of two possible outcomes (*heads* and *tails*).
# - A ***sample point*** is a single possible outcome - for example, *heads*.
# - An ***event*** is a specific outcome of a single instance of an experiment - for example, tossing a coin and getting *tails*.
# - ***Probability*** is a value between 0 and 1 that indicates the likelihood of a particular event, with 0 meaning that the event is impossible, and 1 meaning that the event is inevitable. In general terms, it's calculated like this:
#
# \begin{equation}\text{probability of an event} = \frac{\text{Number of sample points that produce the event}}{\text{Total number of sample points in the sample space}} \end{equation}
#
# For example, the probability of getting *heads* when tossing a coin is <sup>1</sup>/<sub>2</sub> - there is only one side of the coin that is designated *heads*, and there are two possible outcomes in the sample space (*heads* and *tails*). So the probability of getting *heads* in a single coin toss is 0.5 (or 50% when expressed as a percentage).
#
# Let's look at another example.
Suppose you throw two dice, hoping to get 7.
#
# The dice throw itself is an *experiment* - you don't know the outcome until the dice have landed and settled.
#
# The *sample space* of all possible outcomes is every combination of two dice - 36 *sample points*:
# <table style='font-size:36px;'>
# <tr><td>&#9856;+&#9856;</td><td>&#9856;+&#9857;</td><td>&#9856;+&#9858;</td><td>&#9856;+&#9859;</td><td>&#9856;+&#9860;</td><td>&#9856;+&#9861;</td></tr>
# <tr><td>&#9857;+&#9856;</td><td>&#9857;+&#9857;</td><td>&#9857;+&#9858;</td><td>&#9857;+&#9859;</td><td>&#9857;+&#9860;</td><td>&#9857;+&#9861;</td></tr>
# <tr><td>&#9858;+&#9856;</td><td>&#9858;+&#9857;</td><td>&#9858;+&#9858;</td><td>&#9858;+&#9859;</td><td>&#9858;+&#9860;</td><td>&#9858;+&#9861;</td></tr>
# <tr><td>&#9859;+&#9856;</td><td>&#9859;+&#9857;</td><td>&#9859;+&#9858;</td><td>&#9859;+&#9859;</td><td>&#9859;+&#9860;</td><td>&#9859;+&#9861;</td></tr>
# <tr><td>&#9860;+&#9856;</td><td>&#9860;+&#9857;</td><td>&#9860;+&#9858;</td><td>&#9860;+&#9859;</td><td>&#9860;+&#9860;</td><td>&#9860;+&#9861;</td></tr>
# <tr><td>&#9861;+&#9856;</td><td>&#9861;+&#9857;</td><td>&#9861;+&#9858;</td><td>&#9861;+&#9859;</td><td>&#9861;+&#9860;</td><td>&#9861;+&#9861;</td></tr>
# </table>
#
# The *event* you want to happen is throwing a 7.
There are 6 *sample points* that could produce this event: # # <table style='font-size:36px;'> # <tr><td style='color:lightgrey;'>&#9856;+&#9856;</td><td style='color:lightgrey;'>&#9856;+&#9857;</td><td style='color:lightgrey;'>&#9856;+&#9858;</td><td style='color:lightgrey;'>&#9856;+&#9859;</td><td style='color:lightgrey;'>&#9856;+&#9860;</td><td>&#9856;+&#9861;</td></tr> # <tr><td style='color:lightgrey;'>&#9857;+&#9856;</td><td style='color:lightgrey;'>&#9857;+&#9857;</td><td style='color:lightgrey;'>&#9857;+&#9858;</td><td style='color:lightgrey;'>&#9857;+&#9859;</td><td>&#9857;+&#9860;</td><td style='color:lightgrey;'>&#9857;+&#9861;</td></tr> # <tr><td style='color:lightgrey;'>&#9858;+&#9856;</td><td style='color:lightgrey;'>&#9858;+&#9857;</td><td style='color:lightgrey;'>&#9858;+&#9858;</td><td>&#9858;+&#9859;</td><td style='color:lightgrey;'>&#9858;+&#9860;</td><td style='color:lightgrey;'>&#9858;+&#9861;</td></tr> # <tr><td style='color:lightgrey;'>&#9859;+&#9856;</td><td style='color:lightgrey;'>&#9859;+&#9857;</td><td>&#9859;+&#9858;</td><td style='color:lightgrey;'>&#9859;+&#9859;</td><td style='color:lightgrey;'>&#9859;+&#9860;</td><td style='color:lightgrey;'>&#9859;+&#9861;</td></tr> # <tr><td style='color:lightgrey;'>&#9860;+&#9856;</td><td>&#9860;+&#9857;</td><td style='color:lightgrey;'>&#9860;+&#9858;</td><td style='color:lightgrey;'>&#9860;+&#9859;</td><td style='color:lightgrey;'>&#9860;+&#9860;</td><td style='color:lightgrey;'>&#9860;+&#9861;</td></tr> # <tr><td>&#9861;+&#9856;</td><td style='color:lightgrey;'>&#9861;+&#9857;</td><td style='color:lightgrey;'>&#9861;+&#9858;</td><td style='color:lightgrey;'>&#9861;+&#9859;</td><td style='color:lightgrey;'>&#9861;+&#9860;</td><td style='color:lightgrey;'>&#9861;+&#9861;</td></tr> # </table> # # The *probability* of throwing a 7 is therefore <sup>6</sup>/<sub>36</sub> which can be simplified to <sup>1</sup>/<sub>6</sub> or approximately 0.167 (16.7%). 
#
# ### Probability Notation
# When we express probability, we use an upper-case **P** to indicate *probability* and an upper-case letter to represent the event. So to express the probability of throwing a 7 as an event called ***A***, we could write:
#
# \begin{equation}P(A) = 0.167 \end{equation}
#
# ### The Complement of an Event
# The *complement* of an event is the set of *sample points* that do ***not*** result in the event.
#
# For example, suppose you have a standard deck of playing cards, and you draw one card, hoping for a *spade*. In this case, the drawing of a card is the *experiment*, and the *event* is drawing a spade. There are 13 cards of each suit in the deck. So the *sample space* contains 52 *sample points*:
#
# <table>
# <tr><td>13 x <span style='font-size:32px;color:red;'>&hearts;</span></td><td>13 x <span style='font-size:32px;color:black;'>&spades;</span></td><td>13 x <span style='font-size:32px;color:black;'>&clubs;</span></td><td>13 x <span style='font-size:32px;color:red;'>&diams;</span></td></tr>
# </table>
#
# There are 13 *sample points* that would satisfy the requirements of the event:
#
# <table>
# <tr><td style='color:lightgrey;'>13 x <span style='font-size:32px;'>&hearts;</span></td><td>13 x <span style='font-size:32px;'>&spades;</span></td><td style='color:lightgrey;'>13 x <span style='font-size:32px;'>&clubs;</span></td><td style='color:lightgrey;'>13 x <span style='font-size:32px'>&diams;</span></td></tr>
# </table>
#
# So the *probability* of the event (drawing a spade) is <sup>13</sup>/<sub>52</sub> which is <sup>1</sup>/<sub>4</sub> or 0.25 (25%).
# # The *complement* of the event is all of the possible outcomes that *don't* result in drawing a spade: # # <table> # <tr><td>13 x <span style='font-size:32px;color:red;'>&hearts;</span></td><td style='color:lightgrey;'>13 x <span style='font-size:32px;'>&spades;</span></td><td>13 x <span style='font-size:32px;color:black;'>&clubs;</span></td><td>13 x <span style='font-size:32px;color:red;'>&diams;</span></td></tr> # </table> # # There are 39 sample points in the complement (3 x 13), so the probability of the complement is <sup>39</sup>/<sub>52</sub> which is <sup>3</sup>/<sub>4</sub> or 0.75 (75%). # # Note that the probability of an event and the probability of its complement ***always add up to 1***. # # This fact can be useful in some cases. For example, suppose you throw two dice and want to know the probability of throwing more than 4. You *could* count all of the outcomes that would produce this result, but there are a lot of them. It might be easier to identify the ones that *do not* produce this result (in other words, the complement): # # <table style='font-size:36px;'> # <tr><td>&#9856;+&#9856;</td><td>&#9856;+&#9857;</td><td>&#9856;+&#9858;</td><td style='color:lightgrey;'>&#9856;+&#9859;</td><td style='color:lightgrey;'>&#9856;+&#9860;</td><td style='color:lightgrey;'>&#9856;+&#9861;</td></tr> # <tr><td>&#9857;+&#9856;</td><td>&#9857;+&#9857;</td><td style='color:lightgrey;'>&#9857;+&#9858;</td><td style='color:lightgrey;'>&#9857;+&#9859;</td><td style='color:lightgrey;'>&#9857;+&#9860;</td><td style='color:lightgrey;'>&#9857;+&#9861;</td></tr> # <tr><td>&#9858;+&#9856;</td><td style='color:lightgrey;'>&#9858;+&#9857;</td><td style='color:lightgrey;'>&#9858;+&#9858;</td><td style='color:lightgrey;'>&#9858;+&#9859;</td><td style='color:lightgrey;'>&#9858;+&#9860;</td><td style='color:lightgrey;'>&#9858;+&#9861;</td></tr> # <tr><td style='color:lightgrey;'>&#9859;+&#9856;</td><td style='color:lightgrey;'>&#9859;+&#9857;</td><td 
style='color:lightgrey;'>&#9859;+&#9858;</td><td style='color:lightgrey;'>&#9859;+&#9859;</td><td style='color:lightgrey;'>&#9859;+&#9860;</td><td style='color:lightgrey;'>&#9859;+&#9861;</td></tr> # <tr><td style='color:lightgrey;'>&#9860;+&#9856;</td><td style='color:lightgrey;'>&#9860;+&#9857;</td><td style='color:lightgrey;'>&#9860;+&#9858;</td><td style='color:lightgrey;'>&#9860;+&#9859;</td><td style='color:lightgrey;'>&#9860;+&#9860;</td><td style='color:lightgrey;'>&#9860;+&#9861;</td></tr> # <tr><td style='color:lightgrey;'>&#9861;+&#9856;</td><td style='color:lightgrey;'>&#9861;+&#9857;</td><td style='color:lightgrey;'>&#9861;+&#9858;</td><td style='color:lightgrey;'>&#9861;+&#9859;</td><td style='color:lightgrey;'>&#9861;+&#9860;</td><td style='color:lightgrey;'>&#9861;+&#9861;</td></tr> # </table> # # Out of a total of 36 sample points in the sample space, there are 6 sample points where you throw a 4 or less (1+1, 1+2, 1+3, 2+1, 2+2, and 3+1); so the probability of the complement is <sup>6</sup>/<sub>36</sub> which is <sup>1</sup>/<sub>6</sub> or approximately 0.167 (16.7%). # # Now, here's the clever bit. Since the probability of the complement and the event itself must add up to 1, the probability of the event must be **<sup>5</sup>/<sub>6</sub>** or **0.833** (**83.3%**). # # We indicate the complement of an event by adding a **'** to the letter assigned to it, so: # # \begin{equation}P(A) = 1 - P(A') \end{equation} # # ### Bias # Often, the sample points in the sample space do not have the same probability, so there is a *bias* that makes one outcome more likely than another. 
For example, suppose your local weather forecaster indicates the predominant weather for each day of the week like this:
#
# <table>
# <tr><td style='text-align:center'>Mon</td><td style='text-align:center'>Tue</td><td style='text-align:center'>Wed</td><td style='text-align:center'>Thu</td><td style='text-align:center'>Fri</td><td style='text-align:center'>Sat</td><td style='text-align:center'>Sun</td></tr>
# <tr style='font-size:32px'><td>&#9729;</td><td>&#9730;</td><td>&#9728;</td><td>&#9728;</td><td>&#9728;</td><td>&#9729;</td><td>&#9728;</td></tr>
# </table>
#
# This forecast is pretty typical for your area at this time of the year. In fact, historically the weather is sunny on 60% of days, cloudy on 30% of days, and rainy on only 10% of days. On any given day, the sample space for the weather contains 3 sample points (*sunny*, *cloudy*, and *rainy*); but the probabilities for these sample points are not the same.
#
# If we assign the letter **A** to a sunny day event, **B** to a cloudy day event, and **C** to a rainy day event then we can write these probabilities like this:
#
# \begin{equation}P(A)=0.6\;\;\;\; P(B)=0.3\;\;\;\; P(C)=0.1 \end{equation}
#
# The complement of **A** (a sunny day) is any day where it is not sunny - it is either cloudy or rainy. We can work out the probability for this in two ways: we can subtract the probability of **A** from 1:
#
# \begin{equation}P(A') = 1 - P(A) = 1 - 0.6 = 0.4 \end{equation}
#
# Or we can add together the probabilities for all events that do *not* result in a sunny day:
#
# \begin{equation}P(A') = P(B) + P(C) = 0.3 + 0.1 = 0.4 \end{equation}
#
# Either way, there's a 40% chance of it not being sunny!
#
# ## Conditional Probability and Dependence
# Events can be:
# - *Independent* (events that are not affected by other events)
# - *Dependent* (events that are conditional on other events)
# - *Mutually Exclusive* (events that can't occur together)
#
# ### Independent Events
# Imagine you toss a coin.
The sample space contains two possible outcomes: heads (<span style='font-size:42px;color:gold;'><sub>&#10050;</sub></span>) or tails (<span style='font-size:42px;color:gold;'><sub>&#9854;</sub></span>).
#
# The probability of getting *heads* is <sup>1</sup>/<sub>2</sub>, and the probability of getting *tails* is also <sup>1</sup>/<sub>2</sub>. Let's toss a coin...
#
# <span style='font-size:48px;color:gold;'>&#10050;</span>
#
# OK, so we got *heads*. Now, let's toss the coin again:
#
# <span style='font-size:48px;color:gold;'>&#10050;</span>
#
# It looks like we got *heads* again. If we were to toss the coin a third time, what's the probability that we'd get *heads*?
#
# Although you might be tempted to think that a *tail* is overdue, the fact is that each coin toss is an independent event. The outcome of the first coin toss does not affect the second coin toss (or the third, or any number of other coin tosses). For each independent coin toss, the probability of getting *heads* (or *tails*) remains <sup>1</sup>/<sub>2</sub>, or 50%.
#
# Run the following Python code to simulate 10,000 coin tosses by assigning a random value of 0 or 1 to *heads* and *tails*.
Each time the coin is tossed, the probability of getting *heads* or *tails* is 50%, so you should expect approximately half of the results to be *heads* and half to be *tails* (it won't be exactly half, due to a little random variation; but it should be close): # + # %matplotlib inline import random # Create a list with 2 element (for heads and tails) heads_tails = [0,0] # loop through 10000 trials trials = 10000 trial = 0 while trial < trials: trial = trial + 1 # Get a random 0 or 1 toss = random.randint(0,1) # Increment the list element corresponding to the toss result heads_tails[toss] = heads_tails[toss] + 1 print (heads_tails) # Show a pie chart of the results from matplotlib import pyplot as plt plt.figure(figsize=(5,5)) plt.pie(heads_tails, labels=['heads', 'tails']) plt.legend() plt.show() # - # ### Combining Independent Events # Now, let's ask a slightly different question. What is the probability of getting three *heads* in a row? Since the probability of a heads on each independent toss is <sup>1</sup>/<sub>2</sub>, you might be tempted to think that the same probability applies to getting three *heads* in a row; but actually, we need to treat getting three *heads* as it's own event, which is the combination of three independent events. To combine independent events like this, we need to multiply the probability of each event. So: # # <span style='font-size:48px;color:gold;'><sub>&#10050;</sub></span> = <sup>1</sup>/<sub>2</sub> # # <span style='font-size:48px;color:gold;'><sub>&#10050;&#10050;</sub></span> = <sup>1</sup>/<sub>2</sub> x <sup>1</sup>/<sub>2</sub> # # <span style='font-size:48px;color:gold;'><sub>&#10050;&#10050;&#10050;</sub></span> = <sup>1</sup>/<sub>2</sub> x <sup>1</sup>/<sub>2</sub> x <sup>1</sup>/<sub>2</sub> # # So the probability of tossing three *heads* in a row is 0.5 x 0.5 x 0.5, which is 0.125 (or 12.5%). 
#
# Run the code below to simulate 10,000 trials of flipping a coin three times:

# +
import random

# Count the number of 3xHeads results
h3 = 0

# Create a list of all results
results = []

# Run 10000 trials. Use a for-loop over range() rather than a hand-maintained
# while-counter, and build each three-flip result with a comprehension instead
# of repeating the same conditional expression three times.
trials = 10000
for _ in range(trials):
    # Flip three coins
    result = ['H' if random.randint(0, 1) == 1 else 'T' for _ in range(3)]
    results.append(result)
    # If it's three heads, add it to the count
    h3 = h3 + int(result == ['H', 'H', 'H'])

# What proportion of trials produced 3x heads
print("%.2f%%" % ((h3/trials)*100))

# Show all the results
print(results)
# -

# The output shows the percentage of times a trial resulted in three heads (which should be somewhere close to 12.5%). You can count the number of *['H', 'H', 'H']* entries in the full list of results to verify this if you like!
#
#
# #### Probability Trees
# You can represent a series of events and their probabilities as a probability tree:
#
#                       ____H(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#                      /
#            ____H(0.5)
#           /          \____T(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#          /
#   __H(0.5)           ____H(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#  /       \          /
#  |        \____T(0.5)
#  |                   \____T(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#  |
#  |                   _____H(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#  |                  /
#  |        ___H(0.5)
#  |       /          \_____T(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#  \      /
#   \__T(0.5)          _____H(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#          \          /
#           \___T(0.5)
#                     \_____T(0.5) : 0.5 x 0.5 x 0.5 = 0.125
#                                                      _____
#                                                        1.0
#
# Starting at the left, you can follow the branches in the tree that represent each event (in this case a coin toss result of *heads* or *tails* at each branch). Multiplying the probability of each branch of your path through the tree gives you the combined probability for an event composed of all of the events in the path.
In this case, you can see from the tree that you are equally likely to get any sequence of three *heads* or *tails* results (so three *heads* is just as likely as three *tails*, which is just as likely as *head-tail-head*, *tail-head-tail*, or any other combination!)
#
# Note that the total probability for all paths through the tree adds up to 1.
#
# #### Combined Event Probability Notation
# When calculating the probability of combined events, we assign a letter such as **A** or **B** to each event, and we use the *intersection* (**&cap;**) symbol to indicate that we want the combined probability of multiple events. So we could assign the letters **A**, **B**, and **C** to each independent coin toss in our sequence of three tosses, and express the combined probability like this:
#
# \begin{equation}P(A \cap B \cap C) = P(A) \times P(B) \times P(C) \end{equation}
#
# #### Combining Events with Different Probabilities
# Imagine you have created a new game that mixes the excitement of coin-tossing with the thrill of die-rolling! The objective of the game is to roll a die and get *6*, and toss a coin and get *heads*:
#
# <div style='text-align:center'><span style='font-size:48px;'>&#9861;</span><span style='font-size:42px;'> +</span><span style='font-size:48px;color:gold;'>&#10050;</span></div>
#
# On each turn of the game, a player rolls the die and tosses the coin.
#
# How can we calculate the probability of winning?
# # There are two independent events required to win: a die-roll of *6* (which we'll call event **A**), and a coin-toss of *heads* (which we'll call event **B**) # # Our formula for combined independent events is: # # \begin{equation}P(A \cap B) = P(A) \times P(B) \end{equation} # # The probablilty of rolling a *6* on a fair die is <sup>1</sup>/<sub>6</sub> or 0.167; and the probability of tossing a coin and getting *heads* is <sup>1</sup>/<sub>2</sub> or 0.5: # # \begin{equation}P(A \cap B) = 0.167 \times 0.5 = 0.083 \end{equation} # # So on each turn, there's an 8.3% chance to win the game. # # #### Intersections and Unions # # Previously you saw that we use the *intersection* (**&cap;**) symbol to represent "and" when combining event probabilities. This notation comes from a branch of mathematics called *set theory*, in which we work with sets of values. let's examine this in a little more detail. # # Here's our deck of playing cards, with the full sample space for drawing any card: # # <table style='font-size:18px;'> # <tr><td style='color:red;'>A &hearts;</td><td style='color:black;'>A &spades;</td><td style='color:black;'>A &clubs;<td style='color:red;'>A &diams;</td></tr> # <tr><td style='color:red;'>K &hearts;</td><td style='color:black;'>K &spades;</td><td style='color:black;'>K &clubs;<td style='color:red;'>K &diams;</td></tr> # <tr><td style='color:red;'>Q &hearts;</td><td style='color:black;'>Q &spades;</td><td style='color:black;'>Q &clubs;<td style='color:red;'>Q &diams;</td></tr> # <tr><td style='color:red;'>J &hearts;</td><td style='color:black;'>J &spades;</td><td style='color:black;'>J &clubs;<td style='color:red;'>J &diams;</td></tr> # <tr><td style='color:red;'>10 &hearts;</td><td style='color:black;'>10 &spades;</td><td style='color:black;'>10 &clubs;<td style='color:red;'>10 &diams;</td></tr> # <tr><td style='color:red;'>9 &hearts;</td><td style='color:black;'>9 &spades;</td><td style='color:black;'>9 &clubs;<td style='color:red;'>9 
&diams;</td></tr> # <tr><td style='color:red;'>8 &hearts;</td><td style='color:black;'>8 &spades;</td><td style='color:black;'>8 &clubs;<td style='color:red;'>8 &diams;</td></tr> # <tr><td style='color:red;'>7 &hearts;</td><td style='color:black;'>7 &spades;</td><td style='color:black;'>7 &clubs;<td style='color:red;'>7 &diams;</td></tr> # <tr><td style='color:red;'>6 &hearts;</td><td style='color:black;'>6 &spades;</td><td style='color:black;'>6 &clubs;<td style='color:red;'>6 &diams;</td></tr> # <tr><td style='color:red;'>5 &hearts;</td><td style='color:black;'>5 &spades;</td><td style='color:black;'>5 &clubs;<td style='color:red;'>5 &diams;</td></tr> # <tr><td style='color:red;'>4 &hearts;</td><td style='color:black;'>4 &spades;</td><td style='color:black;'>4 &clubs;<td style='color:red;'>4 &diams;</td></tr> # <tr><td style='color:red;'>3 &hearts;</td><td style='color:black;'>3 &spades;</td><td style='color:black;'>3 &clubs;<td style='color:red;'>3 &diams;</td></tr> # <tr><td style='color:red;'>2 &hearts;</td><td style='color:black;'>2 &spades;</td><td style='color:black;'>2 &clubs;<td style='color:red;'>2 &diams;</td></tr> # </table> # # Now, let's look at two potential events: # - Drawing an ace (**A**) # - Drawing a red card (**B**) # # The set of sample points for event **A** (drawing an ace) is: # # <table style='font-size:18px;'> # <tr><td style='color:red;'>A &hearts;</td><td style='color:black;'>A &spades;</td><td style='color:black;'>A &clubs;<td style='color:red;'>A &diams;</td></tr> # <tr style='color:lightgrey;'><td>K &hearts;</td><td style='color:lightgrey;'>K &spades;</td><td style='color:lightgrey;'>K &clubs;<td>K &diams;</td></tr> # <tr style='color:lightgrey;'><td>Q &hearts;</td><td>Q &spades;</td><td>Q &clubs;<td>Q &diams;</td></tr> # <tr style='color:lightgrey;'><td>J &hearts;</td><td>J &spades;</td><td>J &clubs;<td>J &diams;</td></tr> # <tr style='color:lightgrey;'><td>10 &hearts;</td><td>10 &spades;</td><td>10 &clubs;<td>10 &diams;</td></tr> 
# <tr style='color:lightgrey;'><td>9 &hearts;</td><td>9 &spades;</td><td>9 &clubs;<td>9 &diams;</td></tr> # <tr style='color:lightgrey;'><td>8 &hearts;</td><td>8 &spades;</td><td>8 &clubs;<td>8 &diams;</td></tr> # <tr style='color:lightgrey;'><td>7 &hearts;</td><td>7 &spades;</td><td>7 &clubs;<td>7 &diams;</td></tr> # <tr style='color:lightgrey;'><td>6 &hearts;</td><td>6 &spades;</td><td>6 &clubs;<td>6 &diams;</td></tr> # <tr style='color:lightgrey;'><td>5 &hearts;</td><td>5 &spades;</td><td>5 &clubs;<td>5 &diams;</td></tr> # <tr style='color:lightgrey;'><td>4 &hearts;</td><td>4 &spades;</td><td>4 &clubs;<td>4 &diams;</td></tr> # <tr style='color:lightgrey;'><td>3 &hearts;</td><td>3 &spades;</td><td>3 &clubs;<td>3 &diams;</td></tr> # <tr style='color:lightgrey;'><td>2 &hearts;</td><td>2 &spades;</td><td>2 &clubs;<td>2 &diams;</td></tr> # </table> # # So the probability of drawing an ace is: # # \begin{equation}P(A) = \frac{4}{52} = \frac{1}{13} = 0.077\end{equation} # # Now let's look at the set of sample points for event **B** (drawing a red card) # # <table style='font-size:18px;'> # <tr><td style='color:red;'>A &hearts;</td><td style='color:lightgrey;'>A &spades;</td><td style='color:lightgrey;'>A &clubs;<td style='color:red;'>A &diams;</td></tr> # <tr><td style='color:red;'>K &hearts;</td><td style='color:lightgrey;'>K &spades;</td><td style='color:lightgrey;'>K &clubs;<td style='color:red;'>K &diams;</td></tr> # <tr><td style='color:red;'>Q &hearts;</td><td style='color:lightgrey;'>Q &spades;</td><td style='color:lightgrey;'>Q &clubs;<td style='color:red;'>Q &diams;</td></tr> # <tr><td style='color:red;'>J &hearts;</td><td style='color:lightgrey;'>J &spades;</td><td style='color:lightgrey;'>J &clubs;<td style='color:red;'>J &diams;</td></tr> # <tr><td style='color:red;'>10 &hearts;</td><td style='color:lightgrey;'>10 &spades;</td><td style='color:lightgrey;'>10 &clubs;<td style='color:red;'>10 &diams;</td></tr> # <tr><td style='color:red;'>9 &hearts;</td><td 
style='color:lightgrey;'>9 &spades;</td><td style='color:lightgrey;'>9 &clubs;<td style='color:red;'>9 &diams;</td></tr>
# <tr><td style='color:red;'>8 &hearts;</td><td style='color:lightgrey;'>8 &spades;</td><td style='color:lightgrey;'>8 &clubs;<td style='color:red;'>8 &diams;</td></tr>
# <tr><td style='color:red;'>7 &hearts;</td><td style='color:lightgrey;'>7 &spades;</td><td style='color:lightgrey;'>7 &clubs;<td style='color:red;'>7 &diams;</td></tr>
# <tr><td style='color:red;'>6 &hearts;</td><td style='color:lightgrey;'>6 &spades;</td><td style='color:lightgrey;'>6 &clubs;<td style='color:red;'>6 &diams;</td></tr>
# <tr><td style='color:red;'>5 &hearts;</td><td style='color:lightgrey;'>5 &spades;</td><td style='color:lightgrey;'>5 &clubs;<td style='color:red;'>5 &diams;</td></tr>
# <tr><td style='color:red;'>4 &hearts;</td><td style='color:lightgrey;'>4 &spades;</td><td style='color:lightgrey;'>4 &clubs;<td style='color:red;'>4 &diams;</td></tr>
# <tr><td style='color:red;'>3 &hearts;</td><td style='color:lightgrey;'>3 &spades;</td><td style='color:lightgrey;'>3 &clubs;<td style='color:red;'>3 &diams;</td></tr>
# <tr><td style='color:red;'>2 &hearts;</td><td style='color:lightgrey;'>2 &spades;</td><td style='color:lightgrey;'>2 &clubs;<td style='color:red;'>2 &diams;</td></tr>
# </table>
#
# The probability of drawing a red card is therefore:
#
# \begin{equation}P(B) = \frac{26}{52} = \frac{1}{2} = 0.5\end{equation}
#
# ##### Intersections
#
# We can think of the sample spaces for these events as two sets, and we can show them as a Venn diagram:
#
# <br/>
#
# <div style='text-align:center'>Event A<span style='font-size:120px'>&#9901;</span>Event B</div>
#
# Each circle in the Venn diagram represents a set of sample points. The set on the left contains the sample points for event **A** (drawing an ace) and the set on the right contains the sample points for event **B** (drawing a red card).
Note that the circles overlap, creating an intersection that contains only the sample points that apply to event **A** *and* event **B**. # # This intersected sample space looks like this: # # <table style='font-size:18px;'> # <tr><td style='color:red;'>A &hearts;</td><td style='color:lightgrey;'>A &spades;</td><td style='color:lightgrey;'>A &clubs;<td style='color:red;'>A &diams;</td></tr> # <tr style='color:lightgrey;'><td>K &hearts;</td><td style='color:lightgrey;'>K &spades;</td><td style='color:lightgrey;'>K &clubs;<td>K &diams;</td></tr> # <tr style='color:lightgrey;'><td>Q &hearts;</td><td>Q &spades;</td><td>Q &clubs;<td>Q &diams;</td></tr> # <tr style='color:lightgrey;'><td>J &hearts;</td><td>J &spades;</td><td>J &clubs;<td>J &diams;</td></tr> # <tr style='color:lightgrey;'><td>10 &hearts;</td><td>10 &spades;</td><td>10 &clubs;<td>10 &diams;</td></tr> # <tr style='color:lightgrey;'><td>9 &hearts;</td><td>9 &spades;</td><td>9 &clubs;<td>9 &diams;</td></tr> # <tr style='color:lightgrey;'><td>8 &hearts;</td><td>8 &spades;</td><td>8 &clubs;<td>8 &diams;</td></tr> # <tr style='color:lightgrey;'><td>7 &hearts;</td><td>7 &spades;</td><td>7 &clubs;<td>7 &diams;</td></tr> # <tr style='color:lightgrey;'><td>6 &hearts;</td><td>6 &spades;</td><td>6 &clubs;<td>6 &diams;</td></tr> # <tr style='color:lightgrey;'><td>5 &hearts;</td><td>5 &spades;</td><td>5 &clubs;<td>5 &diams;</td></tr> # <tr style='color:lightgrey;'><td>4 &hearts;</td><td>4 &spades;</td><td>4 &clubs;<td>4 &diams;</td></tr> # <tr style='color:lightgrey;'><td>3 &hearts;</td><td>3 &spades;</td><td>3 &clubs;<td>3 &diams;</td></tr> # <tr style='color:lightgrey;'><td>2 &hearts;</td><td>2 &spades;</td><td>2 &clubs;<td>2 &diams;</td></tr> # </table> # # As you've seen previously, we write this as **A &cap; B**, and we can calculate its probability like this: # # \begin{equation}P(A \cap B) = P(A) \times P(B) = 0.077 \times 0.5 = 0.0385 \end{equation} # # So when you draw a single card from a full deck, there is a 
3.85% chance it will be a red ace. # # ##### Unions # The intersection describes the sample space for event **A** *and* event **B**; but what if we wanted to look at the probability of drawing an ace *or* a red card. In other words, any sample point that is in either of the Venn digram circles. # # This set of sample points looks like this: # # <table style='font-size:18px;'> # <tr><td style='color:red;'>A &hearts;</td><td style='color:black;'>A &spades;</td><td style='color:black;'>A &clubs;<td style='color:red;'>A &diams;</td></tr> # <tr><td style='color:red;'>K &hearts;</td><td style='color:lightgrey;'>K &spades;</td><td style='color:lightgrey;'>K &clubs;<td style='color:red;'>K &diams;</td></tr> # <tr><td style='color:red;'>Q &hearts;</td><td style='color:lightgrey;'>Q &spades;</td><td style='color:lightgrey;'>Q &clubs;<td style='color:red;'>Q &diams;</td></tr> # <tr><td style='color:red;'>J &hearts;</td><td style='color:lightgrey;'>J &spades;</td><td style='color:lightgrey;'>J &clubs;<td style='color:red;'>J &diams;</td></tr> # <tr><td style='color:red;'>10 &hearts;</td><td style='color:lightgrey;'>10 &spades;</td><td style='color:lightgrey;'>10 &clubs;<td style='color:red;'>10 &diams;</td></tr> # <tr><td style='color:red;'>9 &hearts;</td><td style='color:lightgrey;'>9 &spades;</td><td style='color:lightgrey;'>9 &clubs;<td style='color:red;'>9 &diams;</td></tr> # <tr><td style='color:red;'>8 &hearts;</td><td style='color:lightgrey;'>8 &spades;</td><td style='color:lightgrey;'>8 &clubs;<td style='color:red;'>8 &diams;</td></tr> # <tr><td style='color:red;'>7 &hearts;</td><td style='color:lightgrey;'>7 &spades;</td><td style='color:lightgrey;'>7 &clubs;<td style='color:red;'>7 &diams;</td></tr> # <tr><td style='color:red;'>6 &hearts;</td><td style='color:lightgrey;'>6 &spades;</td><td style='color:lightgrey;'>6 &clubs;<td style='color:red;'>6 &diams;</td></tr> # <tr><td style='color:red;'>5 &hearts;</td><td style='color:lightgrey;'>5 &spades;</td><td 
style='color:lightgrey;'>5 &clubs;<td style='color:red;'>5 &diams;</td></tr> # <tr><td style='color:red;'>4 &hearts;</td><td style='color:lightgrey;'>4 &spades;</td><td style='color:lightgrey;'>4 &clubs;<td style='color:red;'>4 &diams;</td></tr> # <tr><td style='color:red;'>3 &hearts;</td><td style='color:lightgrey;'>3 &spades;</td><td style='color:lightgrey;'>3 &clubs;<td style='color:red;'>3 &diams;</td></tr> # <tr><td style='color:red;'>2 &hearts;</td><td style='color:lightgrey;'>2 &spades;</td><td style='color:lightgrey;'>2 &clubs;<td style='color:red;'>2 &diams;</td></tr> # </table> # # We call this the *union* of the sets, and we write it as **A &cup; B**. # # To calculate the probability of a card being either an ace (of any color) or a red card (of any value), we can work out the probability of A, add it to the probability of B, and subtract the probability of A &cap; B (to avoid double-counting the red aces): # # \begin{equation}P(A \cup B) = P(A) + P(B) - P(A \cap B)\end{equation} # # So: # # \begin{equation}P(A \cup B) = 0.077 + 0.5 - 0.0385 = 0.5385\end{equation} # # So when you draw a card from a full deck, there is a 53.85% probability that it will be either an ace or a red card. # ### Dependent Events # Let's return to our deck of 52 cards from which we're going to draw one card. The sample space can be summarized like this: # # <table> # <tr><td>13 x <span style='font-size:32px;color:red;'>&hearts;</span></td><td>13 x <span style='font-size:32px;color:black;'>&spades;</span></td><td>13 x <span style='font-size:32px;color:black;'>&clubs;</span></td><td>13 x <span style='font-size:32px;color:red;'>&diams;</span></td></tr> # </table> # # There are two black suits (*spades* and *clubs*) and two red suits (*hearts* and *diamonds*); with 13 cards in each suit. 
So the probability of drawing a black card (event **A**) and the probability of drawing a red card (event **B**) can be calculated like this: # # \begin{equation}P(A) = \frac{13 + 13}{52} = \frac{26}{52} = 0.5 \;\;\;\; P(B) = \frac{13 + 13}{52} = \frac{26}{52} = 0.5\end{equation} # # Now let's draw a card from the deck: # # <div style ='text-align:center;'><span style='font-size:32px;color:red;'>&hearts;</span></div> # # We drew a heart, which is red. So, assuming we don't replace the card back into the deck, this changes the sample space as follows: # # <table> # <tr><td>12 x <span style='font-size:32px;color:red;'>&hearts;</span></td><td>13 x <span style='font-size:32px;color:black;'>&spades;</span></td><td>13 x <span style='font-size:32px;color:black;'>&clubs;</span></td><td>13 x <span style='font-size:32px;color:red;'>&diams;</span></td></tr> # </table> # # The probabilities for **A** and **B** are now: # # \begin{equation}P(A) = \frac{13 + 13}{51} = \frac{26}{51} = 0.51 \;\;\;\; P(B) = \frac{12 + 13}{51} = \frac{25}{51} = 0.49\end{equation} # # Now let's draw a second card: # # <div style ='text-align:center;'><span style='font-size:32px;color:red;'>&diams;</span></div> # # We drew a diamond, so again this changes the sample space for the next draw: # # <table> # <tr><td>12 x <span style='font-size:32px;color:red;'>&hearts;</span></td><td>13 x <span style='font-size:32px;color:black;'>&spades;</span></td><td>13 x <span style='font-size:32px;color:black;'>&clubs;</span></td><td>12 x <span style='font-size:32px;color:red;'>&diams;</span></td></tr> # </table> # # The probabilities for **A** and **B** are now: # # \begin{equation}P(A) = \frac{13 + 13}{50} = \frac{26}{50} = 0.52 \;\;\;\; P(B) = \frac{12 + 12}{50} = \frac{24}{50} = 0.48\end{equation} # # So it's clear that one event can affect another; in this case, the probability of drawing a card of a particular color on the second draw depends on the color of card drawn on the previous draw. 
We call these *dependent* events. # # Probability trees are particularly useful when looking at dependent events. Here's a probability tree for drawing red or black cards as the first three draws from a deck of cards: # # _______R(0.48) # / # ____R(0.49) # / \_______B(0.52) # / # __R(0.50) _______R(0.50) # / \ / # / \____B(0.51) # / \_______B(0.50) # / # _____/ ________R(0.50) # \ / # \ ___R(0.51) # \ / \________B(0.50) # \ / # \__B(0.50) ________R(0.52) # \ / # \___B(0.49) # \________B(0.48) # # # # #### Calculating Probabilities for Dependent Events # Imagine a game in which you have to predict the color of the next card to be drawn. Suppose the first card drawn is a *spade*, which is black. What is the probability of the next card being red? # # The notation for this is: # # \begin{equation}P(B|A)\end{equation} # # You can interpret this as *the probability of B, given A*. In other words, given that event **A** (drawing a black card) has already happened, what is the probability of **B** (drawing a red card). This is commonly referred to as the *conditional probability* of B given A; and it's formula is: # # \begin{equation}P(B|A) = \frac{P(A \cap B)}{P(A)}\end{equation} # # So to return to our example, the probability of the second card being red given that the first card was black is: # # \begin{equation}P(B|A) = \frac{\frac{26}{52} \times \frac{26}{51}}{\frac{26}{52}}\end{equation} # # Which simplifies to: # # \begin{equation}P(B|A) = \frac{0.5 \times 0.51}{0.5}\end{equation} # # So: # # \begin{equation}P(B|A) = \frac{0.255}{0.5} = 0.51\end{equation} # # Which is what we calculated previously - so the formula works! # # Because this is an algebraic expression, we can rearrange it like this: # # \begin{equation}P(A \cap B) = P(A) \times P(B|A)\end{equation} # # We can use this form of the formula to calculate the probability that the first two cards drawn from a full deck of cards will both be jacks. 
In this case, event **A** is drawing a jack for the first card, and event **B** is drawing a jack for the second card. # # The probability that the first drawn card will be a jack is: # # \begin{equation}P(A) = \frac{4}{52} = \frac{1}{13}\end{equation} # # We draw the first card: # # <br/> # <div style ='text-align:center;'><span style='font-size:32px;color:black;'>J &clubs;</span></div> # # Success! It's the jack of clubs. Our chances of the first two cards being jacks are looking good so far. # # Now, we know that there are now only 3 jacks left, in a deck of 51 remaining cards; so the probability of drawing a jack as a second card, given that we drew a jack as the first card is: # # \begin{equation}P(B|A) = \frac{3}{51}\end{equation} # # So we can work out the probability of drawing two jacks from a deck like this: # # \begin{equation}P(A \cap B) = \frac{1}{13} \times \frac{3}{51} = \frac{3}{663} = \frac{1}{221}\end{equation} # # So there's a 1 in 221 (0.45%) probability that the first two cards drawn from a full deck will be jacks. # # # ### Mutually Exclusive Events # We've talked about dependent and independent events, but there's a third category to be considered: mutually exclusive events. # # For example, when flipping a coin, what is the probability that in a single coin flip the result will be *heads* ***and*** *tails*? The answer is of course, 0; a single coin flip can only result in *heads* ***or*** *tails*; not both! # # For mutually exclusive events, the probability of an intersection is: # # \begin{equation}P(A \cap B) = 0\end{equation} # # The probability for a union is: # # \begin{equation}P(A \cup B) = P(A) + P(B)\end{equation} # # Note that we don't need to subtract the intersection (*and*) probability to calculate the union (*or*) probability like we did previously, because there's no risk of double-counting the sample points that lie in both events - there are none. 
(The intersection probability for mutually exclusive events is always 0, so you can subtract it if you like - you'll still get the same result!) # # Let's look at another two mutually exclusive events based on rolling a die: # - Rolling a 6 (event **A**) # - Rolling an odd number (event **B**) # # The probabilities for these events are: # # \begin{equation}P(A) = \frac{1}{6} \;\;\;\; P(B) = \frac{3}{6}\end{equation} # # What's the probability of rolling a 6 *and* an odd number in a single roll? These are mutually exclusive, so: # # \begin{equation}P(A \cap B) = 0\end{equation} # # What's the probability of rolling a 6 *or* an odd number: # # \begin{equation}P(A \cup B) = \frac{1}{6} + \frac{3}{6} = \frac{4}{6}\end{equation} # ## Binomial Variables and Distributions # Now that we know something about probability, let's apply that to statistics. Statistics is about inferring measures for a full population based on samples, allowing for random variation; so we're going to have to consider the idea of a *random variable*. # # A random variable is a number that can vary in value. For example, the temperature on a given day, or the number of students taking a class. # # ### Binomial Variables # One particular type of random variable that we use in statistics is a *binomial* variable. A binomial variable is used to count how frequently an event occurs in a fixed number of repeated independent experiments. The event in question must have the same probability of occurring in each experiment, and indicates the success or failure of the experiment; with a probability ***p*** of success, which has a complement of ***1 - p*** as the probability of failure (we often call this kind of experiment a *Bernoulli Trial* after Swiss mathematician Jacob Bernoulli). # # For example, suppose we flip a coin three times, counting *heads* as success. We can define a binomial variable to represent the number of successful coin flips (that is, the number of times we got *heads*). 
# # Let's examine this in more detail. # # We'll call our variable ***X***, and as stated previously it represents the number of times we flip *heads* in a series of three coin flips. Let's start by examining all the possible values for ***X***. # # We're flipping the coin three times, with a probability of <sup>1</sup>/<sub>2</sub> of success on each flip. The possible results include none of the flips resulting in *heads*, all of the flips resulting in *heads*, or any combination in between. There are two possible outcomes from each flip, and there are three flips, so the total number of possible result sets is 2<sup>3</sup>, which is 8. Here they are: # # <div style='font-size:48px;color:gold;'>&#9854;&#9854;&#9854;</div> # <br/> # <div style='font-size:48px;color:gold;'>&#9854;&#10050;&#9854;</div> # <br/> # <div style='font-size:48px;color:gold;'>&#9854;&#9854;&#10050;</div> # <br/> # <div style='font-size:48px;color:gold;'>&#9854;&#10050;&#10050;</div> # <br/> # <div style='font-size:48px;color:gold;'>&#10050;&#9854;&#9854;</div> # <br/> # <div style='font-size:48px;color:gold;'>&#10050;&#10050;&#9854;</div> # <br/> # <div style='font-size:48px;color:gold;'>&#10050;&#9854;&#10050;</div> # <br/> # <div style='font-size:48px;color:gold;'>&#10050;&#10050;&#10050;</div> # <br/> # # In these results, our variable ***X***, representing the number of successful events (getting *heads*), can vary from 0 to 3. We can write that like this: # # \begin{equation}X=\{0,1,2,3\}\end{equation} # # When we want to indicate a specific outcome for a random variable, we write the variable in lower case, for example ***x***. So what's the probability that ***x*** = 0 (meaning that out of our three flips we got no *heads*)? # # We can easily see that there is 1 row in our set of possible outcomes that contains no *heads*, so: # # \begin{equation}P(x=0) = \frac{1}{8}\end{equation} # # OK, let's see if we can find the probability for 1 success. 
There are three sample points containing a single *heads* result, so: # # \begin{equation}P(x=1) = \frac{3}{8}\end{equation} # # Again, we can easily see that from our results; but it's worth thinking about this in a slightly different way that will make it easier to calculate this probability more generically when there are more sample points (for example, if we had based our binomial variable on 100 coin flips, there would be many more combinations!). # # What we're actually saying here is that for **3** experiments (in this case coin flips), we want to *choose* **1** successful results. This is written as <sub>3</sub>C<sub>1</sub>. More generically, this is known as *n choose k*, and it's written like this: # # \begin{equation}_{n}C_{k}\end{equation} # # or sometimes like this: # # \begin{equation}\begin{pmatrix} n \\ k\end{pmatrix}\end{equation} # # The formula to calculate this is: # # \begin{equation}\begin{pmatrix} n \\ k\end{pmatrix} = \frac{n!}{k!(n-k)!}\end{equation} # # The exclamation points indicate *factorials* - the product of all positive integers less than or equal to the specified integer (with 0! having a value of 1). # # In the case of our <sub>3</sub>C<sub>1</sub> calculation, this means: # # \begin{equation}\begin{pmatrix} 3 \\ 1\end{pmatrix} = \frac{3!}{1!(3 - 1)!} = \frac{3!}{1!\times2!} =\frac{3 \times 2 \times 1}{1 \times(2 \times 1)} = \frac{6}{2} = 3 \end{equation} # # That seems like a lot of work to find the number of successful experiments, but now that you know this general formula, you can use it to calculate the number of sample points for any value of *k* from any set of *n* cases. 
Let's use it to find the possibility of two successful *heads* out of 3 coin flips: # # \begin{equation}P(x=2) = \frac{_{3}C_{2}}{8}\end{equation} # # Let's work out the number of combinations for <sub>3</sub>C<sub>2</sub> # # \begin{equation}_{3}C_{2} = \frac{3!}{2!(3 - 2)!} = \frac{6}{2 \times 1} = \frac{6}{2} = 3\end{equation} # # So: # # \begin{equation}P(x=2) = \frac{3}{8}\end{equation} # # Finally, what's the probability that all three flips were *heads*? # # \begin{equation}P(x=3) = \frac{_{3}C_{3}}{8}\end{equation} # # \begin{equation}_{3}C_{3} = \frac{3!}{3!(3 - 3)!} = \frac{6}{6} = 1\end{equation} # # So: # # \begin{equation}P(x=3) = \frac{1}{8}\end{equation} # # In Python, there are a number of modules you can use to find the *n choose k* combinations, including the *scipy.special.**comb*** function. # # In our coin flipping experiment, there is an equal probability of success and failure; so the probability calculations are relatively simple, and you may notice that there's a symmetry to the probability for each possible value of the binomial variable, as you can see by running the following Python code. You can increase the value of the **trials** variable to verify that no matter how many times we toss the coin, the probabilities of getting *heads* (or *tails* for that matter) form a symmetrical distribution, because there's an equal probability of success and failure in each trial. # + # %matplotlib inline from scipy import special as sps from matplotlib import pyplot as plt import numpy as np trials = 3 possibilities = 2**trials x = np.array(range(0, trials+1)) p = np.array([sps.comb(trials, i, exact=True)/possibilities for i in x]) # Set up the graph plt.xlabel('Successes') plt.ylabel('Probability') plt.bar(x, p) plt.show() # - # #### Allowing for Bias # Previously, we calculated the probability for each possible value of a random variable by simply dividing the number of combinations for that value by the total number of possible outcomes. 
This works if the probability of the event being tested is equal for failure and success; but of course, not all experiments have an equal chance of success or failure. Some include a bias that makes success more or less likely - so we need to be a little more thorough in our calculations to allow for this. # # Suppose you're flying off to some exotic destination, and you know that there's a one in four chance that the airport security scanner will trigger a random search for each passenger that goes through. If you watch five passengers go through the scanner, how many will be stopped for a random search? # # It's tempting to think that there's a one in four chance, so a quarter of the passengers will be stopped; but remember that the searches are triggered randomly for thousands of passengers that pass through the airport each day. It's possible that none of the next five passengers will be searched; all five of them will be searched, or some other value in between will be searched. # # Even though the probabilities of being searched or not searched are not the same, this is still a binomial variable. There are a fixed number of independent experiments (five passengers passing through the security scanner), the outcome of each experiment is either success (a search is triggered) or failure (no search is triggered), and the probability of being searched does not change for each passenger. # # There are five experiments in which a passenger goes through the security scanner, let's call this **n**. # # For each passenger, the probability of being searched is <sup>1</sup>/<sub>4</sub> or 0.25. We'll call this **p**. # # The complement of **p** (in other words, the probability of *not* being searched) is **1-p**, in this case <sup>3</sup>/<sub>4</sub> or 0.75. # # So, what's the probability that out of our **n** experiments, three result in a search (let's call that **k**) and the remaining ones (there will be **n**-**k** of them, which is two) don't? 
# # - The probability of three passengers being searched is 0.25 x 0.25 x 0.25 which is the same as 0.25<sup>3</sup>. Using our generic variables, this is **p<sup>k</sup>**. # - The probability that the rest don't get searched is 0.75 x 0.75, or 0.75<sup>2</sup>. In terms of our variables, this is **(1-p)<sup>(n-k)</sup>**. # - The combined probability of three searches and two non-searches is therefore 0.25<sup>3</sup> x 0.75<sup>2</sup> (approximately 0.0088). Using our variables, this is: # # \begin{equation}p^{k}(1-p)^{(n-k)}\end{equation} # # This formula enables us to calculate the probability for a single combination of ***n*** passengers in which ***k*** experiments had a successful outcome. In this case, it enables us to calculate that the probability of three passengers out of five being searched is approximately 0.0088. However, we need to consider that there are multiple ways this can happen. The first three passengers could get searched; or the last three; or the first, third, and fifth, or any other possible combination of 3 from 5. # # There are two possible outcomes for each experiment; so the total number of possible combinations of five passengers being searched or not searched is 2<sup>5</sup> or 32. So within those 32 sets of possible result combinations, how many have three searches? We can use the <sub>n</sub>C<sub>k</sub> formula to calculate this: # # \begin{equation}_{5}C_{3} = \frac{5!}{3!(5 - 3)!} = \frac{120}{6\times 2} = \frac{120}{12} = 10\end{equation} # # So 10 out of our 32 combinations had 3 searches and 2 non-searches. # # To find the probability of any combination of 3 searches out of 5 passengers, we need to multiply the number of possible combinations by the probability for a single combination - in this case 10 x 0.0088, which is approximately 0.088, or 8.8%. 
# # So our complete formula to calculate the probability of ***k*** events from ***n*** experiments with probability ***p*** is: # # \begin{equation}P(x=k) = \frac{n!}{k!(n-k)!} p^{k}(1-p)^{(n-k)}\end{equation} # # This is known as the *General Binomial Probability Formula*, and we use it to calculate the *probability mass function* (or *PMF*) for a binomial variable. In other words, we can use it to calculate the probability for each possible value for the variable and use that information to determine the relative frequency of the variable values as a distribution. # # In Python, the *scipy.stats.**binom.pmf*** function encapsulates the general binomial probability formula, and you can use it to calculate the probability of a random variable having a specific value (***k***) for a given number of experiments (***n***) where the event being tested has a given probability (***p***), as demonstrated in the following code: # + # %matplotlib inline from scipy.stats import binom from matplotlib import pyplot as plt import numpy as np n = 5 p = 0.25 x = np.array(range(0, n+1)) prob = np.array([binom.pmf(k, n, p) for k in x]) # Set up the graph plt.xlabel('x') plt.ylabel('Probability') plt.bar(x, prob) plt.show() # - # You can see from the bar chart that with this small value for ***n***, the distribution is right-skewed. # # Recall that in our coin flipping experiment, when the probability of failure vs success was equal, the resulting distribution was symmetrical. With an unequal probability of success in each experiment, the bias has the effect of skewing the overall probability mass. # # However, try increasing the value of ***n*** in the code above to 10, 20, and 50; re-running the cell each time. With more observations, the *central limit theorem* starts to take effect and the distribution starts to look more symmetrical - with enough observations it starts to look like a *normal* distribution. 
# # There is an important distinction here - the *normal* distribution applies to *continuous* variables, while the *binomial* distribution applies to *discrete* variables. However, the similarities help in a number of statistical contexts where the number of observations (experiments) is large enough for the *central limit theorem* to make the distribution of binomial variable values behave like a *normal* distribution. # # ### Working with the Binomial Distribution # Now that you know how to work out a binomial distribution for a repeated experiment, it's time to take a look at some statistics that will help us quantify some aspects of probability. # # Let's increase our ***n*** value to 100 so that we're looking at the number of searches per 100 passengers. This gives us the binomial distribution graphed by the following code: # + # %matplotlib inline from scipy.stats import binom from matplotlib import pyplot as plt import numpy as np n = 100 p = 0.25 x = np.array(range(0, n+1)) prob = np.array([binom.pmf(k, n, p) for k in x]) # Set up the graph plt.xlabel('x') plt.ylabel('Probability') plt.bar(x, prob) plt.show() # - # #### Mean (Expected Value) # We can calculate the mean of the distribution like this: # # \begin{equation}\mu = np\end{equation} # # So for our airport passengers, this is: # # \begin{equation}\mu = 100 \times 0.25 = 25\end{equation} # # When we're talking about a probability distribution, the mean is usually referred to as the *expected value*. In this case, for any 100 passengers we can reasonably expect 25 of them to be searched. # # #### Variance and Standard Deviation # Obviously, we can't search a quarter of a passenger - the expected value reflects the fact that there is variation, and indicates an average value for our binomial random variable. To get an indication of how much variability there actually is in this scenario, we can can calculate the variance and standard deviation. 
# # For variance of a binomial probability distribution, we can use this formula: # # \begin{equation}\sigma^{2} = np(1-p)\end{equation} # # So for our airport passengers: # # \begin{equation}\sigma^{2} = 100 \times 0.25 \times 0.75 = 18.75\end{equation} # # To convert this to standard deviation we just take the square root: # # \begin{equation}\sigma = \sqrt{np(1-p)}\end{equation} # # So: # # \begin{equation}\sigma = \sqrt{18.75} \approx 4.33 \end{equation} # # So for every 100 passengers, we can expect 25 searches with a standard deviation of 4.33 # # In Python, you can use the ***mean***, ***var***, and ***std*** functions from the *scipy.stats.**binom*** package to return binomial distribution statistics for given values of *n* and *p*: # + from scipy.stats import binom n = 100 p = 0.25 print(binom.mean(n,p)) print(binom.var(n,p)) print(binom.std(n,p)) # -
Statistics and Probability by Hiren/04-04-Probability.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="WksDx4_To_su" # # PyTTI-Tools Colab Notebook # # If you are using PyTTI-tools from a local jupyter server, you might have a better experience with the "_local" notebook: https://github.com/pytti-tools/pytti-notebook/blob/main/pyttitools-PYTTI_local.ipynb # # If you are planning to use google colab with the "local runtime" option: this is still the notebook you want. # # ## A very brief history of this notebook # # The tools and techniques below were pioneered in 2021 by a diverse and distributed collection of amazingly talented ML practitioners, researchers, and artists. The short version of this history is that <NAME> ([@RiversHaveWings](https://twitter.com/RiversHaveWings)) published a notebook inspired by work done by [@advadnoun](https://twitter.com/advadnoun). Katherine's notebook spawned a litany of variants, each with their own twist on the technique or adding a feature to someone else's work. <NAME> ([@sportsracer48](https://twitter.com/sportsracer48)) collected several of the most interesting notebooks and stuck the important bits together with bublegum and scotch tape. Thus was born PyTTI, and there was much rejoicing in sportsracer48's patreon, where it was shared in closed beta for several months. <NAME> ([@DigThatData](https://twitter.com/DigThatData)) offered to help tidy up the mess, and sportsracer48 encouraged him to run wild with it. David's contributions snowballed into [PyTTI-Tools](https://github.com/pytti-tools), the engine this notebook sits on top of! 
# # If you would like to contribute, receive support, or even just suggest an improvement to the documentation, our issue tracker can be found here: https://github.com/pytti-tools/pytti-core/issues # # # Instructions # # Detailed documentation can be found here: https://pytti-tools.github.io/pytti-book/intro.html # # * Syntax for text prompts and scenes: https://pytti-tools.github.io/pytti-book/SceneDSL.html # * Descriptions of all settings: https://pytti-tools.github.io/pytti-book/Settings.html # # + [markdown] id="xZt160ePEc8f" # #Step 1: Setup # Run the cells in this section once for each runtime, or after a factory reset. # + id="YtuDj2hRynkM" # This cell should only be run once drive_mounted = False gdrive_fpath = '.' # + id="a9C9tARLzyzq" #@title 1.1 Mount google drive (optional) #@markdown Mounting your drive is optional but recommended. You can even restore from google randomly #@markdown kicking you out if you mount your drive. from pathlib import Path mount_gdrive = False # @param{type:"boolean"} if mount_gdrive and not drive_mounted: from google.colab import drive gdrive_mountpoint = '/content/drive/' #@param{type:"string"} gdrive_subdirectory = 'MyDrive/pytti_tools' #@param{type:"string"} gdrive_fpath = str(Path(gdrive_mountpoint) / gdrive_subdirectory) try: drive.mount(gdrive_mountpoint, force_remount = True) # !mkdir -p {gdrive_fpath} # %cd {gdrive_fpath} drive_mounted = True except OSError: print( "\n\n-----[PYTTI-TOOLS]-------\n\n" "If you received a scary OSError and your drive" " was already mounted, ignore it." "\n\n-----[PYTTI-TOOLS]-------\n\n" ) raise # + cellView="form" id="RosI5DYxtjh0" #@title 1.2 NVIDIA-SMI (optional) #@markdown View information about your runtime GPU. #@markdown Google will connect you to an industrial strength GPU, which is needed to run #@markdown this notebook. You can also disable error checking on your GPU to get some #@markdown more VRAM, at a marginal cost to stability. 
You will have to restart the runtime after #@markdown disabling it. enable_error_checking = False#@param {type:"boolean"} if enable_error_checking: # !nvidia-smi else: # !nvidia-smi # !nvidia-smi -i 0 -e 0 # + cellView="form" id="CAGyDOe2o9AE" #@title 1.3 Install everything else #@markdown Run this cell on a fresh runtime to install the libraries and modules. from os.path import exists as path_exists if path_exists(gdrive_fpath): # %cd {gdrive_fpath} def flush_reqs(): # !rm -r pytti-core def install_everything(): if path_exists('./pytti-core'): try: flush_reqs() except Exception as ex: logger.warning( str(ex) ) logger.warning( "A `pytti` folder already exists and could not be deleted." "If you encounter problems, try deleting that folder and trying again." "Please report this and any other issues here: " "https://github.com/pytti-tools/pytti-notebook/issues/new", exc_info=True) # !git clone --recurse-submodules -j8 https://github.com/pytti-tools/pytti-core # !pip install kornia pytorch-lightning transformers # !pip install jupyter loguru einops PyGLM ftfy regex tqdm hydra-core exrex # !pip install seaborn adjustText bunch matplotlib-label-lines # !pip install --upgrade gdown # !pip install ./pytti-core/vendor/AdaBins # !pip install ./pytti-core/vendor/CLIP # !pip install ./pytti-core/vendor/GMA # !pip install ./pytti-core/vendor/taming-transformers # !pip install ./pytti-core # !mkdir -p images_out # !mkdir -p videos from pytti.Notebook import change_tqdm_color change_tqdm_color() try: from adjustText import adjust_text import pytti, torch everything_installed = True except ModuleNotFoundError: everything_installed = False force_install = False #@param{type:"boolean"} if not everything_installed or force_install: install_everything() elif everything_installed: from pytti.Notebook import change_tqdm_color change_tqdm_color() # + [markdown] id="PcayyBJjE-qy" # # Step 2: Configure Experiment # # Edit the parameters, or load saved parameters, then run the model. 
# # * https://pytti-tools.github.io/pytti-book/SceneDSL.html # * https://pytti-tools.github.io/pytti-book/Settings.html # + cellView="form" id="TiKD7os1pyXW" #@title #2.1 Parameters: #@markdown --- from os.path import exists as path_exists if path_exists(gdrive_fpath): # %cd {{gdrive_fpath}} drive_mounted = True else: drive_mounted = False try: from pytti.Notebook import change_tqdm_color, get_last_file except ModuleNotFoundError: if drive_mounted: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. raise RuntimeError('ERROR: please run setup (step 1.3).') else: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).') change_tqdm_color() import glob, json, random, re, math try: from bunch import Bunch except ModuleNotFoundError: if drive_mounted: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. raise RuntimeError('ERROR: please run setup (step 1.3).') else: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).') #these are used to make the defaults look pretty model_default = None random_seed = None all = math.inf derive_from_init_aspect_ratio = -1 def define_parameters(): locals_before = locals().copy() #@markdown ###Prompts: scenes = "deep space habitation ring made of glass | galactic nebula | wow! 
space is full of fractal creatures darting around everywhere like fireflies"#@param{type:"string"} scene_prefix = "astrophotography #pixelart | image credit nasa | space full of cybernetic neon:3_galactic nebula | isometric pixelart by <NAME> | "#@param{type:"string"} scene_suffix = "| satellite image:-1:-.95 | text:-1:-.95 | anime:-1:-.95 | watermark:-1:-.95 | backyard telescope:-1:-.95 | map:-1:-.95"#@param{type:"string"} interpolation_steps = 0#@param{type:"number"} steps_per_scene = 60100#@param{type:"raw"} #@markdown --- #@markdown ###Image Prompts: direct_image_prompts = ""#@param{type:"string"} #@markdown --- #@markdown ###Initial image: init_image = ""#@param{type:"string"} direct_init_weight = ""#@param{type:"string"} semantic_init_weight = ""#@param{type:"string"} #@markdown --- #@markdown ###Image: #@markdown Use `image_model` to select how the model will encode the image image_model = "Limited Palette" #@param ["VQGAN", "Limited Palette", "Unlimited Palette"] #@markdown image_model | description | strengths | weaknesses #@markdown --- | -- | -- | -- #@markdown VQGAN | classic VQGAN image | smooth images | limited datasets, slow, VRAM intesnsive #@markdown Limited Palette | pytti differentiable palette | fast, VRAM scales with `palettes` | pixel images #@markdown Unlimited Palette | simple RGB optimization | fast, VRAM efficient | pixel images #@markdown The output image resolution will be `width` $\times$ `pixel_size` by height $\times$ `pixel_size` pixels. #@markdown The easiest way to run out of VRAM is to select `image_model` VQGAN without reducing #@markdown `pixel_size` to $1$. #@markdown For `animation_mode: 3D` the minimum resoultion is about 450 by 400 pixels. 
width = 180#@param {type:"raw"} height = 112#@param {type:"raw"} pixel_size = 4#@param{type:"number"} smoothing_weight = 0.02#@param{type:"number"} #@markdown `VQGAN` specific settings: vqgan_model = "sflckr" #@param ["imagenet", "coco", "wikiart", "sflckr", "openimages"] #@markdown `Limited Palette` specific settings: random_initial_palette = False#@param{type:"boolean"} palette_size = 6#@param{type:"number"} palettes = 9#@param{type:"number"} gamma = 1#@param{type:"number"} hdr_weight = 0.01#@param{type:"number"} palette_normalization_weight = 0.2#@param{type:"number"} show_palette = False #@param{type:"boolean"} target_palette = ""#@param{type:"string"} lock_palette = False #@param{type:"boolean"} #@markdown --- #@markdown ###Animation: animation_mode = "3D" #@param ["off","2D", "3D", "Video Source"] sampling_mode = "bicubic" #@param ["bilinear","nearest","bicubic"] infill_mode = "wrap" #@param ["mirror","wrap","black","smear"] pre_animation_steps = 100#@param{type:"number"} steps_per_frame = 50#@param{type:"number"} frames_per_second = 12#@param{type:"number"} #@markdown --- #@markdown ###Stabilization Weights: direct_stabilization_weight = ""#@param{type:"string"} semantic_stabilization_weight = ""#@param{type:"string"} depth_stabilization_weight = ""#@param{type:"string"} edge_stabilization_weight = ""#@param{type:"string"} #@markdown `flow_stabilization_weight` is used for `animation_mode: 3D` and `Video Source` flow_stabilization_weight = ""#@param{type:"string"} #@markdown --- #@markdown ###Video Tracking: #@markdown Only for `animation_mode: Video Source`. video_path = ""#@param{type:"string"} frame_stride = 1#@param{type:"number"} reencode_each_frame = True #@param{type:"boolean"} flow_long_term_samples = 1#@param{type:"number"} #@markdown --- #@markdown ###Image Motion: translate_x = "-1700*sin(radians(1.5))" #@param{type:"string"} translate_y = "0" #@param{type:"string"} #@markdown `..._3d` is only used in 3D mode. 
translate_z_3d = "(50+10*t)*sin(t/10*pi)**2" #@param{type:"string"} #@markdown `rotate_3d` *must* be a `[w,x,y,z]` rotation (unit) quaternion. Use `rotate_3d: [1,0,0,0]` for no rotation. #@markdown [Learn more about rotation quaternions here](https://eater.net/quaternions). rotate_3d = "[cos(radians(1.5)), 0, -sin(radians(1.5))/sqrt(2), sin(radians(1.5))/sqrt(2)]"#@param{type:"string"} #@markdown `..._2d` is only used in 2D mode. rotate_2d = "5" #@param{type:"string"} zoom_x_2d = "0" #@param{type:"string"} zoom_y_2d = "0" #@param{type:"string"} #@markdown 3D camera (only used in 3D mode): lock_camera = True#@param{type:"boolean"} field_of_view = 60#@param{type:"number"} near_plane = 1#@param{type:"number"} far_plane = 10000#@param{type:"number"} #@markdown --- #@markdown ###Output: file_namespace = "default"#@param{type:"string"} if file_namespace == '': file_namespace = 'out' allow_overwrite = False#@param{type:"boolean"} base_name = file_namespace if not allow_overwrite and path_exists(f'images_out/{file_namespace}'): _, i = get_last_file(f'images_out/{file_namespace}', f'^(?P<pre>{re.escape(file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_1\\.png)$') if i == 0: print(f"WARNING: file_namespace {file_namespace} already has images from run 0") elif i is not None: print(f"WARNING: file_namespace {file_namespace} already has images from runs 0 through {i}") elif glob.glob(f'images_out/{file_namespace}/{base_name}_*.png'): print(f"WARNING: file_namespace {file_namespace} has images which will be overwritten") try: del i del _ except NameError: pass del base_name display_every = steps_per_frame #@param{type:"raw"} clear_every = 0 #@param{type:"raw"} display_scale = 1#@param{type:"number"} save_every = steps_per_frame #@param{type:"raw"} backups = 2**(flow_long_term_samples+1)+1#this is used for video transfer, so don't lower it if that's what you're doing#@param {type:"raw"} show_graphs = False #@param{type:"boolean"} approximate_vram_usage = 
False#@param{type:"boolean"} #@markdown --- #@markdown ###Model: #@markdown Quality settings from Dribnet's CLIPIT (https://github.com/dribnet/clipit). #@markdown Selecting too many will use up all your VRAM and slow down the model. #@markdown I usually use ViTB32, ViTB16, and RN50 if I get a A100, otherwise I just use ViT32B. #@markdown quality | CLIP models #@markdown --- | -- #@markdown draft | ViTB32 #@markdown normal | ViTB32, ViTB16 #@markdown high | ViTB32, ViTB16, RN50 #@markdown best | ViTB32, ViTB16, RN50x4 ViTB32 = True #@param{type:"boolean"} ViTB16 = False #@param{type:"boolean"} RN50 = False #@param{type:"boolean"} RN50x4 = False #@param{type:"boolean"} ViTL14 = False #@param{type:"boolean"} RN101 = False #@param{type:"boolean"} RN50x16 = False #@param{type:"boolean"} RN50x64 = False #@param{type:"boolean"} #@markdown the default learning rate is `0.1` for all the VQGAN models #@markdown except openimages, which is `0.15`. For the palette modes the #@markdown default is `0.02`. learning_rate = model_default#@param{type:"raw"} reset_lr_each_frame = True#@param{type:"boolean"} seed = random_seed #@param{type:"raw"} #@markdown **Cutouts**: #@markdown [Cutouts are how CLIP sees the image.](https://twitter.com/remi_durant/status/1460607677801897990) cutouts = 40#@param{type:"number"} cut_pow = 2#@param {type:"number"} cutout_border = .25#@param {type:"number"} gradient_accumulation_steps = 1 #@param {type:"number"} #@markdown NOTE: prompt masks (`promt:weight_[mask.png]`) will not work right on '`wrap`' or '`mirror`' mode. border_mode = "clamp" #@param ["clamp","mirror","wrap","black","smear"] models_parent_dir = '.' 
    # --- tail of define_parameters() (the def opens in the form cell above) ---
    # No seed supplied: draw one from the widest span the seeding code accepts.
    if seed is None:
        seed = random.randint(-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff)
    # Diff locals() against the snapshot taken on entry so the return value
    # contains exactly the form parameters defined in this cell, nothing else.
    locals_after = locals().copy()
    for k in locals_before.keys():
        del locals_after[k]
    del locals_after['locals_before']
    return locals_after


# Gather every form field into a Bunch and echo it as JSON so the user can
# copy the settings out and restore them later via the "Load settings" cell.
params = Bunch(define_parameters())
print("SETTINGS:")
print(json.dumps(params))

# + cellView="form" id="lWlZ2Gocb2fF"
#@title 2.2 Load settings (optional)
#@markdown copy the `SETTINGS:` output from the **Parameters** cell (triple click to select the whole
#@markdown line from `{'scenes'...` to `}`) and paste them in a note to save them for later.
#@markdown Paste them here in the future to load those settings again. Running this cell with blank settings won't do anything.
from os.path import exists as path_exists

# Each form cell re-detects the Drive mount so it can be run standalone.
if path_exists(gdrive_fpath):
    # %cd {gdrive_fpath}
    drive_mounted = True
else:
    drive_mounted = False
try:
    from pytti.Notebook import *
except ModuleNotFoundError:
    if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
    else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
change_tqdm_color()

import json, random

try:
    from bunch import Bunch
except ModuleNotFoundError:
    if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
    else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
settings = ""#@param{type:"string"}
#@markdown Check `random_seed` to overwrite the seed from the settings with a random one for some variation.
random_seed = False #@param{type:"boolean"}

# Only reload when the user actually pasted something into the form.
if settings != '':
    params = load_settings(settings, random_seed)

# + id="j9pjbKijynkU"
from pytti.workhorse import TB_LOGDIR
# %load_ext tensorboard

# + id="M5vWuC4NynkU"
# %tensorboard --logdir $TB_LOGDIR

# + [markdown] id="4hP1u1zbynkU"
# It is common for users to experience issues starting their first run. In particular, you may see an error saying something like "Access Denied" and showing you some URL links. This is caused by the google drive link for one of the models getting "hugged to death". You can still access the model, but google won't let you do it programmatically. Please follow these steps to get around the issue:
#
# 1. Visit either of the two URLs you see in your browser to download the file `AdaBins_nyu.pt` locally
# 2. Create a new folder in colab named `pretrained` (check the left sidebar for a file browser)
# 3. Upload `AdaBins_nyu.pt` to the `pretrained` folder. You should be able to just drag-and-drop the file onto the folder.
# 4. Run the following code cell after the upload has completed to tell PyTTI where to find AdaBins
#
# You should now be able to run image generation without issues.

# + id="7jpLZ48gynkU" language="sh"
#
# ADABINS_SRC=./pretrained/AdaBins_nyu.pt
# ADABINS_DIR=~/.cache/adabins
# ADABINS_TGT=$ADABINS_DIR/AdaBins_nyu.pt
#
# if [ -f "$ADABINS_SRC" ]; then
#   mkdir -p $ADABINS_DIR/
#   ln $ADABINS_SRC $ADABINS_TGT
# fi

# + id="QQUdFJpDynkV"
#@title 2.3 Run it!
from pytti.workhorse import _main as render_frames
from omegaconf import OmegaConf

# Convert the Bunch of form values into the OmegaConf config the renderer expects.
cfg = OmegaConf.create(dict(params))

# function wraps step 2.3 of the original p5 notebook
render_frames(cfg)

# + [markdown] id="AtekvTZxFNpf"
# # Step 3: Render video
# You can download from the notebook, but it's faster to download from your drive.
# + cellView="form" id="RZH-r4yyShnX"
#@title 3.1 Render video
from os.path import exists as path_exists

# Re-detect the Drive mount so this cell can run standalone.
if path_exists(gdrive_fpath):
    # %cd {gdrive_fpath}
    drive_mounted = True
else:
    drive_mounted = False
try:
    from pytti.Notebook import change_tqdm_color
except ModuleNotFoundError:
    if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
    else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
change_tqdm_color()

from tqdm.notebook import tqdm
import numpy as np
from os.path import exists as path_exists
from subprocess import Popen, PIPE
from PIL import Image, ImageFile
from os.path import splitext as split_file
import glob
from pytti.Notebook import get_last_file

# Let PIL open partially-written frames instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True

try:
    params
except NameError:
    raise RuntimeError("ERROR: no parameters. Please run parameters (step 2.1).")

if not path_exists(f"images_out/{params.file_namespace}"):
    if path_exists(f"/content/drive/MyDrive"):
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"ERROR: file_namespace: {params.file_namespace} does not exist.")
    else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"WARNING: Drive is not mounted.\nERROR: file_namespace: {params.file_namespace} does not exist.")
#@markdown The first run executed in `file_namespace` is number $0$, the second is number $1$, etc.
latest = -1
run_number = latest#@param{type:"raw"}
# run_number == -1 means "latest": recover the highest run index from filenames.
if run_number == -1:
    _, i = get_last_file(f'images_out/{params.file_namespace}',
                         f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_1\\.png)$')
    run_number = i
# Run 0 has no "(n)" suffix; later runs are "<namespace>(n)".
base_name = params.file_namespace if run_number == 0 else (params.file_namespace+f"({run_number})")
tqdm.write(f'Generating video from {params.file_namespace}/{base_name}_*.png')
all_frames = glob.glob(f'images_out/{params.file_namespace}/{base_name}_*.png')
# Frames are numbered ..._<index>.png; sort numerically, not lexically.
all_frames.sort(key = lambda s: int(split_file(s)[0].split('_')[-1]))
print(f'found {len(all_frames)} frames matching images_out/{params.file_namespace}/{base_name}_*.png')

start_frame = 0#@param{type:"number"}
all_frames = all_frames[start_frame:]
fps = params.frames_per_second#@param{type:"raw"}

total_frames = len(all_frames)
if total_frames == 0:
    #THIS IS NOT AN ERROR. This is the code that would
    #make an error if something were wrong.
    raise RuntimeError(f"ERROR: no frames to render in images_out/{params.file_namespace}")
frames = []
for filename in tqdm(all_frames):
    frames.append(Image.open(filename))

# Pipe PNG frames into ffmpeg's stdin and encode to near-lossless H.264.
p = Popen(['ffmpeg',
           '-y',
           '-f', 'image2pipe',
           '-vcodec', 'png',
           '-r', str(fps),
           '-i', '-',
           '-vcodec', 'libx264',
           '-r', str(fps),
           '-pix_fmt', 'yuv420p',
           '-crf', '1',
           '-preset', 'veryslow',
           f"videos/{base_name}.mp4"], stdin=PIPE)
for im in tqdm(frames):
    im.save(p.stdin, 'PNG')
p.stdin.close()
print("Encoding video...")
p.wait()
print("Video complete.")

# + cellView="form" id="t3EgqHKrSjZx"
#@title 3.1 Render video (concatenate all runs)
from os.path import exists as path_exists

if path_exists(gdrive_fpath):
    # %cd {gdrive_fpath}
    drive_mounted = True
else:
    drive_mounted = False
try:
    from pytti.Notebook import change_tqdm_color
except ModuleNotFoundError:
    if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
    else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
change_tqdm_color()

from tqdm.notebook import tqdm
import numpy as np
from os.path import exists as path_exists
from subprocess import Popen, PIPE
from PIL import Image, ImageFile
from os.path import splitext as split_file
import glob
from pytti.Notebook import get_last_file

ImageFile.LOAD_TRUNCATED_IMAGES = True

try:
    params
except NameError:
    raise RuntimeError("ERROR: no parameters. Please run parameters (step 2.1).")

if not path_exists(f"images_out/{params.file_namespace}"):
    if path_exists(f"/content/drive/MyDrive"):
        raise RuntimeError(f"ERROR: file_namespace: {params.file_namespace} does not exist.")
    else:
        raise RuntimeError(f"WARNING: Drive is not mounted.\nERROR: file_namespace: {params.file_namespace} does not exist.")
#@markdown The first run executed in `file_namespace` is number $0$, the second is number $1$, etc.
latest = -1
run_number = latest
if run_number == -1:
    _, i = get_last_file(f'images_out/{params.file_namespace}',
                         f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_1\\.png)$')
    run_number = i

# Unlike the cell above, collect frames from run 0 through the latest run
# and concatenate them into a single frame list.
all_frames = []
for i in range(run_number+1):
    base_name = params.file_namespace if i == 0 else (params.file_namespace+f"({i})")
    frames = glob.glob(f'images_out/{params.file_namespace}/{base_name}_*.png')
    frames.sort(key = lambda s: int(split_file(s)[0].split('_')[-1]))
    all_frames.extend(frames)

start_frame = 0#@param{type:"number"}
all_frames = all_frames[start_frame:]
fps = params.frames_per_second#@param{type:"raw"}

total_frames = len(all_frames)
if total_frames == 0:
    #THIS IS NOT AN ERROR. This is the code that would
    #make an error if something were wrong.
    raise RuntimeError(f"ERROR: no frames to render in images_out/{params.file_namespace}")
frames = []
for filename in tqdm(all_frames):
    frames.append(Image.open(filename))

# NOTE: base_name still holds the LAST run's name here, so the concatenated
# video is written under that run's filename.
p = Popen(['ffmpeg',
           '-y',
           '-f', 'image2pipe',
           '-vcodec', 'png',
           '-r', str(fps),
           '-i', '-',
           '-vcodec', 'libx264',
           '-r', str(fps),
           '-pix_fmt', 'yuv420p',
           '-crf', '1',
           '-preset', 'veryslow',
           f"videos/{base_name}.mp4"], stdin=PIPE)
for im in tqdm(frames):
    im.save(p.stdin, 'PNG')
p.stdin.close()
print("Encoding video...")
p.wait()
print("Video complete.")

# + cellView="form" id="-qZ8c_-iZ0QM"
#@title 3.2 Download the last exported video
from os.path import exists as path_exists

# NOTE(review): as a plain .py this `if` has only a commented-out magic in its
# body; it is only valid after jupytext converts `# %cd` back into a magic.
if path_exists(gdrive_fpath):
    # %cd {gdrive_fpath}
try:
    from pytti.Notebook import get_last_file
except ModuleNotFoundError:
    if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
    else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')

try:
    params
except NameError:
    #THIS IS NOT AN ERROR. This is the code that would
    #make an error if something were wrong.
    raise RuntimeError("ERROR: please run parameters (step 2.1).")

from google.colab import files

# If a render cell ran in this session, reuse its run_number; otherwise fall
# back to scanning the videos directory for the most recent file.
try:
    base_name = params.file_namespace if run_number == 0 else (params.file_namespace+f"({run_number})")
    filename = f'{base_name}.mp4'
except NameError:
    filename, i = get_last_file(f'videos',
                                f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?\\.mp4)$')

# NOTE(review): the literal '(unknown)' below looks like a placeholder that
# replaced an f-string interpolation (probably f'videos/{filename}') when this
# notebook was exported — confirm against the upstream pytti notebook.
if path_exists(f'videos/(unknown)'):
    files.download(f"videos/(unknown)")
else:
    if path_exists(f"/content/drive/MyDrive"):
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"ERROR: video videos/(unknown) does not exist.")
    else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"WARNING: Drive is not mounted.\nERROR: video videos/(unknown) does not exist.")

# + [markdown] id="N0GECf0CynkW"
# # Batch Settings
#
# Be Advised: google may penalize you for sustained colab GPU utilization, even if you are a PRO+ subscriber. Tread lightly with batch runs, you don't wanna end up in GPU jail.

# + [markdown] id="qEH4gJ1RynkW"
# FYI: the batch setting feature below may not work at present. We recommend using the CLI for batch jobs, see usage instructions at https://github.com/pytti-tools/pytti-core . The code below will probably be removed in the near future.

# + [markdown] id="if_Fdy_OFkjZ"
# # Batch Settings
# WARNING: If you use google colab (even with pro and pro+) GPUs for long enough google will throttle your account. Be careful with batch runs if you don't want to get kicked.

# + cellView="form" id="vHHfWWqoSz35"
#@title batch settings
# ngl... this probably doesn't work right now.
from os.path import exists as path_exists

# Re-detect the Drive mount so this cell can run standalone.
if path_exists(gdrive_fpath):
    # %cd {gdrive_fpath}
    drive_mounted = True
else:
    drive_mounted = False
try:
    from pytti.Notebook import change_tqdm_color, save_batch
except ModuleNotFoundError:
    if drive_mounted:
        raise RuntimeError('ERROR: please run setup (step 1).')
    else:
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1).')
change_tqdm_color()

try:
    import exrex, random, glob
except ModuleNotFoundError:
    if drive_mounted:
        raise RuntimeError('ERROR: please run setup (step 1).')
    else:
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1).')

from numpy import arange
import itertools


def all_matches(s):
    """Expand the regex pattern `s` into the list of every string it matches (via exrex)."""
    return list(exrex.generate(s))


def dict_product(dictionary):
    """Cartesian product of a dict-of-lists.

    Returns one dict per combination, e.g. {'a': [1, 2], 'b': [3]} ->
    [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}].
    """
    return [dict(zip(dictionary, x)) for x in itertools.product(*dictionary.values())]


#these are used to make the defaults look pretty
model_default = None
random_seed = None

def define_parameters():
    """Batch variant of the parameters form.

    Every field is a LIST of candidate values; the cross product of all the
    lists (via dict_product below) defines one settings dict per run.
    Returns only the names defined after the locals() snapshot.
    """
    locals_before = locals().copy()
    scenes = ["list","your","runs"] #@param{type:"raw"}
    scene_prefix = ["all "," permutations "," are run "] #@param{type:"raw"}
    scene_suffix = [" that", " makes", " 27" ] #@param{type:"raw"}
    interpolation_steps = [0] #@param{type:"raw"}
    steps_per_scene = [300] #@param{type:"raw"}
    direct_image_prompts = [""] #@param{type:"raw"}
    init_image = [""] #@param{type:"raw"}
    direct_init_weight = [""] #@param{type:"raw"}
    semantic_init_weight = [""] #@param{type:"raw"}
    image_model = ["Limited Palette"] #@param{type:"raw"}
    width = [180] #@param{type:"raw"}
    height = [112] #@param{type:"raw"}
    pixel_size = [4] #@param{type:"raw"}
    smoothing_weight = [0.05] #@param{type:"raw"}
    vqgan_model = ["sflckr"] #@param{type:"raw"}
    random_initial_palette = [False] #@param{type:"raw"}
    palette_size = [9] #@param{type:"raw"}
    palettes = [8] #@param{type:"raw"}
    gamma = [1] #@param{type:"raw"}
    hdr_weight = [1.0] #@param{type:"raw"}
    palette_normalization_weight = [1.0] #@param{type:"raw"}
    show_palette = [False] #@param{type:"raw"}
    target_palette = [""] #@param{type:"raw"}
    lock_palette = [False] #@param{type:"raw"}
    animation_mode = ["off"] #@param{type:"raw"}
    sampling_mode = ["bicubic"] #@param{type:"raw"}
    infill_mode = ["wrap"] #@param{type:"raw"}
    pre_animation_steps = [100] #@param{type:"raw"}
    steps_per_frame = [50] #@param{type:"raw"}
    frames_per_second = [12] #@param{type:"raw"}
    direct_stabilization_weight = [""] #@param{type:"raw"}
    semantic_stabilization_weight = [""] #@param{type:"raw"}
    depth_stabilization_weight = [""] #@param{type:"raw"}
    edge_stabilization_weight = [""] #@param{type:"raw"}
    flow_stabilization_weight = [""] #@param{type:"raw"}
    video_path = [""] #@param{type:"raw"}
    frame_stride = [1] #@param{type:"raw"}
    reencode_each_frame = [True] #@param{type:"raw"}
    flow_long_term_samples = [0] #@param{type:"raw"}
    translate_x = ["0"] #@param{type:"raw"}
    translate_y = ["0"] #@param{type:"raw"}
    translate_z_3d = ["0"] #@param{type:"raw"}
    rotate_3d = ["[1,0,0,0]"] #@param{type:"raw"}
    rotate_2d = ["0"] #@param{type:"raw"}
    zoom_x_2d = ["0"] #@param{type:"raw"}
    zoom_y_2d = ["0"] #@param{type:"raw"}
    lock_camera = [True] #@param{type:"raw"}
    field_of_view = [60] #@param{type:"raw"}
    near_plane = [1] #@param{type:"raw"}
    far_plane = [10000] #@param{type:"raw"}
    file_namespace = ["Basic Batch"] #@param{type:"raw"}
    # allow_overwrite is deliberately not a form field in batch mode.
    allow_overwrite = [False]
    display_every = [50] #@param{type:"raw"}
    clear_every = [0] #@param{type:"raw"}
    display_scale = [1] #@param{type:"raw"}
    save_every = [50] #@param{type:"raw"}
    backups = [2] #@param{type:"raw"}
    show_graphs = [False] #@param{type:"raw"}
    approximate_vram_usage = [False] #@param{type:"raw"}
    ViTB32 = [True] #@param{type:"raw"}
    ViTB16 = [False] #@param{type:"raw"}
    RN50 = [False] #@param{type:"raw"}
    RN50x4 = [False] #@param{type:"raw"}
    learning_rate = [None] #@param{type:"raw"}
    reset_lr_each_frame = [True] #@param{type:"raw"}
    seed = [None] #@param{type:"raw"}
    cutouts = [40] #@param{type:"raw"}
    cut_pow = [2] #@param{type:"raw"}
    cutout_border = [0.25] #@param{type:"raw"}
    border_mode = ["clamp"] #@param{type:"raw"}
    # Diff locals() against the snapshot so only the fields above are returned.
    locals_after = locals().copy()
    for k in locals_before.keys():
        del locals_after[k]
    del locals_after['locals_before']
    return locals_after


# Expand the lists into one settings dict per run.
param_dict = define_parameters()
batch_list = dict_product(param_dict)
namespace = batch_list[0]['file_namespace']
if glob.glob(f'images_out/{namespace}/*.png'):
    print(f"WARNING: images_out/{namespace} contains images. Batch indicies may not match filenames unless restoring.")

# + cellView="form" id="Z0hRb20yxxsc"
# @title Licensed under the MIT License

# Copyleft (c) 2021 <NAME>
# Copyright (c) 2022 <NAME>

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
pyttitools-PYTTI.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2

import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
import scipy.stats as stats

import sys
sys.path.append("../")
import vuong_tests6


# +
class OLS_loglike(GenericLikelihoodModel):
    """OLS wrapped as a GenericLikelihoodModel so statsmodels can provide
    per-observation log-likelihoods, scores, and hessians for the Vuong test."""

    def __init__(self, *args, ols=False, **kwargs):
        super(OLS_loglike, self).__init__(*args, **kwargs)
        self.ols = ols  # flag only; not used elsewhere in this notebook

    def loglikeobs(self, params):
        """Per-observation Gaussian log-density of the residuals y - X@params,
        with sigma set to the MLE sqrt(mean squared residual)."""
        y = self.endog
        x = self.exog
        mu_y = np.matmul(x, params)
        resid = y - mu_y
        sigma = np.sqrt(np.sum(resid**2)/resid.shape[0])
        pr_y = stats.norm.logpdf( resid, loc=0, scale=sigma )
        return pr_y


def setup_shi(yn, xn, return_model=False, num_params=4):
    """Fit the two competing OLS models for the Shi/Vuong comparison.

    Model 1 regresses yn on a constant plus column 0 of xn; model 2 on a
    constant plus columns 1..num_params. Returns per-observation
    log-likelihoods, score_obs, hessians, and fitted params for both models
    (plus the statsmodels model objects when return_model=True).
    """
    x1n, x2n = xn[:,0], xn[:,1:num_params+1]
    # model 1 grad, etc.
    model1 = sm.OLS(yn, sm.add_constant(x1n))
    model1_fit = model1.fit(disp=False)
    params1 = (model1_fit.params)
    # Re-evaluate likelihood quantities at the OLS estimates via the wrapper.
    model1_deriv = OLS_loglike(yn, sm.add_constant(x1n))
    ll1 = model1_deriv.loglikeobs(model1_fit.params)
    grad1 = model1_deriv.score_obs(model1_fit.params)
    hess1 = model1_deriv.hessian(model1_fit.params)

    #model 2 grad, etc.
    model2 = sm.OLS(yn, sm.add_constant(x2n))
    model2_fit = model2.fit(disp=False)
    params2 = (model2_fit.params)
    model2_deriv = OLS_loglike(yn, sm.add_constant(x2n))
    ll2 = model2_deriv.loglikeobs(model2_fit.params)
    grad2 = model2_deriv.score_obs(model2_fit.params)
    hess2 = model2_deriv.hessian(model2_fit.params)

    if return_model:
        return ll1,grad1,hess1,params1,model1,ll2,grad2,hess2,params2,model2
    return ll1,grad1,hess1,params1,ll2,grad2,hess2,params2


# +
def gen_data(nobs=1000, a=0.25, num_params=4):
    """Simulate y = 1 + a*x0 + (a/sqrt(k)) * sum(x1..xk) + e with iid standard
    normal regressors and errors; returns (y, x, nobs) with x of shape
    (nobs, 1+num_params)."""
    x = np.random.normal(scale=1., size=(nobs,1+num_params))
    e = np.random.normal(loc=0.0, scale=1.0, size=nobs)
    y = 1 + a*x[:,0] + a/np.sqrt(num_params)*x[:,1:num_params+1].sum(axis=1) + e
    return y,x,nobs


# Smoke test of the plumbing.
# NOTE(review): gen_data() above uses the default num_params=4 (x has 5
# columns) but setup_shi is asked for num_params=15; numpy slicing silently
# truncates x2n to 4 columns — confirm this k=15 demo is intentional.
yn,xn,nobs = gen_data()
ll1,grad1,hess1,params1,ll2,grad2,hess2,params2 = setup_shi(yn,xn,return_model=False,num_params=15)
print(grad1.shape,hess1.shape)
# -

# # a = .25, k= 9

a = 0.25
num_params=9
num_sims = 100
# The lambdas close over the globals a / num_params (late binding).
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data(nobs=250, a=a, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data(nobs=500, a=a, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data(nobs=100, a=a, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

# # a = .25, k= 4, something not right?
a = 0.25
num_params=4
num_sims = 100
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data(nobs=250, a=a, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data(nobs=500, a=a, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

# # a = .25, K = 19

a = 0.25
num_params=19
num_sims = 100
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data(nobs=250, a=a, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data(nobs=500, a=a, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

# # evidence of power

# +
a1,a2 = np.sqrt(1.09-1), 0.00
num_params= 9
num_sims = 100

def gen_data2(nobs=1000, a1=np.sqrt(1.09-1), a2=0.00 , num_params=19):
    """Like gen_data but with separate coefficients: a1 on x0 (model 1's
    regressor) and a2 spread over x1..xk (model 2's regressors), so one model
    can be made strictly better than the other."""
    x = np.random.normal(scale=1., size=(nobs,1+num_params))
    e = np.random.normal(loc=0.0, scale=1.0, size=nobs)
    y = 1 + a1*x[:,0] + a2/np.sqrt(num_params)*x[:,1:num_params+1].sum(axis=1) + e
    return y,x,nobs
# -

# NOTE(review): setup_shi_ex closes over the global num_params (9 here) even
# in the cells below that generate data with num_params=4 or 19 — confirm the
# estimated model size is meant to differ from the data-generating one.
a1,a2 = np.sqrt(1.09-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=250, a1=a1, a2=a2, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

a1,a2 = np.sqrt(1.09**.5-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=500, a1=a1, a2=a2, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

a1,a2 = np.sqrt(1.09**2.5-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=100, a1=a1, a2=a2, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

a1,a2 = np.sqrt(1.09-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=250, a1=a1, a2=a2, num_params=4)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

a1,a2 = np.sqrt(1.09-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=250, a1=a1, a2=a2, num_params=19)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

# # evidence of power 2

# Same experiments with the roles swapped: the signal now goes through a2
# (model 2's regressors) instead of a1.
a2,a1 = np.sqrt(1.09-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=250, a1=a1, a2=a2, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

a2,a1 = np.sqrt(1.09**.5-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=500, a1=a1, a2=a2, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

a2,a1 =np.sqrt(1.09**2.5-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=100, a1=a1, a2=a2, num_params=num_params)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

a2,a1 = np.sqrt(1.09-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=250, a1=a1, a2=a2, num_params=4)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)

# NOTE(review): this cell is an exact duplicate of the one above (same
# nobs=250, num_params=4 settings) — possibly an authoring accident.
a2,a1 = np.sqrt(1.09-1), 0.00
setup_shi_ex = lambda yn,xn: setup_shi(yn,xn,num_params=num_params)
gen_data_ex = lambda : gen_data2(nobs=250, a1=a1, a2=a2, num_params=4)
mc_out = vuong_tests6.monte_carlo(num_sims,gen_data_ex,setup_shi_ex)
vuong_tests6.print_mc(mc_out)
print(mc_out)
overlapping_reg/shi_examples_summer_1_fast.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# - http://coach.nervanasys.com/algorithms/value_optimization/double_dqn/index.html
# - https://medium.com/@awjuliani/simple-reinforcement-learning-with-tensorflow-part-4-deep-q-networks-and-beyond-8438a3e2b8df
# - https://jaromiru.com/2016/11/07/lets-make-a-dqn-double-learning-and-prioritized-experience-replay/

import matplotlib.pyplot as plt
import gym
import cv2
import random
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from collections import deque
from collections import namedtuple
import copy

# Experiment constants / hyperparameters.
SEED = 1234
CAPACITY = 10_000        # replay memory size
BATCH_SIZE = 32
PROCESSED_SIZE = 84      # height/width of an observation after preprocessing
GAME = 'Pong-v0'
N_ACTIONS = gym.make(GAME).action_space.n
PHI_LENGTH = 4           # number of observations stacked into one state
UPDATE_FREQ = 1
EPSILON_START = 1.0
EPSILON_END = 0.1
EPSILON_STEPS = 1_000_000  # steps over which epsilon decays (see Agent.get_epsilon)
GAMMA = 0.99
TARGET_UPDATE = 1000
PRINT_UPDATE = 5_000

# Seed every RNG in play for reproducibility.
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class ReplayMemory:
    def __init__(self, capacity, batch_size):
        """
        Replay memory that holds examples in the form of (s, a, r, s')

        args:
            capacity (int): the size of the memory
            batch_size (int): size of batches used for training model
        """
        self.batch_size = batch_size
        self.capacity = capacity
        # deque with maxlen silently evicts the oldest transition when full.
        self.memory = deque(maxlen=self.capacity)
        self.Transition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state'))
        self._available = False

    def put(self, state, action, reward, next_state):
        """
        Places an (s, a, r, s') example in the memory, converting each field
        to a tensor.

        args:
            state (np.array):
            action (list[int]):
            reward (list[int]):
            next_state (np.array or None): None marks a terminal transition
        """
        state = torch.FloatTensor(state)
        action = torch.LongTensor([action])
        reward = torch.FloatTensor([reward])
        if next_state is not None:
            next_state = torch.FloatTensor(next_state)
        transition = self.Transition(state=state, action=action, reward=reward, next_state=next_state)
        self.memory.append(transition)

    def sample(self):
        """
        Gets a random sample of n = batch_size examples from the memory

        returns:
            Transitions (namedtuple): a tuple of (s, a, r, s'), each field a
            tuple across the sampled batch
        """
        transitions = random.sample(self.memory, self.batch_size)
        # Transpose [(s,a,r,s'), ...] into (states, actions, rewards, next_states).
        return self.Transition(*(zip(*transitions)))

    def size(self):
        """
        Returns the length of the memory

        returns:
            length (int): number of examples in the memory
        """
        return len(self.memory)

    def is_available(self):
        """
        Returns True if we have enough examples within the memory
        (strictly more than batch_size; the flag is cached once set).

        returns:
            available (bool)
        """
        if self._available:
            return True
        if len(self.memory) > self.batch_size:
            self._available = True
        return self._available


class Environment:
    def __init__(self, game, size, seed):
        """
        A class that has helpful wrappers around the Gym environment

        game (string): name of Atari game, i.e. Breakout-v0
        size (int): height and width of observation after preprocessing
        seed (int): random seed
        """
        self.size = size
        #init game
        self.game = gym.make(game)
        #set random seed for determinism
        self.game.seed(seed)

    def process(self, obs):
        """
        Process an observation (i.e. convert to grayscale, resize and normalize)

        args:
            obs (np.array): observation from gym of game screen, should be (height, width, channels)
        returns:
            output (np.array): (self.size, self.size) array with all values <= 1
        """
        assert len(obs.shape) == 3  #make sure image is correct shape
        gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)  #convert to grayscale
        output = cv2.resize(gray, (self.size, self.size))  #resize
        output = output.astype(np.float32, copy=False)  #convert to float32
        output /= 255.0  #normalize values between [0, 1]
        assert (output <= 1.0).all()
        return output

    def get_obs(self):
        """
        Gets a processed observation from the rendered screen

        returns:
            obs (np.array): (self.size, self.size) array with all values <= 1
        """
        obs = self.game.render('rgb_array')
        obs = self.process(obs)
        return obs

    def init(self):
        """
        Reset the environment and return the initial state (unprocessed)

        returns:
            obs (np.array): observation from gym of game screen, should be (height, width, channels)
        """
        obs = self.game.reset()
        return obs

    def reset(self):
        """
        Reset the environment and return the initial state (processed)

        returns:
            output (np.array): (self.size, self.size) array with all values <= 1
        """
        obs = self.game.reset()
        output = self.process(obs)
        return output


class DQN(nn.Module):
    def __init__(self, n_actions):
        super(DQN, self).__init__()
        # Input is a stack of PHI_LENGTH=4 preprocessed 84x84 frames:
        # (N,4,84,84) -> conv1 -> (N,32,20,20) -> conv2 -> (N,64,9,9)
        # -> conv3 -> (N,64,7,7), hence the 7*7*64 fully-connected input.
        self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(7*7*64, 512)
        self.fc2 = nn.Linear(512, n_actions)  #actions from env.action_space.n

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.fc1(x.view(x.size(0), -1)))  #flattens the (N, C, H, W) to (N, C*H*W)
        # Returns one Q-value per action (no final activation).
        return self.fc2(x)


class Agent:
    def __init__(self, env, mem, model, phi_length, update_freq, e_start, e_end, e_steps, gamma, target_update, print_update):
        """
        An agent class that handles training the model

        args:
            mem (ReplayMemory):
ReplayMemory object env (Environment): Environment object model (nn.Module): PyTorch model phi_length (int): number of observations to stack to make a state frame_skip (int): we only use every n = frame_skip observations to make a state e_start (int): initial value of epsilon e_end (int): minimum value of epsilon e_steps (int): number of steps for epsilon to go from e_start to e_end gamma (float): decay rate of rewards target_update (int): after how many steps (frames) to update target model print_update (int): after how many steps (frames) to print summary of performance """ self.env = env self.mem = mem self.model = model self.phi_length = phi_length self.update_freq = update_freq self.e_start = e_start self.e_end = e_end self.e_steps = e_steps self.gamma = gamma self.target_update = target_update self.print_update = print_update self.steps = 0 #number of steps taken self.episodes = 0 #number of episodes self.obs_buffer = deque(maxlen=phi_length) #for holding observations to be turned into states #put model on gpu if available self.model = model.to(device) #create target model #TODO: this may need to be a copy.deepcopy or load state dict self.target = copy.deepcopy(self.model) #create optimizer #trying params from: https://github.com/hengyuan-hu/rainbow #self.optimizer = optim.Adam(self.model.parameters(), lr=6.25e-5, eps=1.5e-4) #from dqn paper self.optimizer = optim.RMSprop(self.model.parameters(), lr=0.00025, alpha=0.95, momentum=0.95) def get_epsilon(self): """ Calculates the value of epsilon from the current number of frames returns: epsilon (int): the probability of doing a random action """ epsilon = self.e_end + (self.e_start - self.e_end) * math.exp(-1. 
* self.steps / self.e_steps) return epsilon def get_action(self, state): """ Selects action to perform, with probability = epsilon chooses a random action, else chooses the best predicted action of the model args: state (np.array): input state to the model returns: action (int): the index of the action """ #get value of epsilon epsilon = self.get_epsilon() #with probablity of epsilon, pick a random action if random.random() < epsilon: action = self.env.game.action_space.sample() else: #with probability of (1 - epsilon) pick predicted value with torch.no_grad(): state = torch.FloatTensor(state).unsqueeze(0).to(device) #convert to tensor, reshape and add to gpu Qsa = self.model(state) #pass state through model to get Qa action = Qsa.max(1)[1].item() #action is max Qa value #make sure the value is an integer assert isinstance(action, int) return action def get_initial_state(self): """ Get the initial state to the model, a stack of processed observations returns: state (np.array): a stack of n = phi_length processed observations """ _ = self.env.reset() #reset environment obs = self.env.get_obs() #get a processed observation state = np.stack([obs for _ in range(self.phi_length)], axis=0) #stack n = phi_length times to make a state #also fill the for _ in range(self.phi_length): self.obs_buffer.append(obs) return state def get_state(self): """ Get a stack from the observation buffer returns: state (np.array): a stack of n = phi_length processed observations """ state = np.array(self.obs_buffer) return state def train(self): training_done = False reward_per_episode = [] rewards_all_episodes = [] while not training_done: episode_done = False episode_reward = 0 episode_steps = 0 #get initial state state = self.get_initial_state() while not episode_done: #get action action = self.get_action(state) #apply action while skipping frames observation, reward, episode_done, info = self.env.game.step(action) #sum rewards episode_reward += reward #append processed observation to a 
buffer of observations self.obs_buffer.append(self.env.get_obs()) #get the next state from the observation buffer next_state = self.get_state() #add to memory, for terminal states, set next_state to None if episode_done: mem.put(state, action, reward, None) else: mem.put(state, action, reward, next_state) #make new state the old next_state state = next_state #update model parameters if mem.is_available() and self.steps % self.update_freq == 0: loss = self.optimize() #increase number of steps self.steps += 1 episode_steps += 1 if self.steps % (self.target_update * self.update_freq) == 0: self.target.load_state_dict(self.model.state_dict()) if self.steps % self.print_update == 0: avg_reward_per_episode = np.mean(reward_per_episode) #rewards_all_episodes.extend(reward_per_episode) reward_per_episode = [] print(f'Episodes: {self.episodes}, Steps: {self.steps}, Epsilon: {self.get_epsilon():.2f}, Avg. Reward per Ep: {avg_reward_per_episode:.2f}') #increase number of episodes self.episodes += 1 reward_per_episode.append(episode_reward) def optimize(self): """ Update model parameters """ #get a batch transitions = mem.sample() #need to set the Q value of terminal states to 0 #this mask will be 1 for non-terminal next_states and 0 for terminal next_states non_terminal_mask = torch.ByteTensor(list(map(lambda ns: ns is not None, transitions.next_state))) #this will be 1 for terminal next_states, and 0 for non-terminal next states terminal_mask = 1 - non_terminal_mask #state_batch = (N*C,H,W), where N is batch_size, C is phi_length, H and W are processed obs size state_batch = torch.cat(transitions.state).to(device) #action_batch = (N, 1) action_batch = torch.cat(transitions.action).unsqueeze(1).to(device) #reward_batch = (N, 1) reward_batch = torch.cat(transitions.reward).unsqueeze(1).to(device) #clip reward between +1 and -1 reward_batch.data.clamp_(-1, 1) #next_state_batch = (V*C,H,W), where V is non_terminal next_state non_terminal_next_state_batch = torch.cat([ns for ns 
in transitions.next_state if ns is not None]).to(device) #reshape to (N,C,H,W) state_batch = state_batch.view(mem.batch_size, self.phi_length, self.env.size, self.env.size) #reshape to (V,C,H,W) non_terminal_next_state_batch = non_terminal_next_state_batch.view(-1, self.phi_length, self.env.size, self.env.size) #get predicted Q values from model Q_pred = self.model(state_batch) #get Q values of action taken, shape (N,1) Q_vals = Q_pred.gather(1, action_batch) """ in double dqn, get q values from model and target """ #get Q values from target model model_pred = self.model(non_terminal_next_state_batch) target_pred = self.target(non_terminal_next_state_batch) """ then get the actions from the model predicted Q values, not the target """ target_actions = model_pred.max(dim=1)[1].view(-1, 1) """ then use these actions to get Q values from the target network """ target_Q = target_pred.gather(1, target_actions) #tensor for placing target values target_vals = torch.zeros(mem.batch_size, 1).to(device) """ don't forget to update this line """ #fill in target values for non_terminal states #the terminal states will stay initialized as zeros target_vals[non_terminal_mask] = reward_batch[non_terminal_mask] + target_Q * self.gamma #calculate loss between Q values and target values loss = F.smooth_l1_loss(Q_vals, target_vals.detach()) #zero gradients self.optimizer.zero_grad() #calculate gradients loss.backward() #clamp gradients for p in self.model.parameters(): p.grad.data.clamp_(-1, 1) #update parameters self.optimizer.step() return loss.item() env = Environment(GAME, PROCESSED_SIZE, SEED) mem = ReplayMemory(CAPACITY, BATCH_SIZE) model = DQN(N_ACTIONS) agent = Agent(env, mem, model, PHI_LENGTH, UPDATE_FREQ, EPSILON_START, EPSILON_END, EPSILON_STEPS, GAMMA, TARGET_UPDATE, PRINT_UPDATE) agent.train()
Untitled Folder/OLD 2 - Double DQN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import utils

inp = utils.get_input(2019, 17)[:-1]
print(inp[:96])

# ## machine

# +
import operator
from functools import partial
from itertools import repeat, chain
from collections import namedtuple, defaultdict


class IntCode:
    """Generator-based Intcode interpreter (Advent of Code 2019).

    `run()` is a generator that yields `self` after every output and
    whenever the machine blocks waiting for input, so callers can
    interleave I/O with execution.
    """

    # func: the opcode implementation; params: number of parameters;
    # resolve_last_as_ptr: whether the final parameter is a write address
    Op = namedtuple('Op', ['func', 'params', 'resolve_last_as_ptr'])

    def __init__(self, program, pointer=0, rel_base=0, inputs=None, **kwargs):
        # sparse memory: any address not in the program reads as 0
        self._mem = defaultdict(int, enumerate(program))
        self.ptr = pointer
        self.rel = rel_base
        self.state = "started"
        self.inputs = inputs or []
        self.output = []
        self._kwargs = kwargs
        self._ops = {
            1: IntCode.Op(partial(IntCode._math, operator.add), 3, True),
            2: IntCode.Op(partial(IntCode._math, operator.mul), 3, True),
            3: IntCode.Op(IntCode._inp, 1, True),
            4: IntCode.Op(IntCode._out, 1, False),
            5: IntCode.Op(partial(IntCode._jump, lambda x: x != 0), 2, False),
            6: IntCode.Op(partial(IntCode._jump, lambda x: x == 0), 2, False),
            7: IntCode.Op(partial(IntCode._math, operator.lt), 3, True),
            8: IntCode.Op(partial(IntCode._math, operator.eq), 3, True),
            9: IntCode.Op(IntCode._base, 1, False),
            99: IntCode.Op(IntCode._halt, 0, False),
        }

    @property
    def mem(self):
        return list(self._mem.values())

    def run(self, inputs=None):
        self.inputs.extend(inputs or [])
        while self.state != "halted":
            instruction = self._mem[self.ptr]
            opcode, modes = self._opcode(instruction), self._modes(instruction)
            op = self._ops[opcode]
            params_raw = [self._mem[i]
                          for i in range(self.ptr + 1, self.ptr + op.params + 1)]
            params = self.resolve(params_raw, modes, op.resolve_last_as_ptr)
            orig_ptr = self.ptr
            self._mem, self.ptr, self.rel, output, self.state = \
                op.func(self._mem, self.ptr, self.rel, *params,
                        inputs=self.inputs, **self._kwargs)
            if output is not None:
                self.output.append(output)
            if self._kwargs.get('_debug', False):
                print(f"{instruction},{','.join(map(str, params_raw))}",
                      f"-> {opcode:2d} {modes} {params}",
                      f"-> {output, self.ptr, self.rel}")
            if (output is not None) or (self.state == "blocked"):
                yield self
            # bug fix: only auto-advance when the instruction actually ran.
            # Previously a blocked input (opcode 3 with an empty queue) also
            # matched `self.ptr == orig_ptr` and was skipped on resume.
            if self.ptr == orig_ptr and self.state != "blocked":
                self.ptr = self.ptr + op.params + 1
        yield self

    def resolve(self, params, modes, resolve_last_as_ptr):
        """Resolve raw parameters per their modes (0 position, 1 immediate,
        2 relative); a write parameter resolves to an address, not a value."""
        def _resolve(p, m, as_ptr):
            if int(m) == 0:
                return self._mem[p] if not as_ptr else p
            elif int(m) == 2:
                return self._mem[self.rel + p] if not as_ptr else self.rel + p
            return p  # mode 1: immediate
        resolve_as_ptr = chain(repeat(False, len(params) - 1), [resolve_last_as_ptr])
        return list(map(lambda t: _resolve(*t), zip(params, modes, resolve_as_ptr)))

    @staticmethod
    def _opcode(instruction):
        # last two digits of the instruction
        return int(str(instruction)[-2:])

    @staticmethod
    def _modes(instruction):
        # zero-pad to 5 digits, take the 3 mode digits, reverse so that
        # modes[i] corresponds to parameter i
        return f"{instruction:05d}"[:3][::-1]

    # operations: each returns (mem, ptr, rel, output, state)

    @staticmethod
    def _math(func, mem, ptr, rel, a, b, out, **kwargs):
        mem[out] = int(func(a, b))
        return mem, ptr, rel, None, "running"

    @staticmethod
    def _inp(mem, ptr, rel, out, inputs, **kwargs):
        if len(inputs):
            mem[out] = int(inputs.pop(0))
            return mem, ptr, rel, None, "running"
        # no input available: report blocked so run() can yield and retry
        return mem, ptr, rel, None, "blocked"

    @staticmethod
    def _out(mem, ptr, rel, val, **kwargs):
        return mem, ptr, rel, val, "running"

    @staticmethod
    def _jump(func, mem, ptr, rel, cond, val, **kwargs):
        return mem, (val if func(cond) else ptr), rel, None, "running"

    @staticmethod
    def _base(mem, ptr, rel, val, **kwargs):
        return mem, ptr, (rel + val), None, "running"

    @staticmethod
    def _halt(mem, ptr, rel, *args, **kwargs):
        return mem, ptr, rel, None, "halted"


# + part 1: find scaffold intersections and sum their alignment parameters
from itertools import groupby

program = map(int, inp.split(','))
vm = IntCode(program)
runner = vm.run()
while vm.state != "halted":
    vm = next(runner)
out = vm.output

out = list(map(chr, out[:-1]))
# print(''.join(out))

# (y, x)
rows = [list(g) for k, g in groupby(out, lambda x: x == '\n') if not k]
intersections = []
for y, row in enumerate(rows):
    for x, v in enumerate(row):
        # interior '#' cells whose four neighbours are also '#'
        if v == '#' and (0 < x < len(row) - 1) and (0 < y < len(rows) - 1):
            cross = [(1, 0), (-1, 0), (0, 1), (0, -1)]
            if all(rows[y + dy][x + dx] == '#' for dx, dy in cross):
                intersections.append((x, y))
print(sum(a * b for a, b in intersections))
# -

# + part 2: trace the scaffold path, compress it into 3 movement functions
import re
from functools import partial
from itertools import groupby, combinations
from collections import Counter
from IPython.display import clear_output

ORIENT = [(0, -1), (1, 0), (0, 1), (-1, 0)]   # up, right, down, left
ORIENT_SYMBOL = ['^', '>', 'v', '<']


def get_image(inp):
    """Run the program to completion and return the camera image as chars."""
    program = map(int, inp.split(','))
    vm = IntCode(program)
    runner = vm.run()
    while vm.state != "halted":
        vm = next(runner)
    out = vm.output
    return list(map(chr, out[:-1]))


def matrix(rows):
    """Build a {(x, y): char} map plus the grid dimensions."""
    m = {}
    for y, row in enumerate(rows):
        for x, v in enumerate(row):
            m[(x, y)] = v
    # width taken from the last row; all camera rows have equal length
    return m, len(row), len(rows)


def invert(d):
    """Invert a dict into {value: [keys]}."""
    d_ = defaultdict(list)
    for k, v in d.items():
        d_[v].append(k)
    return d_


def robot(m):
    """Find the robot: ((x, y), orientation vector) or None."""
    inv = invert(m)  # hoisted: the original recomputed this per symbol
    for sym, o in zip(ORIENT_SYMBOL, ORIENT):
        if sym in inv:
            return (inv[sym][0], o)
    return None


def _direction(max_x, max_y, m, loc):
    """Next perpendicular direction with scaffold, or None at the path end."""
    def _axis(coord):
        return 'x' if coord[0] != 0 else 'y'
    for dx, dy in ORIENT:
        x, y = loc[0][0] + dx, loc[0][1] + dy
        if (0 <= x < max_x) and (0 <= y < max_y):
            # must turn: only consider the axis we are not already moving on
            if m[(x, y)] == '#' and _axis(loc[1]) != _axis((dx, dy)):
                return (dx, dy)
    return None


def turn(prev, curr):
    """'R' if curr is a clockwise step from prev in ORIENT, else 'L'."""
    i = ORIENT.index(prev)
    if ORIENT[(i + 1) % len(ORIENT)] == curr:
        return 'R'
    return 'L'


def trace(m, loc):
    """Walk straight from loc while on scaffold; return the final position."""
    def _step(vec):
        return ((vec[0][0] + vec[1][0], vec[0][1] + vec[1][1]), vec[1])
    curr = loc
    while m.get(_step(curr)[0], None) == '#':
        curr = _step(curr)
    return curr


def distance(prev, curr):
    """Manhattan distance along a single axis between two positions."""
    return max(abs(curr[0] - prev[0]), abs(curr[1] - prev[1]))


def path(rows):
    """Produce the full movement sequence ['L', 4, 'R', 10, ...]."""
    m, max_x, max_y = matrix(rows)
    direction = partial(_direction, max_x, max_y, m)
    prev = robot(m)
    path = []
    dir_ = direction(prev)
    while dir_ is not None:
        curr = (prev[0], dir_)
        path.append(turn(prev[1], curr[1]))
        prev = trace(m, curr)
        path.append(distance(curr[0], prev[0]))
        dir_ = direction(prev)
    return path


def subseqs(s):
    """Yield (main, A, B, C) decompositions where three repeated
    subsequences (each <= 10 chars after joining) cover the entire path."""
    def replace(s, old, new):
        return s.replace(old, new)

    def rreplace(s, old, new):
        li = s.rsplit(old)
        return new.join(li)

    # candidate subsequences start/end on (turn, distance) pair boundaries
    subs = [s[i:j + 2] for i in range(0, len(s), 2) for j in range(i, len(s), 2)]
    subs = list(map(lambda sub: ''.join(map(str, sub)), subs))
    repeated = [k for k, v in Counter(subs).items() if v >= 2 and 2 < len(k) <= 10]
    s = ''.join(map(str, s))
    for a, b, c in combinations(repeated, 3):
        # try left- and right-biased replacement in every combination
        funcs = [replace, rreplace]
        for fa in funcs:
            for fb in funcs:
                for fc in funcs:
                    if not fc(fb(fa(s, a, ''), b, ''), c, ''):
                        yield fc(fb(fa(s, a, 'A'), b, 'B'), c, 'C'), a, b, c


def encode(s):
    """Turn a routine string into comma-separated ASCII codes + newline."""
    tokens = re.split(r'(\d+)', s)[:-1] if re.search(r'\d', s) else list(s)
    return list(map(ord, ','.join(tokens))) + [ord('\n')]


img = get_image(inp)
rows = [list(g) for k, g in groupby(img, lambda x: x == '\n') if not k]
p = path(rows)
# pick the decomposition with the shortest main routine
main, a, b, c = sorted(list(subseqs(p)), key=lambda t: len(t[0]))[0]
video = True
inputs = list(chain(*map(encode, [main, a, b, c, 'y' if video else 'n'])))
program = list(map(int, inp.split(',')))
program[0] = 2  # wake up the vacuum robot
# -

# + run the robot, optionally streaming the video feed
vm = IntCode(program, inputs=list(inputs))
runner = vm.run()
streaming = False
rendered = False
while vm.state != "halted":
    out = ''.join(list(map(chr, vm.output)))
    if '\n\n' in out:
        rendered = True  # a complete frame has been emitted
    if streaming and rendered:
        if video:
            clear_output(wait=True)
            print(out)
        rendered = False
        vm.output = []
    if 'Continuous video feed?\n' in out:
        streaming = True
        vm.output = []
    vm = next(runner)
# final output value is the collected dust
print(vm.output[-1])
# -
aoc/day_17.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.10 64-bit (''graph_rl'': conda)'
#     name: python3
# ---

# +
import gym
import torch
import torch.nn as nn
import numpy as np
from collections import deque
import random
from itertools import count
import torch.nn.functional as F
from tensorboardX import SummaryWriter

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class QNetwork(nn.Module):
    """Dueling Q-network: a shared trunk feeding separate state-value and
    advantage streams, combined as Q = V + A - mean(A)."""

    def __init__(self, n_state=4, n_action=2):
        # generalized: sizes were hard-coded for CartPole (4 inputs,
        # 2 actions); the defaults keep QNetwork() backward compatible
        super(QNetwork, self).__init__()
        self.fc1 = nn.Linear(n_state, 64)
        self.relu = nn.ReLU()
        self.fc_value = nn.Linear(64, 256)
        self.fc_adv = nn.Linear(64, 256)
        self.value = nn.Linear(256, 1)
        self.adv = nn.Linear(256, n_action)

    def forward(self, state):
        # fix: removed stray debug `print(state)` that ran on every forward
        y = self.relu(self.fc1(state))
        value = self.relu(self.fc_value(y))
        adv = self.relu(self.fc_adv(y))
        value = self.value(value)           # (N, 1) state value
        adv = self.adv(adv)                 # (N, n_action) advantages
        # subtract the mean advantage for identifiability
        advAverage = torch.mean(adv, dim=1, keepdim=True)
        Q = value + adv - advAverage
        return Q

    def select_action(self, state):
        """Greedy action index for a single (1, n_state) state tensor."""
        with torch.no_grad():
            Q = self.forward(state)
            action_index = torch.argmax(Q, dim=1)
        return action_index.item()


class Memory(object):
    """Simple ring-buffer replay memory of transition tuples."""

    def __init__(self, memory_size: int) -> None:
        self.memory_size = memory_size
        self.buffer = deque(maxlen=self.memory_size)

    def add(self, experience) -> None:
        self.buffer.append(experience)

    def size(self):
        return len(self.buffer)

    def sample(self, batch_size: int, continuous: bool = True):
        """Sample transitions; `continuous=True` returns a contiguous run,
        otherwise a uniform sample without replacement."""
        if batch_size > len(self.buffer):
            batch_size = len(self.buffer)
        if continuous:
            rand = random.randint(0, len(self.buffer) - batch_size)
            return [self.buffer[i] for i in range(rand, rand + batch_size)]
        else:
            indexes = np.random.choice(np.arange(len(self.buffer)),
                                       size=batch_size, replace=False)
            return [self.buffer[i] for i in indexes]

    def clear(self):
        self.buffer.clear()


env = gym.make('CartPole-v0')
n_state = env.observation_space.shape[0]
n_action = env.action_space.n

# sizes now come from the environment instead of being hard-coded
onlineQNetwork = QNetwork(n_state, n_action).to(device)
targetQNetwork = QNetwork(n_state, n_action).to(device)
targetQNetwork.load_state_dict(onlineQNetwork.state_dict())

optimizer = torch.optim.Adam(onlineQNetwork.parameters(), lr=1e-4)

GAMMA = 0.99
EXPLORE = 20000          # steps over which epsilon is annealed
INITIAL_EPSILON = 0.1
FINAL_EPSILON = 0.0001
REPLAY_MEMORY = 50000
BATCH = 16
UPDATE_STEPS = 4         # learn steps between target-network syncs

memory_replay = Memory(REPLAY_MEMORY)

epsilon = INITIAL_EPSILON
learn_steps = 0
writer = SummaryWriter('logs/ddqn')
begin_learn = False

episode_reward = 0

# onlineQNetwork.load_state_dict(torch.load('ddqn-policy.para'))
for epoch in count():
    state = env.reset()
    episode_reward = 0
    for time_steps in range(200):
        p = random.random()
        if p < epsilon:
            # fix: sample over all actions, not the hard-coded pair (0, 1)
            action = random.randint(0, n_action - 1)
        else:
            tensor_state = torch.FloatTensor(state).unsqueeze(0).to(device)
            action = onlineQNetwork.select_action(tensor_state)
        next_state, reward, done, _ = env.step(action)
        episode_reward += reward
        memory_replay.add((state, next_state, action, reward, done))

        # warm-up: start learning once the buffer holds > 128 transitions
        if memory_replay.size() > 128:
            if begin_learn is False:
                print('learn begin!')
                begin_learn = True
            learn_steps += 1
            if learn_steps % UPDATE_STEPS == 0:
                targetQNetwork.load_state_dict(onlineQNetwork.state_dict())
            batch = memory_replay.sample(BATCH, False)
            batch_state, batch_next_state, batch_action, batch_reward, batch_done = zip(*batch)

            # fix: removed stray debug `print(batch_state)` in the hot loop
            batch_state = torch.FloatTensor(batch_state).to(device)
            batch_next_state = torch.FloatTensor(batch_next_state).to(device)
            batch_action = torch.FloatTensor(batch_action).unsqueeze(1).to(device)
            batch_reward = torch.FloatTensor(batch_reward).unsqueeze(1).to(device)
            batch_done = torch.FloatTensor(batch_done).unsqueeze(1).to(device)

            # Double DQN target: online net picks the action, target net
            # evaluates it; (1 - done) zeroes the bootstrap at terminals
            with torch.no_grad():
                onlineQ_next = onlineQNetwork(batch_next_state)
                targetQ_next = targetQNetwork(batch_next_state)
                online_max_action = torch.argmax(onlineQ_next, dim=1, keepdim=True)
                y = batch_reward + (1 - batch_done) * GAMMA * targetQ_next.gather(1, online_max_action.long())

            loss = F.mse_loss(onlineQNetwork(batch_state).gather(1, batch_action.long()), y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            writer.add_scalar('loss', loss.item(), global_step=learn_steps)

            # linear epsilon annealing
            if epsilon > FINAL_EPSILON:
                epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

        if done:
            break
        state = next_state

    # writer.add_scalar('episode reward', episode_reward, global_step=epoch)
    # if epoch % 10 == 0:
    #     torch.save(onlineQNetwork.state_dict(), 'ddqn-policy.para')
    #     print('Ep {}\tMoving average score: {:.2f}\t'.format(epoch, episode_reward))
Duelling Dqn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/himasha0421/Deep-Learning-For-Medical-Applications/blob/main/brain_segmentation_unet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="UCAKW0YLqNRb" outputId="a9b48b1d-0ce3-4459-abf2-958137b799b8" colab={"base_uri": "https://localhost:8080/", "height": 122} from google.colab import drive drive.mount('/content/gdrive') # + id="BufvqIqpqQTD" outputId="67630dfd-1217-4114-bc3f-39388df81711" colab={"base_uri": "https://localhost:8080/", "height": 51} # !mkdir .kaggle import json token = {"username":"himasha0421","key":"321ba73e9c1ee572038fd854d77da957"} with open('/content/.kaggle/kaggle.json', 'w') as file: json.dump(token, file) # !mkdir ~/.kaggle # !cp /content/.kaggle/kaggle.json ~/.kaggle/kaggle.json # !kaggle config set -n path -v{/content} # !chmod 600 /root/.kaggle/kaggle.json # + id="GmL_-s6TqRTU" outputId="364cb357-479d-4215-fae1-a7c73a60b37a" colab={"base_uri": "https://localhost:8080/", "height": 68} # !kaggle datasets download -d mateuszbuda/lgg-mri-segmentation # + id="TDLTHEnJq8xs" # !unzip /content/{/content}/datasets/mateuszbuda/lgg-mri-segmentation/lgg-mri-segmentation.zip # + _uuid="7e493813-c8fa-4ae7-a3c2-de42d40a16ee" _cell_guid="c4f19fe9-0e4b-40c9-a7e7-2dc668debfc4" id="iTb13SlEqMXR" import os import random from collections import OrderedDict import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader from matplotlib import pyplot as plt from matplotlib.backends.backend_agg import FigureCanvasAgg from tqdm import tqdm from skimage.exposure import rescale_intensity from skimage.io import 
imread, imsave from skimage.transform import resize, rescale, rotate from torch.utils.data import Dataset from torchvision.transforms import Compose # + _uuid="b1172863-f248-4894-9c1b-27ed63c002b0" _cell_guid="6dd840cf-a412-4bca-99d0-2d31efd5a14f" id="Gq5zJtTpqMXX" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in def crop_sample(x): volume, mask = x volume[volume < np.max(volume) * 0.1] = 0 z_projection = np.max(np.max(np.max(volume, axis=-1), axis=-1), axis=-1) z_nonzero = np.nonzero(z_projection) z_min = np.min(z_nonzero) z_max = np.max(z_nonzero) + 1 y_projection = np.max(np.max(np.max(volume, axis=0), axis=-1), axis=-1) y_nonzero = np.nonzero(y_projection) y_min = np.min(y_nonzero) y_max = np.max(y_nonzero) + 1 x_projection = np.max(np.max(np.max(volume, axis=0), axis=0), axis=-1) x_nonzero = np.nonzero(x_projection) x_min = np.min(x_nonzero) x_max = np.max(x_nonzero) + 1 return ( volume[z_min:z_max, y_min:y_max, x_min:x_max], mask[z_min:z_max, y_min:y_max, x_min:x_max], ) def pad_sample(x): volume, mask = x a = volume.shape[1] b = volume.shape[2] if a == b: return volume, mask diff = (max(a, b) - min(a, b)) / 2.0 if a > b: padding = ((0, 0), (0, 0), (int(np.floor(diff)), int(np.ceil(diff)))) else: padding = ((0, 0), (int(np.floor(diff)), int(np.ceil(diff))), (0, 0)) mask = np.pad(mask, padding, mode="constant", constant_values=0) padding = padding + ((0, 0),) volume = np.pad(volume, padding, mode="constant", constant_values=0) return volume, mask def resize_sample(x, size=256): volume, mask = x v_shape = volume.shape out_shape = (v_shape[0], size, size) mask = resize( mask, output_shape=out_shape, order=0, mode="constant", cval=0, anti_aliasing=False, ) out_shape = out_shape + (v_shape[3],) volume = resize( volume, output_shape=out_shape, order=2, mode="constant", cval=0, 
anti_aliasing=False, ) return volume, mask def normalize_volume(volume): p10 = np.percentile(volume, 10) p99 = np.percentile(volume, 99) volume = rescale_intensity(volume, in_range=(p10, p99)) m = np.mean(volume, axis=(0, 1, 2)) s = np.std(volume, axis=(0, 1, 2)) volume = (volume - m) / s return volume #*************************************** Dataset class **************************************************************************************8 class BrainSegmentationDataset(Dataset): """Brain MRI dataset for FLAIR abnormality segmentation""" in_channels = 3 out_channels = 1 def __init__( self, images_dir, transform=None, image_size=256, subset="train", random_sampling=True, seed=42, ): assert subset in ["all", "train", "validation"] # read images volumes = {} masks = {} print("reading {} images...".format(subset)) for (dirpath, dirnames, filenames) in os.walk(images_dir): image_slices = [] mask_slices = [] for filename in sorted( filter(lambda f: ".tif" in f, filenames), key=lambda x: int(x.split(".")[-2].split("_")[4]), ): filepath = os.path.join(dirpath, filename) if "mask" in filename: mask_slices.append(imread(filepath, as_gray=True)) else: image_slices.append(imread(filepath)) if len(image_slices) > 0: patient_id = dirpath.split("/")[-1] volumes[patient_id] = np.array(image_slices[1:-1]) masks[patient_id] = np.array(mask_slices[1:-1]) self.patients = sorted(volumes) # select cases to subset if not subset == "all": random.seed(seed) validation_patients = random.sample(self.patients, k=10) if subset == "validation": self.patients = validation_patients else: self.patients = sorted( list(set(self.patients).difference(validation_patients)) ) print("preprocessing {} volumes...".format(subset)) # create list of tuples (volume, mask) self.volumes = [(volumes[k], masks[k]) for k in self.patients] print("cropping {} volumes...".format(subset)) # crop to smallest enclosing volume self.volumes = [crop_sample(v) for v in self.volumes] print("padding {} 
volumes...".format(subset)) # pad to square self.volumes = [pad_sample(v) for v in self.volumes] print("resizing {} volumes...".format(subset)) # resize self.volumes = [resize_sample(v, size=image_size) for v in self.volumes] print("normalizing {} volumes...".format(subset)) # normalize channel-wise self.volumes = [(normalize_volume(v), m) for v, m in self.volumes] # probabilities for sampling slices based on masks self.slice_weights = [m.sum(axis=-1).sum(axis=-1) for v, m in self.volumes] self.slice_weights = [ (s + (s.sum() * 0.1 / len(s))) / (s.sum() * 1.1) for s in self.slice_weights ] # add channel dimension to masks self.volumes = [(v, m[..., np.newaxis]) for (v, m) in self.volumes] print("done creating {} dataset".format(subset)) # create global index for patient and slice (idx -> (p_idx, s_idx)) num_slices = [v.shape[0] for v, m in self.volumes] self.patient_slice_index = list( zip( sum([[i] * num_slices[i] for i in range(len(num_slices))], []), sum([list(range(x)) for x in num_slices], []), ) ) self.random_sampling = random_sampling self.transform = transform def __len__(self): return len(self.patient_slice_index) def __getitem__(self, idx): patient = self.patient_slice_index[idx][0] slice_n = self.patient_slice_index[idx][1] if self.random_sampling: patient = np.random.randint(len(self.volumes)) slice_n = np.random.choice( range(self.volumes[patient][0].shape[0]), p=self.slice_weights[patient] ) v, m = self.volumes[patient] image = v[slice_n] mask = m[slice_n] if self.transform is not None: image, mask = self.transform((image, mask)) # fix dimensions (C, H, W) image = image.transpose(2, 0, 1) mask = mask.transpose(2, 0, 1) image_tensor = torch.from_numpy(image.astype(np.float32)) mask_tensor = torch.from_numpy(mask.astype(np.float32)) # return tensors return image_tensor, mask_tensor #******************************************** Dataset transforms *********************************************************************************88 def 
transforms(scale=None, angle=None, flip_prob=None): transform_list = [] if scale is not None: transform_list.append(Scale(scale)) if angle is not None: transform_list.append(Rotate(angle)) if flip_prob is not None: transform_list.append(HorizontalFlip(flip_prob)) return Compose(transform_list) class Scale(object): def __init__(self, scale): self.scale = scale def __call__(self, sample): image, mask = sample img_size = image.shape[0] scale = np.random.uniform(low=1.0 - self.scale, high=1.0 + self.scale) image = rescale( image, (scale, scale), multichannel=True, preserve_range=True, mode="constant", anti_aliasing=False, ) mask = rescale( mask, (scale, scale), order=0, multichannel=True, preserve_range=True, mode="constant", anti_aliasing=False, ) if scale < 1.0: #if scale is velow 1.0 then pad the image to have same input shape otherwise nurel network results may inconsistent diff = (img_size - image.shape[0]) / 2.0 padding = ((int(np.floor(diff)), int(np.ceil(diff))),) * 2 + ((0, 0),) image = np.pad(image, padding, mode="constant", constant_values=0) mask = np.pad(mask, padding, mode="constant", constant_values=0) else: # if scale is larger than 1.0 then take the original size from that image x_min = (image.shape[0] - img_size) // 2 x_max = x_min + img_size image = image[x_min:x_max, x_min:x_max, ...] mask = mask[x_min:x_max, x_min:x_max, ...] 
return image, mask class Rotate(object): def __init__(self, angle): self.angle = angle def __call__(self, sample): image, mask = sample angle = np.random.uniform(low=-self.angle, high=self.angle) image = rotate(image, angle, resize=False, preserve_range=True, mode="constant") mask = rotate( mask, angle, resize=False, order=0, preserve_range=True, mode="constant" ) return image, mask class HorizontalFlip(object): def __init__(self, flip_prob): self.flip_prob = flip_prob def __call__(self, sample): image, mask = sample if np.random.rand() > self.flip_prob: return image, mask image = np.fliplr(image).copy() mask = np.fliplr(mask).copy() return image, mask # *********************************************** define data loaders *********************************8 def data_loaders(batch_size, workers, image_size, aug_scale, aug_angle): dataset_train, dataset_valid = datasets("/content/lgg-mri-segmentation/kaggle_3m", image_size, aug_scale, aug_angle) def worker_init(worker_id): np.random.seed(42 + worker_id) loader_train = DataLoader( dataset_train, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=workers, worker_init_fn=worker_init, ) loader_valid = DataLoader( dataset_valid, batch_size=batch_size, drop_last=False, num_workers=workers, worker_init_fn=worker_init, ) return loader_train, loader_valid def datasets(images, image_size, aug_scale, aug_angle): train = BrainSegmentationDataset( images_dir=images, subset="train", image_size=image_size, transform=transforms(scale=aug_scale, angle=aug_angle, flip_prob=0.5), ) valid = BrainSegmentationDataset( images_dir=images, subset="validation", image_size=image_size, random_sampling=False, ) return train, valid batch_size = 16 epochs = 10 lr = 0.0001 workers = 2 weights ="/content/gdrive/My Drive/models" image_size = 224 aug_scale = 0.05 aug_angle = 15 # + [markdown] _uuid="2137c315-5596-4acb-8cf8-59966c094dd3" _cell_guid="fffa6cdb-84a3-425c-b172-b7a4e585c317" id="ur7EillJqMXb" # # Visualization # + 
_uuid="ecec0270-d6e8-4758-b98d-0ad832d4b3ab" _cell_guid="eecf328d-9b5b-4033-83c2-28b593370f30" id="Govso4bPqMXc"
def outline(image, mask, color):
    """Draw the boundary of a binary mask onto an RGB image in-place.

    A pixel is on the boundary when its 3x3 neighbourhood mean is strictly
    between 0 and 1 (i.e. the neighbourhood mixes mask and background).
    """
    mask = np.round(mask)
    yy, xx = np.nonzero(mask)
    for y, x in zip(yy, xx):
        if 0.0 < np.mean(mask[max(0, y - 1) : y + 2, max(0, x - 1) : x + 2]) < 1.0:
            image[max(0, y) : y + 1, max(0, x) : x + 1] = color
    return image


def log_images(x, y_true, y_pred, channel=1):
    """Render a batch: one grayscale channel of x with the predicted mask
    outlined in red and the ground-truth mask outlined in green."""
    images = []
    x_np = x[:, channel].cpu().numpy()
    y_true_np = y_true[:, 0].cpu().numpy()
    y_pred_np = y_pred[:, 0].cpu().numpy()
    for i in range(x_np.shape[0]):
        image = gray2rgb(np.squeeze(x_np[i]))
        image = outline(image, y_pred_np[i], color=[255, 0, 0])
        image = outline(image, y_true_np[i], color=[0, 255, 0])
        images.append(image)
    return images


def gray2rgb(image):
    """Min-max normalise a 2-D grayscale array and replicate it into a
    uint8 RGB image. NOTE(review): mutates `image` in place via += / /=."""
    w, h = image.shape
    image += np.abs(np.min(image))
    image_max = np.abs(np.max(image))
    if image_max > 0:
        image /= image_max
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = image * 255
    return ret


# + _uuid="17df0fd4-329e-415b-8341-84d0bf86cedc" _cell_guid="8cdd3a84-a281-4e69-815b-4e1d7c4dbe29" id="jU48034eqMXg" outputId="0b032867-3275-40a7-d130-6fa60c8f5d7d" colab={"base_uri": "https://localhost:8080/", "height": 86}
import random


def viz_images(image , mask):
    """Convert CHW tensors to normalised HWC numpy arrays and return
    (image, mask, image-with-red-mask-outline) for plotting."""
    img_max = image.max().numpy()
    img_min = image.min().numpy()
    image = image.numpy()
    mask = mask.numpy()
    image = np.transpose(image , (1,2,0))
    image = (image-img_min)/(img_max - img_min)
    mask = np.transpose(mask , (1,2,0)).squeeze(2)
    # Copy so outline() does not overwrite the returned originals.
    img_copy = np.copy(image)
    mask_copy = np.copy(mask)
    outline_img = outline(img_copy , mask_copy , (1,0,0))
    return image , mask , outline_img


"""
fig = plt.figure(figsize=(15, 15))
n_cols =3
n_rows =4
sample_list = list(range(len(image_train)))
for i in range(4):
    idx = np.random.choice(sample_list)
    image , mask , outline_image = viz_images(image_train[idx], mask_train[idx])
    ax1 = fig.add_subplot(n_rows, n_cols, n_cols * i + 1)
    ax2 = fig.add_subplot(n_rows, n_cols, n_cols * i + 2)
    ax3 = fig.add_subplot(n_rows, n_cols, n_cols * i + 3)
    ax1.imshow(image)
    ax1.set_title("Original Image")
    ax2.imshow(mask)
    ax2.set_title("Mask Image")
    ax3.imshow(outline_image)
    ax3.set_title("Outline Image")
plt.show()
"""


# + id="X5eRLcGnqMXk"
def deconv(in_channels , out_channels , kernel_size=4 , strides=2 , padding = 1 ,dilation=1 , bathc_norm=True , activation=True):
    """ConvTranspose2d [+ BatchNorm2d] [+ ReLU] as one Sequential block.

    NOTE(review): `bathc_norm` is a typo for `batch_norm` kept for
    call-site compatibility; the transposed conv here uses bias=False.
    """
    filters=[]
    conv_trans = nn.ConvTranspose2d(in_channels , out_channels , kernel_size=kernel_size , stride=strides , padding=padding ,dilation=dilation , bias=False)
    filters.append(conv_trans)
    if(bathc_norm):
        bn = nn.BatchNorm2d(out_channels)
        filters.append(bn)
    if(activation):
        activ = nn.ReLU(inplace=True)
        filters.append(activ)
    return nn.Sequential(*filters)


# + id="JEEwoTYYqMXo"
def conv(in_channels , out_channels , kernel_size , strides , padding , batch_norm=True , activation=True):
    """Conv2d [+ BatchNorm2d] [+ ReLU] as one Sequential block (bias=False)."""
    filters=[]
    conv = nn.Conv2d(in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=strides , padding=padding , bias=False)
    filters.append(conv)
    if(batch_norm):
        bn = nn.BatchNorm2d(num_features=out_channels)
        filters.append(bn)
    if(activation):
        activ = nn.ReLU(inplace=True)
        filters.append(activ)
    return nn.Sequential(*filters)


# + id="s-0fo6-6qMXr"
def resblock(in_channels , out_channels , kernel_size , strides , padding , batch_norm=True):
    """Two stacked conv blocks; the residual addition is done by the caller."""
    filters=[]
    conv_block1 = conv(in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , strides=strides , padding=padding , batch_norm=batch_norm)
    filters.append(conv_block1)
    conv_block2 = conv(in_channels=out_channels , out_channels=out_channels , kernel_size=kernel_size , strides=strides , padding=padding , batch_norm=batch_norm)
    filters.append(conv_block2)
    return nn.Sequential(*filters)


# + _uuid="f83fb3f5-63e4-4a0e-81cc-6c6f9ee6b8cb" _cell_guid="60c5b509-ffc6-4a70-ac6b-08f4c8e02cf2" id="QiR-LxUfqMXv"
from torchvision import models


class UNet(nn.Module):
    """U-Net with a VGG19-BN encoder, a residual bottleneck, and FPN-style
    side outputs from every decoder stage concatenated before the 1x1
    output conv. Output is a sigmoid probability map.

    NOTE(review): `init_features` is accepted but unused — `features` is
    hard-coded to 32 below; kept for signature compatibility.
    """

    def __init__(self, in_channels=3, out_channels=1, init_features=32):
        super(UNet, self).__init__()
        features = 32
        vgg_19 = models.vgg19_bn(pretrained=False)
        vgg_module = list(vgg_19.children())[:-2]
        # layer_indx is informational only (slice boundaries of vgg_module[0]).
        layer_indx = [6 , 13 , 26 , 39 , 52]
        self.vgg_block1 = vgg_module[0][:7] # 3 --> 64 (2,2) downsample
        self.vgg_block2 = vgg_module[0][7:14] #64 -->128 (2,2) downsample
        self.vgg_block3 = vgg_module[0][14:27] #128 -->256 (2,2) downsample
        self.vgg_block4 = vgg_module[0][27:40] #256 --> 512 (2,2) downsample
        self.vgg_block5 = vgg_module[0][40:53] # 512 --> 1024 (2,2) downsample
        self.conv_vgg_out = UNet._conv(512 , 1024 , kernel_size=3 , strides=1 , padding=1)
        # Residual bottleneck: three blocks chained with skip additions in forward().
        self.res_block1 = UNet._resblock(1024 , 1024 ,kernel_size=3 , strides=1 , padding=1)
        self.res_block2 = UNet._resblock(1024 , 1024 ,kernel_size=3 , strides=1 , padding=1)
        self.res_block3 = UNet._resblock(1024 , 1024 ,kernel_size=3 , strides=1 , padding=1)
        # fpn_N: upsample each decoder stage straight to full resolution.
        self.fpn_5 = UNet._deconv(features*32 , features , kernel_size=8 , strides=32 , padding = 2 ,dilation=5 , bathc_norm=True , activation=True)
        self.upconv4 = UNet._deconv(features * 32 , features * 16)
        self.decoder4 = UNet._conv((features * 16) * 2, features * 16, kernel_size=3 , strides=1 , padding=1 )
        self.fpn_4 = UNet._deconv (features*16 , features , kernel_size=6 , strides=16 , padding = 0 ,dilation=3 , bathc_norm=True , activation=True)
        self.upconv3 = UNet._deconv(features * 16, features * 8)
        self.decoder3 = UNet._conv((features * 8) * 2, features * 8, kernel_size=3 , strides=1 , padding=1)
        self.fpn_3 = UNet._deconv (features*8 , features , kernel_size=4 , strides=8 , padding = 1 ,dilation=3 , bathc_norm=True , activation=True)
        self.upconv2 = UNet._deconv(features * 8, features * 4)
        self.decoder2 = UNet._conv((features * 4) * 2, features * 4, kernel_size=3 , strides=1 , padding=1)
        self.fpn_2 = UNet._deconv (features*4 , features , kernel_size=4 , strides=4 , padding = 0 ,dilation=1 , bathc_norm=True , activation=True)
        self.upconv1 = UNet._deconv(features * 4, features*2)
        self.decoder1 = UNet._conv((features * 2)*2 , features *2, kernel_size=3 , strides=1 , padding=1)
        self.fpn_1 = UNet._deconv (features*2 , features , kernel_size=4 , strides=2 , padding = 1 ,dilation=1 , bathc_norm=True , activation=True)
        self.upconv0 = UNet._deconv(features * 2 , features)
        # Final 1x1 conv fuses the last upsample + 5 FPN maps (6*features channels).
        self.conv = UNet._conv(
            in_channels=features*6, out_channels=out_channels, kernel_size=1 ,strides=1 , padding=0 , batch_norm=False , activation=False
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Encoder: five VGG stages, each halving spatial resolution.
        enc1 = self.vgg_block1(x)
        enc2 = self.vgg_block2(enc1)
        enc3 = self.vgg_block3(enc2)
        enc4 = self.vgg_block4(enc3)
        enc5 = self.vgg_block5(enc4)
        enc_out = self.conv_vgg_out(enc5)
        # Residual bottleneck with additive skips.
        res1 = self.res_block1(enc_out)
        res2_in = res1 + enc_out
        res2 = self.res_block2(res2_in)
        res3_in = res2 + res2_in
        res3 = self.res_block3(res3_in)
        dec_in = res3 + res3_in
        # Decoder: upsample, concatenate encoder skip, convolve; each stage
        # also emits a full-resolution FPN map.
        fpn_5 = self.fpn_5(dec_in)
        dec5 = self.upconv4(dec_in)
        dec5 = torch.cat((dec5, enc4), dim=1)
        dec4 = self.decoder4(dec5)
        fpn_4 = self.fpn_4(dec4)
        dec4 = self.upconv3(dec4)
        dec3 = torch.cat((dec4, enc3), dim=1)
        dec3 = self.decoder3(dec3)
        fpn_3 = self.fpn_3(dec3)
        dec2 = self.upconv2(dec3)
        dec2 = torch.cat((dec2, enc2), dim=1)
        dec1 = self.decoder2(dec2)
        fpn_2 = self.fpn_2(dec1)
        dec1 = self.upconv1(dec1)
        dec0 = torch.cat((dec1, enc1), dim=1)
        dec_out = self.decoder1(dec0)
        fpn_1 = self.fpn_1(dec_out)
        out = self.upconv0(dec_out)
        # Fuse the last upsample with all FPN side outputs, then 1x1 conv + sigmoid.
        dec0 = torch.cat((out ,fpn_1 ,fpn_2 , fpn_3 , fpn_4 , fpn_5 ), dim=1)
        return self.sigmoid(self.conv(dec0))

    @staticmethod
    def _deconv(in_channels , out_channels , kernel_size=4 , strides=2 , padding = 1 ,dilation=1 , bathc_norm=True , activation=True):
        # Same as the module-level deconv() but with bias=True.
        filters=[]
        conv_trans = nn.ConvTranspose2d(in_channels , out_channels , kernel_size=kernel_size , stride=strides , padding=padding ,dilation=dilation , bias=True)
        filters.append(conv_trans)
        if(bathc_norm):
            bn = nn.BatchNorm2d(out_channels)
            filters.append(bn)
        if(activation):
            activ = nn.ReLU(inplace=True)
            filters.append(activ)
        return nn.Sequential(*filters)

    @staticmethod
    def _conv(in_channels , out_channels , kernel_size , strides , padding , batch_norm=True , activation=True):
        # Same as the module-level conv() but with bias=True.
        filters=[]
        conv = nn.Conv2d(in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=strides , padding=padding , bias=True)
        filters.append(conv)
        if(batch_norm):
            bn = nn.BatchNorm2d(num_features=out_channels)
            filters.append(bn)
        if(activation):
            activ = nn.ReLU(inplace=True)
            filters.append(activ)
        return nn.Sequential(*filters)

    @staticmethod
    def _resblock(in_channels , out_channels , kernel_size , strides , padding , batch_norm=True):
        # Two stacked conv blocks; residual addition happens in forward().
        filters=[]
        conv_block1 = UNet._conv(in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , strides=strides , padding=padding , batch_norm=batch_norm)
        filters.append(conv_block1)
        conv_block2 = UNet._conv(in_channels=out_channels , out_channels=out_channels , kernel_size=kernel_size , strides=strides , padding=padding , batch_norm=batch_norm)
        filters.append(conv_block2)
        return nn.Sequential(*filters)


# + id="FNIJSdASn0Bh"
import torch


def weight_init(m):
    """Xavier-initialise Conv2d weights and zero their biases.

    NOTE(review): torch.nn.init.xavier_normal is deprecated in favour of
    xavier_normal_; also fails on bias-less convs (bias is None) — confirm
    which layers this is applied to before use.
    """
    if(type(m) == nn.Conv2d):
        torch.nn.init.xavier_normal(m.weight)
        m.bias.data.fill_(0)


# + _uuid="07d1a7c9-a623-421c-8216-e9aa56b95a87" _cell_guid="6e3d10a6-1a40-4801-a392-fd18bba0def6" id="Fp10eVhNqMXz"
# Define the Dice/IoU-style loss function for medical image segmentation.
class DiceLoss(nn.Module):
    """Soft Dice loss on channel 0 of prediction/target: 1 - DSC, with
    additive smoothing to avoid division by zero on empty masks."""

    def __init__(self):
        super(DiceLoss, self).__init__()
        self.smooth = 1.0

    def forward(self, y_pred, y_true):
        assert y_pred.size() == y_true.size()
        y_pred = y_pred[:, 0].contiguous().view(-1)
        y_true = y_true[:, 0].contiguous().view(-1)
        intersection = (y_pred * y_true).sum()
        dsc = (2. * intersection + self.smooth) / (
            y_pred.sum() + y_true.sum() + self.smooth
        )
        return 1. - dsc


def log_loss_summary(loss, step, prefix=""):
    """Print the mean of the accumulated per-batch losses for one epoch."""
    print("epoch {} | {}: {}".format(step + 1, prefix + "loss", np.mean(loss)))


def log_scalar_summary(tag, value, step):
    """Print one scalar metric for one epoch."""
    print("epoch {} | {}: {}".format(step + 1, tag, value))


# + _uuid="ce470165-9e53-4e23-809c-7295381957a1" _cell_guid="25d6aca0-2a89-4e3f-95fb-79be22f86fcc" id="BbjIRL5BqMX2"
def dsc(y_pred, y_true):
    """Hard Dice coefficient between two (rounded) binary arrays.

    NOTE(review): divides by zero when both masks are empty — TODO confirm
    that never happens per-volume in this dataset.
    """
    y_pred = np.round(y_pred).astype(int)
    y_true = np.round(y_true).astype(int)
    return np.sum(y_pred[y_true == 1]) * 2.0 / (np.sum(y_pred) + np.sum(y_true))


def dsc_distribution(volumes):
    """Map patient id -> Dice score, given volumes[p] = (input, pred, true)."""
    dsc_dict = {}
    for p in volumes:
        y_pred = volumes[p][1]
        y_true = volumes[p][2]
        dsc_dict[p] = dsc(y_pred, y_true)
    return dsc_dict


def dsc_per_volume(validation_pred, validation_true, patient_slice_index):
    """Dice per patient volume: slices are grouped back into volumes via
    patient_slice_index (list of (patient_idx, slice_idx) in loader order)."""
    dsc_list = []
    num_slices = np.bincount([p[0] for p in patient_slice_index])
    index = 0
    for p in range(len(num_slices)):
        y_pred = np.array(validation_pred[index : index + num_slices[p]])
        y_true = np.array(validation_true[index : index + num_slices[p]])
        dsc_list.append(dsc(y_pred, y_true))
        index += num_slices[p]
    return dsc_list


def postprocess_per_volume(
    input_list, pred_list, true_list, patient_slice_index, patients
):
    """Regroup flat per-slice lists into per-patient volumes.

    Returns {patient_id: (inputs, rounded predictions, ground truth)}.
    """
    volumes = {}
    num_slices = np.bincount([p[0] for p in patient_slice_index])
    index = 0
    for p in range(len(num_slices)):
        volume_in = np.array(input_list[index : index + num_slices[p]])
        volume_pred = np.round(
            np.array(pred_list[index : index + num_slices[p]])
        ).astype(int)
        volume_true = np.array(true_list[index : index + num_slices[p]])
        volumes[patients[p]] = (volume_in, volume_pred, volume_true)
        index += num_slices[p]
    return volumes


def plot_dsc(dsc_dist):
    """Render a sorted horizontal bar chart of per-patient Dice scores and
    return it as an RGBA numpy image (no figure is shown)."""
    y_positions = np.arange(len(dsc_dist))
    dsc_dist = sorted(dsc_dist.items(), key=lambda x: x[1])
    values = [x[1] for x in dsc_dist]
    labels = [x[0] for x in dsc_dist]
    # Strip the leading dataset prefix and trailing index from patient ids.
    labels = ["_".join(l.split("_")[1:-1]) for l in labels]
    fig = plt.figure(figsize=(12, 8))
    canvas = FigureCanvasAgg(fig)
    plt.barh(y_positions, values, align="center", color="skyblue")
    plt.yticks(y_positions, labels)
    plt.xticks(np.arange(0.0, 1.0, 0.1))
    plt.xlim([0.0, 1.0])
    # Mean (tomato) and median (forestgreen) reference lines.
    plt.gca().axvline(np.mean(values), color="tomato", linewidth=2)
    plt.gca().axvline(np.median(values), color="forestgreen", linewidth=2)
    plt.xlabel("Dice coefficient", fontsize="x-large")
    plt.gca().xaxis.grid(color="silver", alpha=0.5, linestyle="--", linewidth=1)
    plt.tight_layout()
    canvas.draw()
    plt.close()
    s, (width, height) = canvas.print_to_buffer()
    # NOTE(review): np.fromstring is deprecated — np.frombuffer is the
    # modern equivalent; behaviour here is byte-for-byte identical.
    return np.fromstring(s, np.uint8).reshape((height, width, 4))


# + _uuid="5e03c44c-db34-40e7-82aa-ee2172ae6a87" _cell_guid="7f9a0bd4-82f9-4fe7-bc8e-57d9d323f745" id="ehrc1Mb6qMX7" outputId="ed1df6d8-a35d-40eb-9c5a-622372d45c02" colab={"base_uri": "https://localhost:8080/", "height": 255}
loader_train, loader_valid = data_loaders(batch_size, workers, image_size, aug_scale, aug_angle)
loaders = {"train": loader_train, "valid": loader_valid}


# + _uuid="b0c5cbd2-e862-4162-992e-a3d15fa5114e" _cell_guid="4cc62d98-934a-4ff8-8f7a-7b0bf59ef20d" id="ywuOI5JqqMX-" outputId="f5d3d490-2cdd-4081-b1bf-8e28e82870ae" colab={"base_uri": "https://localhost:8080/", "height": 697}
def train_validate():
    """Train the U-Net with Dice loss, checkpoint the best validation-DSC
    weights, then reload them and write per-slice overlay PNGs plus a DSC
    distribution plot for the validation set."""
    device = torch.device("cpu" if not torch.cuda.is_available() else "cuda:0")
    unet = UNet(in_channels=BrainSegmentationDataset.in_channels, out_channels=BrainSegmentationDataset.out_channels)
    unet.to(device)
    dsc_loss = DiceLoss()
    best_validation_dsc = 0.0
    optimizer = optim.Adam(unet.parameters(), lr=lr)
    loss_train = []
    loss_valid = []
    step = 0
    for epoch in range(epochs):
        for phase in ["train", "valid"]:
            if phase == "train":
                unet.train()
            else:
                unet.eval()
            validation_pred = []
            validation_true = []
            for i, data in enumerate(loaders[phase]):
                if phase == "train":
                    step += 1
                x, y_true = data
                x, y_true = x.to(device), y_true.to(device)
                optimizer.zero_grad()
                # Gradients only during the training phase.
                with torch.set_grad_enabled(phase == "train"):
                    y_pred = unet(x)
                    # compute the Dice/IoU loss (else we can use the BCELoss but don't use both)
                    loss = dsc_loss(y_pred, y_true)
                    if phase == "valid":
                        loss_valid.append(loss.item())
                        y_pred_np = y_pred.detach().cpu().numpy()
                        validation_pred.extend(
                            [y_pred_np[s] for s in range(y_pred_np.shape[0])]
                        )
                        y_true_np = y_true.detach().cpu().numpy()
                        validation_true.extend(
                            [y_true_np[s] for s in range(y_true_np.shape[0])]
                        )
                    if phase == "train":
                        loss_train.append(loss.item())
                        loss.backward()
                        optimizer.step()
            if phase == "train":
                log_loss_summary(loss_train, epoch)
                loss_train = []
            if phase == "valid":
                log_loss_summary(loss_valid, epoch, prefix="val_")
                mean_dsc = np.mean(
                    dsc_per_volume(
                        validation_pred,
                        validation_true,
                        loader_valid.dataset.patient_slice_index,
                    )
                )
                log_scalar_summary("val_dsc", mean_dsc, epoch)
                # Checkpoint whenever validation DSC improves.
                if mean_dsc > best_validation_dsc:
                    best_validation_dsc = mean_dsc
                    torch.save(unet.state_dict(), os.path.join(weights, "unet.pt"))
                loss_valid = []
    print("\nBest validation mean DSC: {:4f}\n".format(best_validation_dsc))
    # Reload the best checkpoint for the final evaluation pass.
    state_dict = torch.load(os.path.join(weights, "unet.pt"))
    unet.load_state_dict(state_dict)
    unet.eval()
    input_list = []
    pred_list = []
    true_list = []
    for i, data in enumerate(loader_valid):
        x, y_true = data
        x, y_true = x.to(device), y_true.to(device)
        with torch.set_grad_enabled(False):
            y_pred = unet(x)
            y_pred_np = y_pred.detach().cpu().numpy()
            pred_list.extend([y_pred_np[s] for s in range(y_pred_np.shape[0])])
            y_true_np = y_true.detach().cpu().numpy()
            true_list.extend([y_true_np[s] for s in range(y_true_np.shape[0])])
            x_np = x.detach().cpu().numpy()
            input_list.extend([x_np[s] for s in range(x_np.shape[0])])
    volumes = postprocess_per_volume(
        input_list,
        pred_list,
        true_list,
        loader_valid.dataset.patient_slice_index,
        loader_valid.dataset.patients,
    )
    dsc_dist = dsc_distribution(volumes)
    dsc_dist_plot = plot_dsc(dsc_dist)
    imsave("./dsc.png", dsc_dist_plot)
    # Write one overlay PNG per validation slice: red = prediction, green = truth.
    for p in volumes:
        x = volumes[p][0]
        y_pred = volumes[p][1]
        y_true = volumes[p][2]
        for s in range(x.shape[0]):
            image = gray2rgb(x[s, 1])  # channel 1 is for FLAIR
            image = outline(image, y_pred[s, 0], color=[255, 0, 0])
            image = outline(image, y_true[s, 0], color=[0, 255, 0])
            filename = "{}-{}.png".format(p, str(s).zfill(2))
            filepath = os.path.join("./", filename)
            imsave(filepath, image)


train_validate()

# + _uuid="8b739607-0406-44da-a210-f5ce9c53d0ca" _cell_guid="c06a0f00-39f1-41c3-8dc6-d1dfa32323ff" id="VcPUSwRHqMYC"
import os
files = os.listdir('/content')

# + id="nptNxUMue5DX"
# Collect only the generated overlay PNGs for visualisation.
img_files=[]
for file in files :
    val = file.endswith(".png")
    if(val):
        img_files.append(file)

# + id="9FyS3XCbkpFm" outputId="30bd0ae5-d9a1-43be-ce7b-039fafc8660b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Show ~6% of the available overlay images.
split_idx = 0.06
viz_content = int(split_idx * len(img_files))
print(viz_content)

# + id="V-qusvvee6OX" outputId="d8f7967a-b963-4b1f-dcb9-e0672e523b3e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import cv2
split_idx = 0.06
viz_content = int(split_idx * len(img_files))
idx_list = np.arange(len(img_files))
print("Red --> Model segmentation Green --> Ground truth segmentation ")
fig=plt.figure(figsize=(20, 20))
columns = 4
rows = 5
for index in range(viz_content):
    # Random sample (with possible repeats) from the overlay images.
    img_index = np.random.choice(idx_list)
    img_bgr = cv2.imread(img_files[img_index])
    img_rgb = cv2.cvtColor(img_bgr , cv2.COLOR_BGR2RGB)
    fig.add_subplot(rows, columns, index+1)
    plt.imshow(img_rgb)

# + id="vzHqnv3tpbyY"
brain_segmentation_unet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Circuit Reduction Tutorial # The circuits used in standard Long Sequence GST are more than what are needed to amplify every possible gate error. (Technically, this is due to the fact that the informationaly complete fiducial sub-sequences allow extraction of each germ's *entire* process matrix, when all that is needed is the part describing the amplified directions in model space.) Because of this over-completeness, fewer sequences, i.e. experiments, may be used whilst retaining the desired Heisenberg-like scaling ($\sim 1/L$, where $L$ is the maximum length sequence). The over-completeness can still be desirable, however, as it makes the GST optimization more robust to model violation and so can serve to stabilize the GST parameter optimization in the presence of significant non-Markovian noise. Recall that the form of a GST gate sequence is # # $$S = F_i (g_k)^n F_j $$ # # where $F_i$ is a "preparation fiducial" sequence, $F_j$ is a "measurement fiducial" sequence, and "g_k" is a "germ" sequence. The repeated germ sequence $(g_k)^n$ we refer to as a "germ-power". There are currently three different ways to reduce a standard set of GST operation sequences within pyGSTi, each of which removes certain $(F_i,F_j)$ fiducial pairs for certain germ-powers. # # - **Global fiducial pair reduction (GFPR)** removes the same intelligently-selected set of fiducial pairs for all germ-powers. This is a conceptually simple method of reducing the operation sequences, but it is the most computationally intensive since it repeatedly evaluates the number of amplified parameters for en *entire germ set*. In practice, while it can give very large sequence reductions, its long run can make it prohibitive, and the "per-germ" reduction discussed next is used instead. 
# - **Per-germ fiducial pair reduction (PFPR)** removes the same intelligently-selected set of fiducial pairs for all powers of a given germ, but different sets are removed for different germs. Since different germs amplify different directions in model space, it makes intuitive sense to specify different fiducial pair sets for different germs. Because this method only considers one germ at a time, it is less computationally intensive than GFPR, and thus more practical. Note, however, that PFPR usually results in less of a reduction of the operation sequences, since it does not (currently) take advantage overlaps in the amplified directions of different germs (i.e. if $g_1$ and $g_3$ both amplify two of the same directions, then GST doesn't need to know about these from both germs). # - **Random fiducial pair reduction (RFPR)** randomly chooses a different set of fiducial pairs to remove for each germ-power. It is extremly fast to perform, as pairs are just randomly selected for removal, and in practice works well (i.e. does not impair Heisenberg-scaling) up until some critical fraction of the pairs are removed. This reflects the fact that the direction detected by a fiducial pairs usually has some non-negligible overlap with each of the directions amplified by a germ, and it is the exceptional case that an amplified direction escapes undetected. As such, the "critical fraction" which can usually be safely removed equals the ratio of amplified-parameters to germ-process-matrix-elements (typically $\approx 1/d^2$ where $d$ is the Hilbert space dimension, so $1/4 = 25\%$ for 1 qubit and $1/16 = 6.25\%$ for 2 qubits). RFPR can be combined with GFPR or PFPR so that some number of randomly chosen pairs can be added on top of the "intelligently-chosen" pairs of GFPR or PFPR. In this way, one can vary the amount of sequence reduction (in order to trade off speed vs. 
robustness to non-Markovian noise) without inadvertently selecting too few or an especially bad set of random fiducial pairs. # # ## Preliminaries # # We now demonstrate how to invoke each of these methods within pyGSTi for the case of a single qubit, using our standard $X(\pi/2)$, $Y(\pi/2)$, $I$ model. First, we retrieve a target `Model` as usual, along with corresponding sets of fiducial and germ sequences. We set the maximum length to be 32, roughly consistent with our data-generating model having gates depolarized by 10%. # + #Import pyGSTi and the "stardard 1-qubit quantities for a model with X(pi/2), Y(pi/2), and idle gates" import pygsti import pygsti.construction as pc from pygsti.modelpacks import smq1Q_XYI #Collect a target model, germ and fiducial strings, and set # a list of maximum lengths. target_model = smq1Q_XYI.target_model() prep_fiducials = smq1Q_XYI.prep_fiducials() meas_fiducials = smq1Q_XYI.meas_fiducials() germs = smq1Q_XYI.germs() maxLengths = [1,2,4,8,16,32] opLabels = list(target_model.operations.keys()) print("Gate operation labels = ", opLabels) # - # ## Sequence Reduction # # Now let's generate a list of all the operation sequences for each maximum length - so a list of lists. We'll generate the full lists (without any reduction) and the lists for each of the three reduction types listed above. In the random reduction case, we'll keep 30% of the fiducial pairs, removing 70% of them. # # ### No Reduction ("standard" GST) # + #Make list-of-lists of GST operation sequences fullStructs = pc.make_lsgst_structs( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths) #Print the number of operation sequences for each maximum length print("** Without any reduction ** ") for L,strct in zip(maxLengths,fullStructs): print("L=%d: %d operation sequences" % (L,len(strct.allstrs))) #Make a (single) list of all the GST sequences ever needed, # that is, the list of all the experiments needed to perform GST. 
fullExperiments = pc.make_lsgst_experiment_list( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths) print("\n%d experiments to run GST." % len(fullExperiments)) # - # ### Global Fiducial Pair Reduction (GFPR) # + fidPairs = pygsti.alg.find_sufficient_fiducial_pairs( target_model, prep_fiducials, meas_fiducials, germs, searchMode="random", nRandom=100, seed=1234, verbosity=1, memLimit=int(2*(1024)**3), minimumPairs=2) # fidPairs is a list of (prepIndex,measIndex) 2-tuples, where # prepIndex indexes prep_fiducials and measIndex indexes meas_fiducials print("Global FPR says we only need to keep the %d pairs:\n %s\n" % (len(fidPairs),fidPairs)) gfprStructs = pc.make_lsgst_structs( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths, fidPairs=fidPairs) print("Global FPR reduction") for L,strct in zip(maxLengths,gfprStructs): print("L=%d: %d operation sequences" % (L,len(strct.allstrs))) gfprExperiments = pc.make_lsgst_experiment_list( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths, fidPairs=fidPairs) print("\n%d experiments to run GST." % len(gfprExperiments)) # - # ### Per-germ Fiducial Pair Reduction (PFPR) # + fidPairsDict = pygsti.alg.find_sufficient_fiducial_pairs_per_germ( target_model, prep_fiducials, meas_fiducials, germs, searchMode="random", constrainToTP=True, nRandom=100, seed=1234, verbosity=1, memLimit=int(2*(1024)**3)) print("\nPer-germ FPR to keep the pairs:") for germ,pairsToKeep in fidPairsDict.items(): print("%s: %s" % (str(germ),pairsToKeep)) pfprStructs = pc.make_lsgst_structs( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths, fidPairs=fidPairsDict) #note: fidPairs arg can be a dict too! print("\nPer-germ FPR reduction") for L,strct in zip(maxLengths,pfprStructs): print("L=%d: %d operation sequences" % (L,len(strct.allstrs))) pfprExperiments = pc.make_lsgst_experiment_list( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths, fidPairs=fidPairsDict) print("\n%d experiments to run GST." 
% len(pfprExperiments)) # - # ### Random Fiducial Pair Reduction (RFPR) # + #keep only 30% of the pairs rfprStructs = pc.make_lsgst_structs( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths, keepFraction=0.30, keepSeed=1234) print("Random FPR reduction") for L,strct in zip(maxLengths,rfprStructs): print("L=%d: %d operation sequences" % (L,len(strct.allstrs))) rfprExperiments = pc.make_lsgst_experiment_list( opLabels, prep_fiducials, meas_fiducials, germs, maxLengths, keepFraction=0.30, keepSeed=1234) print("\n%d experiments to run GST." % len(rfprExperiments)) # - # ## Running GST # In each case above, we constructed (1) a list-of-lists giving the GST operation sequences for each maximum-length stage, and (2) a list of the experiments. In what follows, we'll use the experiment list to generate some simulated ("fake") data for each case, and then run GST on it. Since this is done in exactly the same way for all three cases, we'll put all of the logic in a function. Note that the use of fiducial pair redution requires the use of `do_long_sequence_gst_base`, since `do_long_sequence_gst` internally builds a *complete* list of operation sequences. 
# + #use a depolarized version of the target gates to generate the data mdl_datagen = target_model.depolarize(op_noise=0.1, spam_noise=0.001) def runGST(gstStructs, exptList): #Use list of experiments, expList, to generate some data ds = pc.generate_fake_data(mdl_datagen, exptList, nSamples=1000,sampleError="binomial", seed=1234) #Use "base" driver to directly pass list of circuit structures return pygsti.do_long_sequence_gst_base( ds, target_model, gstStructs, verbosity=1) print("\n------ GST with standard (full) sequences ------") full_results = runGST(fullStructs, fullExperiments) print("\n------ GST with GFPR sequences ------") gfpr_results = runGST(gfprStructs, gfprExperiments) print("\n------ GST with PFPR sequences ------") pfpr_results = runGST(pfprStructs, pfprExperiments) print("\n------ GST with RFPR sequences ------") rfpr_results = runGST(rfprStructs, rfprExperiments) # - # Finally, one can generate reports using GST with reduced-sequences: pygsti.report.construct_standard_report(full_results, title="Standard GST Strings Example" ).write_html("tutorial_files/example_stdstrs_report") pygsti.report.construct_standard_report(gfpr_results, title="Global FPR Report Example" ).write_html("tutorial_files/example_gfpr_report") pygsti.report.construct_standard_report(pfpr_results, title="Per-germ FPR Report Example" ).write_html("tutorial_files/example_pfpr_report") pygsti.report.construct_standard_report(rfpr_results, title="Random FPR Report Example" ).write_html("tutorial_files/example_rfpr_report") # If all has gone well, the [Standard GST](tutorial_files/example_stdstrs_report/main.html), # [GFPR](tutorial_files/example_gfpr_report/main.html), # [PFPR](tutorial_files/example_pfpr_report/main.html), and # [RFPR](tutorial_files/example_rfpr_report/main.html), # reports may now be viewed. 
# The only notable difference in the output are "gaps" in the color box plots which plot quantities such as the log-likelihood across all operation sequences, organized by germ and fiducials.
jupyter_notebooks/Tutorials/algorithms/advanced/GST-FiducialPairReduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Ruby 2.3.3 # language: ruby # name: ruby # --- # ## Logic Gates # All Gates are in the module **LogicGates**. All methods available can be found in the documentation. # load gemfile ruby_circuits.rb require '../../lib/ruby_ciruits' # Creating a new **AND** gate which bits as inputs. This is a 2-input **AND** gate can be extended to n-input gate by passing n-inputs g = LogicGates::AND.new(0, 1) g = LogicGates::AND.new(1, 1, 1, 1) g.output() g.get_input_states() c = Connector.new g = LogicGates::AND.new(1, 1) g.set_output(c) g1 = LogicGates::AND.new(c, 1) c.is_output?(g) c.is_output?(g1) c.is_input?(g1)
examples/jupyter_notebook/.ipynb_checkpoints/logic_gates-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # # Construction of Regression Models using Data # # Author: <NAME> (<EMAIL>) # <NAME> (<EMAIL>) # + [markdown] slideshow={"slide_type": "notes"} # Notebook version: 2.0 (Sep 26, 2017) # # Changes: v.1.0 - First version. Extracted from regression_intro_knn v.1.0. # v.1.1 - Compatibility with python 2 and python 3 # v.2.0 - New notebook generated. Fuses code from Notebooks R1, R2, and R3 # + slideshow={"slide_type": "slide"} # Import some libraries that will be necessary for working with data and displaying plots # To visualize plots in the notebook # %matplotlib inline import numpy as np import scipy.io # To read matlab files import pandas as pd # To read data tables from csv files # For plots and graphical results import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import pylab # For the student tests (only for python 2) import sys if sys.version_info.major==2: from test_helper import Test # That's default image size for this interactive session pylab.rcParams['figure.figsize'] = 9, 6 # + [markdown] slideshow={"slide_type": "slide"} # # ## 1. The regression problem # # The goal of regression methods is to predict the value of some *target* variable $S$ from the observation of one or more *input* variables $X_1, X_2, \ldots, X_N$ (that we will collect in a single vector $\bf X$). # # Regression problems arise in situations where the value of the target variable is not easily accessible, but we can measure other dependent variables, from which we can try to predict $S$. 
# <img src="figs/block_diagram.png", width=600> # + [markdown] slideshow={"slide_type": "subslide"} # The only information available to estimate the relation between the inputs and the target is a *dataset* $\mathcal D$ containing several observations of all variables. # # $$\mathcal{D} = \{{\bf x}^{(k)}, s^{(k)}\}_{k=1}^K$$ # # The dataset $\mathcal{D}$ must be used to find a function $f$ that, for any observation vector ${\bf x}$, computes an output $\hat{s} = f({\bf x})$ that is a good predition of the true value of the target, $s$. # # <img src="figs/predictor.png", width=300> # # Note that for the generation of the regression model, we exploit the statistical dependence between random variable $S$ and random vector ${\bf X}$. In this respect, we can assume that the available dataset $\mathcal{D}$ consists of i.i.d. points from the joint distribution $p_{S,{\bf X}}(s,{\bf x})$. If we had access to the true distribution, a statistical approach would be more accurate; however, in many situations such knowledge is not available, but using training data to do the design is feasible (e.g., relying on historic data, or by manual labelling of a set of patterns). # + [markdown] slideshow={"slide_type": "slide"} # ## 2. Examples of regression problems. # # The <a href=http://scikit-learn.org/>scikit-learn</a> package contains several <a href=http://scikit-learn.org/stable/datasets/> datasets</a> related to regression problems. # # * <a href=http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html#sklearn.datasets.load_boston > Boston dataset</a>: the target variable contains housing values in different suburbs of Boston. The goal is to predict these values based on several social, economic and demographic variables taken frome theses suburbs (you can get more details in the <a href = https://archive.ics.uci.edu/ml/datasets/Housing > UCI repository </a>). 
#
#
# * <a href=http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html#sklearn.datasets.load_diabetes /> Diabetes dataset</a>.
#
# We can load these datasets as follows:

# + slideshow={"slide_type": "fragment"}
from sklearn import datasets

# Load the dataset. Select it by uncommenting the appropriate line
# NOTE(review): load_boston was removed in scikit-learn 1.2 — running this
# cell requires scikit-learn < 1.2 or switching to another dataset.
D_all = datasets.load_boston()
#D_all = datasets.load_diabetes()

# Extract data and data parameters.
X = D_all.data          # Complete data matrix (including input and target variables)
S = D_all.target        # Target variables
n_samples = X.shape[0]  # Number of observations
n_vars = X.shape[1]     # Number of variables (including input and target)

# + [markdown] slideshow={"slide_type": "slide"}
# This dataset contains

# + slideshow={"slide_type": "fragment"}
print(n_samples)

# + [markdown] slideshow={"slide_type": "fragment"}
# observations of the target variable and

# + slideshow={"slide_type": "fragment"}
print(n_vars)

# + [markdown] slideshow={"slide_type": "fragment"}
# input variables.

# + [markdown] slideshow={"slide_type": "slide"}
# ## 3. Scatter plots
#
# ### 3.1. 2D scatter plots
#
# When the instances of the dataset are multidimensional, they cannot be visualized directly, but we can get a first rough idea about the regression task if we plot the target variable versus one of the input variables. These representations are known as <i>scatter plots</i>
#
# Python methods `plot` and `scatter` from the `matplotlib` package can be used for these graphical representations.
#

# + slideshow={"slide_type": "subslide"}
# Select a dataset
nrows = 4
# Ceiling of X.shape[1]/nrows. BUG FIX: the original used "/", which is
# float division in Python 3, and plt.subplot requires integer grid
# dimensions — use integer division instead.
ncols = 1 + (X.shape[1] - 1) // nrows

# Some adjustment for the subplot.
pylab.subplots_adjust(hspace=0.2)

# Plot every input variable against the target.
for idx in range(X.shape[1]):
    ax = plt.subplot(nrows, ncols, idx + 1)
    ax.scatter(X[:, idx], S)    # <-- This is the key command
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    plt.ylabel('Target')

# + [markdown] slideshow={"slide_type": "slide"}
#
# ## 4. 
Evaluating a regression task # # In order to evaluate the performance of a given predictor, we need to quantify the quality of predictions. This is usually done by means of a loss function $l(s,\hat{s})$. Two common losses are # # - Square error: $l(s, \hat{s}) = (s - \hat{s})^2$ # - Absolute error: $l(s, \hat{s}) = |s - \hat{s}|$ # # Note that both the square and absolute errors are functions of the estimation error $e = s-{\hat s}$. However, this is not necessarily the case. As an example, imagine a situation in which we would like to introduce a penalty which increases with the magnitude of the estimated variable. For such case, the following cost would better fit our needs: $l(s,{\hat s}) = s^2 \left(s-{\hat s}\right)^2$. # + slideshow={"slide_type": "fragment"} # In this section we will plot together the square and absolute errors grid = np.linspace(-3,3,num=100) plt.plot(grid, grid**2, 'b-', label='Square error') plt.plot(grid, np.absolute(grid), 'r--', label='Absolute error') plt.xlabel('Error') plt.ylabel('Cost') plt.legend(loc='best') plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # In general, we do not care much about an isolated application of the regression model, but instead, we are looking for a generally good behavior, for which we need to average the loss function over a set of samples. In this notebook, we will use the average of the square loss, to which we will refer as the `mean-square error` (MSE). # # $$\text{MSE} = \frac{1}{K}\sum_{k=1}^K \left(s^{(k)}- {\hat s}^{(k)}\right)^2$$ # # The following code fragment defines a function to compute the MSE based on the availability of two vectors, one of them containing the predictions of the model, and the other the true target values. # + slideshow={"slide_type": "fragment"} # We start by defining a function that calculates the average square error def square_error(s, s_est): # Squeeze is used to make sure that s and s_est have the appropriate dimensions. 
y = np.mean(np.power((np.squeeze(s) - np.squeeze(s_est)), 2)) return y # + [markdown] slideshow={"slide_type": "slide"} # ### 4.1. Training and test data # # The major goal of the regression problem is that the predictor should make good predictions for arbitrary new inputs, not taken from the dataset used by the regression algorithm. # # Thus, in order to evaluate the prediction accuracy of some regression algorithm, we need some data, not used during the predictor design, to *test* the performance of the predictor under new data. To do so, the original dataset is usually divided in (at least) two disjoint sets: # # * **Training set**, $\cal{D}_{\text{train}}$: Used by the regression algorithm to determine predictor $f$. # * **Test set**, $\cal{D}_{\text{test}}$: Used to evaluate the performance of the regression algorithm. # # A good regression algorithm uses $\cal{D}_{\text{train}}$ to obtain a predictor with small average loss based on $\cal{D}_{\text{test}}$ # $$ # {\bar R}_{\text{test}} = \frac{1}{K_{\text{test}}} # \sum_{ ({\bf x},s) \in \mathcal{D}_{\text{test}}} l(s, f({\bf x})) # $$ # where $K_{\text{test}}$ is the size of the test set. # # As a designer, you only have access to training data. However, for illustration purposes, you may be given a test dataset for many examples in this course. Note that in such a case, using the test data to adjust the regression model is completely forbidden. You should work as if such test data set were not available at all, and recur to it just to assess the performance of the model after the design is complete. # # To model the availability of a train/test partition, we split next the boston dataset into a training and test partitions, using 60% and 40% of the data, respectively. # + from sklearn.model_selection import train_test_split X_train, X_test, s_train, s_test = train_test_split(X, S, test_size=0.4, random_state=0) # - # ### 4.2. 
A first example: A baseline regression model # # A first very simple method to build the regression model is to use the average of all the target values in the training set as the output of the model, discarding the value of the observation input vector. # # This approach can be considered as a baseline, given that any other method making an effective use of the observation variables, statistically related to $s$, should improve the performance of this method. # # The following code fragment uses the train data to compute the baseline regression model, and it shows the MSE calculated over the test partitions. # + S_baseline = np.mean(s_train) print('The baseline estimator is:', S_baseline) #Compute MSE for the train data #MSE_train = square_error(s_train, S_baseline) #Compute MSE for the test data. IMPORTANT: Note that we still use #S_baseline as the prediction. MSE_test = square_error(s_test, S_baseline) #print('The MSE for the training data is:', MSE_train) print('The MSE for the test data is:', MSE_test) # + [markdown] slideshow={"slide_type": "slide"} # ## 5. Parametric and non-parametric regression models # # Generally speaking, we can distinguish two approaches when designing a regression model: # # - Parametric approach: In this case, the estimation function is given <i>a priori</i> a parametric form, and the goal of the design is to find the most appropriate values of the parameters according to a certain goal # # For instance, we could assume a linear expression # $${\hat s} = f({\bf x}) = {\bf w}^\top {\bf x}$$ # and adjust the parameter vector in order to minimize the average of the quadratic error over the training data. This is known as least-squares regression, and we will study it in Section 8 of this notebook. # # - Non-parametric approach: In this case, the analytical shape of the regression model is not assumed <i>a priori</i>. # # - # ## 6. 
Non parametric method: Regression with the $k$-nn method # # The principles of the $k$-nn method are the following: # # - For each point where a prediction is to be made, find the $k$ closest neighbors to that point (in the training set) # - Obtain the estimation averaging the labels corresponding to the selected neighbors # # The number of neighbors is a hyperparameter that plays an important role in the performance of the method. You can test its influence by changing $k$ in the following piece of code. # + from sklearn import neighbors n_neighbors = 1 knn = neighbors.KNeighborsRegressor(n_neighbors) knn.fit(X_train, s_train) s_hat_train = knn.predict(X_train) s_hat_test = knn.predict(X_test) print('The MSE for the training data is:', square_error(s_train, s_hat_train)) print('The MSE for the test data is:', square_error(s_test, s_hat_test)) # + max_k = 25 n_neighbors_list = np.arange(max_k)+1 MSE_train = [] MSE_test = [] for n_neighbors in n_neighbors_list: knn = neighbors.KNeighborsRegressor(n_neighbors) knn.fit(X_train, s_train) s_hat_train = knn.predict(X_train) s_hat_test = knn.predict(X_test) MSE_train.append(square_error(s_train, s_hat_train)) MSE_test.append(square_error(s_test, s_hat_test)) plt.plot(n_neighbors_list, MSE_train,'bo', label='Training square error') plt.plot(n_neighbors_list, MSE_test,'ro', label='Test square error') plt.xlabel('$k$') plt.axis('tight') plt.legend(loc='best') plt.show() # - # Although the above figures illustrate evolution of the training and test MSE for different selections of the number of neighbors, it is important to note that **this figure, and in particular the red points, cannot be used to select the value of such parameter**. Remember that it is only legal to use the test data to assess the final performance of the method, what includes also that any parameters inherent to the method should be adjusted using the train data only. # ## 7. 
Hyperparameter selection via cross-validation
#
# An inconvenience of applying the $k$-nn method is that the selection of $k$ influences the final error of the algorithm. In the previous experiments, we kept the value of $k$ that minimized the square error on the training set. However, we also noticed that the location of the minimum is not necessarily the same from the perspective of the test data. Ideally, we would like the designed regression model to work as well as possible on future unlabeled patterns that are not available during the training phase. This property is known as <i>generalization</i>. Fitting the training data is only pursued in the hope that we are also indirectly obtaining a model that generalizes well. In order to achieve this goal, there are some strategies that try to guarantee a correct generalization of the model. One such approach is known as <b>cross-validation</b>
#
# Since using the test labels during the training phase is not allowed (they should be kept aside to simulate the future application of the regression model on unseen patterns), we need to figure out some way to improve our estimation of the hyperparameter that requires only training data. Cross-validation allows us to do so via the following steps:
#
# - Split the training data into several (generally non-overlapping) subsets. If we use $M$ subsets, the method is referred to as $M$-fold cross-validation. If we consider each pattern a different subset, the method is usually referred to as leave-one-out (LOO) cross-validation.
# - Carry out the training of the system $M$ times. For each run, use a different partition as a <i>validation</i> set, and use the remaining partitions as the training set. Evaluate the performance for different choices of the hyperparameter (i.e., for different values of $k$ for the $k$-NN method).
# - Average the validation error over all partitions, and pick the hyperparameter that provided the minimum validation error.
# - Rerun the algorithm using all the training data, keeping the value of the parameter that came out of the cross-validation process.
#
# <img src="https://chrisjmccormick.files.wordpress.com/2013/07/10_fold_cv.png">

# **Exercise**: Use the `KFold` function from the `sklearn` library to validate parameter `k`. Use a 10-fold validation strategy. What is the best number of neighbors according to this strategy? What is the corresponding MSE averaged over the test data?

# +
from sklearn.model_selection import KFold

# Candidate values for the number of neighbors: 1 .. max_k.
max_k = 25
n_neighbors_list = np.arange(max_k) + 1

# Accumulator for the validation MSE of each candidate k.
MSE_val = np.zeros((max_k,))

nfolds = 10
kf = KFold(n_splits=nfolds)

# For every fold, fit a k-NN regressor for each candidate k on the training
# part and accumulate its MSE on the held-out validation part.
for train, val in kf.split(X_train):
    for idx, n_neighbors in enumerate(n_neighbors_list):
        knn = neighbors.KNeighborsRegressor(n_neighbors)
        knn.fit(X_train[train, :], s_train[train])
        s_hat_val = knn.predict(X_train[val, :])
        MSE_val[idx] += square_error(s_train[val], s_hat_val)

# Average over the folds. BUG FIX: divide by nfolds instead of a
# hard-coded 10, so the averaging stays correct if the number of folds
# above is changed.
MSE_val = MSE_val / nfolds

# +1 because index 0 corresponds to k = 1.
selected_k = np.argmin(MSE_val) + 1

# MSE_train and MSE_test were computed per k in the previous cell; the test
# curve point is shown for illustration only, never for selection.
plt.plot(n_neighbors_list, MSE_train, 'bo', label='Training square error')
plt.plot(n_neighbors_list, MSE_val, 'ro', label='Validation square error')
plt.plot(selected_k, MSE_test[selected_k - 1], 'gs', label='Test square error')
plt.xlabel('$k$')
plt.axis('tight')
plt.legend(loc='best')
plt.show()

print('Cross-validation selected the following value for the number of neighbors:', selected_k)
print('Test MSE:', MSE_test[selected_k - 1])
# -

# ## 8. A parametric regression method: Least squares regression
#
# ### 8.1. Problem definition
#
# - The goal is to learn a (possibly non-linear) regression model from a set of $L$ labeled points, $\{{\bf x}^{(l)},s^{(l)}\}_{l=1}^L$.
#
# - We assume a parametric function of the form:
#
# $${\hat s}({\bf x}) = f({\bf x}) = w_0 z_0({\bf x}) + w_1 z_1({\bf x}) + \dots w_M z_M({\bf x})$$
#
# where $z_i({\bf x})$ are particular transformations of the input vector variables. 
# Some examples are: # # - If ${\bf z} = {\bf x}$, the model is just a linear combination of the input variables # - If ${\bf z} = \left[\begin{array}{c}1\\{\bf x}\end{array}\right]$, we have again a linear combination with the inclusion of a constant term. # # # - For unidimensional input $x$, ${\bf z} = [1, x, x^2, \dots,x^{M}]^\top$ would implement a polynomia of degree $M$. # # # - Note that the variables of ${\bf z}$ could also be computed combining different variables of ${\bf x}$. E.g., if ${\bf x} = [x_1,x_2]^\top$, a degree-two polynomia would be implemented with # $${\bf z} = \left[\begin{array}{c}1\\x_1\\x_2\\x_1^2\\x_2^2\\x_1 x_2\end{array}\right]$$ # - The above expression does not assume a polynomial model. For instance, we could consider ${\bf z} = [\log(x_1),\log(x_2)]$ # Least squares (LS) regression finds the coefficients of the model with the aim of minimizing the square of the residuals. If we define ${\bf w} = [w_0,w_1,\dots,w_M]^\top$, the LS solution would be defined as # # \begin{equation}{\bf w}_{LS} = \arg \min_{\bf w} \sum_{l=1}^L [e^{(l)}]^2 = \arg \min_{\bf w} \sum_{l=1}^L \left[s^{(l)} - {\hat s}^{(l)} \right]^2 \end{equation} # ### 8.2. 
Vector Notation # # In order to solve the LS problem it is convenient to define the following vectors and matrices: # # - We can group together all available target values to form the following vector # # $${\bf s} = \left[s^{(1)}, s^{(2)}, \dots, s^{(L)} \right]^\top$$ # # # # - The estimation of the model for a single input vector ${\bf z}^{(l)}$ (which would be computed from ${\bf x}^{(l)}$), can be expressed as the following inner product # # $${\hat s}^{(l)} = {{\bf z}^{(l)}}^\top {\bf w}$$ # - If we now group all input vectors into a matrix ${\bf Z}$, so that each row of ${\bf Z}$ contains the transpose of the corresponding ${\bf z}^{(l)}$, we can express # # $$\hat{{\bf s}} = \left[{\hat s}^{1}, {\hat s}^{2}, \dots, {\hat s}^{(L)} \right]^\top = {\bf Z} {\bf w}, \;\;\;\; \text{with} \;\; {\bf Z} = \left[\begin{array}{cccc}z_0^{(1)}&z_1^{(1)}&\cdots&z_M^{(1)} \\ z_0^{(2)}&z_1^{(2)}&\cdots&z_M^{(2)} \\ \vdots & \vdots & \ddots & \vdots \\ z_0^{(L)}&z_1^{(L)}&\cdots&z_M^{(L)}\end{array}\right]$$ # ### 8.3. Least-squares solution # # - Using the previous notation, the cost minimized by the LS model can be expressed as # # $$C({\bf w}) = \sum_{l=1}^L \left[s^{(l)} - {\hat s}^{(l)} \right]^2 = \|{\bf s} - {\hat{\bf s}}\|^2 = \|{\bf s} - {\bf Z}{\bf w}\|^2$$ # # - Since the above expression depends quadratically on ${\bf w}$ and is non-negative, we know that there is only one point where the derivative of $C({\bf w})$ becomes zero, and that point is necessarily a minimum of the cost # # $$\nabla_{\bf w} \|{\bf s} - {\bf Z}{\bf w}\|^2\Bigg|_{{\bf w} = {\bf w}_{LS}} = {\bf 0}$$ # <b>Exercise:</b> # Solve the previous problem to show that # $${\bf w}_{LS} = \left( {\bf Z}^\top{\bf Z} \right)^{-1} {\bf Z}^\top{\bf s}$$ # The next fragment of code adjusts polynomia of increasing order to randomly generated training data. 
# + n_points = 20 n_grid = 200 frec = 3 std_n = 0.2 max_degree = 20 colors = 'brgcmyk' #Location of the training points X_tr = (3 * np.random.random((n_points,1)) - 0.5) #Labels are obtained from a sinusoidal function, and contaminated by noise S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1) #Equally spaced points in the X-axis X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid) #We start by building the Z matrix Z = [] for el in X_tr.tolist(): Z.append([el[0]**k for k in range(max_degree+1)]) Z = np.matrix(Z) Z_grid = [] for el in X_grid.tolist(): Z_grid.append([el**k for k in range(max_degree+1)]) Z_grid = np.matrix(Z_grid) plt.plot(X_tr,S_tr,'b.') for k in [1, 2, n_points]: # range(max_degree+1): Z_iter = Z[:,:k+1] # Least square solution #w_LS = (np.linalg.inv(Z_iter.T.dot(Z_iter))).dot(Z_iter.T).dot(S_tr) # Least squares solution, with leass numerical errors w_LS, resid, rank, s = np.linalg.lstsq(Z_iter, S_tr) #estimates at all grid points fout = Z_grid[:,:k+1].dot(w_LS) fout = np.array(fout).flatten() plt.plot(X_grid,fout,colors[k%len(colors)]+'-',label='Degree '+str(k)) plt.legend(loc='best') plt.ylim(1.2*np.min(S_tr), 1.2*np.max(S_tr)) plt.show() # - # It may seem that increasing the degree of the polynomia is always beneficial, as we can implement a more expressive function. A polynomia of degree $M$ would include all polynomia of lower degrees as particular cases. However, if we increase the number of parameters without control, the polynomia would eventually get expressive enough to adjust any given set of training points to arbitrary precision, what does not necessarily mean that the solution is obtaining a model that can be extrapolated to new data. # # The conclusions is that, when adjusting a parametric model using least squares, we need to validate the model, for which we can use the cross-validation techniques we introudece in Section 7. 
In this contexts, validating the model implies: # - Validating the kind of model that will be used, e.g., linear, polynomial, logarithmic, etc ... # - Validating any additional parameters that the nodel may have, e.g., if selecting a polynomial model, the degree of the polynomia. # # The code below shows the performance of different models. However, no validation process is considered, so the reported test MSEs could not be used as criteria to select the best model. # + # Linear model with no bias w_LS, resid, rank, s = np.linalg.lstsq(X_train, s_train) s_hat_test = X_test.dot(w_LS) print('Test MSE for linear model without bias:', square_error(s_test, s_hat_test)) # Linear model with no bias Z_train = np.hstack((np.ones((X_train.shape[0],1)), X_train)) Z_test = np.hstack((np.ones((X_test.shape[0],1)), X_test)) w_LS, resid, rank, s = np.linalg.lstsq(Z_train, s_train) s_hat_test = Z_test.dot(w_LS) print('Test MSE for linear model with bias:', square_error(s_test, s_hat_test)) # Polynomial model degree 2 Z_train = np.hstack((np.ones((X_train.shape[0],1)), X_train, X_train**2)) Z_test = np.hstack((np.ones((X_test.shape[0],1)), X_test, X_test**2)) w_LS, resid, rank, s = np.linalg.lstsq(Z_train, s_train) s_hat_test = Z_test.dot(w_LS) print('Test MSE for polynomial model (order 2):', square_error(s_test, s_hat_test)) # -
R7.Regression_Overview/regression_overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/elliotgunn/DS-Unit-2-Kaggle-Challenge/blob/master/assignment_kaggle_challenge_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="7IXUfiQ2UKj6" colab_type="text" # Lambda School Data Science, Unit 2: Predictive Modeling # # # Kaggle Challenge, Module 2 # # ## Assignment # - [ ] Read [“Adopting a Hypothesis-Driven Workflow”](https://outline.com/5S5tsB), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge. # - [ ] Continue to participate in our Kaggle challenge. # - [ ] Try Ordinal Encoding. # - [ ] Try a Random Forest Classifier. # - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.) # - [ ] Commit your notebook to your fork of the GitHub repo. # # ## Stretch Goals # # ### Doing # - [ ] Add your own stretch goal(s) ! # - [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection. # - [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). # - [ ] Get and plot your feature importances. # - [ ] Make visualizations and share on Slack. 
# # ### Reading # # Top recommendations in _**bold italic:**_ # # #### Decision Trees # - A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_ # - [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2) # - [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/) # - [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html) # - [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) # # #### Random Forests # - [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods # - [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html) # - _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_ # # #### Categorical encoding for trees # - [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/) # - [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/) # - _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_ # - _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean 
encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_ # - [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study) # - [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html) # # #### Imposter Syndrome # - [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/) # - [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068) # - ["I am not a real data scientist"](https://brohrer.github.io/imposter_syndrome.html) # - _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_ # # # # # # + id="o9eSnDYhUGD7" colab_type="code" colab={} # If you're in Colab... import os, sys in_colab = 'google.colab' in sys.modules if in_colab: # Install required python packages: # category_encoders, version >= 2.0 # pandas-profiling, version >= 2.0 # plotly, version >= 4.0 # !pip install --upgrade category_encoders pandas-profiling plotly # Pull files from Github repo os.chdir('/content') # !git init . 
# !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git # !git pull origin master # Change into directory for module os.chdir('module2') # + id="RcLW7Ny4yxS2" colab_type="code" colab={} import pandas as pd import numpy as np from sklearn.model_selection import train_test_split # Merge train_features.csv & train_labels.csv train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'), pd.read_csv('../data/tanzania/train_labels.csv')) # Read test_features.csv & sample_submission.csv test = pd.read_csv('../data/tanzania/test_features.csv') sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv') # Split train into train & val train, val = train_test_split(train, train_size=0.80, test_size=0.20, stratify=train['status_group'], random_state=42) def clean(X): # make a copy before modifying X = X.copy() # duplicates, near duplicates, missing values X = X.drop(columns=['payment', 'quantity_group', 'source_type', 'waterpoint_type', 'extraction_type', 'extraction_type_class', 'management_group', 'water_quality', 'num_private']) # About 3% of the time, latitude has small values near zero, # outside Tanzania, so we'll treat these values like zero. 
X['latitude'] = X['latitude'].replace(-2e-08, 0) # some columns have zeros and shouldn't, they are like null values # replace those zeros with nulls, impute missing values later cols_with_zeros = ['longitude', 'latitude', 'population', 'construction_year', 'gps_height'] for col in cols_with_zeros: X[col] = X[col].replace(0, np.nan) # extract year, month, day from date_recorded X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True) X['year_recorded'] = X['date_recorded'].dt.year X['month_recorded'] = X['date_recorded'].dt.month X['day_recorded'] = X['date_recorded'].dt.day # delete date_recorded X = X.drop(columns='date_recorded') # age of pump at time of inspection X['pump_age'] = X['year_recorded'] - X['construction_year'] # there are five values with negatives, so we will return those as a np.nan X['pump_age'] = X['pump_age'].replace([-5, -4, -3, -2, -1, -7], np.nan) # remember to deal with missing years X['years_missing'] = X['pump_age'].isnull() # drop recorded_by (never varies) and id (always varies, random) X = X.drop(columns=['recorded_by', 'id']) # return the clean df return X train = clean(train) val = clean(val) test = clean(test) # + id="QJBD4ruICm1m" colab_type="code" colab={} import pandas as pd import numpy as np from sklearn.model_selection import train_test_split # Merge train_features.csv & train_labels.csv train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'), pd.read_csv('../data/tanzania/train_labels.csv')) # Read test_features.csv & sample_submission.csv test = pd.read_csv('../data/tanzania/test_features.csv') sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv') # Split train into train & val train, val = train_test_split(train, train_size=0.80, test_size=0.20, stratify=train['status_group'], random_state=42) def clean(X): # make a copy before modifying X = X.copy() # duplicates, near duplicates, missing values X = X.drop(columns=['payment', 'quantity_group']) # About 3% of the 
time, latitude has small values near zero, # outside Tanzania, so we'll treat these values like zero. X['latitude'] = X['latitude'].replace(-2e-08, 0) # some columns have zeros and shouldn't, they are like null values # replace those zeros with nulls, impute missing values later cols_with_zeros = ['longitude', 'latitude', 'population', 'construction_year', 'gps_height'] for col in cols_with_zeros: X[col] = X[col].replace(0, np.nan) # create a missing vaules column X[col+'_missing'] = X[col].isnull() # extract year, month, day from date_recorded X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True) X['year_recorded'] = X['date_recorded'].dt.year X['month_recorded'] = X['date_recorded'].dt.month X['day_recorded'] = X['date_recorded'].dt.day # delete date_recorded X = X.drop(columns='date_recorded') # age of pump at time of inspection X['pump_age'] = X['year_recorded'] - X['construction_year'] # there are five values with negatives, so we will return those as a np.nan X['pump_age'] = X['pump_age'].replace([-5, -4, -3, -2, -1, -7], np.nan) # remember to deal with missing years X['years_missing'] = X['pump_age'].isnull() # drop recorded_by (never varies) and id (always varies, random) X = X.drop(columns=['recorded_by', 'id']) # return the clean df return X train = clean(train) val = clean(val) test = clean(test) # + id="GoLyFn9NshcW" colab_type="code" colab={} # The status_group column is the target target = 'status_group' # Get a dataframe with all train columns except the target train_features = train.drop(columns=[target]) # Get a list of the numeric features numeric_features = train_features.select_dtypes(include='number').columns.tolist() # Get a series with the cardinality of the nonnumeric features cardinality = train_features.select_dtypes(exclude='number').nunique() # Get a list of all categorical features with cardinality <= 50 categorical_features = cardinality[cardinality <= 50].index.tolist() # Combine the lists features = 
numeric_features + categorical_features # + id="YjbyUX-9uFgX" colab_type="code" colab={} # Arrange data into X features matrix and y target vector X_train = train[features] y_train = train[target] X_val = val[features] y_val = val[target] X_test = test[features] # + [markdown] id="9UvK7j8ysVC4" colab_type="text" # ## Random forest classifier with Ordinal encoding # + id="6UfZEUHusWyR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c03016f2-7cd6-438c-8f25-e96dd8004f51" import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline # already arranged X features matrix, y target vector # pipeline pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='mean'), RandomForestClassifier(n_estimators=500, random_state=42, n_jobs=-1, oob_score=True, min_samples_leaf = 1) ) # Fit on train, score on val pipeline.fit(X_train, y_train) print('Validation Accuracy', pipeline.score(X_val, y_val)) # + id="A_A9hPWHvYpn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="9a979c9d-5420-4a6b-94c6-187705d3a3d3" # to see how many features were added through ordinal encoding encoder = pipeline.named_steps['ordinalencoder'] encoded = encoder.transform(X_train) print(X_train.shape) print(encoded.shape) # + [markdown] id="fXwG8pbU3N3H" colab_type="text" # ### parameter tuning # # n_estimators: higher trees give better performance but make code slower. choose as high value as processor can handle as makes predictions stronger and more stable # # min_sample_leaf: leaf is the end node of a decision tree. a smaller leaf makes the model more prone to capturing noise in train data. try multiple leaf sizes for the optimum size # # n_jobs: tells the engine how many processors it can use. -1 means no restriction. # # oob_score: cross validation method, much faster than leave one out validation technique. 
# # max_features: Max_feature is the number of features to consider each time
# # to make the split decision. Let us say the dimension of your data is 50 and
# # the max_feature is 10; each time you need to find the split, you randomly
# # select 10 features and use them to decide which one of the 10 is the best
# # feature to use.

# + Tune min_samples_leaf: refit the ordinal-encoded forest for several leaf sizes.
sample_leaf_options = [1, 5, 10]
for leaf_size in sample_leaf_options:
    # pipeline: ordinal-encode categoricals, mean-impute, then fit the forest
    pipeline = make_pipeline(
        ce.OrdinalEncoder(),
        SimpleImputer(strategy='mean'),
        RandomForestClassifier(n_estimators=200, random_state=42, n_jobs=-1,
                               min_samples_leaf=leaf_size)
    )
    # Fit on train, score on val.
    # FIX: pipeline.score() reports accuracy, not AUC-ROC; the printed label
    # was wrong and has been corrected.
    pipeline.fit(X_train, y_train)
    print(f"Accuracy, {leaf_size}: ", pipeline.score(X_val, y_val))

# + Tune max_features the same way.
import numpy as np

max_features_options = [0.1, 0.2, 0.3]
for num_features in max_features_options:
    pipeline = make_pipeline(
        ce.OrdinalEncoder(),
        SimpleImputer(strategy='mean'),
        RandomForestClassifier(n_estimators=200, random_state=42, n_jobs=-1,
                               max_features=num_features, min_samples_leaf=1)
    )
    # Fit on train, score on val (accuracy — see note above).
    pipeline.fit(X_train, y_train)
    print(f"Accuracy, {num_features}: ", pipeline.score(X_val, y_val))

# + [markdown]
# ## Random Forest Classifier with one hot encoder

# +
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline

pipeline = make_pipeline(
    # FIX: use_cat_names expects a bool; the string 'True' only worked by
    # being truthy.
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=500, random_state=42, n_jobs=-1,
                           oob_score=True, min_samples_leaf=1)
)
# n_estimators = number of trees in the forest
# n_jobs=-1 asks for all available cores

# fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy: ', pipeline.score(X_val, y_val))

# + To see how many features were added through one hot encoding.
encoder = pipeline.named_steps['onehotencoder']
# FIX: the transformed frame was assigned to `encoder` (clobbering the
# fitted encoder) while the stale `encoded` from the ordinal-encoding cell
# was printed; assign to `encoded` so the printed shape is the one-hot one.
encoded = encoder.transform(X_train)
print(X_train.shape)
print(encoded.shape)

# + Use the one-hot-encoded model for the test-set predictions.
y_pred = pipeline.predict(X_test)

# + [markdown]
# ## submit

# + Write submission csv file
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('submission-04.csv', index=False)

# +
#from google.colab import drive
#drive.mount('/content/drive')
# #%env KAGGLE_CONFIG_DIR=/content/drive/My Drive/

# +
# !kaggle competitions submit -c ds6-predictive-modeling-challenge -f submission-04.csv -m "fourth"
assignment_kaggle_challenge_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="DfbgvYcB3ym9" colab_type="text" # # Downloading Colournet dataset # + id="bJqrEY6A3jI4" colab_type="code" outputId="be65c343-7194-4cff-e046-052b7caaf40d" colab={"base_uri": "https://localhost:8080/", "height": 122} from google.colab import drive drive.mount('/content/gdrive') # + id="kE2rbdjC4FxS" colab_type="code" outputId="18184a96-cc92-454f-848f-d637a90d2608" colab={"base_uri": "https://localhost:8080/", "height": 34} # cp forest.zip gdrive/'My Drive'/Data/ # + id="OeHk5cQzHdgJ" colab_type="code" outputId="71eaf97b-5620-4a65-96ed-b71f94d4e62f" colab={"base_uri": "https://localhost:8080/", "height": 204} # !wget http://cvcl.mit.edu/scenedatabase/forest.zip # + id="5n4aQu8_4KCO" colab_type="code" outputId="e9318075-8095-4538-e0b0-7fc03d9f72f8" colab={"base_uri": "https://localhost:8080/", "height": 340} !7z x *.zip -o* # + [markdown] id="YnDz1Ofi4cXD" colab_type="text" # # Preparing Libraries # + id="6c94gYF44ZEp" colab_type="code" outputId="6f14db27-bdc9-417e-8587-ca3c01a73be7" colab={"base_uri": "https://localhost:8080/", "height": 34} import pandas as pd import matplotlib.pyplot as plt import numpy as np import random from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') # %matplotlib inline import cv2 from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img import tensorflow as tf import keras from keras.callbacks import EarlyStopping from keras.layers import * from keras.models import Model, Sequential, load_model from keras.regularizers import * from keras.activations import * from keras.optimizers import Adam from keras.utils import Sequence import math from PIL import Image from scipy import ndimage import skimage.io 
as io from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb from skimage.transform import rescale, resize import os from keras.applications.inception_resnet_v2 import InceptionResNetV2 from keras.applications.inception_resnet_v2 import preprocess_input # + [markdown] id="t1yPSYs84jRn" colab_type="text" # # Custom Functions # + id="xaRtlCzt4irq" colab_type="code" colab={} def show(img): plt.imshow(img, cmap = 'gray') def show_predicted(l, ab): img = np.concatenate((l, ab), axis = 2) plt.imshow(lab2rgb(img)) plt.pause(0.001) # + [markdown] id="B_IyaluT4ntd" colab_type="text" # # Data Loading # + id="Vrh-DIaC1wN1" colab_type="code" colab={} DS_PATH1 = '/content/forest/forest/' # ADD path/to/dataset # Get images train_data = [] for filename in os.listdir(DS_PATH1): if os.path.isfile(os.path.join(DS_PATH1, filename)): train_data.append(os.path.join(DS_PATH1, filename)) # + id="JfF9gnBB44_s" colab_type="code" colab={} n = len(train_data) X_train_main = np.random.choice(train_data, int(n*0.9)) X_val_main = np.array([x for x in train_data if x not in X_train_main]) # + id="8Lnv1hSo2M56" colab_type="code" outputId="f4d57c20-75bb-4cea-decd-041c54eebed9" colab={"base_uri": "https://localhost:8080/", "height": 34} print(n) # + [markdown] id="a8Mz-sE08nhc" colab_type="text" # # Model # + id="jm3JWmFv4_eK" colab_type="code" colab={} def build_generator(): """ Returns generator as Keras model. 
""" g_input = Input(shape=(256, 256, 1)) #128 x 128 conv1 = Conv2D(64, (3, 3), padding='same', strides=2)(g_input) conv1 = BatchNormalization()(conv1) conv1 = Activation('relu')(conv1) conv2 = Conv2D(128, (3, 3), padding='same', strides=1)(conv1) conv2 = BatchNormalization()(conv2) conv2 = Activation('relu')(conv2) #64 x 64 conv3 = Conv2D(128, (3, 3), padding='same', strides=2)(conv2) conv3 = BatchNormalization()(conv3) conv3 = Activation('relu')(conv3) conv4 = Conv2D(256, (3, 3), padding='same', strides=1)(conv3) conv4 = BatchNormalization()(conv4) conv4 = Activation('relu')(conv4) #32 x 32 conv5 = Conv2D(512, (3, 3), padding='same', strides=2)(conv4) conv5 = BatchNormalization()(conv5) conv5 = Activation('relu')(conv5) #64 x 64 conv6 = UpSampling2D(size=(2, 2))(conv5) conv6 = Conv2D(256, (3, 3), padding='same')(conv6) conv6 = BatchNormalization()(conv6) conv6 = Activation('relu')(conv6) conv6 = Concatenate(axis=-1)([conv6,conv4]) conv7 = Conv2D(256, (3, 3), padding='same')(conv6) conv7 = BatchNormalization()(conv7) conv7 = Activation('relu')(conv7) #128 x 128 up2 = UpSampling2D(size=(2, 2))(conv7) conv8 = Conv2D(128, (3,3), padding='same')(up2) conv8 = BatchNormalization()(conv8) conv8 = Activation('relu')(conv8) conv8 = Concatenate(axis=-1)([conv8,conv2]) conv9 = Conv2D(128, (3, 3), padding='same')(conv8) conv9 = BatchNormalization()(conv9) conv9 = Activation('relu')(conv9) up3 = UpSampling2D(size=(2, 2))(conv9) conv10 = Conv2D(64, (3,3), padding='same')(up3) conv10 = BatchNormalization()(conv10) conv10 = Activation('relu')(conv10) conv11 = Conv2D(2, (3, 3), padding='same')(conv10) conv11 = Activation('tanh')(conv11) model = Model(inputs=g_input,outputs=conv11) return model # + id="2gNQe-mD1rxw" colab_type="code" colab={} def build_discriminator(): """ Returns discriminator as Keras model. 
""" model = Sequential() model.add(Conv2D(32, (3, 3), padding='same', input_shape=(256,256,2), strides=2)) model.add(LeakyReLU(.2)) model.add(AveragePooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), padding='same',strides=1)) model.add(LeakyReLU(.2)) model.add(Dropout(.25)) model.add(AveragePooling2D(pool_size=(2, 2))) model.add(Conv2D(128, (3, 3), padding='same',strides=1)) model.add(LeakyReLU(.2)) model.add(Dropout(.25)) model.add(AveragePooling2D(pool_size=(2, 2))) model.add(Conv2D(256, (3, 3), padding='same',strides=2)) model.add(LeakyReLU(.2)) model.add(Dropout(.25)) model.add(Flatten()) model.add(Dense(1)) model.add(Activation('sigmoid')) return model # + id="P7Qf64UHODMB" colab_type="code" colab={} import keras.backend as K def cus_acc(y_true, y_pred): x = K.cast(K.not_equal(y_pred,0),tf.float32) y = K.equal(x, y_true) return K.mean(y) # + [markdown] id="mxNktniX8g0_" colab_type="text" # ## Generator # + id="PRHIV0h35cFM" colab_type="code" outputId="c8047ba8-fd14-493d-ee0c-f87e0a7b196e" colab={"base_uri": "https://localhost:8080/", "height": 1000} generator = build_generator() generator.compile(loss='mse', optimizer=Adam(lr=.001)) generator.summary() # + [markdown] id="4u5CyT1y_5Yz" colab_type="text" # ## Discriminator # + id="eWiecRt42eXk" colab_type="code" outputId="2727ce68-debb-40d0-dc65-eaf816c63817" colab={"base_uri": "https://localhost:8080/", "height": 714} discriminator = build_discriminator() discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=.0001), metrics=['accuracy']) discriminator.summary() # + [markdown] id="-SKJhQmj_94Y" colab_type="text" # ## GAN # + id="R1MGb4O96VOx" colab_type="code" colab={} gan_input = Input(shape=(256,256,1)) img_color = generator(gan_input) discriminator.trainable = False real_or_fake = discriminator(img_color) gan = Model(gan_input,real_or_fake) gan.compile(loss='binary_crossentropy', optimizer=Adam(lr=.001,decay=1e-5), metrics = ['accuracy']) # + id="wl8dEiw38a3B" colab_type="code" 
outputId="ce502fa5-f0b6-4698-b4f9-499a30439d24" colab={"base_uri": "https://localhost:8080/", "height": 238} gan.summary() # + [markdown] id="DxVXPDug4z_g" colab_type="text" # # Data Generator # + id="ijgU8NS6-npJ" colab_type="code" colab={} class dis_gen(Sequence): def __init__(self, X, batch_size = 32): self.img_loc = X self.batch_size = batch_size self.size = len(self.img_loc) def __getitem__(self, idx): start = idx*self.batch_size end = min((idx+1)*self.batch_size, self.size) batch = np.empty(((end-start),256,256,3)) j=0 ''' exception handling while reading image ''' for i in self.img_loc[start:end]: try: batch[j,...] = cv2.imread(i)[...,::-1] # RBG image except: pass j+=1 batch /= 255.0 gray_batch = gray2rgb(rgb2gray(batch)) lab_batch = rgb2lab(batch) X_train_L = lab_batch[...,0] X_train_L = X_train_L[...,None] X_train_AB = lab_batch[:,:,:,1:]/128.0 generated_images = generator.predict(X_train_L) X_train = np.concatenate((X_train_AB, generated_images)) n = len(X_train_L) y_train = np.array([[1]] * n + [[0]] * n) rand_arr = np.arange(len(X_train)) np.random.shuffle(rand_arr) X_train = X_train[rand_arr] y_train = y_train[rand_arr] return (X_train, y_train) def __len__(self): return math.ceil(self.size / self.batch_size) # + id="k7WwY5gkS5x9" colab_type="code" colab={} class gan_gen(Sequence): def __init__(self, X, batch_size = 16): self.img_loc = X self.batch_size = batch_size self.size = len(self.img_loc) def __getitem__(self, idx): start = idx*self.batch_size end = min((idx+1)*self.batch_size, self.size) batch = np.empty(((end-start),256,256,3)) j=0 ''' exception handling while reading image ''' for i in self.img_loc[start:end]: try: batch[j,...] 
= cv2.imread(i)[...,::-1] # RBG image except: pass j+=1 batch /= 255.0 gray_batch = gray2rgb(rgb2gray(batch)) lab_batch = rgb2lab(batch) X_train_L = lab_batch[...,0] X_train_L = X_train_L[...,None] n = len(X_train_L) y_train = np.ones([n,1]) return (X_train_L, y_train) def __len__(self): return math.ceil(self.size / self.batch_size) # + [markdown] id="oon7eSgv8nYx" colab_type="text" # # Train # + id="dy8q3pts8omC" colab_type="code" colab={} batch_size = 32 def train( X_train, X_test, epochs): acc_dis = 0 n = len(X_train) for e in range(epochs): print('.......Evaluating discriminator.......') disgen = dis_gen(X_train, batch_size) valgen = dis_gen(X_test, batch_size) metric = discriminator.evaluate_generator(valgen) acc_dis = metric[1] print('Accuracy : %f' %(metric[1])) print('...........training discriminator.........') if(e%3==2): noise = np.random.rand(n,256,256,2) * 2 -1 discriminator.fit(noise, np.zeros([n,1]), 32, epochs=1) while(acc_dis <= 0.89): discriminator.fit_generator(disgen, epochs=1) metric = discriminator.evaluate_generator(valgen) acc_dis = metric[1] print('Accuracy : %f' %(metric[1])) print('............training gan............') #while(acc_dis>0.81): gan.fit_generator(gan_gen(X_train), epochs=1) metric = discriminator.evaluate_generator(valgen) test() acc_dis = metric[1] print(acc_dis) #test() #test() print(e + 1,"batches done") # + id="3HbUpXiOlMYt" colab_type="code" colab={} dis_gen(X_train_main).__getitem__(0) # + id="Gp1h9qoMb21b" colab_type="code" colab={} train(X_train_main, X_val_main, 25) # + [markdown] id="xBZnI86XVc1N" colab_type="text" # # Saving and loading weights # + id="tPmrDymwVk1a" colab_type="code" outputId="5c12e5e6-b158-4d53-bf36-fd018f0c88ea" colab={"base_uri": "https://localhost:8080/", "height": 34} # cd ~/../content/gdrive/'My Drive'/Data/ # + id="irS-aSNXwx4p" colab_type="code" colab={} gan.save('gan.h5') # + id="KwC4ug9eVuf0" colab_type="code" colab={} discriminator.save_weights('dis_weights1_1280.h5') # + 
id="Yfmc0K9MV47h" colab_type="code" colab={} generator.save_weights('gen_weights1_1280.h5') # + id="3zY7ETjHV-25" colab_type="code" colab={} gan.save_weights('gan_weights1_1280.h5') # + id="yFnHtVHSWEeG" colab_type="code" outputId="6fae5ea1-7bde-4972-b13b-374ec5bf53d0" colab={"base_uri": "https://localhost:8080/", "height": 34} # cd ~/../content/ # + [markdown] id="2aLP9LKpACzD" colab_type="text" # # Colourization # + id="LEpZFLVzAEtm" colab_type="code" colab={} original = plt.imread(X_train_main[5])/255 gray = gray2rgb(rgb2gray(original)) # + id="AmmsH1I6AI1M" colab_type="code" colab={} show(original) # + id="ojys0xS0AQ2w" colab_type="code" colab={} lab = rgb2lab(gray) lab2 = rgb2lab(original) # + id="oYUqB5YAAVxC" colab_type="code" colab={} predicted_ab = generator.predict((lab2[...,0]).reshape((1,256,256,1))) ab = predicted_ab.reshape(256,256,2) show_predicted((lab2[...,0]).reshape((256,256,1)),ab*128) # + id="2KKmn1hzeEJu" colab_type="code" colab={} def test(): original = plt.imread(X_train_main[3])/255 gray = gray2rgb(rgb2gray(original)) lab = rgb2lab(gray) lab2 = rgb2lab(original) predicted_ab = generator.predict((lab2[...,0]).reshape((1,256,256,1))) ab = predicted_ab.reshape(256,256,2) show_predicted((lab2[...,0]).reshape((256,256,1)),ab*128) # + id="Cxq5pyD19dLR" colab_type="code" colab={} test()
GAN_colourizer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + nbdev: this notebook builds the `catalog` module.
# default_exp catalog
# -

# # module name here
#
# > API details.

# +
#hide
from nbdev.showdoc import *

# +
#export
from kedro.config import ConfigLoader
from kedro.io import DataCatalog
# -

#export
# Build the test-data catalog from every catalog*.yaml found under conf/base
# (both at the top level and one directory down).
conf_loader = ConfigLoader("conf/base")
conf_test_data_catalog = conf_loader.get("catalog*.yaml", "catalog*/*.yaml")
test_data_catalog = DataCatalog.from_config(conf_test_data_catalog)

# The following datasets are available to test this package
test_data_catalog.list()

test_data_catalog.load("genetic_file_common_folder").full_file_path
catalog.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JuliusCaezarEugenio/OOP-1-1/blob/main/OOP_Concepts_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown]
# ##Classes with Multiple Objects

# +
class Birds:
    """A bird identified by its name, with two demo print behaviours."""

    def __init__(self, birds_name):
        # Display name used by both print helpers below.
        self.birds_name = birds_name

    def flying_birds(self):
        """Print a line describing this bird as a flyer."""
        print(f"{self.birds_name} flies above the sky")

    def non_flying_birds(self):
        """Print a line describing this bird as Australia's national bird."""
        print(f"{self.birds_name} is the national bird of Australia")


# Demo: three separate objects of the same class.
vulture = Birds("Griffon Vulture")
crane = Birds("Common Crane")
emu = Birds("Emu")
vulture.flying_birds()
crane.flying_birds()
emu.non_flying_birds()

# + [markdown]
# ##Encapsulation (mangling with double underscore)

# +
class foo:
    """Holds two private numbers and exposes only their sum."""

    def __init__(self, a, b):
        # Double-underscore names are mangled to _foo__a / _foo__b.
        self.__a = a
        self.__b = b

    def add(self):
        """Return the sum of the two private attributes."""
        return self.__a + self.__b


object_foo = foo(3, 4)
object_foo.add()

# +
# Example 2
class Counter:
    """A simple counter whose current value is private (name-mangled)."""

    def __init__(self):
        self.__current = 0

    def increment(self):
        """Advance the counter by one."""
        self.__current += 1

    def value(self):
        """Return the current count."""
        return self.__current

    def reset(self):
        """Set the counter back to zero."""
        self.__current = 0


# Assigning number.__current from module scope does NOT touch the mangled
# attribute, and the fresh Counter created right after starts from zero
# anyway, so three increments yield 3.
number = Counter()
number.__current = 1
number = Counter()
number.increment()
number.increment()
number.increment()
print(number.value())

# + [markdown]
# ##Inheritance

# +
class Person:
    """A person with a first name and a surname."""

    def __init__(self, Firstname, Surname):
        self.Firstname = Firstname
        self.Surname = Surname

    def fullname(self):
        """Print first name and surname separated by a space."""
        print(self.Firstname, self.Surname)


person = Person("<NAME>", "Eugenio")
person.fullname()


class Student(Person):
    # Inherits everything from Person unchanged.
    pass


person2 = Student("<NAME>", "Eugenio")
person2.fullname()

# + [markdown]
# ##Polymorphism

# +
class RegularPolygon:
    """Base class: a regular polygon described only by its side length."""

    def __init__(self, side):
        self.side = side


class Square(RegularPolygon):
    def area(self):
        """Area of a square: side squared."""
        return self.side * self.side


class EquilateralTriangle(RegularPolygon):
    def area(self):
        """Approximate area of an equilateral triangle: side^2 * 0.433."""
        return self.side * self.side * 0.433


obj1 = Square(4)
print(obj1.area())
obj2 = EquilateralTriangle(3)
print(obj2.area())

# + [markdown]
# Application 1

# + [markdown]
# 1. Create a Python program that displays the name of three students
#    (Student 1, Student 2, and Student 3) and their term grades
# 2. Create a class name Person and attributes - std1, std2, std2, pre, mid, fin
# 3. Compute the average of each term grade using Grade() method
# 4. Information about student's grades must be hidden from others

# +
class Person:
    """Three students' names plus one set of term grades, all kept private
    via double-underscore name mangling (requirement 4)."""

    def __init__(self, std1, std2, std3, pre, mid, fin):
        self.__std1 = std1
        self.__std2 = std2
        self.__std3 = std3
        self.__pre = pre
        self.__mid = mid
        self.__fin = fin

    def Student1(self):
        """Print the first student's name and term grades."""
        print("\n" + self.__std1)
        print("Prelims Grade:", self.__pre)
        print("Mid Terms Grade:", self.__mid)
        print("Final Grade:", self.__fin)

    def Student2(self):
        """Print the second student's name and term grades."""
        print("\n" + self.__std2)
        print("Prelims Grade:", self.__pre)
        print("Mid Terms Grade:", self.__mid)
        print("Final Grade: ", self.__fin)

    def Student3(self):
        """Print the third student's name and term grades."""
        print("\n" + self.__std3)
        print("Prelims Grade:", self.__pre)
        print("Mid Terms Grade:", self.__mid)
        print("Final Grade:", self.__fin)

    def Grade1(self):
        """Average of the three term grades (first student)."""
        return (self.__pre + self.__mid + self.__fin) / 3

    def Grade2(self):
        """Average of the three term grades (second student)."""
        return (self.__pre + self.__mid + self.__fin) / 3

    def Grade3(self):
        """Average of the three term grades (third student)."""
        return (self.__pre + self.__mid + self.__fin) / 3


Estudyante1 = Person("Student 1", "-", "-", 70, 75, 80)
Estudyante1.Student1()
print(Estudyante1.Grade1())

Estudyante2 = Person("-", "Student 2", "-", 80, 85, 90)
Estudyante2.Student2()
print(Estudyante2.Grade2())

Estudyante3 = Person("-", "-", "Student3", 90, 95, 100)
Estudyante3.Student3()
print(Estudyante3.Grade3())
OOP_Concepts_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Problem 1
# Handle the exception thrown by the code below by using try and except blocks.

# This cell intentionally raises TypeError (str ** int is unsupported);
# the handled version follows.
for i in ['a', 'b', 'c']:
    print(i**2)

try:
    for i in ['a', 'b', 'c']:
        print(i**2)
except TypeError:
    print('It is a type error')

# +
# Problem 2
# Handle the exception thrown by the code below by using try and except blocks.
# Then use a finally block to print 'All Done.'

# Intentional unhandled ZeroDivisionError demo:
x = 5
y = 0
z = x/y
# -

try:
    x = 5
    y = 2
    z = x/y
except ZeroDivisionError:
    print('cannot divide by zero ')
finally:
    # FIX: this was the bare expression ('All done'), which evaluates the
    # string and throws it away without printing, defeating the exercise.
    print('All done')

# +
# Problem 3
# Write a function that asks for an integer and prints the square of it.
# Use a while loop with a try, except, else block to account for incorrect inputs.

def ask():
    """Repeatedly prompt for an integer and print its square.

    NOTE(review): there is no break in the else branch, so this first
    attempt keeps prompting forever even after a valid answer; ask_2
    below adds the missing break.
    """
    while True:
        try:
            n = int(input('Enter a number : '))
            print('Square of number is : ', n**2)
        except ValueError:
            print('wrong value\n Please try again \n')
            continue
        else:
            print('good job')
# -

ask()


def ask_2():
    """Like ask(), but stops after the first valid integer."""
    while True:
        try:
            n = int(input('Enter a number : '))
            print('Square of number is : ', n**2)
        except ValueError:
            print('wrong value\n Please try again \n')
            continue
        else:
            print('good job')
            break


ask_2()
Jupyter/Error_Except_HW.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The S414 is a slotted, natural-laminar-flow airfoil. It is designed to have
# a laminar bucket between CL=.1 and CL=.65, but also have the ability to
# achieve high angles of attack and lift coefficients[1]. Below, the wind
# tunnel results and the CFD results from the literature are compared to the
# results predicted by viiflow.
# For OVERFLOW, the transition model in [2] was used.
#
# **Important:** The use of this airfoil is restricted [3]. The geometry used
# here is *not* the geometry used in the other CFD analysis methods nor in the
# wind tunnel experiments. It has been digitized and smoothed, with
# permission, from the reference.
# As such, this is not a thorough comparison of CFD results and wind tunnel
# data, but merely an example of Multi-Element airfoil analysis. The good
# match should therefore be taken with a tbsp. of salt.
#
# All CFD methods use a transition model, and all are able to predict the
# laminar bucket and show good agreement with the measurements. Viiflow does
# predict a stronger increase in drag below the laminar bucket compared to the
# other methods and the wind tunnel results. All methods overpredict maximum
# lift, with viiflow and MSES closely agreeing on the lift slope, which is not
# surprising given their similar boundary layer formulation.
import matplotlib
import matplotlib.pyplot as plt

# Toggle between an interactive (notebook) figure and inline SVG output.
interactive_plot = False

import numpy as np
import viiflow as vf
import viiflowtools.vf_tools as vft
import viiflowtools.vf_plots as vfp
import logging

logging.getLogger().setLevel(logging.ERROR)

if interactive_plot:
    # %matplotlib notebook
    matplotlib.rcParams['figure.figsize'] = [10, 7]
    lines = None
    fig, ax = plt.subplots(1, 1)
else:
    # %matplotlib inline
    matplotlib.rcParams['figure.figsize'] = [12, 6]
    # %config InlineBackend.figure_format = 'svg'

# +
# Read and repanel airfoil data
BASE = vft.repanel(vft.read_selig("S414Main.dat")[:, ::-1], 190)
SLOT = vft.repanel(vft.read_selig("S414Aft.dat")[:, ::-1], 190)

# Angle-of-attack sweep, more refined inside the laminar bucket.
AOARANGE = np.r_[np.arange(-7, 5, 0.2), np.arange(5, 20.5, 0.5)]
#AOARANGE = np.r_[np.arange(5,22,0.5)]

# Scale to c=1
SCALE = np.max(SLOT[0, :])
BASE = BASE / SCALE
SLOT = SLOT / SCALE

# Solver setup
RE = 1.0e6
ncrit = 9.0
Mach = 0.1
AOA0 = AOARANGE[0]
s = vf.setup(Re=RE, Ma=Mach, Ncrit=ncrit, Alpha=AOA0)
s.IterateWakes = False
s.Silent = True

# Set-up and initialize based on inviscid panel solution
(p, bl, x) = vf.init([BASE, SLOT], s)

# Result arrays
alv = []
clv = []
cdv = []
lines = None  # for the plot function

if not interactive_plot:
    fig, ax = plt.subplots(1, 1)
else:
    # For interactive plotting we iterate 10 steps at a time and, if not
    # converged, can observe the intermediate solution.
    s.Itermax = 10

for alpha in AOARANGE:
    s.Alpha = alpha
    res = None
    grad = None
    # Make effectively 100 iterations, showing some in between.
    for k in range(int(100 / s.Itermax)):
        [x, flag, res, grad, _] = vf.iter(x, bl, p, s, res, grad)
        if interactive_plot:
            # Plot geometry and update interactively.
            nres = np.sqrt(np.dot(res.T, res))
            lines = vfp.plot_geometry(ax, p, bl, lines)
            title = "AOA %f RES %f" % (alpha, nres)
            ax.set_title(title)
            fig.canvas.draw()
        elif alpha == 5:
            # Non-interactive: plot a single geometry at AOA = 5° only.
            lines = vfp.plot_geometry(ax, p, bl, lines)
        # Converged? Record the point and move to the next angle.
        if flag > 0:
            alv.append(alpha)
            clv.append(p.CL)
            cdv.append(bl[0].CD + bl[1].CD)
            print('AL: %f CL: %f CD0: %f CD1: %f' % (alpha, clv[-1], bl[0].CD, bl[1].CD))
            break
    if flag <= 0:
        # Not converged: re-initialize before trying the next angle.
        (p, bl, x) = vf.init([BASE, SLOT], s)

# +
# Load airfoil measurement data
def _maughmer(fname, cols):
    # Shared reader for the digitized comparison data files.
    return np.genfromtxt(fname, skip_header=2, delimiter=",", usecols=cols)


PolarMaughmerWT = _maughmer("MaughmerPolar.csv", [0, 1])
PolarMaughmerMSES = _maughmer("MaughmerPolar.csv", [2, 3])
PolarMaughmerOVERFLOW = _maughmer("MaughmerPolar.csv", [4, 5])
CLMaughmerWT = _maughmer("MaughmerCL.csv", [0, 1])
CLMaughmerMSES = _maughmer("MaughmerCL.csv", [2, 3])
CLMaughmerOVERFLOW = _maughmer("MaughmerCL.csv", [4, 5])

# Lift-curve comparison
fig, ax = plt.subplots(1, 1)
ax.plot(alv, clv, color="orange")
ax.plot(CLMaughmerWT.T[0], CLMaughmerWT.T[1])
ax.plot(CLMaughmerMSES.T[0], CLMaughmerMSES.T[1])
ax.plot(CLMaughmerOVERFLOW.T[0], CLMaughmerOVERFLOW.T[1])
ax.set_xlabel('AOA')
ax.set_ylabel('CL')
ax.legend(['VIIFLOW', 'Wind Tunnel', 'MSES', 'OVERFLOW w/ Transition'])
ax.grid(1)
ax.set_xlim([-5, 20])
ax.set_ylim([-0.5, 2.3])

# Drag-polar comparison.
# NOTE(review): the CSV drag column is divided by 1000 before plotting —
# presumably stored in a scaled unit; confirm against the data file.
fig, ax = plt.subplots(1, 1)
ax.plot(cdv, clv, color="orange")
ax.plot(PolarMaughmerWT.T[0] / 1000, PolarMaughmerWT.T[1])
ax.plot(PolarMaughmerMSES.T[0] / 1000, PolarMaughmerMSES.T[1])
ax.plot(PolarMaughmerOVERFLOW.T[0] / 1000, PolarMaughmerOVERFLOW.T[1])
ax.set_xlabel('CD')
ax.set_ylabel('CL')
ax.legend(['VIIFLOW', 'Wind Tunnel', 'MSES', 'OVERFLOW w/ Transition'])
ax.set_xlim([0, 0.025])
ax.set_ylim([-0.5, 1.8])
ax.grid(1)
# -

# [1] <NAME>, <NAME>, and <NAME>, *Exploration of a Slotted, Natural-Laminar-Flow
# Airfoil Concept*, 2018 Applied Aerodynamics Conference. Atlanta, Georgia.
#
# [2] <NAME> and <NAME>, *Fluid Dynamics Compatible Transition Modeling Using an
# Amplification Factor Transport Equation*, AIAA Journal 2014 52:11, 2506-2512
#
# **[3] The original coordinates, and the concept of this airfoil, are patented
# under US Patent 6905092 B2.
# To use this proprietary airfoil, a license agreement with *Airfoil,
# Incorporated* is necessary and its use is subject to distribution
# restrictions imposed by the U.S. Army.**
S414-Slot/S414.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # KOL Ascension Log Wrapper (v1) # _A Styling Project by <NAME>. (AKA Captain Scotch, #437479)_ # # Hey all! This is a wrapper meant to isolate and analyze Ascension logs via Python, formatting everything into a clean Excel wrapper. Originally, this was simply an overlay of [CKB's run log parser in ASH](https://kolmafia.us/showthread.php?22963-RunLogSummary), which is fantastic, but as I'm more of a Python dev this quickly enlarged into a broader project that turned into a personal goal to build a Python log parser. That's in development right now! I tried to make it as clean as possible; let me know if you have any questions. import pandas as pd import requests # Set universal variables; directories for inputs & outputs, & the dates of the run you're analyzing kolmafDir = "/home/tbh/parser/parser/KoL", outputDir = "/home/tbh/parser/parser/KoL", kolName = 'threebullethamburgler' runDates = ['20200510','20200511','20200512','20200513'] runNotes = 'blah blah blah blah /n blah blah blah BLAH /n blah blah, blah blah, Baaaaaah' # Read in the last parsed run via RunLogSum.ash, for testing purposes oldRun = pd.read_csv('{}data/{}-runlog_0095.txt'.format(kolmafDir,kolName),sep='\t') # + # These are, all things considered, extremely small files. So I just read the whole # thing into memory so that it's all accessible without finicky itertools stuff. # Populate our session dictionary dDict = {} for day, session in enumerate(runDates): # Python is zero-indexed, so we increment day by 1 here. # Also replace spaces w/ _ for playername. 
dDict[day+1] = open('/home/tbh/parser/parser/KoL/sessions/{}_{}.txt'.format(kolName.replace(" ","_"), session)).read() # Remove everything before Valhalla & after freeing Ralph if day == 0: start = dDict[1].find('Welcome to Valhalla!') dDict[1] = dDict[1][start:] else: end = dDict[day+1].find('Freeing King Ralph') dDict[day+1] = dDict[day+1][:end] # Also, to minimize server hits, grab a few files from the Mafia SVN data mafCombats = requests.get('https://svn.code.sf.net/p/kolmafia/code/src/data/combats.txt') mafMonster = requests.get('https://svn.code.sf.net/p/kolmafia/code/src/data/monsters.txt') mafMods = requests.get('https://svn.code.sf.net/p/kolmafia/code/src/data/modifiers.txt') # + findString = 'dayc' buffer = 3 day = 2 # simple print w/ buffer for finding things within the logs for ct, x in enumerate(dDict[day].split(sep='\n')): if findString.lower() in x.lower(): print('===========\n'+ '\n'.join(dDict[day].split(sep='\n')[(ct-buffer):(ct+buffer)])+ '\n===========\n') # + def monsterLocation(mon): ''' Sub-function that uses KOLMafia's combats directory to ascertain where a monster is likely to occur (used for establishing wanderer likelihood in the absence of full adventure parsing; was used for initial tests of digitize capture). Eventually will use this for full error-checking of the parser. ''' locs = [] mon = str(mon).strip() # Read in combats.txt and reformat it via split for line in mafCombats.text.split('\n'): if mon in line: locs = locs + [line.split('\t')[0]] # For some reason, Camel's Toe shows up as "The" Camel's Toe when # you genie wish; this checks for and removes extraneous "The" elif mon.lower().replace('the ','') in line.lower(): locs = locs + [line.split('\t')[0]] if locs == []: locs = ['FOOTAGE NOT FOUND'] return locs monsterLocation('Green Ops Soldier') # + def extractWanderers(sessionLogs): ''' Quick yet annoying function to attempt to ascertain wanderers. 
The Digitize parser (unfortunately) doesn't work particularly well yet; when ascertaining digitize usage I usually just look over the print & mental math for the spreadsheet. The enamorang parser works better, which is good, since it's currently the only standard "choice" wanderer as of 1/19. Features to add: - include detection of vote wanderers - once the XML turn parser is done, revise this to reference it - (you basically fuckin did that already in this hacky POS) ''' wandererDict = {} # Let's start with digitized monsters! for day in sessionLogs.keys(): wandererDict[day] = {} # Tracking digitized monsters is a pain in the ass. Luckily, simply # extracting who they are can be pretty easy with some rough # nested loops. digiMons = [] for ct, row in enumerate(sessionLogs[day].split('\n')): if 'casts DIGITIZE!' in row: x = 0 # Walk backwards through the file w/ a while loop while x < ct: x +=1 monName = sessionLogs[day].split('\n')[ct-x] if 'Encounter:' in monName: break monName = cleanNames(monName.replace('Encounter: ','')) digiMons = digiMons + [monName] for mon in digiMons: # Now that we have that list, we want to figure out how many times # they were fought. To do this, we'll need to compare the zone # they originated from to the zones they have been found in. # This method is meh, but kind of works, in the absence of Mafia # saving intro messages for digitized monsters. monLoc = monsterLocation(mon) # So, cool unintentional thing here; when Mafia logs Witchess, it # totally messes up the turn/combat statement, because it treats # it differently. This is great! It means digitized Witchess # pieces are super easy to find. 
for ct, row in enumerate(sessionLogs[day].split('\n')): if 'Encounter: {}'.format(mon) in row: data = sessionLogs[day].split('\n')[ct-1].split('] ') try: turn = int(data[0][1:]) loct = data[1] print('Turn {}: fought {} @ {}.'.format(turn,mon,loct)) except: print(' Could not parse {}'.format(data)) monLOVs = {} for ct, row in enumerate(sessionLogs[day].split('\n')): if 'uses the LOV Enamorang' in row: x = 0 # Walk backwards through the file w/ a while loop while x < ct: x +=1 monName = sessionLogs[day].split('\n')[ct-x] if 'Encounter:' in monName: dataSplit = sessionLogs[day].split('\n')[ct-(x+1)].split('] ') try: turn = int(dataSplit[0][1:]) loct = dataSplit[1] except: turn = -999 break monName = cleanNames(monName.replace('Encounter: ','')) monLOVs[turn] = monName for turn, mon in monLOVs.items(): # Similar process for above, with a few small items. Here, I am # checking turn differential to make positively sure it is the # enamorang'd monster; it has to be 14 turns! monLoc = monsterLocation(mon) for ct, row in enumerate(sessionLogs[day].split('\n')): if 'Encounter: {}'.format(mon) in row: data = sessionLogs[day].split('\n')[ct-1].split('] ') try: foundTurn = int(data[0][1:]) currLoct = data[1] # For testing #print('Turn {}: fought {} @ {}.'.format(foundTurn,mon,loct)) except: # Warn that something was unparsable print(' Could not parse {}'.format(data)) if turn != foundTurn: wandererDict[day][mon] = {'Type':'Enamorang', 'Location':currLoct, 'Turn':foundTurn} # Turn this into easily pasted data frames = [] for dayID, d in wandererDict.items(): frames.append(pd.DataFrame.from_dict(d,orient='index').T) return pd.concat(frames, keys=['Day #{}'.format(x) for x in wandererDict.keys()],sort=False).T extractWanderers(dDict) # - def cleanNames(monName): ''' This won't be much of an issue going forward, but the Intergnat had an annoying habit of modifying monster names. This attempts to clean that particular detritus to extract pure names. 
I'm assuming this will come back in other forms as well. Features to add: - parse OCRS modifiers - parse "Yes, Can Has" effect''' # All the strings added to monster names. Haven't added # OCRS modifiers yet, but intend to. Also would like # to include rewriting the can has skill, in case it # comes up through the crazy horse, but haven't yet gnatList = ['NAMED NEIL', 'ELDRITCH HORROR ', 'AND TESLA', 'WITH BACON!!!', 'WITH SCIENCE'] ocrsMods = [] replList = gnatList + ocrsMods for repl in replList: monName = monName.replace(repl,'') # Insert renaming for that one cheeseburger skill here return monName.strip() # + def extractWishes(sessionLogs): ''' Quick function to ascertain wish usage ''' wishDict = {} for day in sessionLogs.keys(): wishDict[day] = {} # Wishes aren't quite as easy as pulls, which are easily snagged # on one line with newline split logic, but they're certainly # easier than digitizations. # Combat parsing, utilizing the combat to pull monsters out for ct, row in enumerate(sessionLogs[day].split('\n')): if "genie summoned monster" in row: turn = row[(row.find('[')+1):row.find(']')] wishMon = sessionLogs[day].split('\n')[ct+1].replace('Encounter: ','') wishMon = cleanNames(wishMon) wishDict[day][ct] = {'Type':'Fight', 'Details':wishMon, 'Turn':int(turn)} # Extra parsing, using the actual wish URL to fill others for ct, row in enumerate(sessionLogs[day].split('\n')): if "&wish=" in row: wishString = row[(row.find('&wish=')+6):] try: # Note we have to use ct+2 due to session logs # taking 2 lines to get from URL to fight. If # this ever changes, will need to change. 
wishDict[day][ct+2]['Type'] except: wishDict[day][ct] = {'Type':'Non-Fight', 'Details':wishString, 'Turn':'?'} # Rename the wishes from line # to wish # for i, val in enumerate(wishDict[day]): wishDict[day][i+1] = wishDict[day].pop(val) # Turn this into easily pasted data frames = [] for dayID, d in wishDict.items(): frames.append(pd.DataFrame.from_dict(d,orient='index').T) return pd.concat(frames, keys=['Day #{}'.format(x) for x in wishDict.keys()]).T extractWishes(dDict) # + def extractPulls(sessionLogs): ''' Quick function to extract normal run pulls into a table. ''' pullDict = {} for day in sessionLogs.keys(): # Find all your pulls; relies on sessions storing them as 'pull: ' pulls = [i[6:] for i in sessionLogs[day].split('\n') if i.startswith('pull: ')] # Split out pulls into a 20 item list, for formatting numbs = [int(i[0:2].strip()) for i in pulls] pullDict[day] = [] for count, pull in enumerate(pulls): pullDict[day] = pullDict[day] + [pull[1:].strip()]*numbs[count] if len(pullDict[day]) < 20: pullDict[day] = pullDict[day] + ['']*(20-len(pullDict[day])) # Renaming for simplicity/ease of pasting; pullsOut = pd.DataFrame(pullDict).rename( index = lambda x: '#{}'.format(x+1), columns= lambda x: 'Day #{}'.format(x)) return pullsOut extractPulls(dDict) # + def goblinParser(sessionLogs): ''' Figure out where/when the user encountered sausage goblins. This SHOULD NOT be used as a spading tool, as it does not properly track important things like how much meat was in the grinder and how many turns the grinder was used. It's just for me to think about where I dropped goblins in-run ''' goblinDict = {} gobNum = 0 for day in sessionLogs.keys(): # I am keeping this a list in case we eventually find a # sausage goblin boss... 
for mon in ['sausage goblin']: for ct, row in enumerate(sessionLogs[day].split('\n')): if 'Encounter: {}'.format(mon) in row: gobNum = gobNum + 1 data = sessionLogs[day].split('\n')[ct-1].split('] ') try: turn = int(data[0][1:]) loct = data[1] # print('Turn {}: fought {} @ {}.'.format(turn,mon,loct)) except: # Gotta show that error! print(' Could not parse {}'.format(data)) goblinDict[gobNum] = {'Turn': turn, 'Location': loct, 'Day': day} return pd.DataFrame.from_dict(goblinDict).T goblinParser(dDict) # - # ## Unfinished functions, to be added over time def buffSelection(sessionLogs): ''' Parse out the IOTM buffs selected in the selected run, by day ''' def itemSelection(sessionLogs): ''' Parse out the IOTM items selected in the selected run, by day ''' def horseBuffSpading(sessionLogs): ''' Find out what buffs you get from the crazy horse, for a simulation project I'm working on to establish bounds on crazy horse value ''' # + import re import pandas as pd def pizzaCube(sessionLogs): ''' Quick function to extract normal run pulls into a table. ''' kolmafDir = "/home/tbh/parser/parser/KoL", outputDir = "/home/tbh/parser/parser/KoL", kolName = 'threebullethamburgler' runDates = ['20200510','20200511','20200512','20200513'] dDict = {} pizzaDict = {} pizzaNum = 0 # Set universal variables; directories for inputs & outputs, & the dates of the run you're analyzing for day, session in enumerate(runDates): # Python is zero-indexed, so we increment day by 1 here. # Also replace spaces w/ for playername. 
dDict[day+1] = open('/home/tbh/parser/parser/KoL/sessions/{}_{}.txt'.format(kolName.replace(" ","_"), session)).read() # Remove everything before Valhalla & after freeing Ralph if day == 0: start = dDict[1].find('Welcome to Valhalla!') dDict[1] = dDict[1][start:] else: end = dDict[day+1].find('Freeing King Ralph') dDict[day+1] = dDict[day+1][:end] tuples = re.findall(r'\npizza ([^,]+), ([^,]+), ([^,]+), (.+)\n', dDict[day+1]) text = re.findall(r'eat 1 diabolic pizza\n(.*?)You gain 3 Fullness', dDict[day+1],re.DOTALL) for var in range(len(tuples)): pizzaDict[pizzaNum] = {'Day': day+1, 'Ingred1': tuples[var][0], 'Ingred2': tuples[var][1], 'Ingred3': tuples[var][2], 'Ingred4': tuples[var][3], 'Results': text[var].replace('\n',',') } pizzaNum = pizzaNum + 1 return pd.DataFrame.from_dict(pizzaDict).T pizzaCube(dDict) # -
Log-Parser/KOL Log Wrapper v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # **Name:** \_\_\_\_\_ <NAME> # # **EID:** \_\_\_\_\_ 54791401 # # CS4487 - Tutorial 5a: Face Detection in Images # # In this tutorial you will train support vector machine and kernel support vector machine classifiers to detect whether there is a face in a small image patch. # # First we need to initialize Python. Run the below cell. # %matplotlib inline import IPython.core.display # setup output image format (Chrome works best) IPython.core.display.set_matplotlib_formats("svg") import matplotlib.pyplot as plt import matplotlib from numpy import * from sklearn import * import os import zipfile import fnmatch random.seed(100) from scipy import ndimage from scipy import signal import skimage.color import skimage.exposure import skimage.io import skimage.util # ## 1. Loading Data and Pre-processing # Next we need to load the images. Download `faces.zip`, and put it in the same direcotry as this ipynb file. **Do not unzip the file.** Then run the following cell to load the images. # + imgdata = {'train':[], 'test':[]} classes = {'train':[], 'test':[]} # the dataset is too big, so subsample the training and test sets... 
# reduce training set by a factor of 4 train_subsample = 4 train_counter = [0, 0] # maximum number of samples in each class for test set test_maxsample = 472 test_counter = [0, 0] # load the zip file filename = 'faces.zip' zfile = zipfile.ZipFile(filename, 'r') for name in zfile.namelist(): # check file name matches if fnmatch.fnmatch(name, "faces/*/*/*.png"): # filename is : faces/train/face/fname.png (fdir1, fname) = os.path.split(name) # get file name (fdir2, fclass) = os.path.split(fdir1) # get class (face, nonface) (fdir3, fset) = os.path.split(fdir2) # get training/test set # class 1 = face; class 0 = non-face myclass = int(fclass == "face") loadme = False if fset == 'train': if (train_counter[myclass] % train_subsample) == 0: loadme = True train_counter[myclass] += 1 elif fset == 'test': if test_counter[myclass] < test_maxsample: loadme = True test_counter[myclass] += 1 if (loadme): # open file in memory, and parse as an image myfile = zfile.open(name) #img = matplotlib.image.imread(myfile) img = skimage.io.imread(myfile) # convert to grayscale img = skimage.color.rgb2gray(img) myfile.close() # append data imgdata[fset].append(img) classes[fset].append(myclass) zfile.close() imgsize = img.shape print(len(imgdata['train'])) print(len(imgdata['test'])) trainclass2start = sum(classes['train']) # - # Each image is a 19x19 array of pixel values. Run the below code to show an example: print(img.shape) plt.subplot(1,2,1) plt.imshow(imgdata['train'][0], cmap='gray', interpolation='nearest') plt.title("face sample") plt.subplot(1,2,2) plt.imshow(imgdata['train'][trainclass2start], cmap='gray', interpolation='nearest') plt.title("non-face sample") plt.show() # Run the below code to show more images! # + # function to make an image montage def image_montage(X, imsize=None, maxw=10): """X can be a list of images, or a matrix of vectorized images. 
Specify imsize when X is a matrix.""" tmp = [] numimgs = len(X) # create a list of images (reshape if necessary) for i in range(0,numimgs): if imsize != None: tmp.append(X[i].reshape(imsize)) else: tmp.append(X[i]) # add blanks if (numimgs > maxw) and (mod(numimgs, maxw) > 0): leftover = maxw - mod(numimgs, maxw) meanimg = 0.5*(X[0].max()+X[0].min()) for i in range(0,leftover): tmp.append(ones(tmp[0].shape)*meanimg) # make the montage tmp2 = [] for i in range(0,len(tmp),maxw): tmp2.append( hstack(tmp[i:i+maxw]) ) montimg = vstack(tmp2) return montimg # show a few images plt.figure(figsize=(9,9)) plt.imshow(image_montage(imgdata['train'][::20]), cmap='gray', interpolation='nearest') plt.show() # - # Each image is a 2d array, but the classifier algorithms work on 1d vectors. Run the following code to convert all the images into 1d vectors by flattening. The result should be a matrix where each row is a flattened image. # + trainX = empty((len(imgdata['train']), prod(imgsize))) for i,img in enumerate(imgdata['train']): trainX[i,:] = ravel(img) trainY = asarray(classes['train']) # convert list to numpy array print(trainX.shape) print(trainY.shape) testX = empty((len(imgdata['test']), prod(imgsize))) for i,img in enumerate(imgdata['test']): testX[i,:] = ravel(img) testY = asarray(classes['test']) # convert list to numpy array print(testX.shape) print(testY.shape) # - # ## 2. Detection Using Pixel Values # # Train kernel SVM using either RBF or polynomia kernel classifiers to classify an image patch as face or non-face. Evaluate all classifiers on the test set. # # Normalize the features and setup all the parameters and models. 
# +
# Normalization: sklearn.preprocessing.MinMaxScaler()
# Scale every pixel feature into [-1, 1]. The scaler is fitted on the
# training data only; the identical transform is then applied to the test
# data so both sets live in the same feature space.
scaler = preprocessing.MinMaxScaler(feature_range=(-1,1))  # make scaling object
trainXn = scaler.fit_transform(trainX)  # use training data to fit scaling parameters
testXn = scaler.transform(testX)        # apply scaling to test data

clfs = {}
# -

# setup all the parameters and models
# Each entry pairs an un-fitted SVC with the hyper-parameter grid that the
# cross-validated search below will explore.
exps = {
    'svm-lin': { 'paramgrid': {'C': logspace(-2,3,10)},
                 'clf': svm.SVC(kernel='linear') },
    'svm-rbf': { 'paramgrid': {'C': logspace(-2,3,10),
                               'gamma': logspace(-4,3,10) },
                 'clf': svm.SVC(kernel='rbf') },
    'svm-poly': { 'paramgrid': {'C': logspace(-2,3,10),
                                'degree': [2, 3, 4] },
                  'clf': svm.SVC(kernel='poly') },
}

# Try to train each classifier and show the parameters.

# run the experiment
for (name, ex) in exps.items():
    # Exhaustive grid search with 5-fold cross-validation; GridSearchCV
    # refits the best parameter combination on the whole training set.
    clf = model_selection.GridSearchCV(
        ex['clf'], ex['paramgrid'], cv=5, verbose=1, n_jobs=-1
    )
    clf.fit(trainXn, trainY)
    clfs[name] = clf
    print(f"{name}: {clf.best_params_}")

# Calculate the training and test accuracy for the each classifier.

# +
predYtrain = {}   # classifier name -> training-set accuracy score
predYtest = {}    # classifier name -> test-set accuracy score
print("Training/Test - {Name: Score}")
for (name, clf) in clfs.items():
    # Predict on the scaled training/test sets and score the accuracy.
    pred_train = clf.predict(trainXn)
    pred_test = clf.predict(testXn)
    score_train = metrics.accuracy_score(trainY, pred_train)
    score_test = metrics.accuracy_score(testY, pred_test)
    train_dict = {name: score_train}
    test_dict = {name: score_test}
    print(f"Training - {train_dict},\nTest - {test_dict}")
    predYtrain.update(train_dict)
    predYtest.update(test_dict)
# -

for (name,clf) in clfs.items():
    print(name, ' ', clf)

# set variables for later
# NOTE(review): predYtrain/predYtest map names to accuracy *scores*, so the
# original `predY = predYtest['svm-poly']` yielded a scalar float, not a
# prediction vector. The error-analysis cell below indexes predY per-sample,
# so compute the actual test-set predictions of the chosen model here.
predY = clfs['svm-poly'].predict(testXn)
#adaclf = clfs['ada'].best_estimator_
svmclf_rbf = clfs['svm-rbf'].best_estimator_
svmclf_poly = clfs['svm-poly'].best_estimator_
#rfclf = clfs['rf'].best_estimator_

# _Which classifier was best?_
# - **INSERT YOUR ANSWER HERE**
#
# SVM polynomial
# - **INSERT YOUR ANSWER HERE**
# - SVM polynomial

# ### Error analysis
# The accuracy only tells part of the classifier's performance. We can also look at the different types of errors that the classifier makes:
# - _True Positive (TP)_: classifier correctly said face
# - _True Negative (TN)_: classifier correctly said non-face
# - _False Positive (FP)_: classifier said face, but not a face
# - _False Negative (FN)_: classifier said non-face, but was a face
#
# This is summarized in the following table:
#
# <table>
# <tr><th colspan=2 rowspan=2><th colspan=2 style="text-align: center">Actual</th></tr>
# <tr>  <th>Face</th><th>Non-face</th></tr>
# <tr><th rowspan=2>Prediction</th><th>Face</th><td>True Positive (TP)</td><td>False Positive (FP)</td></tr>
# <tr>  <th>Non-face</th><td>False Negative (FN)</td><td>True Negative (TN)</td></tr>
# </table>
#
# We can then look at the _true positive rate_ and the _false positive rate_.
# - _true positive rate (TPR)_: proportion of true faces that were correctly detected
# - _false positive rate (FPR)_: proportion of non-faces that were mis-classified as faces.
#
# Use the below code to calculate the TPR and FPR of your classifiers.
# +
# predY is the prediction from the classifier
Pind = where(testY==1)  # indicies for face
Nind = where(testY==0)  # indicies for non-face

# Agreements on each index set are the correct decisions (TP on faces,
# TN on non-faces). A mismatch on Pind is a face predicted as non-face —
# a False Negative — and a mismatch on Nind is a non-face predicted as
# face — a False Positive. The original cell had FP and FN swapped,
# which also corrupted the TPR/FPR values derived from them.
TP = count_nonzero(testY[Pind] == predY[Pind])
FN = count_nonzero(testY[Pind] != predY[Pind])
TN = count_nonzero(testY[Nind] == predY[Nind])
FP = count_nonzero(testY[Nind] != predY[Nind])
TPR = TP / (TP+FN)  # proportion of true faces correctly detected
FPR = FP / (FP+TN)  # proportion of non-faces mis-detected as faces

print("TP=", TP)
print("FP=", FP)
print("TN=", TN)
print("FN=", FN)
print("TPR=", TPR)
print("FPR=", FPR)
# -

# _How does the classifier make errors?_
# - **INSERT YOUR ANSWER HERE**
# - **INSERT YOUR ANSWER HERE**
# - high TPR, means it gets all the faces, but high FPR also means it misdetects things as faces.

# For kernel SVM, we can look at the support vectors to see what the classifier finds difficult.

# +
# svmclf is the trained SVM classifier
print("num support vectors:", len(svmclf_poly.support_vectors_))

si = svmclf_poly.support_  # get indicies of support vectors

# get all the patches for each support vector
simg = [ imgdata['train'][i] for i in si ]

# make montage
outimg = image_montage(simg, maxw=20)
plt.figure(figsize=(9,9))
plt.imshow(outimg, cmap='gray', interpolation='nearest')
# -

# Comment on anything you notice about what the SVM finds difficult (i.e., on the decision boundary or within the margin)
# - **INSERT YOUR ANSWER HERE**
# - **INSERT YOUR ANSWER HERE**
# - glasses on faces. some non-faces look like faces.

# ## 3. Detection using Image Feature
#
# ### Image Feature Extraction
# The detection performance is not that good using pixel values. The problem is that we are using the raw pixel values as features, so it is difficult for the classifier to interpret larger structures of the face that might be important. To fix the problem, we will extract features from the image using a set of filters.
#
# Run the below code to look at the filter output. The filters are a set of black and white boxes that respond to similar structures in the image. After applying the filters to the image, the filter response map is aggregated over a 4x4 window. Hence each filter produces a 5x5 feature response. Since there are 4 filters, then the feature vector is 100 dimensions.

def extract_features(imgs, doplot=False):
    """Extract pooled box-filter responses from a list of 19x19 patches.

    Each image is convolved with 4 black/white box filters built from the
    +/-1 layouts below; each response map is downsampled by a factor of 4
    (interpolation pooling), and the pooled maps are concatenated into one
    feature vector per image.

    imgs   : list of 2d grayscale arrays (all the same shape)
    doplot : if True, also plot the filters/responses for the images
    returns: (len(imgs), feature_length) feature matrix
    """
    # the filter layout: two- and three-bar patterns, horizontal & vertical
    lay = [array([-1,1]), array([-1,1,-1]), array([[1],[-1]]), array([[-1],[1],[-1]])]
    sc = 8            # size of each filter patch
    poolmode = 'i'    # pooling mode (interpolate)
    cmode = 'same'    # convolution mode

    brick = ones((sc,sc))  # filter patch
    ks = []
    for l in lay:
        # expand each +/-1 entry into an sc-by-sc brick, then tile the
        # bricks horizontally (1d layout) or vertically (2d layout)
        tmp = [brick*i for i in l]
        if (l.ndim==1):
            k = hstack(tmp)
        else:
            k = vstack(tmp)
        ks.append(k)

    # get the filter response size
    # NOTE(review): the max-pooling branches are dead code with
    # poolmode='i'; they call an undefined `maxpool` and use inconsistent
    # mode spellings ('absmax' here vs 'maxabs' in the loop below) —
    # confirm and fix before ever enabling them.
    if (poolmode=='max') or (poolmode=='absmax'):
        tmpimg = maxpool(maxpool(imgs[0]))
    else:
        tmpimg = ndimage.interpolation.zoom(imgs[0], 0.25)
    fs = prod(tmpimg.shape)
    # get the total feature length
    fst = fs*len(ks)

    # filter the images
    X = empty((len(imgs), fst))
    for i,img in enumerate(imgs):
        x = empty(fst)
        # for each filter
        for j,th in enumerate(ks):
            # filter the image
            imgk = signal.convolve(img, ks[j], mode=cmode)
            # do pooling (interpolation zoom by 0.25 in the default mode)
            if poolmode == 'maxabs':
                mimg = maxpool(maxpool(abs(imgk)))
            elif poolmode == 'max':
                mimg = maxpool(maxpool(imgk))
            else:
                mimg = ndimage.interpolation.zoom(imgk, 0.25)
            # put responses into feature vector
            x[(j*fs):(j+1)*fs] = ravel(mimg)

            if (doplot):
                # one column per filter: kernel, raw response, pooled response
                plt.subplot(3,len(ks),j+1)
                plt.imshow(ks[j], cmap='gray', interpolation='nearest')
                plt.title("filter " + str(j))
                plt.subplot(3,len(ks),len(ks)+j+1)
                plt.imshow(imgk, cmap='gray', interpolation='nearest')
                plt.title("filtered image")
                plt.subplot(3,len(ks),2*len(ks)+j+1)
                plt.imshow(mimg, cmap='gray', interpolation='nearest')
                plt.title("image features")
        X[i,:] = x
    return X

# new features
img = imgdata['train'][0]
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.title("image")

plt.figure(figsize=(9,9))
extract_features([img], doplot=True);

# Now lets extract image features on the training and test sets. It may take a few seconds.
trainXf = extract_features(imgdata['train'])
print(trainXf.shape)

testXf = extract_features(imgdata['test'])
print(testXf.shape)

# ### Detection
# Now train AdaBoost and SVM classifiers on the image feature data. Evaluate on the test set.

# +
### Nomalization
# Same scheme as section 2, but fitted on the extracted image features.
scalerf = preprocessing.MinMaxScaler(feature_range=(-1,1))  # make scaling object
trainXfn = scalerf.fit_transform(trainXf)  # use training data to fit scaling parameters
testXfn = scalerf.transform(testXf)        # apply scaling to test data

clfs2 = {}
# +
# setup all the parameters and models
exps = {
    'svm-lin': { 'paramgrid': {'C': logspace(-2,3,10)},
                 'clf': svm.SVC(kernel='linear') },
    'svm-rbf': { 'paramgrid': {'C': logspace(-2,3,10),
                               'gamma': logspace(-4,3,10) },
                 'clf': svm.SVC(kernel='rbf') },
    'svm-poly': { 'paramgrid': {'C': logspace(-2,3,10),
                                'degree': [2, 3, 4] },
                  'clf': svm.SVC(kernel='poly') },
}
clfs2 = {}
# -

# Try to train each classifier and show the parameters.

# run the experiment
for (name, ex) in exps.items():
    # Grid-search each model with 5-fold cross-validation on the
    # image-feature representation.
    clf = model_selection.GridSearchCV(
        ex['clf'], ex['paramgrid'], cv=5, verbose=1, n_jobs=-1
    )
    clf.fit(trainXfn, trainY)
    clfs2[name] = clf
    print(f"{name}: {clf.best_params_}")

# Calculate the training and test accuracy for the each classifier.

# +
predYtrain = {}   # classifier name -> training-set accuracy score
predYtest = {}    # classifier name -> test-set accuracy score
print("Training/Test - {Name: Score}")
for (name, clf) in clfs2.items():
    pred_train = clf.predict(trainXfn)
    pred_test = clf.predict(testXfn)
    score_train = metrics.accuracy_score(trainY, pred_train)
    score_test = metrics.accuracy_score(testY, pred_test)
    train_dict = {name: score_train}
    test_dict = {name: score_test}
    print(f"Training - {train_dict},\nTest - {test_dict}")
    predYtrain.update(train_dict)
    predYtest.update(test_dict)
# -

# Select the best feature-based model for the detection test below.
# NOTE(review): the original cell did `testY = predYtest['svm-rbf']`, which
# overwrote the ground-truth test labels with a scalar accuracy score;
# keep the labels intact and compute real predictions for error analysis.
bestclf = clfs2['svm-rbf']
predY = bestclf.predict(testXfn)

# ### Error Analysis
# Similar to before, repeat the error analysis for the new classifiers.

# +
### INSERT YOUR CODE HERE
## TP, FN, TN, TP, TPR, FPR
# -

# How has the classifier using image features improved?
# - **INSERT YOUR ANSWER HERE**
# - **INSERT YOUR ANSWER HERE**
# - lower FPR and lower TPR.

# # Test image
# Now let us try your face detector on a real image. Download the "nasa-small.png" image and put it in the same directory as your ipynb file. The below code will load the image, crop out image patches and then extract features. (this may take a few minutes)

fname = "nasa-small.png"

# +
# load image
testimg3 = skimage.io.imread(fname)
# convert to grayscale
testimg = skimage.color.rgb2gray(testimg3)
print(testimg.shape)
plt.imshow(testimg, cmap='gray')

# +
# step size for the sliding window
step = 4

# extract window patches with step size of 4
patches = skimage.util.view_as_windows(testimg, (19,19), step=step)
psize = patches.shape

# collapse the first 2 dimensions
patches2 = patches.reshape((psize[0]*psize[1], psize[2], psize[3]))
print(patches2.shape)

# histogram equalize patches (improves contrast)
patches3 = empty(patches2.shape)
for i in range(patches2.shape[0]):
    patches3[i,:,:] = skimage.exposure.equalize_hist(patches2[i,:,:])

# extract features (the original computed this twice, once into an unused
# `newYf` — the duplicate call is removed)
newXf = extract_features(patches3)
# -

# Now predict using your classifier. The extracted features are in `newXf`.

# Scale the patch features with the scaler fitted on the TRAINING features,
# then predict with the best trained classifier.
# NOTE(review): the original cell built svm.SVC(kernel='ploy', ...) — a typo
# for 'poly' that raises ValueError — and called transform() on a brand-new,
# unfitted MinMaxScaler, which raises NotFittedError. It also never defined
# `prednewY`, which the visualization cell below requires.
newXfn = scalerf.transform(newXf)
prednewY = bestclf.predict(newXfn)

# Now we we will view the results on the image. Use the below code. `prednewY` is the vector of predictions.

# +
# reshape prediction to an image
imgY = prednewY.reshape(psize[0], psize[1])

# zoom back to image size
imgY2 = ndimage.interpolation.zoom(imgY, step, output=None, order=0)

# pad the top and left with half the window size
imgY2 = vstack((zeros((9, imgY2.shape[1])), imgY2))
imgY2 = hstack((zeros((imgY2.shape[0],9)), imgY2))

# pad right and bottom to same size as image
if (imgY2.shape[0] != testimg.shape[0]):
    imgY2 = vstack((imgY2, zeros((testimg.shape[0]-imgY2.shape[0], imgY2.shape[1]))))
if (imgY2.shape[1] != testimg.shape[1]):
    imgY2 = hstack((imgY2, zeros((imgY2.shape[0],testimg.shape[1]-imgY2.shape[1]))))

# show detections with image
#detimg = dstack(((0.5*imgY2+0.5)*testimg, 0.5*testimg, 0.5*testimg))
nimgY2 = 1-imgY2
tmp = nimgY2*testimg
detimg = dstack((imgY2+tmp, tmp, tmp))

# show it!
plt.figure(figsize=(9,9))
plt.subplot(2,1,1)
plt.imshow(imgY2, interpolation='nearest')
plt.title('detection map')
plt.subplot(2,1,2)
plt.imshow(detimg)
plt.title('image')
plt.axis('image')
# -

# _How did your face detector do?_
# - **INSERT YOUR ANSWER HERE**
# - **INSERT YOUR ANSWER HERE**
# - got some faces, but also had lots of FP!
# - You can try it on your own images. The faces should all be around 19x19 pixels though.
# - We only used 1/4 of the training data. Try using more data to train it!
tutorial-5/Tutorial5a.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # (Interactive) Plotting using Matplotlib and Seaborn # # [Matplotlib](http://matplotlib.org) is a basic plotting library for Python inspired by Matlab. [Seaborn](http://stanford.edu/~mwaskom/software/seaborn) is built on top of it with integrated analysis, specialized plots, and pretty good integration with Pandas. # # Also see [the full gallery of Seaborn](http://stanford.edu/~mwaskom/software/seaborn/examples/index.html) or [Matplotlib](http://matplotlib.org/gallery.html). # # + #disable some annoying warnings import warnings warnings.filterwarnings('ignore', category=FutureWarning) #plots the figures in place instead of a new window # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np # - #use a standard dataset of heterogenous data cars = pd.read_csv('data/mtcars.csv') cars.head() # ## Scatterplot plt.scatter(x=cars['mpg'],y=cars['wt']) plt.xlabel('miles per gallon') plt.ylabel('weight') plt.title('MPG vs WT') plt.show() #integrated in pandas, too cars.plot(x='mpg',y='wt',kind='scatter') cars.plot(kind='scatter', x='mpg',y='wt',c='hp',s=cars['cyl']*20,alpha=0.5) #what if we plot everything? 
cars.plot() # ## Histogram cars['mpg'].hist(bins=5) plt.hist(cars['mpg'],bins=5) plt.title('miles per gallon') #seaborn does not just show a histogram but also a kernel density enstimation and better default settings sns.distplot(cars['mpg'],bins=5) # # Box plots #box plots cars['mpg'].plot(kind='box') cars.boxplot('mpg') #group by gear cars.boxplot('mpg', by='gear') # load gapminder again and select 2007 gap = pd.read_csv('data/gapminder-unfiltered.tsv',index_col=0, sep='\t') gap2007 = gap[gap.year == 2007] gap2007.columns # ## Log scale gap2007.plot(kind='scatter', x='lifeExp',y='gdpPercap') # The data is unbalanced with outliers. What about log scale? gap2007.plot(kind='scatter', x='lifeExp',y='gdpPercap') plt.yscale('log') # ## Grouping / coloring plots # # How to group by color? #create a color palette colors = sns.color_palette() sns.palplot(colors) #for each group create an own plot and overlay them for (name, group),color in zip(gap2007.groupby('continent'),colors): plt.scatter(x=group['lifeExp'],y=group['gdpPercap'],label=name, c=color,s=30) plt.yscale('log') plt.legend() # + #playing with categories ... seaborn is pretty good with it plt.figure(figsize=(40,20)) plt.subplot(121) sns.boxplot(x='continent',y='gdpPercap',data=gap) plt.subplot(122) sns.violinplot(x='continent',y='gdpPercap',data=gap2007) # + # or with linear regression anscombe = sns.load_dataset("anscombe") sns.lmplot('x','y',col='dataset',hue='dataset', data=anscombe, col_wrap=2) #g = sns.FacetGrid(anscombe, col="dataset", size=4, aspect=1) #g.map(sns.regplot, "x", "y") # + # or with structured heatmaps #compute the correlations and take a look at them corrmat = gap.corr() # draw a clustered heatmap using seaborn sns.clustermap(corrmat, square=True) # - # ## TASK # > Create a scatterplot where # > * x = lifeExp # > * y = gdpPerCap # > * color = continent # > * size = pop # > # > Label the axis appropiately and use a log scale for gdp. 
# ## Interactive plots # # Simple interaction is possible with IPython by default. That means whenever the user changes some parameter the visualization is recreated on the server-side and sent to the client. from ipywidgets import interact, interact_manual @interact(text='Hello', slider=(0,10),check=True,categories=['red','green','blue']) def react(text,slider,check,categories): print(text,slider*10,check,categories) @interact_manual(text='Hello', slider=(0,10),check=True,categories=['red','green','blue']) def react(text,slider,check,categories): print(text,slider*10,check,categories) @interact(bins=(5, 25, 5),color=['red','green','orange','blue']) def show_distplot(bins,color): cars['mpg'].hist(bins=bins, color=color) # # Custom-built widgets # # http://nbviewer.ipython.org/github/ipython/ipython/blob/3.x/examples/Interactive%20Widgets/Widget%20List.ipynb # + from ipywidgets import widgets [widget for widget in dir(widgets) if not widget.endswith('Widget') and widget[0] == widget[0].upper() and widget[0] != '_'] # - @interact(bins=widgets.FloatText(value=5)) def show_distplot(bins): cars['mpg'].hist(bins=int(bins)) # + text_widget = widgets.Textarea(value='Hello', description='text area') slider_widget = widgets.BoundedFloatText(5,min=0,max=10, description='slider area') check_widget = widgets.Checkbox(True,description="CheckboxWidget") toggle = widgets.RadioButtons(options=['red','green','blue'], description="RadioButtonsWidget") @interact(text=text_widget, slider=slider_widget,check=check_widget,categories=toggle) def react(text, slider,check,categories): print(text,slider*10,check,categories) # + b = widgets.Button(description="Update") checkbox = widgets.Checkbox(description="CheckboxWidget") tab1_children = [b, checkbox, widgets.Dropdown(options=['A','B'], description="DropdownWidget"), widgets.RadioButtons(options=['A','B'], description="RadioButtonsWidget"), widgets.Select(options=['A','B'], description="SelectWidget"), 
widgets.Text(description="TextWidget"), widgets.Textarea(description="TextareaWidget"), widgets.ToggleButton(description="ToggleButtonWidget"), widgets.ToggleButtons(options=["Value 1", "Value2"], description="ToggleButtonsWidget"), ] tab2_children = [widgets.BoundedFloatText(description="BoundedFloatTextWidget"), widgets.BoundedIntText(description="BoundedIntTextWidget"), widgets.FloatSlider(description="FloatSliderWidget"), widgets.FloatText(description="FloatTextWidget"), widgets.IntSlider(description="IntSliderWidget"), widgets.IntText(description="IntTextWidget"), ] tab1 = widgets.Box(children=tab1_children) tab2 = widgets.Box(children=tab2_children) i = widgets.Accordion(children=[tab1, tab2]) i.set_title(0,"Basic Widgets") i.set_title(1,"Numbers Input") from IPython.display import display def button_clicked(bb): print(checkbox.value) #TODO update plot b.on_click(button_clicked) display(i) # - # ## TASK # > Convert the static plot from before into an interactive one where you can slide through the year. # ## Next # # [Machine Learning using Scikit Learn](04_MachineLearning.ipynb)
03_Plotting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} id="7tdjjQsq6PYW" # # # # Multiple Linear Regression # # # ## Objectives # # After completing this lab you will be able to: # # * Use scikit-learn to implement Multiple Linear Regression # * Create a model, train it, test it and use the model # # + [markdown] id="s7jJLxaB6PYY" # <h1>Table of contents</h1> # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <ol> # <li><a href="https://#understanding-data">Understanding the Data</a></li> # <li><a href="https://#reading_data">Reading the Data in</a></li> # <li><a href="https://#multiple_regression_model">Multiple Regression Model</a></li> # <li><a href="https://#prediction">Prediction</a></li> # <li><a href="https://#practice">Practice</a></li> # </ol> # </div> # <br> # <hr> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} id="5KBbCR5a6PYZ" # ### Importing Needed packages # # + button=false new_sheet=false run_control={"read_only": false} id="NFfneWRO6PYa" import matplotlib.pyplot as plt import pandas as pd import pylab as pl import numpy as np # %matplotlib inline # + [markdown] button=false new_sheet=false run_control={"read_only": false} id="RW_X1IN96PYb" # ### Downloading Data # # To download the data, we will use !wget to download it from IBM Object Storage. 
#
# + button=false new_sheet=false run_control={"read_only": false} id="ywz7Z0Gl6PYb"
# Download the dataset once into the working directory (Jupyter shell escape).
# !wget -O FuelConsumption.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%202/data/FuelConsumptionCo2.csv

# + [markdown] id="UDpcw8mn6PYc"
#

# + [markdown] button=false new_sheet=false run_control={"read_only": false} id="Vq7ub_XE6PYc"
# <h2 id="understanding_data">Understanding the Data</h2>
#
# ### `FuelConsumption.csv`:
#
# We have downloaded a fuel consumption dataset, **`FuelConsumption.csv`**, which contains model-specific fuel consumption ratings and estimated carbon dioxide emissions for new light-duty vehicles for retail sale in Canada. [Dataset source](http://open.canada.ca/data/en/dataset/98f1a129-f628-4ce4-b24d-6f16bf24dd64?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01)
#
# * **MODELYEAR** e.g. 2014
# * **MAKE** e.g. Acura
# * **MODEL** e.g. ILX
# * **VEHICLE CLASS** e.g. SUV
# * **ENGINE SIZE** e.g. 4.7
# * **CYLINDERS** e.g. 6
# * **TRANSMISSION** e.g. A6
# * **FUELTYPE** e.g. z
# * **FUEL CONSUMPTION in CITY(L/100 km)** e.g. 9.9
# * **FUEL CONSUMPTION in HWY (L/100 km)** e.g. 8.9
# * **FUEL CONSUMPTION COMB (L/100 km)** e.g. 9.2
# * **CO2 EMISSIONS (g/km)** e.g. 182 --> low --> 0
#

# + [markdown] button=false new_sheet=false run_control={"read_only": false} id="e_v6rmm66PYd"
# <h2 id="reading_data">Reading the data in</h2>
#

# + button=false new_sheet=false run_control={"read_only": false} id="RuigjyO56PYf" outputId="3956ad2e-abe6-46d8-8954-c0f5477ec27b"
# Load the downloaded CSV into a pandas DataFrame.
df = pd.read_csv("FuelConsumption.csv")

# take a look at the dataset
df.head()

# + [markdown] id="msdL21J86PYh"
# Let's select some features that we want to use for regression.
#

# + button=false new_sheet=false run_control={"read_only": false} id="Omrjwcva6PYi" outputId="5abc2316-6a50-4601-8436-0ce050c419a3"
# Keep only the candidate numeric predictors plus the target (CO2EMISSIONS).
cdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY','FUELCONSUMPTION_COMB','CO2EMISSIONS']]
cdf.head(9)

# + [markdown] id="dImZhtxA6PYi"
# Let's plot Emission values with respect to Engine size:
#

# + button=false new_sheet=false run_control={"read_only": false} id="_71qt5vI6PYj" outputId="b8913f2f-a7cd-4b66-8b14-bf1bb46ab8cf"
plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()

# + [markdown] button=false new_sheet=false run_control={"read_only": false} id="5YNrdxnk6PYj"
# #### Creating train and test dataset
#
# Train/Test Split involves splitting the dataset into training and testing sets respectively, which are mutually exclusive. After which, you train with the training set and test with the testing set.
# This will provide a more accurate evaluation on out-of-sample accuracy because the testing dataset is not part of the dataset that has been used to train the model. Therefore, it gives us a better understanding of how well our model generalizes on new data.
#
# We know the outcome of each data point in the testing dataset, making it great to test with! Since this data has not been used to train the model, the model has no knowledge of the outcome of these data points. So, in essence, it is truly an out-of-sample testing.
#
# Let's split our dataset into train and test sets. Around 80% of the entire dataset will be used for training and 20% for testing. We create a mask to select random rows using the **np.random.rand()** function:
#

# + button=false new_sheet=false run_control={"read_only": false} id="rbJenX826PYk"
# Boolean mask: ~80% True (train rows) / ~20% False (test rows).
# NOTE: the draw is unseeded, so the split differs between runs.
msk = np.random.rand(len(df)) < 0.8
train = cdf[msk]
test = cdf[~msk]

# + [markdown] button=false new_sheet=false run_control={"read_only": false} id="rv3LdVC_6PYk"
# #### Train data distribution
#

# + button=false new_sheet=false run_control={"read_only": false} id="h00F9ptR6PYl" outputId="13c299a9-f181-4b50-b14b-d56c70c4f64c"
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()

# + [markdown] button=false new_sheet=false run_control={"read_only": false} id="ofv3vxVD6PYl"
# <h2 id="multiple_regression_model">Multiple Regression Model</h2>
#

# + [markdown] id="OBxhVYSC6PYl"
# In reality, there are multiple variables that impact the Co2emission. When more than one independent variable is present, the process is called multiple linear regression. An example of multiple linear regression is predicting co2emission using the features FUELCONSUMPTION_COMB, EngineSize and Cylinders of cars. The good thing here is that multiple linear regression model is the extension of the simple linear regression model.
#

# + button=false new_sheet=false run_control={"read_only": false} id="mvShmM5F6PYm" outputId="7d2fe231-05b1-441f-c1f2-97f027ca3224"
# Fit an ordinary least-squares model on three predictors.
from sklearn import linear_model
regr = linear_model.LinearRegression()
x = np.asanyarray(train[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit (x, y)
# The coefficients
print ('Coefficients: ', regr.coef_)

# + [markdown] id="WgRDSUqX6PYm"
# As mentioned before, **Coefficient** and **Intercept** are the parameters of the fitted line.
# Given that it is a multiple linear regression model with 3 parameters and that the parameters are the intercept and coefficients of the hyperplane, sklearn can estimate them from our data. Scikit-learn uses plain Ordinary Least Squares method to solve this problem.
#
# #### Ordinary Least Squares (OLS)
#
# OLS is a method for estimating the unknown parameters in a linear regression model. OLS chooses the parameters of a linear function of a set of explanatory variables by minimizing the sum of the squares of the differences between the target dependent variable and those predicted by the linear function. In other words, it tries to minimize the sum of squared errors (SSE) or mean squared error (MSE) between the target variable (y) and our predicted output ($\hat{y}$) over all samples in the dataset.
#
# OLS can find the best parameters using one of the following methods:
#
# * Solving the model parameters analytically using closed-form equations
# * Using an optimization algorithm (Gradient Descent, Stochastic Gradient Descent, Newton’s Method, etc.)
#

# + [markdown] id="7NTM0BrF6PYp"
# <h2 id="prediction">Prediction</h2>
#

# + button=false new_sheet=false run_control={"read_only": false} id="qH19XGFJ6PYq" outputId="71172272-c198-4b5f-e5e8-9179ef0002d4"
# Evaluate on the held-out rows: mean squared error of the predictions and
# sklearn's R^2 ("variance") score.
y_hat= regr.predict(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
x = np.asanyarray(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y = np.asanyarray(test[['CO2EMISSIONS']])
print("Residual sum of squares: %.2f" % np.mean((y_hat - y) ** 2))

# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(x, y))

# + [markdown] id="wjo1O-C36PYr"
# **Explained variance regression score:**\
# Let $\hat{y}$ be the estimated target output, y the corresponding (correct) target output, and Var be the Variance (the square of the standard deviation). Then the explained variance is estimated as follows:
#
# $\texttt{explainedVariance}(y, \hat{y}) = 1 - \frac{Var{ y - \hat{y}}}{Var{y}}$\
# The best possible score is 1.0, the lower values are worse.
#

# + [markdown] id="_NKDTME66PYr"
# <h2 id="practice">Practice</h2>
# Try to use a multiple linear regression with the same dataset, but this time use __FUEL CONSUMPTION in CITY__ and
# __FUEL CONSUMPTION in HWY__ instead of FUELCONSUMPTION_COMB. Does it result in better accuracy?
#

# + id="MuMsX-sa6PYr" outputId="e7215edb-80ea-4b2d-8390-3be502aeae0e"
# write your code here
from sklearn import linear_model
regr = linear_model.LinearRegression()
x = np.asanyarray(train[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit (x, y)
# The coefficients
print ('Coefficients: ', regr.coef_)

# + id="4r_w5vJj6PYs" outputId="aa1054ab-9489-49f0-f982-69093d6877f3"
y_hat = regr.predict(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
x = np.asanyarray(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
y = np.asanyarray(test[['CO2EMISSIONS']])
print("Residual sum of squares: %.2f" % np.mean((y_hat - y) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(x, y))

# + [markdown] id="FbJ7jr1k6PYt"
# <details><summary>Click here for the solution</summary>
#
# ```python
# regr = linear_model.LinearRegression()
# x = np.asanyarray(train[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
# y = np.asanyarray(train[['CO2EMISSIONS']])
# regr.fit (x, y)
# print ('Coefficients: ', regr.coef_)
# y_= regr.predict(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
# x = np.asanyarray(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
# y = np.asanyarray(test[['CO2EMISSIONS']])
# print("Residual sum of squares: %.2f"% np.mean((y_ - y) ** 2))
# print('Variance score: %.2f' % regr.score(x, y))
#
# ```
#
# </details>
#
Mulitple-Linear-Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from exFinder.IexFinder import * from IPython.display import HTML from pandas.io.formats.style import Styler EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl") # - def exFinder(f): stsf = Xtreme() data = pd.DataFrame() data['stat Point'], data['Eigenvalues'], data['Type'] = stsf.exFinder(f) return HTML(EasyStyler(data).render(table_title=r"$f(x,y)=%s, H_f=%s$"%(sym.latex(f), sym.latex(stsf.H(f))))) # # Symbolic Calculation of Extreme Values # ### <NAME> ([LinkedIn](https://www.linkedin.com/in/zoufine-lauer-bare-14677a77), [OrcidID](https://orcid.org/0000-0002-7083-6909)) # # The ```exFinder``` package, provides methods that calculate the extreme values of multivariate functions $$f:\mathbb{R}^2\to\mathbb{R}$$ symbolically. It is based mathematically on ```SymPy``` and ```NumPy```. # + f = (x-2)**4+(x-2*y)**2 exFinder(f) # + f=y**2*(x-1)+x**2*(x+1) exFinder(f) # + f = sym.exp(-(x**2+y**2)) exFinder(f)
iexfinder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Revealing Ferroelectric Switching Character Using Deep Recurrent Neural Networks
# <NAME><sup>1,2,3*</sup>, <NAME><sup>4</sup>, <NAME><sup>1</sup>, <NAME><sup>5</sup>, <NAME><sup>1</sup>, <NAME><sup>6</sup>, <NAME><sup>7</sup>, <NAME><sup>8</sup>, <NAME><sup>8</sup>, <NAME><sup>6</sup>, <NAME><sup>4</sup>, and <NAME><sup>1,2*</sup>
#
# <sup>1</sup> Department of Materials Science and Engineering, University of California, Berkeley, Berkeley, CA 94720, USA
# <sup>2</sup> Materials Sciences Division, Lawrence Berkeley National Laboratory, Berkeley, CA 94720, USA
# <sup>3</sup> Department of Materials Science and Engineering, Lehigh University, Bethlehem, PA 18015, USA
# <sup>4</sup> Department of Astronomy, University of California, Berkeley, Berkeley, CA 94720, USA
# <sup>5</sup> Berkeley Institute of Data Science, University of California, Berkeley, Berkeley, CA 94720, USA
# <sup>6</sup> Department of Materials Science and Engineering, University Texas at Arlington, Arlington, TX 76019, USA
# <sup>7</sup> Department of Materials Science and Engineering and Materials Research Institute, The Pennsylvania State University, University Park, PA 16802-5006, USA
# <sup>8</sup> Center for Nanophase Materials Sciences, Oak Ridge National Laboratory, Oak Ridge, TN 37830, USA
# *<EMAIL>, <EMAIL>
#
# Keywords: ferroelectric, switching, domains, scanning-probe microscopy, neural network
#
# # Table of Contents
# * [Revealing Ferroelectric Switching Character Using Deep Recurrent Neural Networks](#Revealing-Ferroelectric-Switching-Character-Using-Deep-Recurrent-Neural-Networks)
# * [Importing Packages](#Importing-Packages)
# * [Settings](#Settings)
# * [Export Figure Settings](#Export-Figure-Settings)
# * [Plotting Format](#Plotting-Format)
# * [Folders](#Folders)
# * [Loads the Data](#Loads-the-Data)
# * [Cleans the Raw Data](#Cleans-the-Raw-Data)
# * [Visualize Cleaned Data](#Visualize-Cleaned-Data)
# * [Structural Characterization](#Structural-Characterization)
# * [Topography and Piezoresponse](#Topography-and-Piezoresponse)
# * [Reciprocal Space Maps of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with Hierarchical Domain Structures](#Reciprocal-Space-Maps-of-PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$-with-Hierarchical-Domain-Structures)
# * [Initial PFM images](#Initial-PFM-images)
# * [Final PFM Images](#Final-PFM-Images)
# * [Band Excitation Piezoresponse Force Microscopy - Basic Analysis](#Band-Excitation-Piezoresponse-Force-Microscopy---Basic-Analysis)
# * [Exports all images](#Exports-all-images)
# * [Export Images for Movie](#Export-Images-for-Movie)
# * [Plot Raw Band Excitation Spectra](#Plot-Raw-Band-Excitation-Spectra)
# * [Loop Fitting Results](#Loop-Fitting-Results)
# * [Classical Machine Learning Approaches](#Classical-Machine-Learning-Approaches)
# * [Principal Component Analysis](#Principal-Component-Analysis)
# * [Piezoresponse](#Piezoresponse)
# * [Amplitude](#Amplitude)
# * [Phase](#Phase)
# * [Resonance Frequency](#Resonance-Frequency)
# * [Quality Factor](#Quality-Factor)
# * [Non-Negative Matrix Factorization](#Non-Negative-Matrix-Factorization)
# * [Piezoresponse](#Piezoresponse)
# * [Amplitude](#Amplitude)
# * [Phase](#Phase)
# * [Resonance Frequency](#Resonance-Frequency)
# * [Quality Factor](#Quality-Factor)
# * [Clustering](#Clustering)
# * [Piezoresponse](#Piezoresponse)
# * [Amplitude](#Amplitude)
# * [Phase](#Phase)
# * [Resonance Frequency](#Resonance-Frequency)
# * [Quality Factor](#Quality-Factor)
# * [PCA + Clustering](#PCA-+-Clustering)
# * [Piezoresponse](#Piezoresponse)
# * [Amplitude](#Amplitude)
# * [Phase](#Phase)
# * [Resonance](#Resonance)
# * [Quality Factor](#Quality-Factor)
# * [Deep Learning Long Short-Term Memory Reccurent Neural Network Autoencoder](#Deep-Learning-Long-Short-Term-Memory-Reccurent-Neural-Network-Autoencoder)
# * [Piezoresponse](#Piezoresponse)
# * [Building the model](#Building-the-model)
# * [Train the model](#Train-the-model)
# * [Loads Pre-Trained Model](#Loads-Pre-Trained-Model)
# * [Model Validation](#Model-Validation)
# * [Validation Loss](#Validation-Loss)
# * [Training Results](#Training-Results)
# * [Low Dimensional Layer](#Low-Dimensional-Layer)
# * [Plot Embedding and Line Trace](#Plot-Embedding-and-Line-Trace)
# * [Exports Training Images](#Exports-Training-Images)
# * [Make Generator Movie](#Make-Generator-Movie)
# * [Plots Generator Results](#Plots-Generator-Results)
# * [Resonance](#Resonance)
# * [Building the model](#Building-the-model)
# * [Train the model](#Train-the-model)
# * [Loads Pre-Trained Model](#Loads-Pre-Trained-Model)
# * [Model Validation](#Model-Validation)
# * [Validation Loss](#Validation-Loss)
# * [Plot Embedding and Line Trace](#Plot-Embedding-and-Line-Trace)
# * [Exports Training Images](#Exports-Training-Images)
# * [Make Generator Movie](#Make-Generator-Movie)
# * [Autoencoder Generator](#Autoencoder-Generator)
# * [Phase Field](#Phase-Field)
#

# + [markdown] format="row"
# # Importing Packages
# -

# Install the pinned analysis/plotting dependencies (Jupyter shell escapes).
# !pip install -U moviepy keras tensorflow natsort tqdm scikit_image scikit_learn scipy

# !pip install pillow==4.2.1

import imageio
# Fetch the ffmpeg binary imageio needs for writing movies.
imageio.plugins.ffmpeg.download()

# # Special Codes for Collaboratory
# ## Provides access to google drive

# +
# if running on collaboratory set = True
collaboratory = True

if collaboratory:
    from google.colab import drive
    drive.mount('/content/drive')
else:
    print('Running on local systems, if running on collaboratory please change above')
# -

# cd drive/My\ Drive

import os
# Clone the analysis repository the first time; afterwards just pull updates.
if os.path.exists("./Revealing-Ferroelectric-Switching-Character-Using-Deep-Recurrent-Neural-Networks"):
    pass
else:
    # !git clone https://github.com/jagar2/Revealing-Ferroelectric-Switching-Character-Using-Deep-Recurrent-Neural-Networks.git

# cd Revealing-Ferroelectric-Switching-Character-Using-Deep-Recurrent-Neural-Networks
# !git pull

# +
# imports useful packages
import warnings
warnings.filterwarnings('ignore')
import imp
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pyplot as plt
import codes.analysis.rnn as rnn
import codes.util as util
import codes.analysis.machine_learning as ml
import codes.analysis as an
import codes.processing as p
import codes.viz as viz
import codes.util.input_output as io_transfer
from sklearn.decomposition import NMF
from scipy import io
import numpy as np
import os
import os.path

# loads the custom graphing format
viz.format.custom_plt_format()
plt.style.use('seaborn-white')
# -

# # Folders

# builds folders where the data will be saved
folder_structure = util.file.make_folder(
    './structure')
folder_BE = util.file.make_folder(
    './Band_Excitation')
folder_BE_Movie_files = util.file.make_folder(
    folder_BE + '/BE_Movie_Files')
folder_BE_all_images = util.file.make_folder(
    folder_BE + '/BE_all_images')
folder_BE_spectra = util.file.make_folder(
    folder_BE + '/BE_spectra')
folder_BE_cleaned_spectra = util.file.make_folder(
    folder_BE + '/cleaned_spectra')
folder_pca = util.file.make_folder(
    './pca')
folder_nmf = util.file.make_folder(
    './nmf')
folder_clustering = util.file.make_folder('./clustering')
folder_pca_clustering = util.file.make_folder(
    './pca_clustering')
folder_piezoresponse_autoencoder = util.file.make_folder(
    './piezoresponse_autoencoder')
folder_resonance_autoencoder = util.file.make_folder(
    './resonance_autoencoder')
folder_piezoresponse_autoencoder_movie = util.file.make_folder(
    folder_piezoresponse_autoencoder + '/movie')
folder_piezoresponse_autoencoder_training_movie = util.file.make_folder(
    folder_piezoresponse_autoencoder + '/training_movie')
folder_resonance_autoencoder_movie = util.file.make_folder(
    folder_resonance_autoencoder + '/movie')
folder_resonance_autoencoder_training_movie = util.file.make_folder(
    folder_resonance_autoencoder + '/training_movie')
folder_phase_field = util.file.make_folder(
    './Phase_Field')

# # Download Data

# +
# Downloading data for Phase Field simulations and full training data
# note these are big files >50 gb
download_data = False

url = 'https://zenodo.org/record/1482091/files/Phase_field.zip?download=1'
filename = 'phase_field.zip'
save_path = './Raw_Data/Phase_Field/'
io_transfer.download_and_unzip(filename, url, save_path, download_data)

url = 'https://zenodo.org/record/1482091/files/Trained_models.zip?download=1'
filename = 'train_model_zip.zip'
save_path = './Trained Models/'
io_transfer.download_and_unzip(filename, url, save_path, download_data)
# -

# ## Settings
# ### Export Figure Settings

# Sets what object to export
printing = {
    # exports eps vector graphics (note these files can be large)
    'EPS': False,
    # exports png files
    'PNG': False,
    # prints image series (note this can take some time)
    'all_figures': False,
    # generates movies (note this can take some time)
    'movies': False,
    # resolution of the images
    'dpi': 300}

# ### Plotting Format

# sets the plotting format
plot_format = {
    # adds scalebar to image
    'add_scalebar': True,
    # sets the dimensions for the scalebar [(size of image),(size of scalebar)]
    'scalebar': [2000, 500],
    # selects if the image will be rotated
    'rotation': True,
    # selects the rotation angle of the image
    'angle': 60.46,
    # sets the fraction of the image to crop
    'frac_rm': 0.17765042979942694,
    # sets the resolution of the image
    'dpi': 300,
    # sets the default colormap
    'color_map': 'viridis',
    # sets if color bars should be added
    'color_bars': True}

# # Loads the Data

# +
# imports the raw band excitation data
imported = {'data': io.matlab.loadmat('./Raw_Data/Data.mat'),
            'validation_data': io.matlab.loadmat('Raw_Data/loop_1.mat')}

# extracts the important information from the raw data
raw = {'voltage': imported['data']['Voltagedata_mixed'],
       'piezoresponse': imported['data']['Loopdata_mixed'],
       'amplitude': imported['data']['OutA2_mixed'],
       'phase': imported['data']['OutPhi1_mixed'],
       'resonance': imported['data']['Outw2_mixed'],
       'quality_factor': imported['data']['OutQ2_mixed'],
       'val_piezoresponse': imported['validation_data']['piezo_1'],
       'val_resonance': imported['validation_data']['resonance_loop_1']}
# -

# ## Cleans the Raw Data

# +
# adds a max min filter on the data to remove bad points
p.filters.range_filter(raw['resonance'], [1300, 1340])
p.filters.range_filter(raw['val_resonance'], [1300, 1340])

# interpolates data that is non-real. This happens when the SHO fit fails
interpolated = {'voltage': raw['voltage'],
                'piezoresponse': p.filters.clean_interpolate(raw['piezoresponse'], 'linear').reshape(-1, raw['piezoresponse'].shape[2]),
                'amplitude': p.filters.clean_interpolate(raw['amplitude'], 'linear').reshape(-1, raw['amplitude'].shape[2]),
                'phase': p.filters.clean_interpolate(raw['phase'], 'linear').reshape(-1, raw['phase'].shape[2]),
                'resonance': p.filters.clean_interpolate(raw['resonance'], 'linear').reshape(-1, raw['resonance'].shape[2]),
                'quality_factor': p.filters.clean_interpolate(raw['quality_factor'], 'linear').reshape(-1, raw['quality_factor'].shape[2]),
                'val_piezoresponse': p.filters.clean_interpolate(raw['val_piezoresponse'], 'linear').reshape(-1, raw['val_piezoresponse'].shape[2]),
                'val_resonance': p.filters.clean_interpolate(raw['val_resonance'], 'linear').reshape(-1, raw['val_resonance'].shape[2])}

# Uses Savitzky-Golay filter to remove outlier points
sg_filtered = {'voltage': raw['voltage'],
               'piezoresponse': p.filters.savgol(interpolated['piezoresponse'], fit_type='linear'),
               'amplitude': p.filters.savgol(interpolated['amplitude'], fit_type='linear'),
               'phase': p.filters.savgol(interpolated['phase'], fit_type='linear'),
               'resonance': p.filters.savgol(interpolated['resonance'], fit_type='linear'),
               'quality_factor': p.filters.savgol(interpolated['quality_factor'], fit_type='linear'),
               'val_piezoresponse': p.filters.savgol(interpolated['val_piezoresponse'], fit_type='linear'),
               'val_resonance': p.filters.savgol(interpolated['val_resonance'], fit_type='linear')}

# normalized the data. This is important for training Neural Networks
# (validation signals are normalized against the training-signal statistics)
normalized = {'voltage': raw['voltage'],
              'piezoresponse': p.filters.normalize(sg_filtered['piezoresponse']),
              'amplitude': p.filters.normalize(sg_filtered['amplitude']),
              'phase': p.filters.normalize(sg_filtered['phase']),
              'resonance': p.filters.normalize(sg_filtered['resonance']),
              'quality_factor': p.filters.normalize(sg_filtered['quality_factor']),
              'val_piezoresponse': p.filters.normalize(sg_filtered['val_piezoresponse'], sg_filtered['piezoresponse']),
              'val_resonance': p.filters.normalize(sg_filtered['val_resonance'], sg_filtered['resonance'])}

# stores information which helps in making pretty axes.
signal_info = {'voltage': dict(
    symbol='voltage',
    format_str='%3.d',
    units='Voltage (V)',
    y_lim=None,
    x_tick=np.linspace(-15, 15, 7),
    pca_range=None),
    'amplitude': dict(
    symbol='A',
    format_str='%.0e',
    units='Amplitude (Arb. U.)',
    y_lim=None,
    y_tick=[],
    pca_range=None),
    'phase': dict(
    symbol='Phi',
    format_str='%3.d',
    units='Phase (${^\circ}$)',
    y_lim=[-110, 110],
    y_tick=np.linspace(-90, 90, 5),
    pca_range=None),
    'resonance': dict(
    symbol='w',
    format_str='%3.d',
    units='Resonance (kHz)',
    y_lim=[1326, 1329],
    y_tick=np.linspace(1320, 1329, 4),
    pca_range=None),
    'quality_factor': dict(
    symbol='Q',
    format_str='%3.f',
    units='Quality Factor (Arb. U.)',
    y_lim=[210, 310],
    y_tick=np.linspace(215, 310, 5),
    pca_range=None),
    'piezoresponse': dict(
    symbol='Piezoresponse',
    format_str='%.0e',
    units='Piezoresponse (Arb. U.)',
    y_lim=None,
    y_tick=[],
    pca_range=[-0.29, .29])
}

# builds a single dictionary to hold all the data
data = {'raw': raw,
        'interpolated': interpolated,
        'sg_filtered': sg_filtered,
        'normalized': normalized,
        'signal_info': signal_info}
# -

# ## Visualize Cleaned Data

# +
# Selects a random index to plot
#i = np.random.randint(3600)

# if user wants to show a specific point
i = 100

# Plots the raw data (black) and cleaned data (red)
viz.plot.cleaned_data(data, i, printing, folder_BE_cleaned_spectra)
# -

# **Figure J1 | Images showing preprocessing of data. a,** Piezoresponse **b,** amplitude **c,** phase **d,** resonance frequency **e,** quality factor. Raw data is shown in black, processed data shown in red.

# # Structural Characterization
# ## Topography and Piezoresponse

# +
# Description and properties of the plots
signals = {'Topography': dict(
    c_lim=[],
    data_loc='topo_mixed'),
    'Large-Periodicity Line Trace': dict(
    data_loc='topo_ca_caca_mixed',
    x_lim=[0, 2],
    y_lim=[-4, 2],
    shift=0),
    'Small-Periodicity Line Trace': dict(
    data_loc='topo_mixed_caca',
    x_lim=[0, .5],
    y_lim=[0, 2],
    shift=0.8),
    'Vertical Amplitude': dict(
    c_lim=[0, 4.5e-10],
    data_loc='Vert_Amp_mixed'),
    'Vertical Phase': dict(
    c_lim=[],
    data_loc='vert_phase_mixed'),
    'Lateral Amplitude': dict(
    c_lim=[0, .8e-11],
    data_loc='lateral_amp_mixed'),
    'Lateral Phase': dict(
    c_lim=[],
    data_loc='lateral_phase_mixed')
}

# plots the PFM images and line traces across those images.
viz.plot.pfm_w_line_trace(signals, imported, printing, folder_structure)
# -

# **Figure J2 | Piezoresponse force microscopy images of 400 nm thick PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ heterostructures supported on NdScO${_3}$ (110). a,** Topography **b,** Line trace indicating the large scale sawtooth-like topography between the *c/*a/*c/*a and *a${_1}$/*a${_2}$/*a${_1}$/*a${_2}$ domain regions. **c,** Line trace indicating the small scale sawtooth-like topography within the c/a/c/a domain bands. Images of piezoresponse vertical **d,** amplitude and **e,** phase and lateral **f,** amplitude and **g,** phase.

# ## Reciprocal Space Maps of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with Hierarchical Domain Structures

viz.plot.rsm(imported, printing, folder_structure)

# **Figure J3 |** Symmetric reciprocal space map of 400 nm thick PbZr${_{0.8}}$Ti${_{0.2}}$O${_{3}}$ heterostructures supported on NdScO${_3}$ (110). Map obtained around the substrate 220 diffraction condition.

# ## Initial PFM images

# +
# (User) Sets the colorscale of [topography = (initial [-3e-9,3e-9]),
# amplitude (initial [.5e-11,6.5e-11]),
# phase (initial [40,260])]
signals = {'Topography': dict(
    c_lim=[-3e-9, 3e-9],
    data_loc='HeightOriginal'),
    'Amplitude': dict(
    c_lim=[.5e-11, 6.5e-11],
    data_loc='AmpOriginal'),
    'Phase': dict(
    c_lim=[40, 260],
    data_loc='PhaseOriginal')
}

viz.plot.pfm(signals, imported, printing, folder_structure, 'Inital PFM')
# -

# **Figure J4 | Piezoresponse force microscopy images prior to band excitation piezoresponse force microscopy switching.** **a,** topographic and **b,** vertical **c,** phase piezoresponse force microscopy images of as grown 400 nm thick PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ heterostructure supported on NdScO${_{3}}$ (110).
# ## Final PFM Images

# +
# (User) Sets the colorscale of [topography = (initial [-3e-9,3e-9]),
# amplitude (initial [.2e-10,1.5e-10]),
# phase (initial [50,90])]
signals = {'Topography': dict(
    c_lim=[-2e-9, 2e-9],
    data_loc='HeightFinal'),
    'Amplitude': dict(
    c_lim=[.2e-10, 1.5e-10],
    data_loc='AmpFinal'),
    'Phase': dict(
    c_lim=[50, 90],
    data_loc='PhaseFinal')
}

viz.plot.pfm(signals, imported, printing, folder_structure, 'Final PFM')
# -

# **Figure J5 | Piezoresponse force microscopy images following band excitation piezoresponse force microscopy switching.** **a,** topographic and **b,** vertical **c,** phase piezoresponse force microscopy images of as grown 400 nm thick PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ heterostructure supported on NdScO${_{3}}$ (110).

# # Band Excitation Piezoresponse Force Microscopy - Basic Analysis
# ## Exports all images

# Checks if user selected to export all figures
if printing['all_figures']:

    # (User) Sets the colorscale {Initial Amplitude = [0.0020e-3, 0.1490e-3]; Phase = [-265,-30];
    # Resonance = [1317,1330]; Quality Factor = [175,270]}
    signal_clim = {('Amplitude', 'A'): [0.0020e-3, 0.1490e-3],
                   ('Phase', 'Phi'): [-265, -30],
                   ('Resonance', 'w'): [1317, 1330],
                   ('Quality Factor', 'Q'): [175, 270],
                   }

    # prints all images from the switching studies
    viz.plot.band_excitation(imported['data'], signal_clim, plot_format,
                             printing, folder_=folder_BE_all_images)

# ## Export Images for Movie

if printing['movies']:

    # (User) Sets the colorscale {Initial Amplitude = [0.0020e-3, 0.1490e-3]; Phase = [-265,-30];
    # Resonance = [1317,1330]; Quality Factor = [175,270]}
    signal_clim = {('Amplitude', 'A', '%.0e'): [0.0020e-3, 0.1490e-3],
                   ('Phase', 'Phi', '%.0d'): [-265, -30],
                   ('Resonance', 'w', '%.0d'): [1317, 1330],
                   ('Quality Factor', 'Q', '%.0d'): [175, 270],
                   }

    # creates the images used to make the movie of the switching studies
    viz.plot.band_excitation_movie(imported, signal_clim, plot_format,
                                   printing, folder = folder_BE_Movie_files)

# creates the movie of the switching studies
if printing['movies']:
    util.file.make_movie('BE_Switching', folder_BE_Movie_files, folder_BE,
                         'png', 4, output_format='mp4')

# ## Plot Raw Band Excitation Spectra

# +
# (User) selects index (index used in main manuscript as example [30,30], cycle 2)
x = 30
y = 30
cycle = 2

# Sets the information for plotting. (User) can adjust scales.
signal_clim = {'Amplitude': dict(
    symbol='A',
    format_str='%.0e',
    units='(Arb. U.)',
    y_lim=[],
    y_tick=[]),
    'Phase': dict(
    symbol='Phi',
    format_str='%3.d',
    units='(${^\circ}$)',
    y_lim=[-110, 110],
    y_tick=np.linspace(-90, 90, 5)),
    'Resonance': dict(
    symbol='w',
    format_str='%3.d',
    units='(kHz)',
    y_lim=[1326, 1329],
    y_tick=np.linspace(1320, 1329, 4)),
    'Quality Factor': dict(
    symbol='Q',
    format_str='%3.f',
    units='',
    y_lim=[210, 310],
    y_tick=np.linspace(215, 310, 5)),
    'Piezoresponse': dict(
    symbol='Piezoresponse',
    format_str='%.0e',
    units='(Arb. U.)',
    y_lim=[],
    y_tick=[])
}

# plots the raw BE spectra
viz.plot.band_excitation_spectra(x, y, cycle, imported['data'],
                                 signal_clim, printing, folder_BE_spectra)
# -

# **Figure J6 |** Example raw piezoresponse loops acquired during band excitation piezoresponse spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures. Showing **a,** amplitude, **b,** phase, **c,** resonance, **d,** quality factor, and **e,** piezoresponse (Acos${\phi}$) loop.

# ## Loop Fitting Results

# +
# Sets the information for plotting. (User) can adjust scales.
signal_clim = {'a1': dict( label='a${_1}$', data_loc='a1_mixed', format_str='%.1e', c_lim=[-1.5e-4, 0]), 'a2': dict( label='a${_2}$', data_loc='a2_mixed', format_str='%.1e', c_lim=[0, 1.5e-4]), 'a3': dict( label='a${_3}$', data_loc='a3_mixed', format_str='%.1e', c_lim=[-1e-6, 3e-6]), 'b1': dict( label='b${_1}$', data_loc='b1_mixed', format_str='%.1f', c_lim=[0, 10]), 'b2': dict( label='b${_2}$', data_loc='b2_mixed', format_str='%.1f', c_lim=[0, 50]), 'b3': dict( label='b${_3}$', data_loc='b3_mixed', format_str='%.1f', c_lim=[0, 12]), 'b4': dict( label='b${_4}$', data_loc='b4_mixed', format_str='%.1f', c_lim=[0, 25]), 'b5': dict( label='b${_5}$', data_loc='b5_mixed', format_str='%.1f', c_lim=[0, 12]), 'b6': dict( label='b${_6}$', data_loc='b6_mixed', format_str='%.1f', c_lim=[0, 12]), 'b7': dict( label='b${_7}$', data_loc='b7_mixed', format_str='%.1f', c_lim=[-15, 15]), 'b8': dict( label='b${_8}$', data_loc='b8_mixed', format_str='%.1f', c_lim=[-15, 15]), 'Loop Area': dict( label='Raw Area', data_loc='Acosarea_mixed', format_str='%.1e', c_lim=[5e-4, 4e-3]), 'Fitted Loop Area': dict( label='Fitted Area', data_loc='Acosareafit_mixed', format_str='%.1e', c_lim=[5e-4, 4e-3]), 'Raw/Fitted Loop Difference': dict( label='Raw/Fitted Diff.', data_loc='Acosareadif_mixed', format_str='%.1e', c_lim=[0, 1.5]), 'Raw Amplitude Centroid': dict( label='Raw Amp. Cent.', data_loc='AcoscentAc_mixed', format_str='%.1e', c_lim=[-2e-5, 2e-5]), 'Fitted Amplitude Centroid': dict( label='Fitted Amp. Cent.', data_loc='AcoscentAcfit_mixed', format_str='%.1e', c_lim=[-2e-5, 2e-5]), 'Raw Voltage Centroid': dict( label='Raw Volt. Cent.', data_loc='AcoscentV_mixed', format_str='%.1f', c_lim=[-1, 4]), 'Fitted Voltage Centroid': dict( label='Fitted Volt. 
Cent.', data_loc='AcoscentVfit_mixed', format_str='%.1f', c_lim=[-1, 4]), 'Loop Height': dict( label='Height', data_loc='Acosheight_mixed', format_str='%.1e', c_lim=[5e-5, 2.5e-4]), 'Loop Width': dict( label='Width', data_loc='Acoswidth_mixed', format_str='%.1f', c_lim=[12, 18]), 'Left Coercive field': dict( label='Left E${_c}$', data_loc='Al_mixed', format_str='%.1f', c_lim=[4, 11]), 'Right Coercive field': dict( label='Right E${_c}$', data_loc='Au_mixed', format_str='%.1f', c_lim=[4, 11]), 'Negative Nucleation Bias': dict( label='Neg. Nuc. Bias', data_loc='Acosnegnuc_mixed', format_str='%.1f', c_lim=[0, 6]), 'Positive Nucleation Bias': dict( label='Pos. Nuc. Bias', data_loc='Acosposnuc_mixed', format_str='%.1f', c_lim=[0, 6]), 'Loop Twist': dict( label='Twist', data_loc='Acostwist_mixed', format_str='%.1e', c_lim=[0, 2.5e-2]), 'Optimum Rotation Angle': dict( label='Opt. Rot. Angle', data_loc='optrotang_mixed', format_str='%.1f', c_lim=[235, 240]), 'Normalized Amplitude Centroid': dict( label='Norm. Amp. Cent.', data_loc='NormAcCent_mixed', format_str='%.1f', c_lim=[-15, 15]), 'Normalized Voltage Centroid': dict( label='Norm. Volt. Cent.', data_loc='NormVCent_mixed', format_str='%.1f', c_lim=[-10, 30])} viz.plot.loopfits(imported['data'], signal_clim, printing, folder_BE, plot_format) # - # **Figure J7 | Spatial maps of loop fitting parameters obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures. a,** a${_1}$ - represents the lowest piezoresponse amplitude. **b,** a${_2}$ - represents the highest piezoresponse value. **c,** a${_3}$ - Loop rotation as defined by tan${\delta}$. **d-g,** b${_{1-4}}$ - parameters specifying the curvature of the loop transitions. **h-i,** b${_{5-6}}$ - parameter specifying the rate of transitions between the curvatures of the loop. **j-k,** b${_{7-8}}$ - parameter specifying the voltage midpoint of the transitions. 
# **l-m,** Raw (fitted) loop area the area enclosed by the raw (fitted) loop, representative of the work of switching. **n,** Area differential, the absolute difference between the area enclosed by the raw and fitted loop. **o-p,** Raw (fitted) amplitude centroid the center of mass of the amplitude of the raw (fitted) piezoresponse loop. **q-r,** Raw (fitted) voltage centroid the center of mass of the raw (fitted) piezoresponse loop. **s,** Loop height the vertical height in amplitude of the piezoelectric hysteresis loop. **t,** Loop width in volts. **u-v,** Left/Right E${_c}$ negative/positive piezoelectric coercive fields. **w-x,** Negative/positive nucleation bias, representing the voltage where the piezoresponse has changed by 3% of the loop height. **y,** Loop twist, the twists in shape of the piezoelectric hysteresis loops. **z,** Optimum rotation angle, the optimum ${\phi}$ found which maximizes Acos${\phi}$. **aa-ab,** Loop height (width) normalized amplitude (voltage) centroids.

# # Classical Machine Learning Approaches

# ## Principal Component Analysis

# ### Piezoresponse

# dictionary holding every machine learning result computed below
machine_learning = {'pca': dict(),
                    'nmf': dict(),
                    'clustering': dict(),
                    'pca_clustering': dict()}

# +
# Computes the PCA of the piezoresponse
# second argument is the number of components to compute
machine_learning['pca']['piezoresponse'], _ = ml.pca(
    sg_filtered['piezoresponse'], 16)

# Plots the PCA results
viz.plot.pca_results(machine_learning['pca']['piezoresponse'], data,
                     signal_info, printing, folder_pca, plot_format,
                     'piezoresponse', filename='piezoresponse')
# -

# **Figure J9 | Principal component analysis the piezoresponse obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Amplitude

# +
# Computes the PCA of the amplitude
# second argument is the number of components to compute
machine_learning['pca']['amplitude'], _ = ml.pca(sg_filtered['amplitude'], 16)

# plots the pca results
viz.plot.pca_results(machine_learning['pca']['amplitude'], data,
                     signal_info, printing, folder_pca, plot_format,
                     'amplitude', filename='amplitude')
# -

# **Figure J10 | Principal component analysis the amplitude obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Phase

# +
# Computes the PCA of the phase
# second argument is the number of components to compute
machine_learning['pca']['phase'], _ = ml.pca(sg_filtered['phase'], 16)

# plots the pca results
viz.plot.pca_results(machine_learning['pca']['phase'], data,
                     signal_info, printing, folder_pca, plot_format,
                     'phase', filename='phase')
# -

# **Figure J11 | Principal component analysis the phase obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Resonance Frequency

# +
# Computes the PCA of the resonance frequency
# second argument is the number of components to compute
machine_learning['pca']['resonance'], _ = ml.pca(sg_filtered['resonance'], 16)

# plots the pca results
viz.plot.pca_results(machine_learning['pca']['resonance'], data,
                     signal_info, printing, folder_pca, plot_format,
                     'resonance', filename='resonance')
# -

# **Figure J12 | Principal component analysis the resonance frequency obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Quality Factor

# +
# Computes the PCA of the quality factor
# second argument is the number of components to compute
machine_learning['pca']['quality_factor'], _ = ml.pca(
    sg_filtered['quality_factor'], 16)

# plots the pca results
viz.plot.pca_results(machine_learning['pca']['quality_factor'], data,
                     signal_info, printing, folder_pca, plot_format,
                     'quality_factor', filename='quality_factor')
# -

# **Figure J13 | Principal component analysis the quality factor obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with
# hierarchical domain structures.**

# ## Non-Negative Matrix Factorization

# ### Piezoresponse

# +
# builds the model for NMF
# NOTE(review): `alpha` was deprecated in scikit-learn 1.0 and removed in 1.2
# (replaced by alpha_W/alpha_H) — confirm the pinned scikit-learn version.
model = NMF(n_components=4, init='random',
            random_state=0, alpha=1e-7, l1_ratio=1)

# computes the nmf
machine_learning['nmf']['piezoresponse'] = ml.nmf(
    model, data['sg_filtered']['piezoresponse'])

# plots the nmf results
viz.plot.NMF(data['raw']['voltage'], machine_learning['nmf']['piezoresponse'],
             printing, plot_format, signal_info['piezoresponse'],
             folder=folder_nmf, letter_labels=True, custom_order=[0, 2, 3, 1])
# -

# **Figure J14 | Non-negative matrix factorization of the piezoresponse obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Amplitude

# +
# builds the model for NMF
model = NMF(n_components=4, init='random',
            random_state=0, alpha=1e-7, l1_ratio=1)

# computes the nmf
machine_learning['nmf']['amplitude'] = ml.nmf(
    model, data['sg_filtered']['amplitude'])

# plots the nmf results
viz.plot.NMF(data['raw']['voltage'], machine_learning['nmf']['amplitude'],
             printing, plot_format, signal_info['amplitude'],
             folder=folder_nmf, letter_labels=True, custom_order=[0, 2, 3, 1])
# -

# **Figure J15 | Non-negative matrix factorization of the amplitude obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Phase

# +
# builds the model for NMF
model = NMF(n_components=4, init='random',
            random_state=0, alpha=1e-7, l1_ratio=1)

# computes the nmf
machine_learning['nmf']['phase'] = ml.nmf(model, data['sg_filtered']['phase'])

# plots the nmf results
viz.plot.NMF(data['raw']['voltage'], machine_learning['nmf']['phase'],
             printing, plot_format, signal_info['phase'],
             folder=folder_nmf, letter_labels=True, custom_order=[0, 2, 3, 1])
# -

# **Figure J16 | Non-negative matrix factorization of the phase obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Resonance Frequency

# +
# builds the model for NMF
model = NMF(n_components=4, init='random',
            random_state=0, alpha=1e-7, l1_ratio=1)

# computes the nmf
machine_learning['nmf']['resonance'] = ml.nmf(
    model, data['sg_filtered']['resonance'])

# plots the nmf
viz.plot.NMF(data['raw']['voltage'], machine_learning['nmf']['resonance'],
             printing, plot_format, signal_info['resonance'],
             folder=folder_nmf, letter_labels=True, custom_order=[0, 2, 3, 1])
# -

# **Figure J17 | Non-negative matrix factorization of the resonance frequency obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Quality Factor

# +
# builds the model for NMF
model = NMF(n_components=4, init='random',
            random_state=0, alpha=1e-7, l1_ratio=1)

# computes the nmf
machine_learning['nmf']['quality_factor'] = ml.nmf(
    model, data['sg_filtered']['quality_factor'])

# plots the nmf
viz.plot.NMF(data['raw']['voltage'], machine_learning['nmf']['quality_factor'],
             printing, plot_format, signal_info['quality_factor'],
             folder=folder_nmf, letter_labels=True, custom_order=[0, 2, 3, 1])
# -

# **Figure J18 | Non-negative matrix factorization of the quality factor obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ## Clustering

# +
# Sets the number of clusters in the divisive clustering
clustering = {'initial_clusters': 2,
              'c_clusters': 5,
              'a_clusters': 4}

# Sets the names of the maps
names = {('c/a-a${_1}$/a${_2}$', 'cluster_ca'),
         ('a${_1}$/a${_2}$', 'a_map'),
         ('c/a', 'c_map')}
# -

# ### Piezoresponse

# +
# clusters the piezoresponse curves
machine_learning['clustering']['piezoresponse'] = ml.k_means_clustering(
    data, 'piezoresponse', clustering, seed=42)

# plots the cluster maps
viz.plot.hierarchical_clustering(machine_learning['clustering']['piezoresponse'],
                                 names, plot_format)
# -

# **Figure J19 | Divisive clustering
# of the piezoresponse curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# +
# sets the y range for the plots
signal_info['piezoresponse']['y_lim'] = [-1.5e-4, 1.5e-4]

# plots the cluster maps and average hysteresis loops
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['clustering']['piezoresponse'],
                              plot_format, signal_info, 'piezoresponse',
                              printing, folder_clustering)
# -

# **Figure J20 | Divisive clustering of the piezoresponse curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Amplitude

# +
# clusters the amplitude curves
machine_learning['clustering']['amplitude'] = ml.k_means_clustering(
    data, 'amplitude', clustering, seed=42)

# plots the amplitude clustering maps
viz.plot.hierarchical_clustering(machine_learning['clustering']['amplitude'],
                                 names, plot_format)
# -

# **Figure J21 | Divisive clustering of the amplitude curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clustering map and average hysteresis loop
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['clustering']['amplitude'],
                              plot_format, signal_info, 'amplitude',
                              printing, folder_clustering)

# **Figure J22 | Divisive clustering of the amplitude curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Phase

# +
# clusters the phase loops
machine_learning['clustering']['phase'] = ml.k_means_clustering(
    data, 'phase', clustering, seed=42)

# plots the cluster maps
viz.plot.hierarchical_clustering(machine_learning['clustering']['phase'],
                                 names, plot_format)
# -

# **Figure J23 | Divisive clustering of the phase curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clustering map and average hysteresis loop
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['clustering']['phase'],
                              plot_format, signal_info, 'phase',
                              printing, folder_clustering)

# ### Resonance Frequency

# +
# clusters the resonance frequency
machine_learning['clustering']['resonance'] = ml.k_means_clustering(
    data, 'resonance', clustering, seed=42)

# plots the resonance frequency maps
viz.plot.hierarchical_clustering(machine_learning['clustering']['resonance'],
                                 names, plot_format)
# -

# **Figure J24 | Divisive clustering of the resonance frequency curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clusters with average hysteresis loops
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['clustering']['resonance'],
                              plot_format, signal_info, 'resonance',
                              printing, folder_clustering)

# **Figure J25 | Divisive clustering of the resonance frequency curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Quality Factor

# +
# clusters the quality factor curves
machine_learning['clustering']['quality_factor'] = ml.k_means_clustering(
    data, 'quality_factor', clustering, seed=42)

# plots the cluster maps
viz.plot.hierarchical_clustering(machine_learning['clustering']['quality_factor'],
                                 names, plot_format)
# -

# **Figure J26 | Divisive clustering of the quality factor curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the cluster maps and average hysteresis loops
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['clustering']['quality_factor'],
                              plot_format, signal_info, 'quality_factor',
                              printing, folder_clustering)

# **Figure J27 | Divisive clustering of the quality factor curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ## PCA + Clustering

# ### Piezoresponse

# +
signal = 'piezoresponse'

# computes the PCA
eigenvalues = ml.weights_as_embeddings(machine_learning['pca'][signal],
                                       data['sg_filtered'][signal])

# clusters the PCA results
machine_learning['pca_clustering'][signal] = ml.k_means_clustering(
    data, signal, clustering, seed=42, pca_in=eigenvalues)

# plots the cluster maps
viz.plot.hierarchical_clustering(machine_learning['pca_clustering'][signal],
                                 names, plot_format)
# -

# **Figure J28 | Divisive clustering of the first 16 principal components of the piezoresponse curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clustering results and average hysteresis curves
# FIX(review): this section plots the PCA+clustering result (see the caption
# and folder_pca_clustering); the original passed the plain 'clustering'
# result here, which re-plotted Figure J20's data.
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['pca_clustering'][signal],
                              plot_format, signal_info, signal,
                              printing, folder_pca_clustering)

# **Figure J29 | Divisive clustering of the first 16 principal components of the piezoresponse curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Amplitude

# +
signal = 'amplitude'

# computes the pca
eigenvalues = ml.weights_as_embeddings(machine_learning['pca'][signal],
                                       data['sg_filtered'][signal])

# clusters the loops
machine_learning['pca_clustering'][signal] = ml.k_means_clustering(
    data, signal, clustering, seed=42, pca_in=eigenvalues)

# plots the clustering maps
viz.plot.hierarchical_clustering(machine_learning['pca_clustering'][signal],
                                 names, plot_format)
# -

# **Figure J30
# | Divisive clustering of the first 16 principal components of the amplitude curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clustering maps and average hysteresis loops
# FIX(review): plot the PCA+clustering result (caption/folder say PCA +
# clustering); the original passed the plain 'clustering' result here.
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['pca_clustering'][signal],
                              plot_format, signal_info, signal,
                              printing, folder_pca_clustering)

# **Figure J31 | Divisive clustering of the first 16 principal components of the amplitude curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Phase

# +
signal = 'phase'

# computes the pca
eigenvalues = ml.weights_as_embeddings(machine_learning['pca'][signal],
                                       data['sg_filtered'][signal])

# clusters the pca
machine_learning['pca_clustering'][signal] = ml.k_means_clustering(
    data, signal, clustering, seed=42, pca_in=eigenvalues)

# plots the cluster maps
viz.plot.hierarchical_clustering(machine_learning['pca_clustering'][signal],
                                 names, plot_format)
# -

# **Figure J32 | Divisive clustering of the first 16 principal components of the phase curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clustering maps and average hysteresis loops
# FIX(review): same wrong-key fix as above ('clustering' -> 'pca_clustering').
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['pca_clustering'][signal],
                              plot_format, signal_info, signal,
                              printing, folder_pca_clustering)

# **Figure J33 | Divisive clustering of the first 16 principal components of the phase curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Resonance

# +
signal = 'resonance'

# computes the pca
eigenvalues = ml.weights_as_embeddings(machine_learning['pca'][signal],
                                       data['sg_filtered'][signal])

# clusters the results
machine_learning['pca_clustering'][signal] = ml.k_means_clustering(
    data, signal, clustering, seed=42, pca_in=eigenvalues)

# plots the cluster maps
viz.plot.hierarchical_clustering(machine_learning['pca_clustering'][signal],
                                 names, plot_format)
# -

# **Figure J34 | Divisive clustering of the first 16 principal components of the resonance frequency curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clustering maps and average hysteresis loops
# FIX(review): same wrong-key fix as above ('clustering' -> 'pca_clustering').
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['pca_clustering'][signal],
                              plot_format, signal_info, signal,
                              printing, folder_pca_clustering)

# **Figure J35 | Divisive clustering of the first 16 principal components of the resonance frequency curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# ### Quality Factor

# +
signal = 'quality_factor'

# computes the pca
eigenvalues = ml.weights_as_embeddings(machine_learning['pca'][signal],
                                       data['sg_filtered'][signal])

# computes the cluster maps
machine_learning['pca_clustering'][signal] = ml.k_means_clustering(
    data, signal, clustering, seed=42, pca_in=eigenvalues)

# plots the cluster maps
viz.plot.hierarchical_clustering(machine_learning['pca_clustering'][signal],
                                 names, plot_format)
# -

# **Figure J36 | Divisive clustering of the first 16 principal components of the quality factor curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# plots the clustering maps and average hysteresis loops
# FIX(review): same wrong-key fix as above ('clustering' -> 'pca_clustering').
viz.plot.clustered_hysteresis(data['raw']['voltage'],
                              data['sg_filtered']['piezoresponse'],
                              machine_learning['pca_clustering'][signal],
                              plot_format, signal_info, signal,
                              printing, folder_pca_clustering)

# **Figure J37 | Divisive clustering of the first 16 principal
# components of the quality factor curves obtained from band excitation switching spectroscopy of PbZr${_{0.2}}$Ti${_{0.8}}$O${_{3}}$ with hierarchical domain structures.**

# # Deep Learning Long Short-Term Memory Recurrent Neural Network Autoencoder

# ## Piezoresponse

# ### Building the model

# +
# selects the folder where the pre-trained models are located
model_folder = './Trained Models/Piezoresponse/Bidirect_lstm_size064_enc4_emb16_dec4_lr3m05_drop0.2_l1norm_1m05_batchnorm_TT_001'

# Function to build the model
# NOTE(review): the folder name says l1norm_1m05 (1e-5) but l1_norm=1e-4 is
# passed here — confirm which regularization the saved weights were trained with.
piezoresponse_model, run_id = rnn.rnn('lstm', 64, 4, 4, 16,
                                      data['sg_filtered']['piezoresponse'].shape[1],
                                      lr=3e-5, drop_frac=.2, l1_norm=1e-4,
                                      batch_norm=[True, True])
# -

# ### Train the model

# + code_folding=[]
# select if the user will train a new model.
# Note training requires GPU access and can take a long time (1-2 days)
train_model = False

if train_model:
    # trains the model saving results as checkpoints
    rnn.train_model(run_id, piezoresponse_model,
                    data['normalized']['piezoresponse'],
                    data['normalized']['val_piezoresponse'],
                    folder_piezoresponse_autoencoder)
# -

# ### Loads Pre-Trained Model

# +
# loading the pre-trained weights
piezoresponse_model.load_weights(model_folder + '/weights.15179-0.00.hdf5')

# Updates the decoder based on decoding optimization.
# this was done to improve the quality of the reconstruction.
piezoresponse_model, piezoresponse_decoder = rnn.update_decoder(
    piezoresponse_model,
    './Trained Models/Piezoresponse/weights.00033723-0.0022.hdf5')
# -

# Displays the model summary
piezoresponse_model.summary()

# ## Model Validation

# ### Validation Loss

# +
# loss for the training data
print('Training Data Set:')
score = piezoresponse_model.evaluate(
    np.atleast_3d(data['normalized']['piezoresponse']),
    np.atleast_3d(data['normalized']['piezoresponse']))
print('Test loss:', score)

# loss for the validation data
print('Validation Data Set:')
score = piezoresponse_model.evaluate(
    np.atleast_3d(data['normalized']['val_piezoresponse']),
    np.atleast_3d(data['normalized']['val_piezoresponse']))
print('Validation loss:', score)
# -

# ### Training Results

# plots the loss and an example reconstruction
# set to plot a random loop
# to plots a specific point add i=(pixel position)
viz.plot.training_loss(model_folder, data, piezoresponse_model,
                       'piezoresponse', signal_info, printing,
                       folder_piezoresponse_autoencoder)

# **Figure J38 | Piezoresponse autoencoder training results. a,** Training loss (training - black) validation (red). Example hysteresis loop from the **b,** training, **c,** validation data set. Black curve shows the original measured data, red curve show the autoencoder reconstruction.

# ### Low Dimensional Layer

# Computes the low dimensional layer
piezoresponse_embeddings = rnn.get_activations(
    piezoresponse_model, data['normalized']['piezoresponse'], 9)

# +
# defines the ranges for the images
ranges = [0, 1.3e-2, 0, 0,
          0, 0, 0, 6e-3,
          0, 0, 0, 1.3e-2,
          1e-2, 0, 0, 3e-3]

# plots the embedding maps
_ = viz.plot.embedding_maps(piezoresponse_embeddings, printing, plot_format,
                            folder_piezoresponse_autoencoder,
                            filename='./Piezoresponse_embeddings',
                            ranges=ranges)
# -

# **Figure J39 | Output of low dimensional layer obtained from the piezoresponse autoencoder.**

# ### Plot Embedding and Line Trace

# +
# rotates and crops the topography image
crop_topo, scale = util.core.rotate_and_crop(
    np.flipud(imported['data']['HeightFinal'].reshape(1024, 1024).T))

# creates the figures and axes in a pretty way
num_img = 10
fig, ax = viz.format.layout_fig(num_img, mod=num_img // 2)

# plots the selected embeddings superimposed on the line trace
for i, v in enumerate([1, 7, 11, 12, 15]):
    viz.plot.embedding_line_trace(ax, i, crop_topo,
                                  piezoresponse_embeddings[:, v],
                                  [0, ranges[v]], plot_format,
                                  number=num_img // 2)

plt.tight_layout(pad=1)

# saves the figure
util.file.savefig(folder_piezoresponse_autoencoder +
                  '/embedding_and_topography', printing)
# -

# **Figure J40 | Plots of selected embedding maps from piezoelectric autoencoder superimposed on average topography.**

# ### Exports Training Images

# Exports low dimensional layer computed after each epoch (with improvement) during training. This allows the visualization of the effect of L${_1}$ regularization.
# +
# selects to export training images
# note this take a long time (1-2 hours)
export_training_images = False

if export_training_images:
    # FIX(review): np.int was removed in NumPy 1.24 — use the builtin int.
    if int(io_transfer.get_size(model_folder) / 1e8) > 1:
        # exports all low dimensional layers from training
        viz.plot.training_images(piezoresponse_model, data, model_folder,
                                 printing, plot_format,
                                 folder_piezoresponse_autoencoder_training_movie)

    if printing['movies']:
        # Script to making movie
        util.file.make_movie('Piezoresponse_training_movie',
                             folder_piezoresponse_autoencoder_training_movie,
                             './', 'png', 10, output_format='mp4')
# -

# ### Make Generator Movie

# Makes a movie where the magnitude of the embedding is manipulated and the decoder is used to generate the piezoresponse

if printing['movies']:
    # defines the ranges for the embeddings
    ranges = [1.3e-2, 6e-3, 1.3e-2, 1e-2, 3e-3]

    # generates images for the generator movie
    _ = viz.plot.generator_movie(piezoresponse_decoder,
                                 piezoresponse_embeddings,
                                 data['raw']['voltage'], 100, 500, ranges,
                                 folder_piezoresponse_autoencoder_movie,
                                 plot_format, printing,
                                 graph_layout=[5, 5])

    # Script to making movie
    util.file.make_movie('Piezoresponse_Generator_movie',
                         folder_piezoresponse_autoencoder_movie, './', 'png',
                         10, output_format='mp4', reverse=True)

# ### Plots Generator Results

# +
# defines the range for the embeddings
ranges = [1.3e-2, 6e-3, 1.3e-2, 1e-2, 3e-3]

# plots the embedding layer and the generated results
viz.plot.generator_piezoresponse(piezoresponse_decoder,
                                 piezoresponse_embeddings,
                                 data['raw']['voltage'], ranges, 6, 100,
                                 printing, plot_format,
                                 folder_piezoresponse_autoencoder)
# -

# **Figure J41 | Plots of selected embedding maps from piezoelectric autoencoder bottom shows generated hysteresis loop obtained when varying each embedding.** The color of the piezoelectric hysteresis loop reflects the colors in the map

# ## Resonance

# ### Building the model

# +
# selects the folder where the pre-trained model is saved
model_folder = './Trained Models/Resonance/Bidirect_lstm_size064_enc4_emb16_dec4_lr3m05_drop0.2_l1norm_0.0001_batchnorm_TT_001'

# Function to build the model
resonance_model, run_id = rnn.rnn(
    'lstm', 64, 4, 4, 16, data['sg_filtered']['resonance'].shape[1],
    lr=3e-5, drop_frac=.2, l1_norm=1e-4, batch_norm=[True, True])
# -

# ### Train the model

# + code_folding=[]
# select if the user will train a new model.
# Note training requires GPU access and can take a long time (1-2 days)
train_model = False

if train_model:
    # trains the model saving each epoch (with improvement) as a checkpoint
    rnn.train_model(
        run_id, resonance_model, data['normalized']['resonance'],
        data['normalized']['val_resonance'], folder_resonance_autoencoder)
# -

# ### Loads Pre-Trained Model

# +
# loading the pre-trained weights
resonance_model.load_weights(model_folder + '/weights.00022570-0.0123.hdf5')

# loads the pre-trained weight from an optimized decoder
# training of the decoder was done to minimize reconstruction error
resonance_model, resonance_decoder = rnn.update_decoder(
    resonance_model, './Trained Models/Resonance/weights.00013412-0.0106.hdf5')
# -

# Displays the model summary
resonance_model.summary()

# ## Model Validation

# ### Validation Loss

# +
# computes the training loss
print('Training Data Set:')
score = resonance_model.evaluate(np.atleast_3d(data['normalized']['resonance']),
                                 np.atleast_3d(data['normalized']['resonance']))
print('Test loss:', score)

# computes the validation loss
print('Validation Data Set:')
score = resonance_model.evaluate(np.atleast_3d(data['normalized']['val_resonance']),
                                 np.atleast_3d(data['normalized']['val_resonance']))
print('Validation loss:', score)
# -

# plots the loss and an example reconstruction
# set to plot a random loop
# to plots a specific point add i=(pixel position)
viz.plot.training_loss(
    model_folder, data, resonance_model, 'resonance', signal_info,
    printing, folder_resonance_autoencoder)

# **Figure J43 | Resonance autoencoder training results. a,** Training loss (training - black) validation (red). Example hysteresis loop from the **b,** training, **c,** validation data set. Black curve shows the original measured data, red curve show the autoencoder reconstruction.

# Computes the low dimensional layer
resonance_embeddings = rnn.get_activations(
    resonance_model, data['normalized']['resonance'], 9)

# +
# defines the ranges for the images
ranges = [0, 0, 0, 0,
          6e-3, 0, 4e-2, 0,
          6e-2, 1e-1, 0, 1e-3,
          0, 0, 0, 1.6e-2]

# plots the embedding maps
_ = viz.plot.embedding_maps(
    resonance_embeddings, printing, plot_format, folder_resonance_autoencoder,
    filename='./Resonance_embeddings', ranges=ranges)
# -

# **Figure J44 | Output of low dimensional layer obtained from the resonance autoencoder.**

# ### Plot Embedding and Line Trace

# +
# collects the c/a clustering results
cluster_ca = machine_learning['clustering']['piezoresponse'][1]

# makes a copy of the embeddings
embedding_c = np.copy(resonance_embeddings)
embedding_a = np.copy(resonance_embeddings)

# splits the embeddings for the c and a domains
embedding_c[np.where(cluster_ca == 1)] = 0
embedding_a[np.where(cluster_ca == 0)] = 0

# rotates and crops the topography image
crop_topo, scale = util.core.rotate_and_crop(
    np.flipud(imported['data']['HeightFinal'].reshape(1024, 1024).T))

# defines the embedding ranges for the images
ranges = [0, 0, 0, 0,
          6e-3, 0, 4e-2, 0,
          6e-2, 1e-1, 0, 1e-3,
          0, 0, 0, 1.6e-2]

# creates the figures and axes in a pretty way
fig, ax = viz.format.layout_fig(6, mod=3)

# plots the embedding superimposed on the line trace
viz.plot.embedding_line_trace(
    ax, 0, crop_topo, embedding_c[:, 15], [0, 1.6e-2], plot_format)
viz.plot.embedding_line_trace(
    ax, 1, crop_topo, embedding_a[:, 4], [0, 4.5e-3], plot_format)
viz.plot.embedding_line_trace(
    ax, 2, crop_topo, embedding_a[:, 11], [0, 7e-4], plot_format)

plt.tight_layout(pad=1)

# saves the figure
util.file.savefig(
    folder_resonance_autoencoder + '/embedding_and_topography', printing)
# -
# **Figure J45 | Plots of selected embedding maps from piezoelectric autoencoder superimposed on average topography.**

# ### Exports Training Images

# Exports low dimensional layer computed after each epoch (with improvement) during training. This allows the visualization of the effect of L${_1}$ regularization.

# +
# selects to export training images
# note this take a long time (1-2 hours)
export_training_images = False

if export_training_images:
    # FIX(review): np.int was removed in NumPy 1.24 — use the builtin int.
    if int(io_transfer.get_size(model_folder) / 1e8) > 1:
        viz.plot.training_images(
            resonance_model, data, model_folder, printing, plot_format,
            folder_resonance_autoencoder_training_movie,
            data_type='resonance')

    if printing['movies']:
        # Script to making movie
        util.file.make_movie(
            'resonance_training_movie',
            folder_resonance_autoencoder_training_movie, './', 'png',
            10, output_format='mp4')
# -

# ### Make Generator Movie

# Makes a movie where the magnitude of the embedding is manipulated and the decoder is used to generate the piezoresponse

if printing['movies']:
    # collects the c/a c
    cluster_ca = machine_learning['clustering']['piezoresponse'][1]

    # makes a copy of the resonance embeddings
    embedding_c = np.copy(resonance_embeddings)
    embedding_a = np.copy(resonance_embeddings)

    # extracts the embeddings for the c/a regions
    embedding_c[np.where(cluster_ca == 1)] = 0
    embedding_a[np.where(cluster_ca == 0)] = 0

    # defines the embedding ranges for the images
    ranges_a = [0, 0, 0, 0,
                5e-3, 0, 4e-2, 0,
                6e-2, 1e-1, 0, 7e-4,
                0, 0, 0, 1.6e-2]
    ranges_c = [0, 0, 0, 0,
                2e-3, 0, 4e-2, 0,
                6e-2, 1e-1, 0, .7e-3,
                0, 0, 0, 1.6e-2]

    # selects the embedding maps to plot
    index_a = [4, 6, 11]
    index_c = [4, 11, 15]

    # selects the number of images (embedding levels) to make
    number = 100

    # selects the number of points to average the embedding between
    averaging_number = 50

    # generates the embedding images
    _ = viz.plot.resonance_generator_movie(
        resonance_model, index_c, index_a, embedding_c,
        data['raw']['voltage'], embedding_a, ranges_c, ranges_a, number,
        averaging_number, resonance_decoder, plot_format, printing,
        folder_resonance_autoencoder_movie, graph_layout=[12, 3])

    # Script to making movie
    util.file.make_movie(
        'Resonance_Generator_movie', folder_resonance_autoencoder_movie,
        './', 'png', 10, output_format='mp4', reverse=True)

# ### Autoencoder Generator

# +
# defines the ranges for the images
ranges = [0, 0, 0, 0,
          4.5e-3, 0, 4e-2, 0,
          6e-2, 1e-1, 0, 7e-4,
          0, 0, 0, 1.6e-2]

# selects the embedding maps to plot
index_a = [4, 6, 11]
index_c = [4, 11, 15]

# selects the number of curves to plot
number = 8

# selects the number of pixels to average
averaging_number = 50

# selects a subset of the generated plots
plot_subselect = [[7, 6, 5], [7, 6, 5], [7, 6, 5]]

# set the scales of the axes
scales = [[1320, 1330], [-1.1, 1.1]]

# plots the generated curves for the a domains
viz.plot.resonance_generator(
    resonance_decoder, piezoresponse_decoder, index_a, embedding_a,
    ranges, number, averaging_number, plot_subselect,
    piezoresponse_embeddings, data['raw']['voltage'],
    data['sg_filtered']['resonance'], plot_format, printing,
    folder_resonance_autoencoder, scales, name_prefix='a_domains')

# sets the embedding ranges for the c domains
ranges = [0, 0, 0, 0,
          2e-3, 0, 4e-2, 0,
          6e-2, 1e-1, 0, .7e-3,
          0, 0, 0, 1.6e-2]

# selects a subset of the generated plots
plot_subselect = [[7, 6, 5], [7, 6, 5], [7, 5, 3, 1]]

# set the scales of the axes
scales = [[1320, 1330], [-1.55, 1.55]]

# plots the generated curves for the a domains
viz.plot.resonance_generator(
    resonance_decoder, piezoresponse_decoder, index_c, embedding_c,
    ranges, number, averaging_number, plot_subselect,
    piezoresponse_embeddings, data['raw']['voltage'],
    data['sg_filtered']['resonance'], plot_format, printing,
    folder_resonance_autoencoder, scales, name_prefix='c_domains')
# -

# **Figure J45 | Plots of selected embedding maps from resonance autoencoder.**

# Top shows embedding map, middle shows generated resonance hysteresis loop, bottom shows generated piezoelectric
hysteresis loop obtained when varying each embedding. The color of the hysteresis loops reflects the colors in the map # # Phase Field # + # sets the position where the tip is located tip_positions = {'tip1': dict(pos=[42, 64, 20]), 'tip2': dict(pos=[50, 64, 20]), 'tip3': dict(pos=[62, 64, 20]), 'tip4': dict(pos=[72, 64, 20]), 'tip5': dict(pos=[74, 64, 20])} # sets the scale limits for the graphs clim = {'Polarization Z': [-1, 1], 'Landau Energy': [-10e7, 10e7], 'Elastic Energy': [-10e7, 10e7], 'Electrostatic Energy': [-10e7, 10e7], 'Gradient Energy': [-10e7, 10e7], 'Total Energy': [-10e7, 10e7]} # sets the information of the region to s6ho graph_info = dict(top=20, y_cut=64, x_lim=[120, 360], y_lim=[0, 100], clim=clim) # collection of information used for plotting the phase feild results Phase_field_information = {'tips': ['tip1', 'tip2', 'tip3', 'tip4', 'tip5'], 'folder': dict(time_series='./Raw_Data/Phase_Field/Polarization/data-PEloop/', polarization='./Raw_Data/Phase_Field/Polarization/', energy='./Raw_Data/Phase_Field/energy/'), 'time_step': [60, 0, 20], 'tip_positions': tip_positions, 'graph_info': graph_info, 'labels': ['Polarization Z', 'Landau Energy', 'Elastic Energy', 'Electrostatic Energy', 'Gradient Energy', 'Total Energy'], 'output_folder': folder_phase_field} # - # ## Phase Field Switching Images # plots the phase field results viz.phase_field.phase_field_switching(Phase_field_information, printing) # **Figure J46 | Phase-field simulations under local tip bias.** Maps show the polarization and various contributions to the energy at various tip positions. Maps show the switching under negative bias (left), initial state (center), positive bias (right). 
if printing['movies']: # exports all phase field images to create movie _ = viz.phase_field.movie(Phase_field_information, printing) # ## Makes Movies if printing['movies']: for i, tip in enumerate(Phase_field_information['tips']): util.file.make_movie('Switching_movie_' + tip, folder_phase_field + '/movie/' + tip, folder_phase_field + '/movie/', 'png', 5, output_format='gif') # ## Phase Field Hysteresis Loops viz.phase_field.phase_field_hysteresis(Phase_field_information, printing) # **Figure J47 | Phase-field simulations under local tip bias.** Plots show the extracted ferroelectric hysteresis loops at various tip positions.
Revealing Ferroelectric Switching Character Using Deep Recurrent Neural Networks-Collaboratory.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (py36)
#     language: python
#     name: py36
# ---

# +
# Author: <NAME>

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Fixed seed so the random tensors below are reproducible.
torch.manual_seed(1)
# -

# Demo dimensions: 7 tokens, 3-d input embeddings, 5-d hidden state.
INPUT_DIM = 3
HIDDEN_DIM = 5
SEQ_LEN = 7

lstm = nn.LSTM(input_size=INPUT_DIM, hidden_size=HIDDEN_DIM, batch_first=True)

inputs = torch.randn(SEQ_LEN, 1, INPUT_DIM)

# +
#inputs = [torch.randn(1, INPUT_DIM) for _ in range(SEQ_LEN)]
# -

inputs

# BUGFIX: `hidden` and `cell` were previously defined *after* their first use
# in the bulk lstm() call below, which raises NameError when the notebook is
# executed top-to-bottom. They are now initialized before being consumed.
hidden, cell = (torch.randn(SEQ_LEN, 1, HIDDEN_DIM), torch.randn(SEQ_LEN, 1, HIDDEN_DIM))

# Bulk call: with batch_first=True the input is (batch=SEQ_LEN, seq=1, INPUT_DIM),
# so the initial state must be shaped (num_layers=1, batch=SEQ_LEN, HIDDEN_DIM).
out, (hidden_state, cell_state) = lstm(inputs, (hidden.view(1, SEQ_LEN, HIDDEN_DIM), cell.view(1, SEQ_LEN, HIDDEN_DIM)))

out.size(), (hidden_state.size(), cell_state.size())

out

hidden_state.size(), hidden.size()

cell_state

inputs[0].unsqueeze(0)

# Single-step call: one token (1, 1, INPUT_DIM) with its own (h0, c0), each (1, 1, HIDDEN_DIM).
out1, (hid1, cell1) = lstm(inputs[0].unsqueeze(0), (hidden[0].unsqueeze(0), cell[0].unsqueeze(0)))

out1, hid1, cell1

inputs[0].unsqueeze(0).size(), (hidden[0].unsqueeze(0).size(), cell[0].unsqueeze(0).size())

out1.size(), (hid1.size(), cell1.size())

# # Demystifying `nn.LSTM` module
#
# ## What’s the difference between hidden and output in PyTorch LSTM?
#
# According to Pytorch documentation
#
# ```py
# """
# Outputs: output, (h_n, c_n)
# """
# ```
#
# There are 2 types of usage of `nn.lstm` module.
#
# - **TYPE 1 (BULK MODE):** Feed all the input in a bulk to the `lstm` module
# - **TYPE 2 (LOOP MODE):** Feed each element of the input to the `lstm` module in a loop
#
# ## How to interpret the BULK MODE?
#
# - `Outputs` comprises all the hidden states in the last layer (“last” `depth-wise`, not time-wise).
# - $(h_n,c_n)$ comprises the hidden states after the last time step, $t=n$, so you could potentially feed them into another LSTM.
#
# ![image](https://i.stack.imgur.com/SjnTl.png)
#
# Simple example to show that both the approach **may not** generate same output for identical problem definition.
#
# **Example:**
#
# Say I have a sentence: _i love my city kolkata very much_. And we want to feed the sentence to a `nn.lstm` module using above 2 approaches.
#
# We have a sequence length = 7 here.
#
# We need to convert each token `["i", "love", "my", "city", "kolkata", "very", "much"]` to an embedding. For this demo we generate a random embedding of dimension `3`

# Re-imported here so this section also runs standalone.
import torch
import torch.nn as nn

SEQ_LEN = 7
# BUGFIX: this constant was misspelled `IMPUT_DIM`, while every cell below
# reads `INPUT_DIM` (which this section never defined — it only worked if an
# earlier, unrelated section had already set it). The misspelled name is kept
# as an alias in case anything still references it.
INPUT_DIM = IMPUT_DIM = EMBED_DIM = 3
HIDDEN_DIM = 5

# Conceptually `input dimension` and `embedding dimension` are the same, as word embeddings are the input to the lstm module. We can use both terms.

torch.manual_seed(0)

inputs = torch.randn(SEQ_LEN, 1, INPUT_DIM)

inputs

# ```py
# "i" = [[-1.1258, -1.1524, -0.2506]],
# "love" = [[-0.4339, 0.8487, -1.5551]],
# "my" = [[-0.3414, 1.8530, 0.4681]],
# "city" = [[-0.1577, 1.4437, 0.2660]],
# "kolkata" = [[ 0.1665, 1.5863, 0.9463]],
# "very" = [[-0.8437, 0.9318, 1.2590]],
# "much" = [[ 2.0050, 0.0537, 0.6181]]]
# ```

# Let's declare our `lstm` module

lstm = nn.LSTM(input_size=INPUT_DIM, hidden_size=HIDDEN_DIM, batch_first=True)

# One interesting fact: `nn.LSTM()` returns a function and we assigned the function in a variable name `lstm`.
#
# The function `lstm()` expects all the argument `inputs, (hidden, cell)` as 3D tensor.
#
# Now we can pass the entire embedding/input matrix `inputs` to the `lstm()` function. If you are using TYPE 1, then we can call `lstm()` in 2 ways:
#
# - Without `(hidden, cell)`. Then system initializes the `(hidden,cell)` with 0
# - With custom `(hidden, cell)` initialization
#
# Syntax:
#
# ```py
# out, (hidden, cell) = lstm(inputs)
# ```
#
# Now in many LSTM example we will see this notation where bulk inputs are fed to the `lstm()` module. The confusion arises when we see examples where the TYPE 2 approach is used and each input is fed in a loop.
# However we can show both TYPE 1 and TYPE 2 approaches are the same if we use the same `(hidden, cell)` initialization for both the cases.
#
# But technically there is a slight catch. And that is related to the tensor shape for `(hidden,cell)`.
#
# In practice, LSTM is a recurrent network. Which takes one embedding for one word and the corresponding `(hidden,cell)` and returns `out, (hidden, cell)`. Now in bulk approach, all are sent together.
#
# Let's initialize `(hidden, cell)`

hidden, cell = (torch.randn(SEQ_LEN, 1, HIDDEN_DIM), torch.randn(SEQ_LEN, 1, HIDDEN_DIM))

# ## TYPE 2:

out_1, (hid_1, cell_1) = lstm(inputs[0].unsqueeze(0), (hidden[0].unsqueeze(0), cell[0].unsqueeze(0)))

out_1, hid_1, cell_1

out_2, (hid_2, cell_2) = lstm(inputs[1].unsqueeze(0), (hid_1, cell_1))

out_2, (hid_2, cell_2)

# ## TYPE 1:

out_type_1, (hidden_type_1, cell_type_1) = lstm(inputs, (hidden.view(1, SEQ_LEN, HIDDEN_DIM), cell.view(1, SEQ_LEN, HIDDEN_DIM)))

out_type_1

# see, the first row of `out_type_1` is similar to `out_1`. But the subsequent rows of `out_type_1` are different, as the returned `(hidden,cell)` are fed back into `lstm()`.
#
# This raises one question. We initially initialized the `(hidden, cell)` for all the `7` tokens, but it seems redundant. But that's not the case.

# Let's unroll the bulk and try to regenerate the `output_type_1`

out_1, (hid_1, cell_1) = lstm(inputs[0].unsqueeze(0), (hidden[0].unsqueeze(0), cell[0].unsqueeze(0)))

out_1, (hid_1, cell_1)

# The first element is fine. But the twist comes next. See I am not passing the `(hid_1, cell_1)` for the token `inputs[1]`, rather I am passing a reshaped version of `(hidden[1], cell[1])` and that is creating the exact replica of `output_type_1[1]`

out_2, (hid_2, cell_2) = lstm(inputs[1].unsqueeze(0), (hidden[1].unsqueeze(0), cell[1].unsqueeze(0)))

out_2, (hid_2, cell_2)

# We can go on like this....
#
# **Observation:**
#
# If you see carefully, it seems, in bulk mode (in the above unrolled version), each output is not generated by the previous `(hidden, cell)` i.e $(h_{t-1}, c_{t-1})$ as seen by the above example (but the results are matching for `bulk output` and `unrolled version` of bulk output).
#
#
# ## Following LSTM definition:
#
# Now lets feed the $(h_{t-1}, c_{t-1})$ (as per the original LSTM definition) to generate the next `out`.

out_1, (hid_1, cell_1) = lstm(inputs[0].unsqueeze(0), (hidden[0].unsqueeze(0), cell[0].unsqueeze(0)))

out_1, (hid_1, cell_1)

# This time the *returned* state is chained into the next step, as in a true recurrence.
out_2, (hid_2, cell_2) = lstm(inputs[1].unsqueeze(0), (hid_1, cell_1))

out_2, (hid_2, cell_2)

# **Observation:**
#
# The `out_2` is different from `output_type_1[1]` (both denoting the second element)

# +
# Author: <NAME>

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# +
torch.manual_seed(0)

lstm = nn.LSTM(3, 3)  # Input dim is 3, output dim is 3
inputs = [torch.randn(1, 3) for _ in range(5)]  # make a sequence of length 5

# initialize the hidden state.
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))
for i in inputs:
    # Step through the sequence one element at a time.
    # after each step, hidden contains the hidden state.
    out, hidden = lstm(i.view(1, 1, -1), hidden)

print(out)
print(hidden)
# BUGFIX: this was a bare `print` (a Python 2 leftover); in Python 3 a bare
# `print` is a no-op expression. `print()` emits the intended blank line.
print()
# -

# alternatively, we can do the entire sequence all at once.
# the first value returned by LSTM is all of the hidden states throughout
# the sequence. the second is just the most recent hidden state
# (compare the last slice of "out" with "hidden" below, they are the same)
# The reason for this is that:
# "out" will give you access to all hidden states in the sequence
# "hidden" will allow you to continue the sequence and backpropagate,
# by passing it as an argument to the lstm at a later time

# Add the extra 2nd dimension
inputs = torch.cat(inputs).view(len(inputs), 1, -1)
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))  # clean out hidden state
out, hidden = lstm(inputs, hidden)
print(out)
print(hidden)
notebooks/lstm_debugging.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Exploring the difference between numpy.zeros and numpy.empty with timeit # I was reading on [this thread](https://stackoverflow.com/questions/43145332/numpy-array-of-zeros-or-empty) on StackOverflow about the difference between a numpy array of zeros and empty. # # Does it make much difference when initializing an array? I decided to play around with Python's timeit to see the time difference. import timeit import numpy as np # We can initialize a big number to put into our loop first. big = 10**6; big # And then move on to timing our array initializations. # %timeit np.empty([big, big]) # %timeit np.zeros([big, big]) # Although there is a difference, it doesn't seem to be much.
notebooks/numpy-what-is-the-difference-between-numpy-empty-and-zeros.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Principal Component Analysis applied to the Iris dataset. # # See [here](https://en.wikipedia.org/wiki/Iris_flower_data_set) for more information on this dataset. # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). # <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). # <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! 
# ### Version

import sklearn
sklearn.__version__

# ### Imports

# +
print(__doc__)

import plotly.plotly as py
import plotly.graph_objs as go

import numpy as np
import matplotlib.pyplot as plt
from sklearn import decomposition
from sklearn import datasets
# -

# ### Calculations

# +
np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Project the 4-D iris features down to the first 3 principal components.
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
# -

# ### Plot Results

def matplotlib_to_plotly(cmap, pl_entries):
    """Convert a matplotlib colormap into a plotly colorscale.

    cmap: a matplotlib colormap callable.
    pl_entries: number of evenly spaced colour stops to sample.
    Returns a list of [position, 'rgb(r, g, b)'] pairs.
    """
    h = 1.0/(pl_entries-1)
    pl_colorscale = []

    for k in range(pl_entries):
        # BUGFIX: on Python 3 `map()` returns a lazy iterator, so indexing
        # C[0] raised TypeError there; materializing with list() behaves
        # identically on Python 2 and also works on Python 3.
        C = list(map(np.uint8, np.array(cmap(k*h)[:3])*255))
        pl_colorscale.append([k*h, 'rgb'+str((C[0], C[1], C[2]))])

    return pl_colorscale

# +
# Reorder the labels to have colors matching the cluster results.
# (np.float was just an alias of the builtin float and was removed in
# NumPy 1.24 — using float directly is behaviorally identical.)
y = np.choose(y, [1, 2, 0]).astype(float)
# NOTE(review): plt.cm.spectral was removed in matplotlib >= 2.2 (renamed
# 'nipy_spectral') — kept as-is for this notebook's Python 2 kernel; confirm
# before running on a modern matplotlib.
trace = go.Scatter3d(x=X[:, 0], y=X[:, 1], z=X[:, 2],
                     mode='markers',
                     marker=dict(color=y,
                                 colorscale=matplotlib_to_plotly(plt.cm.spectral, 5),
                                 line=dict(color='black', width=1))
                     )

layout = go.Layout(scene=
                   dict(
                        xaxis=dict(ticks='', showticklabels=False),
                        yaxis=dict(ticks='', showticklabels=False),
                        zaxis=dict(ticks='', showticklabels=False),
                       )
                   )
fig = go.Figure(data=[trace], layout=layout)
# -

py.iplot(fig)

# ### License

# Code source:
#
# <NAME>
#
# License:
#
# BSD 3 clause

# +
from IPython.display import display, HTML

display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
    'PCA-example-with-iris-Data-set.ipynb', 'scikit-learn/plot-pca-iris/', 'PCA example with Iris Data-set | plotly',
    ' ',
    title = 'PCA example with Iris Data-set | plotly',
    name = 'PCA example with Iris Data-set',
    has_thumbnail='true', thumbnail='thumbnail/pca-iris.jpg',
    language='scikit-learn', page_type='example_index',
    display_as='decomposition', order=1,
    ipynb= '~Diksha_Gabha/2915')
# -
_posts/scikit/pca-example-with-iris-dataset/PCA-example-with-iris-Data-set.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NOTE(review): the selenium / bs4 / requests / csv / time imports are unused
# in this notebook (they belong to the scraping step) — kept since other
# sections of the project may rely on this cell being run first.
import sqlite3
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import requests
import pandas as pd
import csv
import time

# +
# Load the exported news listing and name its columns explicitly.
df = pd.read_excel('C:/Develops/newspapers/중앙일보/NewsResult_20200901-20210430 (8).xlsx',
                   names=['identical', 'date', 'press', 'name', 'title', 'c1', 'c2', 'c3', 'a1', 'a2', 'a3',
                          'person', 'place', 'institute', 'keyword', 'topkeyword', 'body', 'url', 'tf'])

# preprocessing: drop rows flagged as exceptions ('예외') or duplicates ('중복')
df = df[df.tf != '예외']
df = df[df.tf != '중복']
df = df[df.tf != '중복, 예외']
# df = df[~df.title.str.contains('경향포토')]
# df = df[~df.title.str.contains('인터랙티브')]
# df = df[~df.place.str.contains('korea', na=False)]
# df = df[~df.place.str.contains('la', na=False)]
# df = df[~df.place.str.contains('LA', na=False)]
df = df.reset_index()
df = df.drop(columns=['index'], axis=1)
# -

# row counts: up to 4/1 -> 7952 rows, up to 3/1 -> 16355 rows
df = df.iloc[0:7952]
# 4/30 ~ 4/1: 7952 articles for the month (~8h50m total; ~50 articles per 3m20s)

len(df)

df

# Full article bodies scraped separately (one column, no header).
text_0_7953_넣음 = pd.read_excel('C:/Develops/newspapers/중앙일보/text_0_7953_넣음.xls', header=None)

len(text_0_7953_넣음)

text_0_7953_넣음 = text_0_7953_넣음.values.tolist()

# BUGFIX: this stray display expression referenced `judge` before it was
# loaded below (NameError when the cells are run top-to-bottom).
# judge['judge']

judge = pd.read_excel('C:/Develops/newspapers/중앙일보/judge.xls')

judge

judge_value = judge['judge'].values.tolist()

judge_ko = judge['judge_ko'].values.tolist()

# +
# df['total_text'] = co_text
# -

# Attach scraped bodies and sentiment judgements to the listing.
df['total_body'] = text_0_7953_넣음

df['judge'] = judge_value

df['judge_ko'] = judge_ko

df

# Reload the combined sheet with the final 22-column schema.
df = pd.read_excel('C:/Develops/newspapers/중앙일보/j_press_combine.xls',
                   names=['identical', 'date', 'press', 'name', 'title', 'c1', 'c2', 'c3', 'a1', 'a2', 'a3',
                          'person', 'place', 'institute', 'keyword', 'topkeyword', 'body', 'url', 'tf',
                          'total_body', 'judge', 'judge_ko'])

df

df.to_excel('C:/Develops/newspapers/중앙일보/j_press_combine.xls', header=None, index=False)

# Persist the combined table into a fresh SQLite database.
db_news = sqlite3.connect('C:/Develops/newspapers/중앙일보/reset.db')
c = db_news.cursor()

c.execute("CREATE TABLE newspapers (id INTEGER PRIMARY KEY AUTOINCREMENT, identical TEXT, date TEXT, press TEXT, name TEXT, title TEXT, c1 TEXT, c2 TEXT, c3 TEXT, a1 TEXT, a2 TEXT, a3 TEXT, person TEXT, place TEXT, institute TEXT, keyword TEXT, topkeyword TEXT, body TEXT, url TEXT, tf TEXT, total_body TEXT, judge TEXT, judge_ko TEXT)")

# Insert row by row with a parameterized statement (positional columns).
for row in df.iterrows():
    identical = row[1][0]
    date = row[1][1]
    press = row[1][2]
    name = row[1][3]
    title = row[1][4]
    c1 = row[1][5]
    c2 = row[1][6]
    c3 = row[1][7]
    a1 = row[1][8]
    a2 = row[1][9]
    a3 = row[1][10]
    person = row[1][11]
    place = row[1][12]
    institute = row[1][13]
    keyword = row[1][14]
    topkeyword = row[1][15]
    body = row[1][16]
    url = row[1][17]
    tf = row[1][18]
    total_body = row[1][19]
    judge = row[1][20]
    judge_ko = row[1][21]

    c.execute("INSERT INTO newspapers (identical, date, press, name, title, c1, c2, c3, a1, a2, a3, person, place, institute, keyword, topkeyword, body, url, tf, total_body, judge, judge_ko) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) ",(identical, date, press, name, title, c1, c2, c3, a1, a2, a3, person, place, institute, keyword, topkeyword, body, url, tf, total_body, judge, judge_ko))

db_news.commit()
db_news.close()

# BUGFIX: the lines below were scratch notes accidentally left in the script.
# They are not valid Python (bare undefined names, a dash divider, and a
# dangling keyword-argument list) and made the whole module fail to parse,
# so they are preserved here as comments instead of being deleted.
# pos_dict
# neg_dict
# ----------------------------------------------------------------
# , names=['identical', 'date', 'press', 'name', 'title', 'c1', 'c2', 'c3', 'a1', 'a2', 'a3',
#          'person', 'place', 'institute', 'keyword', 'topkeyword', 'body', 'url', 'tf',
#          'total_body', 'judge', 'judge_ko', 'pos_dict', 'neg_dict']
scraping/2.combine_to_sqlite.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext nb_black
# %load_ext autoreload
# %autoreload 2

# +
import os
from pathlib import Path

from requests import get

import pandas as pd
import numpy as np

from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import LabelEncoder, StandardScaler

from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping

import logging

logging.basicConfig(level=logging.WARN)

# +
from xplainet.input_utils import preproc_dataset
from xplainet.model import build_model
from xplainet.random_utils import setup_seed, SEED

from sklearn.model_selection import ShuffleSplit

import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow

# %matplotlib inline

from zipfile import ZipFile
# -

setup_seed()


def download(url, out, force=False, verify=True):
    """Download `url` to the local Path `out`, creating parent directories.

    Skips the download if `out` already exists; `force=True` deletes an
    existing file first. `verify` is forwarded to requests (TLS verification).
    """
    out.parent.mkdir(parents=True, exist_ok=True)
    if force and out.exists():
        print(f"Removing file at {str(out)}")
        out.unlink()
    if out.exists():
        print("File already exists.")
        return
    print(f"Downloading {url} at {str(out)} ...")
    # open in binary mode
    with out.open(mode="wb") as file:
        # get request
        response = get(url, verify=verify)
        for chunk in response.iter_content(100000):
            # write to file
            file.write(chunk)


def plot_history(history):
    """Plot training and validation loss curves from a Keras History object.

    The final value of each series is appended to its legend label.
    (acc_list / val_acc_list are collected but currently unused.)
    """
    loss_list = [s for s in history.history.keys() if "loss" in s and "val" not in s]
    val_loss_list = [s for s in history.history.keys() if "loss" in s and "val" in s]
    acc_list = [s for s in history.history.keys() if "AUC" in s and "val" not in s]
    val_acc_list = [s for s in history.history.keys() if "AUC" in s and "val" in s]

    if len(loss_list) == 0:
        print("Loss is missing in history")
        return

    ## As loss always exists
    epochs = range(1, len(history.history[loss_list[0]]) + 1)

    ## Loss
    plt.figure(1)
    for l in loss_list:
        plt.plot(
            epochs,
            history.history[l],
            "b",
            label="Training loss (" + str(str(format(history.history[l][-1], ".5f")) + ")"),
        )
    for l in val_loss_list:
        plt.plot(
            epochs,
            history.history[l],
            "g",
            label="Validation loss (" + str(str(format(history.history[l][-1], ".5f")) + ")"),
        )

    plt.title("Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()


# ## Bank marketing : loading data
# (NOTE(review): heading looks like a copy-paste leftover — this notebook
# loads the Kaggle house-prices data, not bank marketing.)

# +
dataset_name = "kaggle-house-prices"
out = Path(os.getcwd() + "/data/" + dataset_name + "-train.csv")
target = "SalePrice"
to_remove = []

train = pd.read_csv(out, sep=",", low_memory=False)
# -

# !ls data

train.shape

train.columns

# Deterministic 81/9/10 train/valid/test split, stored in a "Set" column.
if "Set" not in train.columns:
    print("Building tailored column")
    train_valid_index, test_index = next(
        ShuffleSplit(n_splits=1, test_size=0.1, random_state=SEED).split(
            range(train[target].shape[0])
        )
    )
    train_index, valid_index = next(
        ShuffleSplit(n_splits=1, test_size=0.1, random_state=SEED).split(
            train_valid_index
        )
    )
    train["Set"] = "train"
    # NOTE(review): chained indexing triggers pandas' SettingWithCopyWarning;
    # train.loc[valid_index, "Set"] = ... would be the safe form — confirm.
    train["Set"][valid_index] = "valid"
    train["Set"][test_index] = "test"
    # train.to_csv((out.parent / "train_bench.csv").as_posix(), index=False)

train_indices = train[train.Set == "train"].index
valid_indices = train[train.Set == "valid"].index
test_indices = train[train.Set == "test"].index

# Fit the preprocessing params on train only, then reuse them for valid/test.
input_train, params = preproc_dataset(train.loc[train_indices], target, ["Set"])

params

len(train_indices)

input_valid, _ = preproc_dataset(train.loc[valid_indices], target, ["Set"], params)

input_test, _ = preproc_dataset(train.loc[test_indices], target, ["Set"], params)

# Standard-scale the regression target so the network trains on ~N(0, 1) values.
target_encoder = StandardScaler()
target_encoder.fit(train[target].values.reshape(-1, 1))

y_train = target_encoder.transform(train[target].values[train_indices].reshape(-1, 1))
y_valid = target_encoder.transform(train[target].values[valid_indices].reshape(-1, 1))
y_test = target_encoder.transform(train[target].values[test_indices].reshape(-1, 1))

params

model = build_model(
    params,
    lconv_dim=[4],
    lconv_num_dim=[8],
    emb_size=16,
    # For this problem, we need "tanh" as first layer, or else to standard scale the data beforehand
    activation_num_first_layer="tanh",
    output_activation=None,
    output_dim=1,  # np.unique(y_train).shape[0],
)

model.summary()

# +
# #!pip install pydot graphviz
# -

plot_model(
    model,
    # to_file="model.png",
    show_shapes=True,
    show_layer_names=True,
    rankdir="TB",
    expand_nested=False,
    dpi=96,
)

y_train.shape

# Leftover from a classification variant; `counts` is unused for regression.
counts = np.unique(y_train, return_counts=True)[1]
counts = counts.sum() / counts

# + active=""
# class_weight = {}
# for i, counts in enumerate(counts):
#     class_weight[i] = counts
# class_weight

# + active=""
# class_weight = {
#     0: 1,
#     1: 1,
# }
# class_weight
# -

# %%time
history = model.fit(
    input_train,
    y_train.reshape(-1, 1),
    epochs=2000,
    batch_size=1024,
    validation_data=(input_valid, y_valid.reshape(-1, 1),),
    verbose=2,
    callbacks=[EarlyStopping(monitor="val_loss", patience=50, verbose=1)],
    #class_weight=None#class_weight
)

plot_history(history)

# Validation / test metrics on the original (inverse-transformed) price scale.
# (NOTE(review): the variable is named model_auc but holds MAE/MSE values.)
model_auc = mean_absolute_error(
    y_pred=target_encoder.inverse_transform(model.predict(input_valid)).reshape(-1),
    y_true=train[target].values[valid_indices],
)
model_auc

model_auc = mean_squared_error(
    y_pred=target_encoder.inverse_transform(model.predict(input_valid)).reshape(-1),
    y_true=train[target].values[valid_indices],
)
model_auc

model_auc = mean_absolute_error(
    y_pred=target_encoder.inverse_transform(model.predict(input_test)).reshape(-1),
    y_true=train[target].values[test_indices],
)
model_auc

model_auc = mean_squared_error(
    y_pred=target_encoder.inverse_transform(model.predict(input_test)).reshape(-1),
    y_true=train[target].values[test_indices],
)
model_auc

from xplainet.model import predict, encode

probs, explanations = predict(model, input_test)

probs, encoded_output = encode(model, input_test)

y_test.shape

encoded_output.shape

explanations.shape

# +
import matplotlib.pyplot as plt

# plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt


def explain_plot(importances, columns):
    """Horizontal bar chart of the 10 features with the largest |importance|."""
    selection = np.argsort(-np.absolute(importances))[:10]
    # indexes = np.argsort(importances)
    performance = importances[selection]
    # print(performance.shape)512, 256, 128, 64, 32, 1
    y_pos = np.arange(performance.shape[0])

    plt.barh(y_pos, performance, align="center", alpha=0.5)
    plt.yticks(y_pos, columns[selection])
    # plt.xlabel('Usage')
    plt.title("Feature importance")
    plt.show()
# -

all_cols = np.array(params["bool_cols"] + params["num_cols"] + params["cat_cols"])

all_cols

# ## Explain global

# +
# Looks like this is broken for now.
# -

probs_train, explanations_train = predict(model, input_train)

# Normalize summed absolute importances to a global attribution profile.
global_explain = np.abs(explanations_train).sum(axis=0)
global_explain = global_explain / np.abs(global_explain).sum()
global_explain

explain_plot(global_explain, all_cols)

# ## Explain local

# +
# Looks like this is broken for now.
# -

for i in range(20):
    explain_plot(explanations[i], all_cols)
    print(probs[i].item())

from sklearn.metrics import confusion_matrix

probs_test, explanations_train = predict(model, input_test)

# +
# plt.barh(y_pos, performance, align="center", alpha=0.5)
# plt.yticks(y_pos, columns[selection])
plt.ylabel("Predicted value")
plt.xlabel("Truth value")
plt.title("Scatter plot of prediction vs truth")
plt.scatter(
    train[target].values[test_indices], target_encoder.inverse_transform(probs_test),
)
plt.plot(
    [
        np.min(train[target].values[test_indices]),
        np.max(train[target].values[test_indices]),
    ],
    [
        np.min(train[target].values[test_indices]),
        np.max(train[target].values[test_indices]),
    ],
)
plt.grid(True)
plt.show()
# -

plt.hist(
    train[target].values[test_indices].reshape(-1)
    - target_encoder.inverse_transform(probs_test).reshape(-1),
)
plt.xlabel("Error value")
plt.ylabel("Counts")
plt.show()

plt.hist(
    np.abs(
        train[target].values[test_indices].reshape(-1)
        - target_encoder.inverse_transform(probs_test).reshape(-1),
    )
)
plt.xlabel("Error value (absolute)")
plt.ylabel("Counts")
plt.show()

out = Path(os.getcwd() + "/data/" + dataset_name + "-test.csv")

# NOTE(review): this chained assignment also rebinds `train` to the Kaggle
# test set — anything referencing `train` after this line sees test data.
# Looks unintentional; confirm before relying on `train` below this point.
kaggle_test = train = pd.read_csv(out, sep=",", low_memory=False)

input_kaggle, _ = preproc_dataset(kaggle_test, target, ["Set"], params)

probs_kaggle, _ = predict(model, input_kaggle)
probs_kaggle = target_encoder.inverse_transform(probs_kaggle)

probs_kaggle

test_df = kaggle_test[["Id"]]
test_df["SalePrice"] = probs_kaggle

test_df.to_csv("kaggle_submit.csv", index=False)

# +
# Scores 0.18047 on kaggle
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/leaderboard#score
# Should train with RMSLE ?
# from tensorflow.keras import backend as K
# def root_mean_squared_log_error(y_true, y_pred):
#     return K.sqrt(K.mean(K.square(K.log(1+y_pred) - K.log(1+y_true))))
conv1d-generic-houses-prices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="v-zR01tNxpuO" colab_type="text" # # Chapter 3 - Veriables, Floats, Input and Printing # + [markdown] id="HHhcQEBax3Jl" colab_type="text" # **3.1 - Veriable Assignment** # + [markdown] id="hP-ePClYyREx" colab_type="text" # Değişkenler belleği yani rami kullanarak verileri içinde saklayan yapılara denir. # # Değişken kullanmamızdaki amaç veriyi sürekli yazmak yerine bir değişkene atayarak kolayca kullanabiliriz, ayrıca veriyi temsil edecek bir şekilde değişkenin içinde saklayarak verinin ne olduğunu açıklayarabiliriz # + id="8Nk0d2aDxMjf" colab_type="code" colab={} değiskenAdi = veri # + [markdown] id="92Ef7hTGzI5U" colab_type="text" # = işaretine atama ifadesi denir. # + [markdown] id="8uPjY0j_056T" colab_type="text" # Dünyanın yaklaşık olarak yüzey yüzey alanını hesaplayalım: # + id="pWfRmk8B3z2J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="29c7dd32-ed33-4897-c3d4-227d16b791f3" radius_of_earth = 6371 pi = 3.14 surface_area = 4 * pi * (radius_of_earth ** 2) print(surface_area) # + [markdown] id="c-Mc7JUx4XAo" colab_type="text" # **yorumlar** # + id="1CCCJPFb4aFF" colab_type="code" colab={} # bu bir yorumdur """ bu çok satırlı bir yorumdur """ # + [markdown] id="9z2diJ8Q4fjj" colab_type="text" # Yazacağımız kodlarda kessinlikle yorum bulunması gerekir, bunun en büyük sebebi bir süre sonra o kodda neler olduğunu unuttuğumuzda yorumlar işte o zaman hatırlatıcı görevinde bulunurlar. # # # # # tek bir satırlık yorumlar için kullanılır # # """ """ # # çok satırlık yorumlar için kullanılır # # kodumuzda yorumları bulunması ileride bizde veya başkasına fayda sağlasa da gereksiz yorumlar, kodumuzda yer kaplayacak ve görüntü kirliliği oluşturacak. 
# + [markdown] id="O9302rEP5jKQ" colab_type="text" # **3.2 - Floating-point types** # + [markdown] id="7aXvnIvb6eQm" colab_type="text" # bizim bildiğimiz ondalık sayılar, python'da float olan sayılar bir veri türüdür. # + id="PPFarfKP60fo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b7da8111-390d-4bab-94f8-d8471801c1ee" pi = 3.14 ekranInc = 23.8 print(type(ekranInc)) # + [markdown] id="dPU66aEz6-OV" colab_type="text" # type() fonksiyonu ile verilerin hangi veri tipine sahip olduklarını öğrenebiliriz. # + [markdown] id="HdyEb4yf7OqU" colab_type="text" # float sayılarda integer sayılar gibi işlemler yapılabilir, ayrıca bu iki ayrı veri tipiyle işlemler yapabiliriz. # + id="pFYBXHjJ7a9p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="c56e2847-0caf-461f-8e79-d6ef97d5fe51" print(100.5 * 5) print(2.0 // 1.0) print(2.0 * 4.2) # + [markdown] id="D12_TPmI79xo" colab_type="text" # neden sadece / bölme işaretini kullanmıyoruz? # + [markdown] id="KJXa4sbO8EZ8" colab_type="text" # Çünkü / bölme operatörüyle yapılan bölmelerin sonucu her zaman float türünde olur, # # // bölme operatörü ise sayıları tam böler bu yüzden sonuç tam sayı olur(integer). # + id="mAtXwkkj8Xwi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="6b38536d-e50b-4fdb-fe7f-d92ab6977e7a" print(20 // 10) print(20 / 10) # + [markdown] id="QfVR0N039Rc7" colab_type="text" # negatif kayan noktalı sayıları da normal bir şekilde yazabiliriz: # + id="BKdMsiOh9ZHY" colab_type="code" colab={} -3.14 # + [markdown] id="2MwvV_QA9cfi" colab_type="text" # **3.3 - Getting user input** # + [markdown] id="nTf_ETHb9lEJ" colab_type="text" # # + [markdown] id="f1c0eNPa-cUr" colab_type="text" # Kullanıcıdan girdi almak için input() fonksiyonunu kullanırız. # # bu fonksiyon sayesinde kullanıcıya ulaşabilir, kullanıcıdan aldığımız verilerle işlemler yapabiliriz. 
# # Bu fonksiyonun en büyük ayrıntısı ise, kessinlikle kullanıcıdan gelen veriyi bir değişkene saklamamız lazım. O yüzdendir, input fonkisyonu kullandığımız değişkenlere kullanıcıdan beklediğimiz veriyi temsil eden bir isim koymamız lazım. # + id="GrI99uGz-84I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="0e512019-8850-4767-f893-bf5f321fbc6a" userName = input('<NAME> yazınız?: ') print(userName) # + [markdown] id="BaHp4Z6Z_vZ4" colab_type="text" # input() fonksiyonu girdileri str olarak döndürür, bu yüzden eğer kullanıcının yazacaği ve bizim işlerimizi sayısal ise input() fonksiyonu sayısal bir veri tipine dönüştürmemiz lazım. # + [markdown] id="1TE-GmkPASvf" colab_type="text" # kullanıcının sayısal bir değer girmesine karşılık izlebileceğimiz iki yol var. # # 1. eval() kullanmak. # + [markdown] id="LXFzr9sxBx-0" colab_type="text" # eval() fonksiyonu tehlikeli bir fonksiyonudr. eğer tehlikeleri bertaraf edilemeyecek bir ortamda kullanılması önerilmez. # + id="KnTsjWp2AcNo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="3306ed02-0a13-4cac-8d27-90ce0d3ab08d" x = eval(input('bir sayı giriniz: ')) print(x + 10) # + [markdown] id="Er4HMnlpAnOb" colab_type="text" # 2. int() veri tipi dönüştürücü kullanmak # # Bu yöntem pratiği ama en hataya meyili yöntemdir. Çünkü eğer input() fonksiyonunu int() veri tipiyle dönüştürmeye çalışırsak ve kullanıcı sayısal bir değer değilde farklı bir değer girerse, int() dönüştürücü veriyi dönüştüremeyecek ve ValueError hatası verecek. # # Fakat bu hatayı da try...except.. bloğu ile çözülebilir. # + [markdown] id="jiVR4twSE2_R" colab_type="text" # **3.4 - The Print Function** # + [markdown] id="zB3GCjqUFJCw" colab_type="text" # elimizde belli veriler var ve kullanıcıya bunu göstermek istiyoruz, veya kullanıcıdan girdi alıyoruz ve bu girdiyi ekrana print() fonksiyonuyla yazdırıyoruz. 
#
# Yani aslında bilgisayarlara, işlevleri olan, girdi alan, bu girdilerle bir şeyler yapabilen ve sonra değer üretip bunu ekranda gösterebilen makineler diyebiliriz.

# + id="G4mitzmLF5c8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="b8d78138-fe21-4a19-f544-c63925b6301e"
print('Hello')
print('Selamlar')
print(10 + 100)

# + [markdown] id="NaXGUrfcGFcm" colab_type="text"
# **Recap:**

# + [markdown] id="mkDDgmqEGJJH" colab_type="text"
# - Verileri programımızda nasıl saklayabileceğimizi öğrendik. Bu işlemi değişken oluşturup değişkene veri atayarak yaparız.
#
# - Yeni bir veri tipini öğrendik: Float.
#
# - Bölmenin iki şekilde yapılabileceğini öğrendik:
#
# // tam bölme işlemi yapar, tam sayı döndürür
#
# / küsuratlı böler, float döndürür
#
# - input() fonksiyonu sayesinde kullanıcıdan girdi alabiliriz
#
# - print() fonksiyonu sayesinde elimizdeki bilgileri ekrana yazdırabiliriz
#
# Tüm bu yeni öğrendiklerimizi bir programda toplayalım:

# + id="Pyhq5hbXG5Ar" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 118} outputId="225fba50-7be2-4918-e983-66581c91cf5b"
name = input('İsminizi yazınız: ')
print('Selam',name +'.')
number = int(input('Tam sayılı olarak 5 e böleceğiniz sayı: '))
print(number // 5)
numberOne = int(input('Ondalıklı sayıl olarak 5 e böleceğiniz sayı: '))
print(numberOne / 5)

# + [markdown] id="gg4rafp7IBNe" colab_type="text"
# # 3.5 - Exercises

# + [markdown] id="Hkdnk59lIGli" colab_type="text"
# **Question 1**

# + [markdown] id="ELaszb6lIJze" colab_type="text"
# Elimizde 11 tavşan var diyelim: numberOfRabbits = 11
#
# Bu tavşanlar her 3 ayda bir ikiye katlanıyor.
#
# 2 yıl sonra tavşan miktarı ne olacaktır?
# + [markdown] id="YWt3CeS5PnHL" colab_type="text" # benim çözümüm :)) : # + id="WJ2hW6mkImYf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8510ba9e-bc72-43ef-c8e5-2af5b766b872" numberOfRabits = 11 months = 24 // 3 populasyon = numberOfRabits * months print(populasyon * 2) # + [markdown] id="qc5mHfiKPr-G" colab_type="text" # Doğrusu: # + id="payGa6gKP8Ly" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a2a93ba5-536e-4173-8991-f88ca22eeabe" numberOfRabits = 11 months = 24 # her 3 ayda bir istediği için: pop = months // 3 topPopulasyon = numberOfRabits * (2 ** pop) print(topPopulasyon) # + [markdown] id="67mXmRvuQqo6" colab_type="text" # **Question 2** # + [markdown] id="Kyz_uw-YQtsY" colab_type="text" # Bu tavşan işleminin yıl olarak kullanıcının seçebilmesini sağlayın. # + id="fAaSoaKBQ-Qr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="771cf5e6-f7e3-414d-9ed3-4a70af58a08b" numberOfRabits = 11 year = int(input('Seçtiğiniz yılı seçiniz: ')) month = 12 * year pop = month // 3 topPopulasyon = numberOfRabits * (2 ** pop) print(topPopulasyon) # + [markdown] id="IL15Ogn6SkV6" colab_type="text" # eğitmenin çözdüğü: # + id="quzStQcYRcdY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="d03cdb4a-c8ab-41c2-db4c-eabfd1418c9f" number_of_rabbits = 11 years = int(input("Enter number of years: ")) months_passed = 12 * years times_pop_doubles = months_passed // 3 pop_after_time = number_of_rabbits * (2 ** times_pop_doubles) print(pop_after_time) # + [markdown] id="ewGfJbqZSpJd" colab_type="text" # **Question 3** # + [markdown] id="0QLZNqdIS7HJ" colab_type="text" # Diyelim kullanıcı x değikenine 3 verdi y değişkenine 11 # # şimdi programamız x değişkeni 11 y değişkeni 3 yapacak peki bunu nasıl yapacağız: # + id="1COfk7Z8TH-L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} 
outputId="c075a661-86bf-4ef3-fc9e-124b7700ab30" x = input('x değişkeni için bir değer giriniz: ') y = input('y değişkeni için bir değer giriniz: ') print(x) print(y) askUser = input('Değerleri değiştirmek istiyor musunuz?(E veya H yazınız): ') if askUser == 'E': x,y = y,x print('x değişkenin yeni hali,',x) print('y değişkenin yeni hali,',y) elif askUser == 'H': print('peki.. bay bay...') print('x değişkenin değeri,',x) print('y değişkenin değeri,',y) else: print('Lütfen sadece sizden istenen harfleri yazınız...') # + [markdown] id="TVv5kiDuUvm-" colab_type="text" # eğitmenin: # + id="xp5ptysUUyfN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="0d5d10bf-6e7b-4e67-e9f7-ed377582cb66" x = input('X value: ') y = input('Y values: ') tmp = x x = y y = tmp print('X after swap: '+ x) print('Y after swap: '+ y)
slitherIntoPython/chapterThree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ! pip3 install -r 'requirements.txt' # ! python3 validate.py "/home/agnieszka/Repos/TACO/data" --model tf_efficientdet_d2 --batch-size 16 --dataset TACOCfg # ! python3 train.py "/home/agnieszka/Repos/TACO/data" --model tf_efficientdet_d0 -b 4 --lr .09 --warmup-epochs 2 --model-ema --dataset TACOCfg
efficientdet/notebooks/run-check.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import matplotlib.pylab as plt
import numpy as np

# +
# Step 1 - Define our data:

# Input data - each row is [X value, Y value, bias term].
X = np.array([
    [-2, 4, -1],
    [4, 1, -1],
    [1, 6, -1],
    [2, 4, -1],
    [6, 2, -1],
])

# Associated output labels - the first 2 examples are labeled '-1'
# and the last 3 are labeled '1'.
Y = np.array([-1, -1, 1, 1, 1])
# -

# Let's plot these examples on a 2D graph!
for d, sample in enumerate(X):
    if d < 2:
        # Negative samples (the first 2) use a '_' marker.
        plt.scatter(sample[0], sample[1], s=120, marker='_', linewidths=2)
    else:
        # Positive samples use a '+' marker.
        plt.scatter(sample[0], sample[1], s=120, marker='+', linewidths=2)


# +
# Perform stochastic gradient descent to learn the separating hyperplane.
def svm_sgd_plot(X, Y):
    """Train a linear SVM with stochastic (sub)gradient descent on the
    hinge loss and plot the per-epoch misclassification indicator.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 3)
        Training examples; the last column is the bias term.
    Y : ndarray of shape (n_samples,)
        Labels in {-1, +1}.

    Returns
    -------
    ndarray of shape (3,)
        The learned weight vector.
    """
    # Initialize our SVM's weight vector with zeros.
    w = np.zeros(len(X[0]))
    # The learning rate.
    eta = 1
    # How many iterations to train for.
    epochs = 100000
    # Store misclassifications so we can plot how they change over time.
    errors = []

    # Epochs stay 1-based because the regularization term divides by
    # `epoch`; range(1, epochs + 1) fixes the original off-by-one that
    # ran only epochs - 1 iterations.
    for epoch in range(1, epochs + 1):
        error = 0
        for i, x in enumerate(X):
            if (Y[i] * np.dot(X[i], w)) < 1:
                # Misclassified / inside the margin: hinge-loss update
                # plus the decaying regularization step.
                w = w + eta * ((X[i] * Y[i]) + (-2 * (1 / epoch) * w))
                error = 1
            else:
                # Correctly classified: only the regularization step.
                w = w + eta * (-2 * (1 / epoch) * w)
        errors.append(error)

    # Plot the rate of classification errors during training.
    plt.plot(errors, '|')
    plt.ylim(0.5, 1.5)
    # plt.axes() would create a brand-new, empty axes on top of the plot;
    # plt.gca() correctly grabs the axes we just drew into.
    plt.gca().set_yticklabels([])
    plt.xlabel("Epoch")
    plt.ylabel("Misclassified")
    plt.show()

    return w


# -

w = svm_sgd_plot(X, Y)

# +
for d, sample in enumerate(X):
    # Plot the negative samples (the first 2).
    if d < 2:
        plt.scatter(sample[0], sample[1], s=120, marker='_', linewidths=2)
    else:
        plt.scatter(sample[0], sample[1], s=120, marker='+', linewidths=2)

# Add our test samples.
plt.scatter(2, 2, s=120, marker='_', linewidths=2, color="yellow")
plt.scatter(4, 3, s=120, marker='+', linewidths=2, color="blue")

# Print the hyperplane calculated by svm_sgd_plot().
# NOTE: unpack into fresh names instead of X, Y — the original clobbered
# the training data arrays here; the second, verbatim-duplicated quiver
# block that drew the same arrows twice has also been removed.
x2 = [w[0], w[1], -w[1], w[0]]
x3 = [w[0], w[1], w[1], -w[0]]

x2x3 = np.array([x2, x3])
qx, qy, qu, qv = zip(*x2x3)
ax = plt.gca()
ax.quiver(qx, qy, qu, qv, scale=1, color='blue')
# +
# # ax.quiver?
# -
SirajRaval_Ejemplos/SVM/SVM1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.10 64-bit
#     language: python
#     name: python3
# ---

# +
import os
import numpy as np
import json
from pathlib import Path
from keras.models import Model
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import (
    BatchNormalization, SeparableConv2D, MaxPooling2D, Activation, Flatten, Dropout, Dense,
    AveragePooling2D, Conv2D
)
# NOTE(review): mixing the standalone `keras` package (below) with
# `tensorflow.keras` (above) is fragile — objects from the two packages are
# not interchangeable. Verify the installed `keras` is the tf-bundled one.
from keras.layers import (
    Input,
    Activation,
    Dense,
    Flatten
)
# from keras.layers.convolutional import (Conv2D, MaxPooling2D, AveragePooling2D)  # DMR: tensorflow.keras.layers was used instead
from keras.layers.merge import add
# from keras.layers.normalization import BatchNormalization  # DMR: tensorflow.keras.layers was used instead
from keras.regularizers import l2
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping
# -

# ## Load the dataset from the local files

# +
# Portable path construction (the original hard-coded Windows '\\' separators).
path = os.path.join(os.path.abspath(os.getcwd()), 'resultados')

# Pass allow_pickle=True directly instead of monkey-patching np.load
# globally: the .npy files contain pickled objects, which np.load
# refuses to read by default.
train_data = np.load(os.path.join(path, 'train_data.npy'), allow_pickle=True)
train_labels = np.load(os.path.join(path, 'train_labels.npy'), allow_pickle=True)
test_data = np.load(os.path.join(path, 'test_data.npy'), allow_pickle=True)
test_labels = np.load(os.path.join(path, 'test_labels.npy'), allow_pickle=True)
labels = np.load(os.path.join(path, 'labels.npy'), allow_pickle=True)
# -

# Validate the size of the dataset.
print( "train data shape: ", train_data.shape )
print( "train label shape: ", train_labels.shape )
print( "test data shape: ", test_data.shape )
print( "test_labels.shape: ", test_labels.shape )

# +
## Shuffle the dataset, keeping data and labels aligned.
def shuffle_data(train_data, train_labels):
    """Return `train_data` and `train_labels` reordered by one shared
    random permutation (rows stay paired with their labels)."""
    size = len(train_data)
    train_idx = np.arange(size)
    np.random.shuffle(train_idx)
    return train_data[train_idx], train_labels[train_idx]


train_data, train_labels = shuffle_data(train_data, train_labels)

# +
# Training callbacks: shrink the LR on plateaus, stop early when the metric
# stalls, and log per-epoch metrics to CSV.
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('resnet50_tiny_ImageNet.csv')

batch_size = 100
nb_classes = 200  # presumably Tiny-ImageNet's 200 classes — confirm against the data
nb_epoch = 10

# Input image dimensions.
img_rows, img_cols = 64, 64
# The images are RGB.
img_channels = 3
input_shape = (img_rows, img_cols, img_channels)

# The data, shuffled and split between train and test sets:
X_train = train_data
Y_train = train_labels
X_test = test_data
Y_test = test_labels

# Drop the original references so the arrays are not held twice in memory.
del train_data
del train_labels
del test_data
del test_labels

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# Subtract the training-set mean and scale to roughly [-1, 1].
# (The test set is centred with the *training* mean, as it should be.)
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_test -= mean_image
X_train /= 128.
X_test /= 128.

# +
# Use the Keras built-in ResNet50V2, trained from scratch (weights=None).
resnet50v2 = keras.applications.ResNet50V2(include_top=True, classes=nb_classes, input_shape=input_shape, weights=None)
# resnet50v2.summary()

resnet50v2.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc'])

# +
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
    featurewise_center=False,             # set input mean to 0 over the dataset
    samplewise_center=False,              # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,   # divide each input by its std
    zca_whitening=False,                  # apply ZCA whitening
    rotation_range=0,                     # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=0.1,                # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,               # randomly shift images vertically (fraction of total height)
    horizontal_flip=True,                 # randomly flip images
    vertical_flip=False)                  # randomly flip images

# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(X_train)
# -

nb_epoch = 10  # epoch 1-10
# Fit the model on the batches generated by datagen.flow().
# Model.fit accepts generators since TF 2.1; fit_generator is deprecated
# and removed in TF >= 2.6.
resnet50v2.fit(datagen.flow(X_train, Y_train, batch_size=batch_size),
               steps_per_epoch=X_train.shape[0] // batch_size,
               validation_data=(X_test, Y_test),
               epochs=nb_epoch, verbose=1,
               callbacks=[lr_reducer, early_stopper, csv_logger])

resnet50v2.save('models/resnet50v2')

# +
resnet50v2 = tf.keras.models.load_model('models/resnet50v2')

# Check its architecture
# resnet50v2.summary()
03.Codigo/2.ModeloTL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Problem 28
#
# Starting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:
# ```
# 21 22 23 24 25
# 20  7  8  9 10
# 19  6  1  2 11
# 18  5  4  3 12
# 17 16 15 14 13
# ```
# It can be verified that the sum of the numbers on the diagonals is 101.
#
# What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?
#
# ---

# +
# First let's start with an example and figure out which numbers fall on the diagonal.

# +
import numpy as np

numbers = np.array([[21, 22, 23, 24, 25],
                    [20,  7,  8,  9, 10],
                    [19,  6,  1,  2, 11],
                    [18,  5,  4,  3, 12],
                    [17, 16, 15, 14, 13]])
# -

# Check the sum first: mask both diagonals with identity matrices and
# subtract the centre cell (1), which lies on both diagonals and would
# otherwise be counted twice.
np.sum(numbers * np.eye(5) + numbers * np.fliplr(np.eye(5))) - numbers[2, 2]

# If one has a range, figure out which indexes fall on the diagonal.
print('Index')
print(np.arange(0, 25, 1))
print('Range')
print(np.arange(1, 26, 1))

# +
# Diagonal indexes are 0, 2, 4, 6, 8, 12, 16, 20, 24.
# So the formula is: start at 0, then increase by 2 four times, then by 4
# four times, then by 6, and so on — each ring of the spiral contributes
# four corners and widens the step by 2.
# -

# Implement this as a varied-step range.
def varied_step_range(start, stop):
    """Yield the 0-based indexes of a spiral's diagonal values.

    Starting from `start`, the gap between consecutive yields grows by 2
    after every four values (one spiral ring per group of four corners).
    Stops before `stop`.
    """
    i = 0
    stepsize = 2
    while start < stop:
        if i == 4:
            i = 0
            stepsize += 2  # widen the step after each group of 4 corners
        yield start
        start += stepsize
        i += 1


# %%timeit -n3 -r10
values = np.arange(1, 1001 * 1001 + 1)
# Use len(values) directly: the original len(values + 1) built a full copy
# of the million-element array only to measure the identical length.
summed_diag = np.sum(values[list(varied_step_range(0, len(values)))])
print(summed_diag)
solutions/problem_28_number_spiral_diagonals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Create Questions for AMT task import json import uuid import os import sys import random import pandas as pd import numpy as np path = "../result/sbert_context_bert_generate_result_20200421.csv" df = pd.read_csv(path) df.head(5) df.iloc[9315, 3] def create_question(df, random_state=42): np.random.seed(random_state) sample_df = df.sample(frac=1, random_state=random_state) for index, item in sample_df.iterrows(): context = item["input"] questionid = str(uuid.uuid4()) # add uuid as a question id answer = np.random.binomial(1, 0.5) if answer == 0: A = "gt_story" B = "pred_story" GT = "A" Generated = "B" elif answer == 1: A = "pred_story" B = "gt_story" GT = "B" Generated = "A" sample_df.at[index, "questionid"] = questionid sample_df.at[index, "A"] = item[A] sample_df.at[index, "B"] = item[B] sample_df.at[index, "GT"] = GT sample_df.at[index, "Generated"] = Generated return sample_df created_questions_df = create_question(df) # + # # save the created csv just once. # because the uuid is randomly generated every time you run the script. # created_questions_df.to_csv("./proc/created_questions_20200422.csv", index=False) # - # check if the uuid duplicated. created_questions_df["questionid"].nunique()
MPP_StoryCompletion/AmazonMTurk/create_question_csv.ipynb