code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import numpy as np from keras.models import Model from keras.layers import Input from keras.layers.core import Dense from keras.layers.convolutional import Conv2D from keras.layers.wrappers import TimeDistributed from keras import backend as K import json from collections import OrderedDict def format_decimal(arr, places=6): return [round(x * 10**places) / 10**places for x in arr] DATA = OrderedDict() # ### TimeDistributed # **[wrappers.TimeDistributed.0] wrap a Dense layer with units 4 (input: 3 x 6)** # + data_in_shape = (3, 6) layer_0 = Input(shape=data_in_shape) layer_1 = TimeDistributed(Dense(4))(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(4000 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['wrappers.TimeDistributed.0'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } # - # 
**[wrappers.TimeDistributed.1] wrap a Conv2D layer with 6 3x3 filters (input: 5x4x4x2)** # + data_in_shape = (5, 4, 4, 2) layer_0 = Input(shape=data_in_shape) layer_1 = TimeDistributed(Conv2D(6, (3,3), data_format='channels_last'))(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(4010 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['wrappers.TimeDistributed.1'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } # - # ### export for Keras.js tests print(json.dumps(DATA))
notebooks/layers/wrappers/TimeDistributed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # scaredyrat-FC analyzes data from fear conditioning protocol # + # notebook formatting and loading settings, imports - keep same # %matplotlib inline # %reload_ext autoreload # %autoreload 2 import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import math import os, sys sys.path.append('../src') import scaredyrattools as sr # - # # Change variables in this cell: # + inpath = r'C:\Users\Sean\Desktop\ShanskyLab_Projects\TestData\Summer_1_Darting_2018' outpath = r'C:\Users\Sean\Desktop\ShanskyLab_Projects\TestData\Summer_1_Darting_2018\SR_OUT' ntones = 12 #filelist = ['Raw data-PFC-PAG DREADDs July 2018-Trial 1.xlsx', # 'Raw data-PFC-PAG DREADDs July 2018-Trial 4.xlsx', # 'Raw data-PFC-PAG DREADDs July 2018-Trial 5.xlsx', # 'Raw data-PFC-PAG DREADDs July 2018-Trial 6.xlsx', # 'Raw data-PFC-PAG DREADDs July 2018-Trial 13.xlsx'] filelist = [] for entry in os.scandir(inpath): if entry.is_file(): filelist.append(entry.path) #print(filelist) # - ## don't need to change this - stays constant across files sheetlist = ['Track-Arena 1-Subject 1', 'Track-Arena 2-Subject 1', 'Track-Arena 3-Subject 1', 'Track-Arena 4-Subject 1'] # # Function calls below for file in filelist: for sheet in sheetlist: ## set input/output info ID,ctx,anim = sr.animal_read(inpath,file,sheet) print(ctx) print(ID) # Check that ctx=="Fear Conditioning" if(ctx != "Fear Conditioning" or ID == "-1" or ID=="nan" or (isinstance(ID,float) and math.isnan(float(ID)))): continue ## use scaredyrattools function to find where each epoch is baseline =sr.find_baseline(anim) tones = sr.find_tones(anim, ntones) pretones = sr.find_pretones(anim, ntones) shocks = sr.find_shock_responses(anim, ntones) pshocks = sr.find_postshocks(anim, ntones) ## use 
scaredyrattools function to calculate baseline freezing baselineFreezing, bFTs = sr.get_baseline_freezing(baseline, freezingThreshold=0.1, binSecs=1) BaselineOutfile = outpath + '/FC-baseline-freezing-{}.csv' BaselineOutfile = BaselineOutfile.format(ID) bFreezing = pd.concat([baselineFreezing],axis=1) bFreezing.to_csv(BaselineOutfile) ## use scaredyrattools function to find top n velocities mtone = sr.get_top_vels(tones,10,ntones) mptone = sr.get_top_vels(pretones,10,ntones) mshock = sr.get_top_vels(shocks,5,ntones) mpshock = sr.get_top_vels(pshocks,10,ntones) ## define names of CSV files to save to mToneOutfile = outpath + '/FC-tone-max-vels-{}.csv' mToneOutfile = mToneOutfile.format(ID) mPToneOutfile = outpath + '/FC-pretone-max-vels-{}.csv' mPToneOutfile = mPToneOutfile.format(ID) mShockOutfile = outpath + '/FC-shock-max-vels-{}.csv' mShockOutfile = mShockOutfile.format(ID) mPShockOutfile = outpath + '/FC-postshock-max-vels-{}.csv' mPShockOutfile = mPShockOutfile.format(ID) ## send files to previously defined CSVs mtone.to_csv(mToneOutfile) mptone.to_csv(mPToneOutfile) mshock.to_csv(mShockOutfile) mpshock.to_csv(mPShockOutfile) ## use scaredyrattools function to find means tmeans = sr.get_means(tones,'Tone',ntones) ptmeans = sr.get_means(pretones,'Pre-tone',ntones) smeans = sr.get_means(shocks,'Shock',ntones) pshockmeans = sr.get_means(pshocks,'Post-shock',ntones) allmeans = pd.concat([tmeans, ptmeans, smeans, pshockmeans],axis=1) ## use scaredyrattools function to find SEMs tSEMs = sr.get_SEMs(tones,'Tone',ntones) ptSEMs = sr.get_SEMs(pretones,'Pre-tone',ntones) sSEMs = sr.get_SEMs(shocks,'Shock',ntones) pshockSEMs = sr.get_SEMs(pshocks,'Post-shock',ntones) allSEMs = pd.concat([tSEMs, ptSEMs, sSEMs, pshockSEMs],axis=1) ## use scaredyrattools function to find medians tmeds = sr.get_meds(tones,'Tone',ntones) ptmeds = sr.get_meds(pretones,'Pre-tone',ntones) smeds = sr.get_meds(shocks,'Shock',ntones) pshockmeds = sr.get_meds(pshocks,'Post-shock',ntones) allmeds = 
pd.concat([tmeds, ptmeds, smeds, pshockmeds],axis=1) ## sr freezing tools toneFreezing, FTs = sr.get_freezing(tones,ntones,freezingThreshold=0.1, binSecs=1) ptFreezing, ptFTs = sr.get_freezing(pretones,ntones,freezingThreshold=0.1, binSecs=1) shockFreezing, shockFTs = sr.get_freezing(shocks,ntones,freezingThreshold=0.1, binSecs=1) pshockFreezing, pshockFTs = sr.get_freezing(pshocks,ntones,freezingThreshold=0.1, binSecs=1) allFreezing = pd.concat([toneFreezing, ptFreezing, shockFreezing, pshockFreezing],axis=1) ## sr darting tools toneDarting, DTs = sr.get_darting(tones,ntones,dartThreshold=20, binSecs=2) ptDarting, ptDTs = sr.get_darting(pretones,ntones,dartThreshold=20, binSecs=2) shockDarting, shockDTs = sr.get_darting(shocks,ntones,dartThreshold=20, binSecs=2) pshockDarting, pshockDTs = sr.get_darting(pshocks,ntones,dartThreshold=20, binSecs=2) allDarting = pd.concat([toneDarting, ptDarting, shockDarting, pshockDarting],axis=1) ## define names of CSV files to save to meanOutfile = outpath + '/FC-mean-{}.csv' meanOutfile = meanOutfile.format(ID) SEMOutfile = outpath + '/FC-SEM-{}.csv' SEMOutfile = SEMOutfile.format(ID) medOutfile = outpath + '/FC-med-{}.csv' medOutfile = medOutfile.format(ID) freezingOutfile = outpath + '/FC-freezing-{}.csv' freezingOutfile = freezingOutfile.format(ID) dartingOutfile = outpath + '/FC-darting-{}.csv' dartingOutfile = dartingOutfile.format(ID) allmeans.to_csv(meanOutfile) allSEMs.to_csv(SEMOutfile) allmeds.to_csv(medOutfile) allFreezing.to_csv(freezingOutfile) allDarting.to_csv(dartingOutfile) ## plot stuff vels = pd.DataFrame(anim['Velocity']) plt.style.use('seaborn-white') plt.figure(figsize=(16,8),facecolor='white',edgecolor='white') # Plots main velocity in black line1, = plt.plot(vels,color='k',linewidth=0.1,label='ITI') # Loops through tones, plots each one in cyan i = 1 while i <= ntones: tone = sr.find_tone_vels(anim,i) line2, = plt.plot(tone,color='c',linewidth=0.5,label='Tone') i += 1 # Loops through shocks, plots each 
one in magenta i = 1 while i <= ntones: sresponse = sr.find_shock_vels(anim,i) line3, = plt.plot(sresponse,color='m',linewidth=0.5,label='Shock') i += 1 # Loops through freezing bins, plots each below the x-axis for timebin in FTs: plt.plot([timebin[0],timebin[1]],[-0.3,-0.3],color='#ff4f38',linewidth=3) # Loops through darting bins, plots each below the x-axis for timebin in DTs: plt.plot([timebin[0],timebin[1]],[-0.7,-0.7],color='#167512',linewidth=3) plt.ylim(-1,35) sns.despine(left=True, bottom=True, right=True) plt.title(ID + " Fear Conditioning") plt.legend(handles=[line1,line2,line3]) plt.ylabel('Velocity (cm/s)') plt.xlabel('Trial time (s)') ## define where to save the fig fname = outpath + '/FC-plot-{}' fname = fname.format(ID) plt.savefig(fname, dpi=300) plt.show() plt.close()
dev/scaredyrat-FC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tf1.11_gpu # language: python # name: tf111_gpu # --- # <img src="../Pics/MLSb-T.png" width="160"> # <br><br> # <center><u><H1>GloVe-Yelp-Comments-Classification</H1></u></center> import tensorflow as tf from keras.backend.tensorflow_backend import set_session config = tf.ConfigProto() config.gpu_options.allow_growth = True config.log_device_placement = True sess = tf.Session(config=config) set_session(sess) from keras.models import Sequential from keras.layers import Embedding, Flatten, Dense, Dropout from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.callbacks import EarlyStopping, ModelCheckpoint import numpy as np import os import pandas as pd import matplotlib.pyplot as plt import re import nltk import string import plotly.offline as py import plotly.graph_objs as go py.init_notebook_mode(connected=True) from nltk.corpus import stopwords from nltk.stem import SnowballStemmer from sklearn.manifold import TSNE # %matplotlib inline # ## Load the data: df = pd.read_csv('../data/yelp.csv') df.head() df= df.dropna() df=df[['text','stars']] df.head() labels = df['stars'].map(lambda x : 1 if int(x) > 3 else 0) print(labels[10:20]) def clean_text(text): ## Remove puncuation text = text.translate(string.punctuation) ## Convert words to lower case and split them text = text.lower().split() ## Remove stop words stops = set(stopwords.words("english")) text = [w for w in text if not w in stops and len(w) >= 3] text = " ".join(text) # Clean the text text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text) text = re.sub(r"what's", "what is ", text) text = re.sub(r"\'s", " ", text) text = re.sub(r"\'ve", " have ", text) text = re.sub(r"n't", " not ", text) text = re.sub(r"i'm", "i am ", text) text = re.sub(r"\'re", " are ", text) text = re.sub(r"\'d", " 
would ", text) text = re.sub(r"\'ll", " will ", text) text = re.sub(r",", " ", text) text = re.sub(r"\.", " ", text) text = re.sub(r"!", " ! ", text) text = re.sub(r"\/", " ", text) text = re.sub(r"\^", " ^ ", text) text = re.sub(r"\+", " + ", text) text = re.sub(r"\-", " - ", text) text = re.sub(r"\=", " = ", text) text = re.sub(r"'", " ", text) text = re.sub(r"(\d+)(k)", r"\g<1>000", text) text = re.sub(r":", " : ", text) text = re.sub(r" e g ", " eg ", text) text = re.sub(r" b g ", " bg ", text) text = re.sub(r" u s ", " american ", text) text = re.sub(r"\0s", "0", text) text = re.sub(r" 9 11 ", "911", text) text = re.sub(r"e - mail", "email", text) text = re.sub(r"j k", "jk", text) text = re.sub(r"\s{2,}", " ", text) text = text.split() stemmer = SnowballStemmer('english') stemmed_words = [stemmer.stem(word) for word in text] text = " ".join(stemmed_words) return text df['text'] = df['text'].map(lambda x: clean_text(x)) df.head(10) maxlen = 50 embed_dim = 100 max_words = 20000 tokenizer = Tokenizer(num_words=max_words) tokenizer.fit_on_texts(df['text']) sequences = tokenizer.texts_to_sequences(df['text']) data = pad_sequences(sequences, maxlen=maxlen, padding='post') data[0] vocab_size = len(tokenizer.word_index) + 1 vocab_size labels = np.asarray(labels) print('Shape of data:', data.shape) print('Shape of label:', labels.shape) # ## Creating datasets: validation_split = .2 indices = np.arange(data.shape[0]) np.random.shuffle(indices) data = data[indices] labels = labels[indices] val_samples = int(validation_split * data.shape[0]) X_train = data[:-val_samples] y_train = labels[:-val_samples] x_val = data[-val_samples:] y_val = labels[-val_samples:] # ## Load the GloVe embeddings dir = '../data/GloVe/glove.6B' embed_index = dict() f = open(os.path.join(dir, 'glove.6B.100d.txt'), encoding="utf8") for line in f: values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embed_index[word] = coefs f.close() print('%s Word vectors' % 
len(embed_index)) # ## Create a weight matrix: # + embed_matrix = np.zeros((max_words, embed_dim)) for word, i in tokenizer.word_index.items(): if i < max_words: embed_vector = embed_index.get(word) if embed_vector is not None: embed_matrix[i] = embed_vector # - # ## Creating the model: model = Sequential() model.add(Embedding(max_words, embed_dim, weights=[embed_matrix], input_length=maxlen)) model.add(Flatten()) model.add(Dropout(0.5)) model.add(Dense(32, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1, activation='sigmoid')) model.summary() model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min') save_best = ModelCheckpoint('../data/yelp_comments.hdf', save_best_only=True, monitor='val_loss', mode='min') # %%time model.fit(X_train, y_train, epochs=20, validation_data=(x_val, y_val), batch_size=128, verbose=1, callbacks=[early_stopping, save_best]) # ## Making predictions: model.load_weights(filepath = '../data/yelp_comments.hdf') pred = model.predict(x_val) # ## Word embeddings visualization: glove_embds = model.layers[0].get_weights()[0] words = [] for word, i in tokenizer.word_index.items(): words.append(word) # ## Visualizing words: def plot_words(data, start, stop, step): trace = go.Scatter( x = data[start:stop:step,0], y = data[start:stop:step, 1], mode = 'markers', text= words[start:stop:step] ) layout = dict(title= 't-SNE_factor1 vs t-SNE_factor2', yaxis = dict(title='t-SNE_factor2'), xaxis = dict(title='t-SNE_factor1'), hovermode= 'closest') fig = dict(data = [trace], layout= layout) py.iplot(fig) # %%time glove_tsne_embds = TSNE(n_components=2).fit_transform(glove_embds) plot_words(glove_tsne_embds, 0, 100, 1) # ## Reference: # # https://nlp.stanford.edu/projects/glove/
DEEP_NLP_resources/3-Deep-Learning for NLP/GloVe-Yelp-Comments-Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.12 ('mycobot') # language: python # name: python3 # --- import ikpy.chain import numpy as np import ikpy.utils.plot as plot_utils my_chain = ikpy.chain.Chain.from_urdf_file('./mycobot_desc/mycobot_urdf.urdf') target_position = [ 0.1, 0.1, 0.3] print("The angles of each joints are : ", my_chain.inverse_kinematics(target_position)) real_frame = my_chain.forward_kinematics(my_chain.inverse_kinematics(target_position)) print("Computed position vector : %s, original position vector : %s" % (real_frame[:3, 3], target_position)) # Optional: support for 3D plotting in the NB # If there is a matplotlib error, uncomment the next line, and comment the line below it. # %matplotlib inline import matplotlib.pyplot as plt fig, ax = plot_utils.init_3d_figure() my_chain.plot(my_chain.inverse_kinematics(target_position), ax, target=target_position) plt.xlim(-0.1, 0.1) plt.ylim(-0.1, 0.1)
RobotArmCtrlClient/ikpy_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Background # This notebook includes an example exercise about observing how the amount of data affects to the histogram made of that data. The exercise consists of a theory part and a practical part. # # Example: Invariant mass histogram # In this exercise the CMS (Compact Muon Solenoid) detector and the concept of invariant mass is introduced. With the real data collected by CMS detector the effect of the amount of data to the histogram made of that data is observed. # ### CMS detector # At CERN particles are accelerated and collided with the LHC (Large Hadron Collider) particle accelerator. With the CMS detector the new particles created in these collisions can be observed and measured. In the image below is the opened CMS detector. # # <img src="../../Images/CMS.jpg" alt="CMS-ilmaisin avattuna" style="height: 400px"> # # (Image: Domenico Salvagnin, https://commons.wikimedia.org/wiki/File:CMS@CERN.jpg) # <br> # <br> # <br> # ### Invariant mass # Invariant mass $M$ is a value that can be calculated from the results of measurements of the CMS detector. Invariant mass is a mathematical concept, not a physical mass. # # For example let's take a situation where a particle A decays to two particles B and C. The invariant mass of the two particles B and C is determined by the equation # # $$ M = \sqrt{(E_1 + E_2)^2-(\vec{p_1} + \vec{p_2})^2}, $$ # # where $E_1$ and $E_2$ are the energies of the decay products and $\vec{p_1}$ and $\vec{p_2}$ the momenta of the decay products. # # The invariant mass can be used to examine the excistence of the particle A. If particles B and C stem from the decay of the particle A, the invariant mass of them equals the physical mass of the particle A. 
If particles B and C stem from some other process than decay of A (there are enormous amount of processes in particle collisions), the invariant mass of B and C is something else. # # So by determing the energies and the momenta of B and C the invariant mass to them can be calculated. This can be done to large amount of particle pairs. By doing that the excistence of the particle A might be proved. # # In this excercise the values of invariant masses are already calculated. # ### Let's try! # Let's start to observe the real data collected by CMS detector. We will focus on the decay of Z-boson to two muons (muon and antimuon). # # We will use the data collected in 2011 [1]. From the primary dataset 10851 collision events where have been exactly two muons have been selected to the file "Zmumu_Run2011A_masses.csv". (The selection has been done with the code that is openly available at https://github.com/tpmccauley/dimuon-filter.) # # The file includes readily calculated values of the invariant masses of two muons for the 10851 events. A histogram is a great tool for observing the values. The histogram reperesents how many values of $M$ there have been at certain range of values. # # In the next exercise the mission is to examine __how the amount of data used affects to the histogram made of that data.__ # <br> # <br> # <br> # [1] CMS collaboration (2016). DoubleMu primary dataset in AOD format from RunA of 2011 (/DoubleMu/Run2011A-12Oct2013-v1/AOD). CERN Open Data Portal. DOI: [10.7483/OPENDATA.CMS.RZ34.QR6N](http://doi.org/10.7483/OPENDATA.CMS.RZ34.QR6N). # ### 1) Getting the file and the masses # Let's start with the code where the needed Python modules are imported and the data file is gotten. Explore the comments in the code and run the code by first clicking the cell active and then pressing _Ctrl_ + _Enter_. After that you can move to the part two. You might get a warning related to _matplotlib_ module but you don't have to worry about that. 
# + # Import the needed modules. Pandas is for the data-analysis, numpy for scientific calculation # and matplotlib.pyplot for making plots. Modules are named as pd, np and plt. import pandas as pd import numpy as np import matplotlib.pyplot as plt # Create a new DataFrame structure from the file "Zmumu_Run2011A_masses.csv" dataset = pd.read_csv('../../Data/Zmumu_Run2011A_masses.csv') # Create a Series structure (basically a list) and name it to "invariant_mass". # Save the column "M" from the "dataset" to the variable "invariant_mass". invariant_mass = dataset['M'] # Create an empty list "selected", where the selected amount of invariant masses will be saved. selected = [] # - # ### Selecting the amount of data # The code below asks how many events will be selected to the histogram. After that the code plots the histogram of the selected invariant masses. # # Run the code by clicking the code cell active and by pressing _Ctrl_ + _Enter_. You can re-run the code and enter the new amount of data by pressing _Ctrl_ + _Enter_ again. # # Examine how the amount of the data used affects to the histogram. Which values of the invariant mass there seems to be most? What you can conclude from those values? # # By examining the code predict what will happen if you enter a number bigger than 10851 for the asked amount of data. Try your prediction by running the code. # + # Ask user to enter the number of events wanted. Save the number to variable "amount". amount = int(input('Enter the amount of events wanted: ')) # Check if user have selected more events than there are available. # If not select that amount of invariant masses from the variable "invariant_mass". # Masses will be selected in order. if amount > 10851: print('''You have tried to select more data than there are available in the file. The histogram couldn't be drawn. 
The maximum amount of the data is 10851.''') else: for f in range(amount): M = invariant_mass[f] selected.append(M) print('\n You selected {} invariant mass values from the whole data.'.format(amount)) # Jupyter Notebook uses "magic functions". With this function it is possible to plot # the histogram straight to notebook. # %matplotlib inline # Create the histogram from data in variable "selected". Set bins and range to histogram. plt.hist(selected, bins=120, range=(60,120)) # Set y-axis from 0 to 800. axes = plt.gca() axes.set_ylim([0,800]) # Name the axises and give the title. plt.xlabel('Invariant mass [GeV]') plt.ylabel('Number of events') plt.title('Histogram of invariant masses of two muons\n') # Empty the variable "selected" for the next run. selected = [] # - # ### 3) Evolving of the histogram when the amount of data increases # Let's observe with series of images how the histogram will change when the amount of data are increased. # # The code below will create 11 different histograms of invariant masses from the same data. Between every image 1000 more values of invariant masses are taken to the histogram. Run the code by clicking the code cell active and pressing _Ctrl_ + _Enter_. Observe the images and explain what you see. # Loop where a new histogram is plotted after 1000 events until 10000 events have reached. for a in range(0,10851,1000): T = invariant_mass[0:a] # %matplotlib inline plt.hist(T, bins=120, range=(60,120)) # Set y-axis from 0 to 800. axes = plt.gca() axes.set_ylim([0,800]) plt.xlabel('Invariant mass [GeV]') plt.ylabel('Number of events') plt.title('Histogram of invariant masses of two muons for {} events\n'.format(len(T))) plt.show()
Exercises-with-open-data/Warming-up/Invariant-mass-histogram-select-data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # First Study on Brazilian Cities Transparency Portal # In this dataset we have a population projection for each Brazilian city in the year of 2013. # # # + import pandas as pd import numpy as np # We first collected the data with population estimatives, # we can use it later to do some comparisions or to use it later cities = pd.read_excel('../data/Cidades - estimativa 2013.xlsx', converters={'COD. UF': np.str, 'COD. MUNIC': np.str}, sheetname=None, header=0) # + data = pd.DataFrame() for key in cities.keys(): data = pd.concat([data, cities[key]]) data = data.reset_index(drop=True) data.shape # - # We should see 5570 rows because that's the number of cities that IBGE says that Brazil have. The different amount of rows leads me to believe there are metadata from the `.xlsx` messing with our data # ## Translating column names data.rename(columns={ 'UF': 'state', 'COD. UF': 'state_id', 'COD. 
MUNIC': 'city_id', 'NOME DO MUNICÍPIO': 'city_name', 'POPULAÇÃO ESTIMADA': 'population_projection' }, inplace=True) data.head() # ## Formating `city_id` # # Formatting `city_id` to conform with the ids displayed on the Brazilian cesus files data['city_id'] = data['city_id'].apply(lambda x: x.zfill(5)) # ## Checking out a `unique_id` for each city data[data['city_id'] == '00108'] # + UNIQUE_IDS = data.loc[:,['state_id', 'city_id']] for i in range(len(UNIQUE_IDS['state_id'])): UNIQUE_IDS.loc[i,'ids'] = '{}{}'.format(UNIQUE_IDS.loc[i,'state_id'], UNIQUE_IDS.loc[i,'city_id']) UNIQUE_IDS.head() # - len(set(UNIQUE_IDS['ids'])) UNIQUE_IDS.shape # + brazilian_states = {'RO': 'rondonia', 'AC': 'acre', 'AM': 'amazonas', 'RR': 'roraima', 'PA': 'para', 'AP': 'amapa', 'TO': 'tocantis', 'MA': 'maranhao', 'PI': 'piaui', 'CE': 'ceara', 'RN': 'rio_grande_do_norte', 'PB': 'paraiba', 'PE': 'pernambuco', 'AL': 'alagoas', 'SE': 'sergipe', 'BA': 'bahia', 'MG': 'mina_gerais', 'ES': 'epirito_santo', 'RJ': 'rio_de_janeiro', 'SP': 'sao_paulo', 'PR': 'parana', 'SC': 'santa_catarina', 'RS': 'rio_grande_do_sul', 'MS': 'mato_grosso_do_sul', 'MT': 'mato_grosso', 'GO': 'goias', 'DF': 'distrito_federal'} census_link = "ftp.ibge.gov.br/Censos/Censo_Demografico_2010/resultados/total_populacao_{}.zip" # - # ## Gathering cities with @cuducos Brazilian Cities script # # @cuducos had already made a script with all Brazilian Cities and its code and state associated, here in [this repository](https://github.com/cuducos/brazilian-cities). # # We checked and it is the best way to get the cities in the right way. 
# + from serenata_toolbox.datasets import fetch fetch('2017-05-22-brazilian-cities.csv', '../data') # - brazilian_cities = pd.read_csv('../data/2017-05-22-brazilian-cities.csv') brazilian_cities.head() brazilian_cities.shape # ## Normalizing its form # # It is necessary to normalize all information in order to use it to our necessities, so we managed to: # - Lowercase all states # - Remove all acentuation and normalize cities names # - And for our case we remove spaces to generate the pattern we want brazilian_cities['state'] = brazilian_cities['state'].apply(str.lower) # + import unicodedata def normalize_string(string): if isinstance(string, str): nfkd_form = unicodedata.normalize('NFKD', string.lower()) return nfkd_form.encode('ASCII', 'ignore').decode('utf-8') # - brazilian_cities['normalized_name'] = brazilian_cities['name'].apply(lambda x: normalize_string(x)) brazilian_cities['normalized_name'] = brazilian_cities['normalized_name'].apply(lambda x: x.replace(' ', '')) brazilian_cities.head() # ## Getting all cities that are part of Transparency Portal # # There are some cities that we already know that have a page with transparency and open data. The main objective here is to find how many cities have that. 
# # Pattern: `{city}-{state}.portaltp.com.br` portal_url = 'https://{}-{}.portaltp.com.br/' brazilian_cities['transparency_portal_url'] = brazilian_cities.apply(lambda row: portal_url.format( row['normalized_name'], row['state']), axis=1) brazilian_cities.head(20) # (Getting all of the status code for each city might take a while so we added the prints only for feedback) # + import requests def get_status(url): try: print(requests.head(url).status_code) return requests.head(url).status_code except requests.ConnectionError: print(404) return 404 # + # %%time colatina = brazilian_cities[brazilian_cities['code'] == 320150]['transparency_portal_url'].values[0] statusOK = get_status(colatina) abaete = brazilian_cities[brazilian_cities['code'] == 310020]['transparency_portal_url'].values[0] statusNOK = get_status(abaete) # - br_cities = brazilian_cities.loc[:10,:].copy() # %%time br_cities.loc[:,'status_code'] = br_cities.apply(lambda x: get_status(x['transparency_portal_url']), axis=1) br_cities # This will take too long considering we have 5570 cities to address. # # Let's try using [grequests](https://pypi.python.org/pypi/grequests). # # I know that we can find two different status code in the first 10 cities urls test. So let's use those 10 to test grequests ;) # + import grequests rs = (grequests.get(u) for u in list(br_cities['transparency_portal_url'])) # + def exception_handler(request, exception): return 404 responses = grequests.map(rs, exception_handler=exception_handler) # + codes = [int(x) for x in br_cities['status_code'].values] print(pd.unique(codes), pd.unique(responses)) # - responses # The result above got me wondering where were those 200 statuses code we've seen before. I tested the code on the command line and they are there. So a little reasearch and I found that apparently it is not possible to run async tasks easily on a jupyter notebook [ref](http://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Asynchronous.html). 
# # With that in mind we decided to write a script that generates the infomartion we want: Open Data url for each brazilian city data = br_cities[br_cities['status_code'] == 404].copy().reset_index(drop=True) data # There are some cities that we already know that have a page with transparency and open data but the pattern is different from the one above. # # Second Pattern: `cm{city}-{state}.portaltp.com.br` portal_url = 'https://cm{}-{}.portaltp.com.br/' data['transparency_portal_url'] = data.apply(lambda row: portal_url.format( row['normalized_name'], row['state']), axis=1) data # We still need to update the status code column # %%time data.loc[:,'status_code'] = data.apply(lambda x: get_status(x['transparency_portal_url']), axis=1) data # study purposes data.loc[8, 'status_code'] = 200 data data.loc[data['status_code'] == 404, 'transparency_portal_url'] = None data br_cities.loc[br_cities['status_code'] == 404, 'transparency_portal_url'] = None br_cities # + unnecessary_columns = ['normalized_name', 'status_code'] br_cities = pd.merge(br_cities.drop(unnecessary_columns, axis=1), data.drop(unnecessary_columns, axis=1), on=['code', 'name', 'state'], how='left') br_cities['transparency_portal_url'] = br_cities \ .apply(lambda row: row['transparency_portal_url_x'] or row['transparency_portal_url_y'], axis=1) unnecessary_columns = ['transparency_portal_url_x', 'transparency_portal_url_y'] br_cities = br_cities.drop(unnecessary_columns, axis=1) br_cities # - # # Conclusions # # After all that study, we find that in that pattern of transparency portals list there are already 279 cities, from them 19 are returning an Internal Server Error (Status Code: 5XX). # # It is something like 5% of all Brazilian existing cities! # # Below we have a table with all those cities with portals ;) with_tp_portal = pd.read_csv('../data/2017-05-30-cities_with_tp_portal.csv') with_tp_portal.shape with_tp_portal
research/develop/2017-05-19-jtemporal-cities-transparency-portal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Spark # # Before you turn this problem in, make sure everything runs as expected. First, restart the kernel (in the menubar, select Kernel → Restart) and then run all cells (in the menubar, select Cell → Run All). You can speak with others regarding the assignment but all work must be your own. # # # ### This is a 20 point assignment graded from answers to questions. # # # #![Spark Logo](http://spark-mooc.github.io/web-assets/images/ta_Spark-logo-small.png) + ![Python Logo](http://spark-mooc.github.io/web-assets/images/python-logo-master-v3-TM-flattened_small.png) # # **Word Count Lab: Building a word count application** # #### This lab will build on the techniques covered in the Spark tutorial to develop a simple word count application. The volume of unstructured text in existence is growing dramatically, and Spark is an excellent tool for analyzing this type of data. # #### *Part 1:* Creating a base RDD and pair RDDs # #### *Part 2:* Counting with pair RDDs # #### *Part 3:* Finding unique words and a mean value # # #### Note that, for reference, you can look up the details of the relevant methods in [Spark's Python API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) #Don't need to do this if running on databricks. import pyspark #sc = pyspark.SparkContext('local[*]') # ### ** Part 1: Creating a base RDD and pair RDDs ** # #### In this part of the lab, we will explore creating a base RDD with `parallelize` and using pair RDDs to count words. # #### ** (1a) Create a base RDD ** # #### We'll start by generating a base RDD by using a Python list and the `sc.parallelize` method. Then we'll print out the type of the base RDD. 
wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat'] wordsRDD = sc.parallelize(wordsList, 4) # Print out the type of wordsRDD print (type(wordsRDD)) # #### What does it mean that the list is an RDD? What special operations does this enable and how is it different from a dataset? # # # # #### ** (1b) Pluralize and test ** # #### Let's use a `map()` transformation to add the letter 's' to each string in the base RDD we just created. We'll define a Python function that returns the word with an 's' at the end of the word. Please replace `<FILL IN>` with your solution. If you have trouble, the next cell has the solution. After you have defined `makePlural` you can run the third cell which contains a test. If you implementation is correct it will print `1 test passed`. # #### This is the general form that exercises will take, except that no example solution will be provided. Exercises will include an explanation of what is expected, followed by code cells where one cell will have one or more `<FILL IN>` sections. The cell that needs to be modified will have `# TODO: Replace <FILL IN> with appropriate code` on its first line. Once the `<FILL IN>` sections are updated and the code is run, the test cell can then be run to verify the correctness of your solution. The last code cell before the next markdown section will contain the tests. # + # TODO: Replace <FILL IN> with appropriate code def makePlural(word): """Adds an 's' to `word`. Note: This is a simple function that only adds an 's'. No attempt is made to follow proper pluralization rules. Args: word (str): A string. Returns: str: A string with 's' added to it. """ return <FILL IN> makePlural('cat') # - # #### ** (1c) Apply `makePlural` to the base RDD ** # #### Now pass each item in the base RDD into a [map()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.map) transformation that applies the `makePlural()` function to each element. 
And then call the [collect()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collect) action to see the transformed RDD. # TODO: Replace <FILL IN> with appropriate code pluralRDD = wordsRDD.map(<FILL IN>) print(pluralRDD.collect()) # #### ** (1d) Pass a `lambda` function to `map` ** # #### Let's create the same RDD using a `lambda` function. # Let's remember that a lampda function # # lambda x: x + 1 # TODO: Replace <FILL IN> with appropriate code pluralLambdaRDD = wordsRDD.map(<FILL IN>) print(pluralLambdaRDD.collect()) # #### ** (1e) Length of each word ** # #### Now use `map()` and a `lambda` function to return the number of characters in each word. You can do this with the length function. We'll `collect` this result directly into a variable. # TODO: Replace <FILL IN> with appropriate code pluralLengths = (pluralRDD .map(lambda w: <FILL IN>) .collect()) print(pluralLengths) # #### ** (1f) Pair RDDs ** # #### The next step in writing our word counting program is to create a new type of RDD, called a pair RDD. A pair RDD is an RDD where each element is a pair tuple `(k, v)` where `k` is the key and `v` is the value. In this example, we will create a pair consisting of `('<word>', 1)` for each word element in the RDD. # #### We can create the pair RDD using the `map()` transformation with a `lambda()` function to create a new RDD. # TODO: Replace <FILL IN> with appropriate code wordPairs = wordsRDD.map(<FILL IN> w: (w, 1)) print(wordPairs.collect()) # ### ** Part 2: Counting with pair RDDs ** # #### Now, let's count the number of times a particular word appears in the RDD. There are multiple ways to perform the counting, but some are much less efficient than others. # #### A naive approach would be to `collect()` all of the elements and count them in the driver program. While this approach could work for small datasets, we want an approach that will work for any size dataset including terabyte- or petabyte-sized datasets. 
In addition, performing all of the work in the driver program is slower than performing it in parallel in the workers. For these reasons, we will use data parallel operations. # #### ** (2a) `groupByKey()` approach ** # #### An approach you might first consider (we'll see shortly that there are better ways) is based on using the [groupByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.groupByKey) transformation. As the name implies, the `groupByKey()` transformation groups all the elements of the RDD with the same key into a single list in one of the partitions. There are two problems with using `groupByKey()`: # + #### The operation requires a lot of data movement to move all the values into the appropriate partitions. # + #### The lists can be very large. Consider a word count of English Wikipedia: the lists for common words (e.g., the, a, etc.) would be huge and could exhaust the available memory in a worker. # # #### Use `groupByKey()` to generate a pair RDD of type `('word', iterator)`. # TODO: Replace <FILL IN> with appropriate code # Note that groupByKey requires no parameters <FILL IN> = wordPairs.groupByKey() for key, value in wordsGrouped.collect(): print('{0}: {1}'.format(key, list(value))) # #### ** (2b) Use `groupByKey()` to obtain the counts ** # #### Using the `groupByKey()` transformation creates an RDD containing 3 elements, each of which is a pair of a word and a Python iterator. # #### Now sum the iterator using a `map()` transformation. The result should be a pair RDD consisting of (word, count) pairs. # TODO: Replace <FILL IN> with appropriate code wordCountsGrouped = wordsGrouped.map(<FILL IN> kv: (kv[0], len(kv[1]))) print(wordCountsGrouped.collect()) # #### ** (2c) Counting using `reduceByKey` ** # #### A better approach is to start from the pair RDD and then use the [reduceByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduceByKey) transformation to create a new pair RDD. 
The `reduceByKey()` transformation gathers together pairs that have the same key and applies the function provided to two values at a time, iteratively reducing all of the values to a single value. `reduceByKey()` operates by applying the function first within each partition on a per-key basis and then across the partitions, allowing it to scale efficiently to large datasets. # TODO: Replace <FILL IN> with appropriate code # Note that reduceByKey takes in a function that accepts two values and returns a single value wordCounts = wordPairs.reduceByKey(lambda a,b : a + b) print(wordCounts.collect()) # #### ** (2d) All together ** # #### The expert version of the code performs the `map()` to pair RDD, `reduceByKey()` transformation, and `collect` in one statement. # TODO: Replace <FILL IN> with appropriate code wordCountsCollected = (wordsRDD .map(lambda w: (w,1)) .reduceByKey(lambda a, b : a + b) .collect()) print(<FILL IN>) # ### ** Part 3: Finding unique words and a mean value ** # + #See Unique words. uniqueWords = wordsRDD.distinct().count() print (uniqueWords) # - # #### ** (3a) Mean using `reduce` ** # #### Find the mean number of words per unique word in `wordCounts`. # #### Use a `reduce()` action to sum the counts in `wordCounts` and then divide by the number of unique words. First `map()` the pair RDD `wordCounts`, which consists of (key, value) pairs, to an RDD of values. # TODO: Replace <FILL IN> with appropriate code from operator import add totalCount = (wordCounts .map(lambda kv: kv[0]) .count()) average = totalCount / float(uniqueWords) print (totalCount) print (round(average, 2)) # ### ** Part 4: Apply word count to a file ** # #### In this section we will finish developing our word count application. We'll have to build the `wordCount` function, deal with real world problems like capitalization and punctuation, load in our data source, and compute the word count on the new data. 
# #### ** (4a) `wordCount` function **
# #### First, define a function for word counting. You should reuse the techniques that have been covered in earlier parts of this lab. This function should take in an RDD that is a list of words like `wordsRDD` and return a pair RDD that has all of the words and their associated counts.

# +
# TODO: Replace <FILL IN> with appropriate code
def wordCount(wordListRDD):
    """Creates a pair RDD with word counts from an RDD of words.

    Args:
        wordListRDD (RDD of str): An RDD consisting of words.

    Returns:
        RDD of (str, int): An RDD consisting of (word, count) tuples.
    """
    # Pair every word with 1, then add up the 1s per key to get the counts.
    pairs = wordListRDD.map(lambda word: (word, 1))
    return pairs.reduceByKey(lambda left, right: left + right)

print (wordCount(wordsRDD).collect())
# -
site/_build/jupyter_execute/notebooks/10-big-data/03-spark-questions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # This notebook display the logical structure of the model # As many models also the Asia model consist of a recursive (no feedback) pre-model, a simultaneous core-model and a recursive post_model. # # Below the structure is displayed as an Adjacency matrices. # # The first takes all feedback into account. It shows the economic feedback structure. # # The second only looks at the contemporanousi feedback. This is useful when solving the model. Only the core-model has to be solved iterative. # # Behind the matrices is a Graph showing all interdependencies en the model. # + [markdown] slideshow={"slide_type": "skip"} # # Import modelflow # + slideshow={"slide_type": "skip"} from modelclass import model model.widescreen() # + [markdown] slideshow={"slide_type": "slide"} # # Load model and data, and run # + slideshow={"slide_type": "-"} masia,baseline = model.modelload('../Asia_sep7.pcim',run=1,silent=1) # + slideshow={"slide_type": "slide"} fig = masia.plotadjacency(size=(40,40),nolag=True,title='Structure when we look across lags') #fig.savefig('graph/Asia causality structure across lags.pdf') # + slideshow={"slide_type": "slide"} fig = masia.plotadjacency(size=(40,40),nolag=False,title='Structure only one periode') #fig.savefig('graph/Asia causality structure one periode.pdf') # + [markdown] slideshow={"slide_type": "slide"} # # Some subgraphs # As the model is large, it is onlyu possible to look at partiel graphs of the logical structure. # + slideshow={"slide_type": "-"} # Indonesian gdp masia.IDN_YER.tracepre(up=2,HR=0,size=(10,10)) # + slideshow={"slide_type": "slide"} masia.IDN_CO2.tracepre(up=2,HR=0,size=(10,10))
Asia/Additional material/Asia logical structure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="7JYnK4SIwOy6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="7c3e40f2-345c-4aa2-ab49-99cdcb1c4d6d" executionInfo={"status": "ok", "timestamp": 1581550544935, "user_tz": -60, "elapsed": 27605, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # !pip install datadotworld # !pip install datadotworld[pandas] # + id="Oe9oc7B1w11u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 56} outputId="ab1a5d67-97c1-44ea-871a-44d7e2d1dad9" executionInfo={"status": "ok", "timestamp": 1581550645608, "user_tz": -60, "elapsed": 12200, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # !dw configure # + id="CAnSFTNqvwH5" colab_type="code" colab={} from google.colab import drive import pandas as pd import numpy as np import datadotworld as dw # + id="IJI5d5xLxAGy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 132} outputId="9c7fb6b8-1eb0-41d0-f44f-0916b94c003f" executionInfo={"status": "ok", "timestamp": 1581550717600, "user_tz": -60, "elapsed": 23094, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} drive.mount("/content/drive") # + id="UEI-eTEfxG_w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="45a68856-b872-4ff6-ccd1-a9f68d8ff053" executionInfo={"status": "ok", "timestamp": 1581550776978, "user_tz": -60, "elapsed": 448, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # cd "drive/My Drive/Colab Notebooks/dw_matrix" # + id="o1AVaOqIxbBg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="5905325f-f9d4-4cdc-fbfd-3df61dda7c3a" executionInfo={"status": "ok", "timestamp": 1581550787185, "user_tz": -60, "elapsed": 2013, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # ls # + id="-3CWQK2hxdIa" colab_type="code" colab={} # !mkdir data # + id="YV9TTDdrxexg" colab_type="code" colab={} # !echo 'data' > .gitignore # + id="ZxsxAQ55xs8x" colab_type="code" colab={} # !git add .gitignore # + id="7lKIo-vgxy8y" colab_type="code" colab={} data=dw.load_dataset('datafiniti/mens-shoe-prices') # + id="VgKuiiJTyIHM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 132} outputId="283b063e-d304-4fb1-97a2-4de7a3e7972a" executionInfo={"status": "ok", "timestamp": 1581551031223, "user_tz": -60, "elapsed": 1633, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} df = data.dataframes['7004_1'] df.shape # + id="DSmSf-45yQc4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 734} outputId="d305e5af-08d7-439c-f504-fcc766642e27" executionInfo={"status": "ok", "timestamp": 1581551048508, "user_tz": -60, "elapsed": 367, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} df.sample(5) # + id="5_1a_X2SydVP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 245} outputId="2d9cbf21-0f6e-4881-fc33-6ddc16023cd3" executionInfo={"status": "ok", "timestamp": 1581551066436, "user_tz": 
-60, "elapsed": 462, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} df.columns # + id="HeC7Ad_yyhsC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 112} outputId="8a32a680-5c9d-4155-c3ca-75e3894a6f86" executionInfo={"status": "ok", "timestamp": 1581551085458, "user_tz": -60, "elapsed": 491, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} df.prices_currency.unique() # + id="HzEzL7KqymUk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="dc47949e-e40f-4c2a-93f1-7edae04cf064" executionInfo={"status": "ok", "timestamp": 1581551146656, "user_tz": -60, "elapsed": 535, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} df.prices_currency.value_counts(normalize=True) # + id="wVGWqV2Qysp5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="5a1e70a5-e127-45fa-daef-6dc951eb8cc1" executionInfo={"status": "ok", "timestamp": 1581551231821, "user_tz": -60, "elapsed": 528, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} df_usd=df[ df.prices_currency == 'USD' ].copy() df_usd.shape # + id="imkjOu9RzC-Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="388ee7b2-48bc-41e3-e6e5-d15d63ef6f56" executionInfo={"status": "ok", "timestamp": 1581551400064, "user_tz": -60, "elapsed": 524, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} 
# Convert prices to numeric. `np.float` was only an alias of the builtin
# `float`, deprecated in NumPy 1.20 and removed in 1.24, so use `float`
# directly (identical behaviour).
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
df_usd['prices_amountmin'].hist()

# + id="5nHO9ybwzU8N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="91ef1fa8-fe13-4515-e31b-41a63fcd54fd" executionInfo={"status": "ok", "timestamp": 1581551519970, "user_tz": -60, "elapsed": 549, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}}
# 99th percentile as a cut-off to drop extreme price outliers before plotting.
filter_max = np.percentile( df_usd['prices_amountmin'], 99)
filter_max

# + id="wOKitgaV0EcB" colab_type="code" colab={}
df_usd_filter = df_usd[df_usd['prices_amountmin'] < filter_max]

# + id="HwZkJRcD0bFN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="0ff74b60-2691-4fc1-9b11-ed4750a71249" executionInfo={"status": "ok", "timestamp": 1581551628385, "user_tz": -60, "elapsed": 1808, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}}
df_usd_filter.prices_amountmin.hist(bins=100)

# + id="J05VMsFX0l2_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="deb2cad2-d13c-4a23-d832-f576f73d2f3f" executionInfo={"status": "ok", "timestamp": 1581551748919, "user_tz": -60, "elapsed": 3131, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}}
# ls

# + id="PyvRphbE1Hp-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="99e0c767-0603-463c-c605-7f4c91818f30" executionInfo={"status": "ok", "timestamp": 1581551765684, "user_tz": -60, "elapsed": 486, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}}
# cd data/
# 
+ id="Cu27ShyC1MZS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="305d0d93-80b7-4d20-eabd-fcf41081b676" executionInfo={"status": "ok", "timestamp": 1581551842503, "user_tz": -60, "elapsed": 1788, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # ls matrix_one # + id="PizaZzyd1Ngz" colab_type="code" colab={} # ls # + id="AmaD4QNx1jnu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="6e905dc2-41ec-4e7f-c240-54af02690259" executionInfo={"status": "ok", "timestamp": 1581551868565, "user_tz": -60, "elapsed": 489, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # cd .. # + id="xd0j8Njk1lhH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="befbef43-e901-451c-cb4f-b829241fe55c" executionInfo={"status": "ok", "timestamp": 1581551874220, "user_tz": -60, "elapsed": 2253, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # ls # + id="ISVlk5dI1mcP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="882c26e4-8d12-4177-9e25-0b66615a7b3f" executionInfo={"status": "ok", "timestamp": 1581551984410, "user_tz": -60, "elapsed": 1661, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # !git add matrix_one/day3.ipynb # + id="YYsx1j582BgO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="c8320e27-5726-41c6-b3a3-f31692c5a3b2" executionInfo={"status": "ok", "timestamp": 1581551994604, "user_tz": -60, "elapsed": 
1572, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # ls # + id="6RP66D0q2EBm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="de682f25-83bb-4088-e03c-7d2d58c10848" executionInfo={"status": "ok", "timestamp": 1581551998898, "user_tz": -60, "elapsed": 618, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # cd marix_one/ # + id="ait_cDzg2FTj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="dd1ae077-e68c-4f2f-8e84-e9f70804256f" executionInfo={"status": "ok", "timestamp": 1581552002786, "user_tz": -60, "elapsed": 2168, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # ls # + id="SAEy5LPI2F4E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="c5b4bf5d-341a-4bb6-c55e-2ee266a8878e" executionInfo={"status": "ok", "timestamp": 1581552020039, "user_tz": -60, "elapsed": 474, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDT1Bg5F3Gw33ovY3_BztzKN_dzS1ZSf-w9QbbHhQ=s64", "userId": "15075618550147527080"}} # cd .. # + id="Yf7kwP1w2KgI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="a0f74dab-6fc2-4d92-d5f8-82d17fa367c5" # !git add matrix_one/day3.ipynb # + id="9ryZ4dmv2OZH" colab_type="code" colab={}
marix_one/day3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import matplotlib.pyplot as plt import mplfinance as mpf import numpy as np from tqdm.notebook import tqdm pd.set_option('display.max_columns', 500) df_full = pd.read_pickle('data/candlestick_15m_ETHBTC_2107291134.pkl') # This contains all the feature columns df_full.set_index('open_time', inplace=True) df_full.sort_index(inplace=True) df_full.tail() # - df_col_full = list(df_full.columns) df_col_diff = df_col_full.copy() for c in ['open','high','low','close']: df_col_diff.remove(c) df = df_full[['open','high','low','close']].copy() # + def add_lookback_trades(df, levels=1, cols=['open','high','low','close']): ''' Adds [cols] columns for previous records to each row. New columns are suffixed with the record backstep. For example: open_1 means the open price 1 record back. ''' new_df = df.copy() for l in range(1,levels+1): new_cols={c:f'{c}_{l}' for c in cols} new_df = new_df.merge(df[cols].shift(l).bfill().rename(columns=new_cols), how='left', left_index=True, right_index=True) return new_df def add_lookforward_trades(df, levels=1, cols=['open','high','low','close']): ''' Adds [cols] columns for future records to each row. New columns are suffixed with the record forwardstep. For example: open_1 means the open price 1 record forward. 
''' new_df = df.copy() for l in range(1,levels+1): new_cols={c:f'{c}_{l}' for c in cols} new_df = new_df.merge(df[cols].shift(-l).ffill().rename(columns=new_cols), how='left', left_index=True, right_index=True) return new_df def calculate_atr(df, period=14): ''' Calculate the Average True Range (ATR) ''' df2=df[['high','low','close']].copy() df2=add_lookback_trades(df2,1,['high','low','close']) df2['tr_calc'] = np.max([df2.high, df2.close_1], axis=0) - np.min([df2.low, df2.close_1], axis=0) return df2.tr_calc.rolling(period, min_periods=1).mean() # + def keep_only_first(y, keep=2): ''' Keep only the first X "True" values in a sequence. For example: T,F,F,T,T,T,F,T,F and keep=2 T,F,F,T,T,F,F,T,F * ''' boolean_out=False if y.dtype == bool: boolean_out=True new_y = y.copy().astype(int) prev_ones=0 for r in new_y.iteritems(): if r[1]==0 and prev_ones>0: prev_ones=0 elif r[1]==1 and prev_ones<keep: prev_ones+=1 elif r[1]==1: new_y.at[r[0]]=0 if boolean_out: new_y=new_y.astype(bool) return new_y def to_buy(x, reverse=False): ''' If first candle to pass high/low column is the target, mark te row "True" for buying ''' high_col = x.pass_high_col high_col_num = int(high_col[high_col.find('_')+1:]) low_col = x.pass_low_col low_col_num = int(low_col[low_col.find('_')+1:]) if not reverse: if high_col_num < low_col_num and high_col_num > 1: return True elif low_col_num == 1: if not x[low_col] and x[high_col]: return True else: return False else: return False else: if low_col_num < high_col_num and low_col_num > 1: return True elif high_col_num == 1: if not x[high_col] and x[low_col]: return True else: return False else: return False def build_Xy(df, df_full, window=14, threshold_ratio=(0.04,0.02), use_atr=True, atr_ratio=(2,1), reverse=False, debug=False): ''' Build the X,y datasets Parameters ---------- window : int number of records to look into the future to determine if this was a buy threshold_ratio : tuple(float,float) The high/low percentage to calculate target/stop-loss. 
        Ignore if use_atr is True.
    use_atr : boolean
        Use the ATR to calculate stop-loss
    atr_ratio : tuple(int,int)
        The high/low multiplier for ATR to calculate target/stop-loss. Only used when use_atr is True.
    reverse : boolean
        buying the other currency thus reverse logic
    debug : boolean
        show messages
    '''
    # Fail fast if the active ratio parameter is not a 2-tuple.
    if use_atr==False and (not type(threshold_ratio) == tuple or len(threshold_ratio) != 2):
        raise Exception("Parameter 'threshold_ratio' must be a tuple of size 2")
    if use_atr==True and (not type(atr_ratio) == tuple or len(atr_ratio) != 2):
        raise Exception("Parameter 'atr_ratio' must be a tuple of size 2")

    df2 = df.copy()
    print(' Build: Adding look-forward trades') if debug else None
    # Add high_i/low_i columns for the next `window` candles.
    df2 = add_lookforward_trades(df2, levels=window, cols=['high','low'])
    high_cols = [f'high_{i}' for i in range(1,window+1)]
    low_cols = [f'low_{i}' for i in range(1,window+1)]

    # Calculate target and stop-loss
    print(' Build: Calculating target and stop-loss') if debug else None
    if not reverse:
        if use_atr:
            # NOTE(review): the target is measured from `close` while the
            # stop-loss is measured from `low` — presumably intentional to
            # give the stop some slack below the candle; confirm before
            # changing.
            df2['atr'] = calculate_atr(df2)
            df2['stop_loss'] = df2.low-(df2.atr*atr_ratio[1])
            df2['target'] = df2.close+(df2.atr*atr_ratio[0])
        else:
            df2['stop_loss'] = df2.close-df2.close*threshold_ratio[1]
            df2['target'] = df2.close+df2.close*threshold_ratio[0]
    else:
        # reverse
        if use_atr:
            df2['atr'] = calculate_atr(df2)
            df2['stop_loss'] = df2.high+(df2.atr*atr_ratio[1])
            df2['target'] = df2.close-(df2.atr*atr_ratio[0])
        else:
            df2['stop_loss'] = df2.close+df2.close*threshold_ratio[1]
            df2['target'] = df2.close-df2.close*threshold_ratio[0]

    # Determine if price crossed the target and/or stop-loss
    # (each look-forward column becomes a boolean flag).
    print(' Build: Determining break points') if debug else None
    for h,l in zip(high_cols, low_cols):
        if not reverse:
            df2[h] = df2[h]>=df2.target
            df2[l] = df2[l]<=df2.stop_loss
        else:
            # reverse
            df2[l] = df2[l]<=df2.target
            df2[h] = df2[h]>=df2.stop_loss

    # Identify the first column that pass the target/stop-loss
    # WARNING: this is assuming idxmax() will continue to
    # return the FIRST column with a True value.
    print(' Build: Identifying target/stop-loss columns') if debug else None
    df2['pass_high_col'] = df2[high_cols].idxmax(axis=1)
    df2['pass_low_col'] = df2[low_cols].idxmax(axis=1)

    print(' Build: Determining buy records') if debug else None
    df2['buy'] = df2.apply(to_buy,reverse=reverse,axis=1)

    # Build X/y
    # X keeps the raw OHLC plus every extra feature column from df_full.
    X = df.merge(df_full, how='left', left_index=True, right_index=True)
    y = df2[['buy']].astype(int)

    print(' Build: Dropping Unnecessary columns') if debug else None
    columns_to_drop = ['pair_id','close_time']
    X.drop(columns=columns_to_drop, inplace=True)

    print (' Build: Dropping NaN rows') if debug else None
    # Keep only rows where every feature is present, and keep y aligned.
    not_nan_rows = X.notnull().all(axis=1)
    X = X[not_nan_rows]
    y = y[not_nan_rows]

    print(' Build: Converting booleans to ints') if debug else None
    # Cast boolean/object columns to int so downstream models get numeric input.
    for col in X.columns:
        if X[col].dtype.kind in ['b','O']:
            X[col] = X[col].astype(int)

    #print(' Build: Trimming buy sequence to 2')
    #y.buy = keep_only_first(y.buy, keep=2)

    return X, y, df2

window=30
# -

# # Based on ATR Ratio

# +
# y-axis is the ratio of buys/not-buys for all data in the
# x-axis is the ratio denominator
rng=range(1,10)
results2=[]
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=True, atr_ratio=(i*2,i), reverse=True)
    results2.append(y.sum() / len(y))
plt.plot(rng, results2)
# -

# Same sweep with a 3:1 target/stop multiplier.
results3=[]
rng = range(1,10)
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=True, atr_ratio=(i*3,i), reverse=True)
    results3.append(y.sum() / len(y))
plt.plot(rng, results3)

results4=[]
rng = range(1,10)
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=True, atr_ratio=(i*4,i), reverse=True)
    results4.append(y.sum() / len(y))
plt.plot(rng, results4)

results5=[]
rng = range(1,10)
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=True, atr_ratio=(i*5,i), reverse=True)
    results5.append(y.sum() / len(y))
plt.plot(rng, results5)

results6=[]
rng = range(1,7)
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=True, atr_ratio=(i*6,i), reverse=True)
    results6.append(y.sum() / len(y))
plt.plot(rng, results6)

results2

results3

# # Based on Percentage Threshold

[0.00125*2**i for i in range(7)]

# Sweep fixed percentage thresholds with a /2 stop-loss ratio.
results_p1=[]
rng = [0.00125*2**i for i in range(7)]
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=False, threshold_ratio=(i,i/2), reverse=True)
    results_p1.append(y.sum() / len(y))
plt.plot(rng, results_p1)

results_p2=[]
rng = [0.00125*2**i for i in range(7)]
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=False, threshold_ratio=(i,i/3), reverse=True)
    results_p2.append(y.sum() / len(y))
plt.plot(rng, results_p2)

results_p3=[]
rng = [0.00125*2**i for i in range(7)]
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=False, threshold_ratio=(i,i/4), reverse=True)
    results_p3.append(y.sum() / len(y))
plt.plot(rng, results_p3)

results_p4=[]
rng = [0.00125*2**i for i in range(7)]
for i in tqdm(rng):
    _, y, _ = build_Xy(df, df_full[df_col_diff], window=window, use_atr=False, threshold_ratio=(i,i/5), reverse=True)
    results_p4.append(y.sum() / len(y))
plt.plot(rng, results_p4)

# # Figures

# +
fig,ax = plt.subplots(1, 2, figsize=(15,7))
ax[0].plot(range(1,10), results2, label='2x')
ax[0].plot(range(1,10), results3, label='3x')
ax[0].plot(range(1,10), results4, label='4x')
ax[0].plot(range(1,10), results5, label='5x')
ax[0].plot(range(1,7), results6, label='6x')
ax[1].plot([0.00125*2**i for i in range(7)], results_p1, label='/2')
ax[1].plot([0.00125*2**i for i in range(7)], results_p2, label='/3')
ax[1].plot([0.00125*2**i for i in range(7)], results_p3, label='/4')
ax[1].plot([0.00125*2**i for i in range(7)], results_p4, label='/5')
ax[0].set_title('Percentage of TRUE Label Data\nATR Ratio')
ax[1].set_title('Percentage of TRUE Label Data\nPercentage Ratio')
ax[0].legend()
ax[1].legend()
# -

[0.00125*2**i for i in range(7)]

results_p1
playground/nicks/0_find_atr_ratio.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "f4eea91e74e5bccfd5373418adcf51fe", "grade": false, "grade_id": "cell-11f79d0940899143", "locked": true, "schema_version": 3, "solution": false, "task": false} # <div class="alert alert-danger"> # # **Read the `Instructions` notebook** before you start working on this problem set! It contains instructions on how to create the submission package and a detailed description of the provided classes (*BayesNet* and *Variable*). # # </div> # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "0a2152424ac0aedb136b96c86ab16f4a", "grade": false, "grade_id": "cell-98c1f957645e7920", "locked": true, "schema_version": 3, "solution": false, "task": false} import numpy as np import matplotlib.pyplot as plt from bayesian_network import BayesNet # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "165d5434d3cbbe7f503d4407e2c791b6", "grade": false, "grade_id": "cell-8a9b6cf1529e8501", "locked": true, "schema_version": 3, "solution": false, "task": false} # # Rejection Sampling # # <div class="alert alert-warning"> # Implement the rejection sampling algorithm and use it to approximate $P(A \mid D, E)$. # </div> # # In Rejection Sampling, one samples from the full joint distribution and throws away ('rejects') all samples that do not correspond to the evidence. # # ## Implementation # # Implement # - `sample_forward`, and # - `rejection_sampling`. # # `sample_forward` returns a set of samples from the full joint distribution $P(\mathcal{X})$. 
# # `rejection_sampling` uses *sample_forward*, throws away all random events with mismatching evidence $\mathbf{E}$, and estimates the distribution of the query variable $X$. In other words, it computes an approximation of $P(X \mid \mathbf{E}=e)$. For simplicity, we will only consider probabilistic queries with one query variable $X$. # # We will need a function that samples from a discrete 1-D probability distribution. For this purpose, use the `sample_categorical()` function from the Instructions notebook, which takes a 1-D NumPy array representing a probability distribution and draws a sample from it: # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "0c13833192071b03e7ecba5aedd2b3d4", "grade": false, "grade_id": "cell-b118e5c1ec95408e", "locked": true, "schema_version": 3, "solution": false, "task": false} from utils import sample_categorical help(sample_categorical) # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "62705f005520a5ab01c19071e7e1ceb4", "grade": false, "grade_id": "cell-7cfea4f9237cc0aa", "locked": true, "schema_version": 3, "solution": false, "task": false} # ### Forward Sampling # <div class="alert alert-warning"> # Implement the <i>sample_forward</i> function, which samples from the full joint distribution of a Bayesian network $\mathcal{B}$. (2 points) # </div> # # The `sample_forward` function must return one object: # - samples from $\mathcal{B}$ of type `np.ndarray`, with shape `(sample_size, len(bayes_net))` # # # **Hint**: Iterating over the *BayesNet* object returns the *Variable* objects in topological ordering. The probability distribution of variable $X$ given its parents $\mathit{pa}(X)$, $P(X \mid \mathit{pa}(X))$, can be obtained by passing the (possibly incomplete) random event to the variable, i.e., `variable(samples[i])`. 
# + deletable=false nbgrader={"cell_type": "code", "checksum": "d53cdcf4356e8a96e4aa6166a98baeee", "grade": false, "grade_id": "cell-a21d29b8eb812ba1", "locked": false, "schema_version": 3, "solution": true, "task": false} def sample_forward(bayes_net: BayesNet, sample_size: int) -> np.ndarray: ''' Samples from the full joint distribution. :param bayes_net: A Bayesian network of type BayesNet. :param sample_size: The number of samples to draw from the Bayesian network. :returns: A NumPy array of type np.int64 with shape (sample_size, len(bayes_net)) containing samples from the Bayesian network ''' # array holding the samples samples = np.empty((sample_size, len(bayes_net)), np.int64) # YOUR CODE HERE for i in range(sample_size): for variable in bayes_net: distribution = variable(samples[i]) samples[i][variable.id] = sample_categorical(distribution) return samples # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "b323da9b830dd209f5ad67037fd87c83", "grade": true, "grade_id": "cell-a9f3cf6d211b01fa", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false} # sanity checks bayes_net = BayesNet([(np.array([0.5, 0.5]), [0])]) samples = sample_forward(bayes_net, 3) assert type(samples) == np.ndarray, f'\nWrong output type!\nExpected: np.ndarray\nGiven:\t {type(samples)}' assert samples.shape == (3, 1), f'\nWrong output shape!\nExpected: (3, 1)\nGiven:\t {samples.shape}' assert samples.dtype == np.int64, f'\nWrong numpy array data type!\nExpected: np.int64\nGiven:\t {samples.dtype}' # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "078e0a5aacbc0aa57cd3d13392eae317", "grade": false, "grade_id": "cell-39ba0a156c9805da", "locked": true, "schema_version": 3, "solution": false, "task": false} # ### Rejection Sampling # <div class="alert alert-warning"> # Implement the <i>rejection_sampling</i> function, which estimates the probability distribution over the query variable $X$ 
given evidence $\mathbf{E}=\mathbf{e}$, i.e. $P(X \mid \mathbf{E}=\mathbf{e})$. Use the <i>sample_forward</i> function implemented in the previous step to draw random events from $P(\mathcal{X})$. (3 points) # </div> # # The `rejection_sampling` function must return one object: # - The probability distribution over variable $X$ of type `np.ndarray` with shape `(bayes_net[query_variable].num_values,)`. If the sample from the FJD does not contain any random events with matching evidence, return a NumPy array with the same shape as $P(X \mid \mathbf{E}=\mathbf{e})$ containing only values `np.nan`, i.e. `np.full(bayes_net[query_variable].num_values, np.nan)`. # # **Hint**: Use `np.bincount` to count the number of occurrences of each value. # + deletable=false nbgrader={"cell_type": "code", "checksum": "91f86a117743f29fb178050cd230b0b3", "grade": false, "grade_id": "cell-0f9107f6630622b7", "locked": false, "schema_version": 3, "solution": true, "task": false} def rejection_sampling(bayes_net: BayesNet, query_variable: int, evidence: dict={}, sample_size: int=100) -> np.ndarray: ''' Estimates the distribution of the query variale given the value of the evidence variables. :param bayes_net: A Bayesian network of type BayesNet. :param query_variable: Id of the query variable (int). :param evidence: A dictionary of evidence variables (keys: int) and their correponding values (values: int). :param sample_size: The number of samples to use for the estimation. :returns: A NumPy array of type np.float64 representing the conditional distribution of the query variable given evidence, or a NumPy array of the same shape but with all values set to np.nan if no sample with matching evidence is found. ''' # forward sample... 
samples = sample_forward(bayes_net, sample_size) # YOUR CODE HERE for key, value in evidence.items(): samples = samples[samples[:,key] == value] variable = bayes_net[query_variable] if len(samples) == 0: distribution = np.full(variable.num_values, np.nan) else: distribution = np.bincount(samples[:,query_variable], minlength=variable.num_values) / len(samples) return distribution # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "eaebadb65aad4cd9a13fd3878778bf57", "grade": true, "grade_id": "cell-212c51d460b7c21a", "locked": true, "points": 3, "schema_version": 3, "solution": false, "task": false} # sanity checks bayes_net = BayesNet([(np.array([0.25, 0.25, 0.5]), [0])]) np.random.seed(0) distribution = rejection_sampling(bayes_net, 0, {0:0}, 3) assert distribution.shape == (3,), f'\nWrong output shape!\nExpected: (3,)\nGiven:\t {distribution.shape}' assert np.all(np.isnan(distribution)) distribution = rejection_sampling(bayes_net, 0, {}, 1000) assert type(distribution) == np.ndarray, f'\nWrong output type!\nExpected: np.ndarray\nGiven:\t {type(distribution)}' assert distribution.shape == (3,), f'\nWrong output shape!\nExpected: (3,)\nGiven:\t {distribution.shape}' assert distribution.dtype == np.float64, f'\nWrong numpy array data type!\nExpected: np.float64\nGiven:\t {distribution.dtype}' # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "b92570a65a3df8ee431597ec7018bb06", "grade": false, "grade_id": "cell-9548b463f05a9ca6", "locked": true, "schema_version": 3, "solution": false, "task": false} # ## Example # # # Now that we can approximate arbitrary probabilistic queries let us compare an approximation's accuracy to an exact solution. 
# # Consider the following Bayesian Network (all variables are binary): # # <img width='30%' src='bn.svg'> # # The conditional probability tables are given as: # # <table style="float: left;margin:5px;"><tr><th>P(A)</th><th>$a_0$<br></th><th>$a_1$</th></tr><tr><td>-</td><td>0.2</td><td>0.8</td></tr></table> # # <table style="float: left;margin:5px;"><tr><th>P(C)</th><th>$c_0$<br></th><th>$c_1$</th></tr><tr><td>-</td><td>0.9</td><td>0.1</td></tr></table> # # <table style="float: left;margin:5px;"><tr><th>P(B | A)</th><th>$a_0$<br></th><th>$a_1$</th></tr><tr><td>$b_0$</td><td>0.9</td><td>0.2</td></tr><tr><td>$b_1$</td><td>0.1</td><td>0.8</td></tr></table> # # # <table style="float: left;margin:5px;"><tr><th rowspan="2">P(D | B, C)</th><th colspan="2">$b_0$<br></th><th colspan="2">$b_1$</th></tr><tr><td>$c_0$</td><td>$c_1$</td><td>$c_0$</td><td>$c_1$</td></tr><tr><td>$d_0$<br></td><td>0.1</td><td>0.2</td><td>0.99</td><td>0.8</td></tr><tr><td>$d_1$</td><td>0.9</td><td>0.8</td><td>0.01</td><td>0.2</td></tr></table> # # <table style="float: left;margin:5px;"><tr><th>P(E | C)</th><th>$c_0$</th><th>$c_1$</th></tr><tr><td>$e_0$</td><td>0.7</td><td>0.4</td></tr><tr><td>$e_1$</td><td>0.3</td><td>0.6</td></tr></table> # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "56c50aca9f805a32cebb6050cda362de", "grade": false, "grade_id": "cell-64db2a31f0478058", "locked": true, "schema_version": 3, "solution": false, "task": false} # First, let us store these tables as global NumPy arrays and create the BayesNet object: # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "9cfc7aaa152d2bcda10d4e3b54145722", "grade": false, "grade_id": "cell-ea9cddd63a7d7cbe", "locked": true, "schema_version": 3, "solution": false, "task": false} _A_, _B_, _C_, _D_, _E_ = 0, 1, 2, 3, 4 A = np.array([0.2, 0.8]) B_A = np.array([[0.9, 0.2], [0.1, 0.8]]) C = np.array([0.9, 0.1]) D_BC = np.array([[[0.1, 0.2], [0.99, 0.8]], [[0.9, 0.8], 
[0.01, 0.2]]]) E_C = np.array([[0.7, 0.4], [0.3, 0.6]]) bayes_net = BayesNet([ (A, [_A_]), (B_A, [_B_, _A_]), (C, [_C_]), (D_BC, [_D_, _B_, _C_]), (E_C, [_E_, _C_]) ]) # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "fb762b7e568220eb7cbd17ad77011b9c", "grade": false, "grade_id": "cell-bafa6289ac7cb845", "locked": true, "schema_version": 3, "solution": false, "task": false} # ### Exact Computation # <div class="alert alert-warning"> # Compute $P(A \mid D, E)$ exactly (up to floating point precision). (1 point) # </div> # # Feel free to do this in a very inefficient manner, e.g., by first computing the joint probability distribution and then normalizing by the evidence. # + deletable=false nbgrader={"cell_type": "code", "checksum": "0fcef0d653d169844b6b716a5b89880f", "grade": false, "grade_id": "cell-56090956bc549236", "locked": false, "schema_version": 3, "solution": true, "task": false} A_DE = None # YOUR CODE HERE fjdt = 1 for variable in bayes_net: fjdt = fjdt * variable.pdt # Sum over the axis, that are not in our evidence A_DE = fjdt.sum(axis=(_B_,_C_)) # Normalize by the sum of our A A_DE = A_DE / A_DE.sum(axis=_A_) # print("A_DE=\n", A_DE) # print("A_DE.shape=", A_DE.shape) # print("A_DE.sum(axis=0)=", A_DE.sum(axis=0)) # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "1da7f1348ef6e565742af4b425e01013", "grade": true, "grade_id": "cell-b902e5cb2058921e", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false} # sanity checks assert A_DE is not None assert A_DE.shape == (2,2,2) assert np.all(np.isclose(A_DE.sum(axis=0), 1)) # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "f373f54668415794f0f0ca8a6b4aea29", "grade": false, "grade_id": "cell-7e8e699e6fa63332", "locked": true, "schema_version": 3, "solution": false, "task": false} # ### Comparison # # Run the following code cell to plot the average [Kullback-Leibler 
divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between the exact distribution $P(A \mid D, E)$ and the approximations computed with rejection sampling. Different lines represent different value assignments to the evidence variables $D$ and $E$. # # **Hint**: The computation of the approximations might take a while... # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "97b96cf61241da4c3c8097cf4a420bda", "grade": false, "grade_id": "cell-e52d6309ba540279", "locked": true, "schema_version": 3, "solution": false, "task": false} from utils import approx_error np.random.seed(0) errs = {} sample_counts = np.array([10, 20, 40, 80, 160, 320, 640, 1280]) # compute approximation error for d, e in zip([0, 0, 1, 1], [0, 1, 0, 1]): errs[(d, e)] = approx_error( bayes_net, rejection_sampling, A_DE[:, d, e], _A_, {_D_:d, _E_:e}, sample_counts, n_runs=100 ) # plot plt.figure(figsize=(10, 8)) plt.title('Kullback-Leiber Divergence') for d, e in zip([0, 0, 1, 1], [0, 1, 0, 1]): plt.plot(sample_counts, errs[(d, e)], label=f'e:{e}, d:{d}', lw=2) plt.legend() plt.xscale('log') plt.yscale('log') plt.xlim(sample_counts.min(), sample_counts.max()) plt.xlabel('Number of samples') plt.show() # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "e3eb95b7f1a98395d6db373a6cba30d1", "grade": false, "grade_id": "cell-79018e749c52e130", "locked": true, "schema_version": 3, "solution": false, "task": false} # <div class="alert alert-warning"> # Answer the following question in <b>one sentence</b>! (1 point) # </div> # # Have a look at the average approximation errors when sampling $10^3$ samples with forward sampling. Why is the approximation error of $P(A \mid e=1, d=1)$ higher then the approximation error of $P(A \mid e=0, d=0)$? 
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "fb387c8bbdf706d723f976c28559a5ae", "grade": true, "grade_id": "cell-9eb92effb71c8f69", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false} # Because $P(e=1, d=1)$ is less likely than $P(e=0, d=0)$ and therefore it does not get sampled that often so the error is higher$ # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "109edff3b6804fe9f4faea4b0affc085", "grade": false, "grade_id": "cell-8504a946ed33dc68", "locked": true, "schema_version": 3, "solution": false, "task": false} # <div class="alert alert-warning"> # Store the answer to the following question into the provided result variable! (1 point) # </div> # # Assume $d=1$ and $e=0$. On average, how many of the random events sampled from $P(\mathcal{X})$ will be accepted by the rejection sampling algorithm (i.e., not rejected)? Give the fraction of samples accepted, e.g. 0.5, if every 2nd sample contains the correct evidence. # + deletable=false nbgrader={"cell_type": "code", "checksum": "a36163c0d7a6ea8ff5e72f93c00f1ff1", "grade": false, "grade_id": "cell-2d245d14311f7b64", "locked": false, "schema_version": 3, "solution": true, "task": false} result = None # YOUR CODE HERE p_d_e = fjdt.sum(axis=(_A_,_B_,_C_)) print(p_d_e) p_d1_e0 = p_d_e[1,0] print(p_d1_e0) result = p_d1_e0 # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "4c45b58100568f827587fc81f3d38c83", "grade": true, "grade_id": "cell-a6cb6d992ca3d332", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false} assert result is not None # -
Problem 1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="37vrnAFPW6_C" colab_type="text"
# #Exercise 10.02: Analyse the categorical variables from the Ames Housing dataset
# In this exercise, we will continue our dataset exploration by analysing the categorical variables of this dataset. To do so, we will implement our own describe functions.
# The dataset used for this exercise is the Ames Housing dataset compiled by <NAME>: http://www.amstat.org/publications/jse/v19n3/decock.pdf

# + [markdown] id="2Ox-pHUPX78-" colab_type="text"
# 1. Open a new Colab notebook and import the pandas package

# + id="HEiOAwQPW0qb" colab_type="code" colab={}
import pandas as pd

# + [markdown] id="C6NbwMIwX_84" colab_type="text"
# 2. Assign the link to the AMES dataset to a variable called 'file_url':

# + id="j7whidfaYjns" colab_type="code" colab={}
file_url = 'https://raw.githubusercontent.com/TrainingByPackt/The-Data-Science-Workshop/master/Chapter10/dataset/ames_iowa_housing.csv'

# + [markdown] id="VzoH_wvLuD0p" colab_type="text"
# 3. Using the read_csv method from the package pandas, load the dataset into a new variable called 'df':

# + id="nFMz2jNVt-xy" colab_type="code" colab={}
df = pd.read_csv(file_url)

# + [markdown] id="qJ-1EVGS6yZL" colab_type="text"
# 4. Create a new dataframe called 'obj_df' with only the columns that are of type object (categorical) using the method select_dtypes from the pandas package and pass in the value 'object' to the parameter 'include':
#

# + id="qwSvQHzZhN2q" colab_type="code" colab={}
obj_df = df.select_dtypes(include='object')

# + [markdown] id="eW-0FR24hJ5T" colab_type="text"
#
# 5. Using the attribute **columns** from pandas, extract the list of columns of this dataframe 'obj_df' and assign it to a new variable called 'obj_cols' and print its content:

# + [markdown] id="mVuWicDG3X7B" colab_type="text"
# Expected Output:
#
# ![Figure 27 - List of categorical variables](https://docs.google.com/uc?export=download&id=1n5gkiNhA0bPeZFUTdrXEssgqJtqwuBW0)
#

# + id="vdBz2p9w6w4K" colab_type="code" outputId="06c091c4-c294-4ed2-8438-69a1f27c6b00" colab={"base_uri": "https://localhost:8080/", "height": 187}
obj_cols = obj_df.columns
obj_cols

# + [markdown] id="kE2u0_H3aJWs" colab_type="text"
# 6. Create a function called 'describe_object' that takes a **pandas** dataframe and a column name as input parameters. Then inside the function, print out the name of the given column, its number of unique values using the method **nunique()** and the list of values and their occurrence using the method **value_counts()**:

# + id="3yRUe0E7aJgk" colab_type="code" colab={}
def describe_object(df, col_name):
    """Print a categorical-column summary: name, distinct-value count, and
    the relative frequency of each value (NaN included as its own category)."""
    print(f"\nCOLUMN: {col_name}")
    print(f"{df[col_name].nunique()} different values")
    print("List of values:")
    # dropna=False keeps missing values visible; normalize=True reports
    # proportions instead of raw counts.
    print(df[col_name].value_counts(dropna=False, normalize=True))

# + [markdown] id="H1YRwGZgOYmp" colab_type="text"
# 7. Test this function by providing the 'df' dataframe and the column 'MSZoning':

# + [markdown] id="hTglykjp3hZG" colab_type="text"
# Expected Output:
#
# ![Figure 28 - Display of the created function for the column ‘MSZoning’](https://docs.google.com/uc?export=download&id=1PE8Ive1mpHZeeC0GN5wDrjxvkaufT5Pv)
#

# + id="QRLrQjc5bl5b" colab_type="code" outputId="0b94009d-3775-41ce-a4b5-a2e5f40b36a3" colab={"base_uri": "https://localhost:8080/", "height": 187}
describe_object(df, 'MSZoning')

# + [markdown] id="z-c3WNFIwWDG" colab_type="text"
# For the column ‘MSZoning’, the value ‘RL’ represents almost 79% of the values while ‘C (all)’ is only present in less than 1% of the rows.

# + [markdown] id="6sE7pqfMf-NR" colab_type="text"
# 8. Create a for loop that will call the created function for every element from the list 'obj_cols':

# + [markdown] id="CkuSqu9p3tJK" colab_type="text"
# Expected Output:
#
# ![Figure 29 - Display of the created function for the first columns contained in ‘obj_cols’](https://docs.google.com/uc?export=download&id=1cou8u_aD4Z8ec1EqDKRPFXJGGyEQUzh_)
#
#

# + id="hv3qvtqZf-TL" colab_type="code" outputId="adf18174-a440-4fd6-c7f2-59461011644f" colab={"base_uri": "https://localhost:8080/", "height": 1000}
for col_name in obj_cols:
    describe_object(df, col_name)

# + [markdown] id="ZO9eRjpFwQvD" colab_type="text"
# We can confirm that the column ‘Street’ is almost constant as 99.6% of the rows contain the same value: ‘Pave’. For the column ‘Alley’, almost 94% of the rows have missing values. We can also notice that 'MiscFeature' contains missing values for 96% of the rows so this column doesn't contain much information.
#

# + [markdown] id="_-yGDjckRbwV" colab_type="text"
# Excellent! We just analysed all the categorical variables from this dataset in one go. We saw how to look at the distribution of all the values contained in one feature. We also found that some of them are dominated by a single value and others have mainly missing values.
Chapter10/chapter10_exercise2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import defense
import tensorflow as tf
from skimage.measure import compare_ssim
import argparse
import imutils
import cv2
import numpy as np
from albumentations import *

# Let the TF session grow GPU memory on demand instead of grabbing it all.
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(config=config)


# +
def l2_distortion(img1, img2):
    """Return the average root-mean-square L2 distortion between two images.

    :param img1: np.ndarray, either a single image or a batch (N, H, W, C).
    :param img2: np.ndarray with the same shape as img1.
    :returns: scalar mean RMS distortion per pixel.
    """
    if len(img1.shape) == 4:
        # Batch case: flatten each image, compute its RMS error, average over N.
        n = img1.shape[0]
        per_image = np.sqrt(
            np.sum((img1.reshape((n, -1)) - img2.reshape((n, -1))) ** 2, axis=1)
            / np.product(img1.shape[1:])
        )
        l = np.mean(per_image, axis=0)
    else:
        # BUG FIX: the difference must be squared element-wise BEFORE summing.
        # The original computed np.sum(img1 - img2) ** 2, which lets positive
        # and negative pixel errors cancel out and underestimates distortion.
        l = np.sqrt(np.sum((img1 - img2) ** 2) / np.product(img1.shape))
    return l


def ssim_score(cleandata, data):
    """Mean SSIM over a batch of image pairs (pixel values assumed in [0, 1])."""
    # cleandata = (cleandata * 255).astype('uint8')
    # data = (data * 255).astype('uint8')
    SSIM = []
    for i in range(cleandata.shape[0]):
        ssim = compare_ssim(cleandata[i], data[i], multichannel=True, data_range=1.0)
        SSIM.append(ssim)
    SSIM = np.asarray(SSIM)
    return SSIM.mean()


def PSNR(original, compressed):
    """Peak signal-to-noise ratio (dB) between two images with values in [0, 255]."""
    mse = np.mean((original - compressed) ** 2)
    if mse == 0:
        # MSE of zero means no noise is present in the signal, so PSNR is
        # unbounded; return a large sentinel value instead.
        return 100
    max_pixel = 255.0
    psnr = 20 * np.log10(max_pixel / np.sqrt(mse))
    return psnr


def psnr_score(cleandata, data):
    """Mean PSNR over a batch of image pairs."""
    PSNR_list = []
    for i in range(cleandata.shape[0]):
        psnr = PSNR(cleandata[i], data[i])
        PSNR_list.append(psnr)
    PSNR_list = np.asarray(PSNR_list)
    return PSNR_list.mean()
# -

# the selected data from the imagenet validation set
cleandata = np.load("./data/clean100data.npy")
cleanlabel = np.load("./data/clean100label.npy")


def defended(imgbatch):
    """Apply the WebP-compression defense to every image in imgbatch.

    BUG FIX: the original looped over the global `cleandata` and defended
    `cleandata[i]`, silently ignoring the `imgbatch` parameter. Behavior at
    the existing call site (`defended(cleandata)`) is unchanged.
    """
    defbatch = np.zeros_like(imgbatch)
    for i in range(imgbatch.shape[0]):
        defbatch[i] = defense.defend_WebP(imgbatch[i])
    return defbatch


# middata = cleandata+0.01
data = defended(cleandata)

print(l2_distortion(cleandata, data))
print(ssim_score(cleandata, data))
print(psnr_score(cleandata * 255, data * 255))

# NOTE(review): `augimg` is not defined anywhere in this notebook and `plt`
# is never imported, so this cell raised NameError; it looks like a leftover
# from an augmentation experiment. Commented out to keep the notebook runnable.
# plt.imshow(augimg(cleandata[0]))
Com_l2_SSIM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="XiEEOSApViQo" # # Métodos Específicos de Lista em Python # + [markdown] id="bpPjA8JVViQo" # - list.append(valor)<br> # Adiciona um valor ao final de uma lista # + [markdown] id="gpkYqzvdViQo" # Uso: # vendas = [150, 320] # vendas.append(110) # Resultado: # vendas = [150, 320, 110] # + [markdown] id="DGA4nkylViQo" # - list.extend(lista2)<br> # Adiciona todos os valores da lista2 na lista original # + [markdown] id="55qW5iIdViQp" # Uso: # vendas = [150, 320, 110, 450, 390, 370] # vendas_2semestre = [440, 470, 900, 1000, 1100, 1050] # vendas.extend(vendas_2semestre) # Resultado: # vendas = [150, 320, 110, 450, 390, 370, 440, 470, 900, 1000, 1100, 1050] # + [markdown] id="p8M8_983ViQp" # - list.insert(posicao, valor)<br> # Adiciona um valor em uma posição específica em uma lista. Não é recomendado usar a não ser que seja realmente necessário inserir em uma posição específica, porque o método append é mais eficiente. # + [markdown] id="JXu1buUUViQp" # Uso: # vendas = [150, 320] # vendas.insert(1, 110) # Resultado: # vendas = [150, 110, 320] # Obs: # Compare com o caso do list.append para ver a diferença # + [markdown] id="wwkTJjiUViQp" # - list.remove(valor)<br> # Remove o valor da lista (apenas a 1ª ocorrência, então caso haja 2 vezes o valor na lista, apenas a 1ª será removida). Além disso, dá um erro caso valor não exista dentro da lista. # + [markdown] id="SFTmdpk_ViQp" # Uso: # vendedores = ['João', 'Julia', 'Maria', 'Ana', 'Paulo', 'Marcus'] # vendedores.remove('Maria') # Resultado: # vendedores = ['João', 'Julia', 'Ana', 'Paulo', 'Marcus'] # + [markdown] id="u6ZUIdKQViQp" # - list.pop(posicao)<br> # Remove o item que está na posicao (índice) passado. 
Além disso, esse item é dado como resultado do pop, portanto pode ser armazenado em uma variável ou usado para outra coisa na mesma linha de código. # + [markdown] id="pSJy5lVAViQp" # Uso:<br> # vendedores = ['João', 'Julia', 'Maria', 'Ana', 'Paulo', 'Marcus']<br> # vendedores.pop(2)<br> # Resultado:<br> # vendedores = ['João', 'Julia', 'Ana', 'Paulo', 'Marcus'] # + [markdown] id="dqvw3vI2ViQp" # - list.clear()<br> # Remove todos os itens de uma lista # + [markdown] id="uGJHsiryViQp" # Uso: # vendedores = ['João', 'Julia', 'Maria', 'Ana', 'Paulo', 'Marcus'] # vendedores.clear() # Resultado: # vendedores = [] # + [markdown] id="uDRRE6rdViQp" # - list.index(valor)<br> # Retorna a posição do valor dentro da lista (em qual índice está o valor). Dá erro caso não haja o valor dentro da lista. # + [markdown] id="aYI3rDkNViQp" # Uso: # vendedores = ['João', 'Julia', 'Maria', 'Ana', 'Paulo', 'Marcus'] # posicao_Joao = vendedores.index('João') # Resultado: # posicao_Joao = 0 # + [markdown] id="Xb-Do8xvViQp" # - list.count(valor)<br> # Retorna a quantidade de vezes que o valor aparece na lista # + [markdown] id="Lga7PFtXViQq" # Uso: # vendedores = ['João', 'Julia', 'Maria', 'Ana', 'Paulo', 'Marcus', 'João'] # qtde_Joao = vendedores.count('João') # Resultado: # qtde_Joao = 2 # + [markdown] id="WNBFfk6nViQq" # - list.sort(reverse=False)<br> # Ordena os valores da lista em ordem crescente, ou alfabética, (reverse=False) ou decrescente (reverse=True). # + [markdown] id="ZBQL2GSSViQq" # Uso: # vendas = [150, 300, 190, 480] # vendas.sort(reverse=True) # Resultado: # vendas = [480, 300, 190, 150] # + [markdown] id="q6XpeL_yViQq" # - list.reverse()<br> # Inverte a ordem dos elementos de uma lista. # + [markdown] id="LMtfecEHViQq" # Uso: # vendas = [150, 300, 190, 480] # vendas.reverse() # Resultado: # vendas = [480, 190, 300, 150] # + [markdown] id="plMfNqx1ViQq" # - list.copy()<br> # Cria uma cópia da lista original. 
Outra opção é fazer lista2 = lista1[:] # + [markdown] id="hS4K-QzmViQq" # Uso: # vendas = [150, 300, 190, 480] # vendas2 = vendas.copy() # Resultado: # vendas2 = [150, 300, 190, 480]
listas/.ipynb_checkpoints/parte14-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- k = [[128, 128], [256, 256], [512, 512], [512, 512]] k[0][-1] import torch import torch.nn as nn w = torch.empty(3, 5) nn.init.constant_(w, 0.3) # + attn = torch.randint(1, 10, [1, 2, 2, 3]) print("attn -->", attn) pos_enc = torch.randint(1, 10, [1, 1, 2, 3]) print("pos_enc -->", pos_enc) res = torch.einsum('bmnf,bmnf->bmf', attn, pos_enc) print(res) res1 = torch.sum(attn * pos_enc, dim=-2) print(res1) # - src = torch.randint(1, 10, [2, 2, 3]) print("src -->", src) print("src[:, :, None] -->", src[:, :, None]) print("src[:, None] -->", src[:, None]) final_res = (src[:, :, None] - src[:, None]) print("final_res shape -->", final_res) print("final_res ** 2 -->", final_res ** 2) final = torch.sum(final_res ** 2, dim=-1) print("final shape -->", final) print("src sum -->", torch.sum(src, dim=-1)) raw_size = final.size() print("raw_size -->", raw_size[0]) idx = torch.randint(1, 10, [2, 2, 2]) print(idx.shape) idx = idx.reshape(2, -1) print(idx) print("=====================") idx = idx[..., None].expand(-1, -1, 3) raw_size = idx.size() print(*raw_size) res = torch.randint(1, 10, [2, 4, 6]) print(res.reshape(*raw_size, -1).shape) t = torch.tensor([[1, 2, 3], [4, 5, 6]]) # 当dim=1时, 看索引矩阵的行,寻找索引所在行的第索引个值 t1 = torch.gather(t, 1, torch.tensor([[1, 0], [1, 0]])) # 当dim=0时, 看索引矩阵的列, 寻找索引所在列的第索引个值 t2 = torch.gather(t, 0, torch.tensor([[1, 0, 1], [0, 0, 1]])) print(t) print(t1) print(t2) # + MLPS = [[[16, 16, 32], [32, 32, 64]], [[64, 64, 128], [64, 96, 128]], [[128, 196, 256], [128, 196, 256]], [[256, 256, 512], [256, 384, 512]]] channel_in = 6 skip_channel_list = [6] for k in range(4): mlps = MLPS[k].copy() channel_out = 0 for idx in range(mlps.__len__()): mlps[idx] = [channel_in] + mlps[idx] channel_out += mlps[idx][-1] + 3 print('mlps : ', mlps) 
skip_channel_list.append(channel_out) channel_in = channel_out print('channel_out: ', channel_out) # - FP_MLPS = [[128, 128], [256, 256], [512, 512], [512, 512]] for k in range(FP_MLPS.__len__()): pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k] print(f'pre_channel : {pre_channel}, mlp : {mlp}') print(f'skip_channel : {skip_channel_list}, FP_MLPS : {FP_MLPS[k]}') import numpy as np x = np.random.randn(3, 3, 3) print('x:', x) print('x[0]:', x[:, :, 0]) str1 = "'shapenet'" print(str1) if str1 == "'shapenet'": print("-------") if str1 == 'shapenet': print("++++++++") print(torch.__version__) print(torch.cuda.device_count()) from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension rpn_cls = torch.randint(1, 1000, [1, 16384, 3]) rpn_scores_raw = rpn_cls[..., 0:3] fea = rpn_cls.transpose(1, 2) print(rpn_cls.shape) print(rpn_scores_raw.shape) print(fea.shape) pred_reg = torch.randint(1, 100, [16384, 76]) x_bin = torch.argmax(pred_reg[:, 0: 12], dim=1) print(x_bin.unsqueeze(dim=1).shape) test = torch.randint(1, 100, [2, 4096, 16]) print(test.shape) test = torch.max(test, 1, keepdim=True) print(test.values.shape) test_s = torch.randint(1, 100, [1, 1, 16]) test_s = test_s.view(-1, 16, 1).repeat(1, 1, 1024).transpose(1, 2) print(test_s.shape) cat_fea = torch.randint(1, 100, [1, 1024, 64]) fin = torch.cat([test_s, cat_fea], 2) print(fin.shape) q = [8] w = [128, 128] q+w import cv2 image = cv2.imread('./doc/teaser.png') cv2.imshow("xb2", image) # + import numpy as np import mayavi.mlab points = np.loadtxt("data/KITTI/object/training/velodyne/000000.bin") x = points[:, 0] y = points[:, 1] # y position of point z = points[:, 2] # z position of point d = np.sqrt(x ** 2 + y ** 2) # pointcloud = np.fromfile(str("/home/yuchao/PointRCNN/test.txt"), dtype=np.float32, count=-1).reshape([-1,4]) # print(pointcloud.shape) # x = pointcloud[:, 0] # x position of point # y 
= pointcloud[:, 1] # y position of point # z = pointcloud[:, 2] # z position of point # r = pointcloud[:, 3] # reflectance value of point # d = np.sqrt(x ** 2 + y ** 2) # Map Distance from sensor vals='height' if vals == "height": col = z else: col = d fig = mayavi.mlab.figure(bgcolor=(0, 0, 0), size=(640, 500)) mayavi.mlab.points3d(x, y, z, col, # Values used for Color mode="point", colormap='spectral', # 'bone', 'copper', 'gnuplot' # color=(0, 1, 0), # Used a fixed (r,g,b) instead figure=fig, ) x=np.linspace(5,5,50) y=np.linspace(0,0,50) z=np.linspace(0,5,50) mayavi.mlab.plot3d(x,y,z) mayavi.mlab.show() # + #-*-coding:utf-8-*- import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def readXYZfile(filename, Separator): data = [[], [], []] f = open(filename,'r') line = f.readline() num = 0 while line: #按行读入点云 c,d,e = line.split(Separator) data[0].append(c) #X坐标 data[1].append(d) #Y坐标 data[2].append(e) #Z坐标 num = num + 1 line = f.readline() f.close() #string型转float型 x = [ float(data[0] ) for data[0] in data[0] ] z = [ float(data[1] ) for data[1] in data[1] ] y = [ float(data[2] ) for data[2] in data[2] ] print("读入点的个数为:{}个。".format(num)) point = [x,y,z] return point #三维离散点图显示点云 def displayPoint(data,title): #解决中文显示问题 plt.rcParams['font.sans-serif']=['SimHei'] plt.rcParams['axes.unicode_minus'] = False #点数量太多不予显示 while len(data[0]) > 20000: print("点太多了!") exit() #散点图参数设置 fig=plt.figure() ax=Axes3D(fig) ax.set_title(title) ax.scatter3D(data[0], data[1],data[2], c = 'r', marker = '.') ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') plt.show() if __name__ == "__main__": data = readXYZfile("/home/yuchao/PointRCNN/test.txt",',') displayPoint(data, "兔子") # + import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np np.random.seed(0) points = np.loadtxt("/home/yuchao/PointRCNN/test.txt") fig = plt.figure(1) plt.clf() ax = Axes3D(fig) ax.scatter(points[:, 0], points[:, 1], points[:, 2], 
c='r',marker='.', s=10, linewidths=1, alpha=1,cmap='spectral') ax.set_title("bunny") ax.set_xlabel("1st ") ax.w_xaxis.set_ticklabels([]) ax.set_ylabel("2nd ") ax.w_yaxis.set_ticklabels([]) ax.set_zlabel("3rd ") ax.w_zaxis.set_ticklabels([]) plt.show() # - import pathlib import re filepaths = pathlib.Path('data/KITTI/object/training/label_2').glob('*.txt') prog = re.compile(r'^\d{6}.txt$') filepaths = filter(lambda f: prog.match(f.name), filepaths) # 过滤不符合条件的元素 image_ids = [int(p.stem) for p in filepaths] image_ids = sorted(image_ids) # image_ids = list(range(image_ids)) # print(image_ids) # for p in filepaths: # print(p) # print(p.stem) x = 'data/KITTI/object/training/label_2/000279.txt' print(x / ('000001' + '.txt')) def get_split_parts(num, num_part): same_part = num // num_part remain_num = num % num_part if remain_num == 0: return [same_part] * num_part else: return [same_part] * num_part + [remain_num] print(get_split_parts(520, 50)) with open('data/KITTI/object/training/label_2/000002.txt', 'r') as f: lines = f.readlines() # if len(lines) == 0 or len(lines[0]) < 15: # content = [] # else: content = [line.strip().split(' ') for line in lines] print(content) import tqdm import time with tqdm.trange(1, 100, desc='epochs') as tbar, \ tqdm.tqdm(total=1000, leave=False, desc='train') as pbar: for epoch in tbar: time.sleep(0.1) for i in range(1000): time.sleep(1) pbar.update(10) # pbar.set_postfix(dict(total_it=it)) # tbar.set_postfix(disp_dict) tbar.refresh() x = {'q': 1, 'w': 2, 'e': 3} print(x.keys()) # + import torch import numpy as np test = torch.zeros([10, 3, 5]) x = torch.tensor(np.array([2, 3, 4])) min_d = torch.tensor(np.array([1, 1, 1])) max_d = torch.tensor(np.array([4, 4, 4])) f = test[[1, 3, 5], ...] 
print(f.shape) # + import torch import numpy as np pred_boxes3d = torch.rand(4, 64, 7) raw_scores = torch.rand(4, 64, 1) norm_scores = torch.sigmoid(raw_scores) print(norm_scores.shape) inds = norm_scores > 0.1 print(inds.shape) for k in range(len(raw_scores)): cur_inds = inds[k].view(-1) pred_boxes3d_selected = pred_boxes3d[k, cur_inds] print("pred_boxes3d_selected -->", pred_boxes3d_selected.shape) raw_scores_selected = raw_scores[k, cur_inds] print("raw_scores_selectes -->", raw_scores_selected.shape) # -
.ipynb_checkpoints/test-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/anasir514/colab/blob/main/dictio_02.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="FOko0-oW_09-" outputId="808d228b-2778-400a-c94c-f56d43917179"
# Table of contents mapped chapter title -> starting page.
# Built with dict(zip(...)) to keep titles and pages visually aligned;
# insertion order (guaranteed since Python 3.7) matches the listing order.
chapter_titles = ["Introduction", "Chapter 1", "Chapter 2", "Chapter 3", "Chapter 4"]
start_pages = [1, 4, 11, 25, 30]
toc = dict(zip(chapter_titles, start_pages))

toc["Epilogue"] = 39   # Epilogue starts on page 39
toc["Chapter 3"] = 24  # Chapter 3 now starts on page 24

print(toc)                  # Show the current contents
print("Chapter 5" in toc)   # Membership test: is there a Chapter 5?
dictio_02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # SSD7 Training Tutorial
#
# Trains SSD7 -- a deliberately low-complexity, fast single-shot detector
# (not a carefully optimized architecture) -- on the iCubWorld "Instance"
# dataset (20 positive object classes), then runs inference on validation
# images and on a live webcam stream.

# +
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger, LearningRateScheduler
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt

from models.keras_ssd7 import build_model
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize
from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation

# %matplotlib inline
# -

# ## 1. Set the model configuration parameters
#
# These values are shared by `build_model()` and by the `SSDInputEncoder`
# further down, so they must agree. Notes:
# * Class ID 0 is reserved for the background class, so `n_classes` counts
#   positive classes only.
# * `scales` has one more element than there are predictor layers (4 in
#   SSD7) because the last scale is used for the second aspect-ratio-1 box
#   of the last predictor layer when `two_boxes_for_ar1` is set.
# * `aspect_ratios` (global) applies the same ratios to every predictor
#   layer; `aspect_ratios_per_layer` is the per-layer alternative.
# * Leaving `variances` at 1.0 means the encoded box offsets are not scaled.

img_height = 480          # Height of the model input images
img_width = 640           # Width of the model input images
img_channels = 3          # Number of color channels of the input images
intensity_mean = 127.5    # Together with `intensity_range`, maps input pixels to [-1, 1]
intensity_range = 127.5
n_classes = 20            # Number of positive classes (background excluded)
scales = [0.08, 0.16, 0.32, 0.64, 0.96]  # Anchor box scaling factors; overrides min/max scale
aspect_ratios = [0.5, 1.0, 2.0]          # Anchor box aspect ratios for all predictor layers
two_boxes_for_ar1 = True  # Generate two anchor boxes for aspect ratio 1
steps = None              # Anchor grid step sizes; None = computed automatically (recommended)
offsets = None            # Anchor grid offsets; None = computed automatically (recommended)
clip_boxes = False        # Do not clip anchors to the image boundaries (recommended)
variances = [1.0, 1.0, 1.0, 1.0]  # Divisors for the encoded target coordinates
normalize_coords = True   # Use coordinates relative to the image size

# ## 2. Build or load the model
#
# Execute either section 2.1 (fresh model) or 2.2 (load a saved one), not both.

# ### 2.1 Create a new model
#
# Builds SSD7 in 'training' mode (raw prediction tensor output, no decoding
# layer), optionally loads weights, then compiles with Adam and the SSD
# multi-task loss (`neg_pos_ratio` and `alpha` as in the SSD paper).

# +
# 1: Build the Keras model

K.clear_session()  # Clear previous models from memory.

model = build_model(image_size=(img_height, img_width, img_channels),
                    n_classes=n_classes,
                    mode='training',
                    l2_regularization=0.0005,
                    scales=scales,
                    aspect_ratios_global=aspect_ratios,
                    aspect_ratios_per_layer=None,
                    two_boxes_for_ar1=two_boxes_for_ar1,
                    steps=steps,
                    offsets=offsets,
                    clip_boxes=clip_boxes,
                    variances=variances,
                    normalize_coords=normalize_coords,
                    subtract_mean=intensity_mean,
                    divide_by_stddev=intensity_range)

# 2: Optional: Load some weights

#model.load_weights('./ssd7_weights.h5', by_name=True)

# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model

adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
# -

# ### 2.2 Load a saved model
#
# Loads a full model saved in 'training' mode. Custom objects (anchor box
# layer, SSD loss) must be handed to the loader because they are not part of
# the Keras core. For a model saved in 'inference'/'inference_fast' mode,
# add `DecodeDetections`/`DecodeDetectionsFast` to `custom_objects` as well.

# +
# TODO: Set the path to the `.h5` file of the model to be loaded.
model_path = 'ssd7_Instance_epoch-11_loss-3.6377_val_loss-2.9869.h5'

# We need to create an SSDLoss object in order to pass that to the model loader.
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

K.clear_session()  # Clear previous models from memory.

model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
                                               'compute_loss': ssd_loss.compute_loss})
# -

# ## 3. Set up the data generators for the training
#
# The `DataGenerator` loads images from disk by default; it can also load
# the whole dataset into memory or convert it to a single uncompressed HDF5
# file for faster access. Data augmentation and ground-truth encoding are
# not built in -- they are passed to `generate()` via the `transformations`
# and `label_encoder` arguments.

# +
train_dataset = DataGenerator()
val_dataset = DataGenerator()

# TODO: Set the paths to the dataset here.
Pascal_VOC_dataset_images_dir = '../datasets/ICUB_Instance/train_img'
Pascal_VOC_dataset_annotations_dir = '../datasets/ICUB_Instance/train_ann'
Pascal_VOC_dataset_image_set_filename = '../datasets/ICUB_Instance/train.txt'
Pascal_VOC_dataset_image_set_filename_val = '../datasets/ICUB_Instance/val.txt'

# The XML parser needs to know what object class names to look for and in
# which order to map them to integers (index 0 is the background class).
classes = ['background','book1','book2','book3','book4','book5',
           'cellphone1','cellphone2','cellphone3','cellphone4','cellphone5',
           'mouse1','mouse2','mouse3','mouse4','mouse5',
           'ringbinder1','ringbinder2','ringbinder3','ringbinder4','ringbinder5']

train_dataset.parse_xml(images_dirs=[Pascal_VOC_dataset_images_dir],
                        image_set_filenames=[Pascal_VOC_dataset_image_set_filename],
                        annotations_dirs=[Pascal_VOC_dataset_annotations_dir],
                        classes=classes,
                        include_classes='all',
                        exclude_truncated=False,
                        exclude_difficult=False,
                        ret=False)

val_dataset.parse_xml(images_dirs=[Pascal_VOC_dataset_images_dir],
                      image_set_filenames=[Pascal_VOC_dataset_image_set_filename_val],
                      annotations_dirs=[Pascal_VOC_dataset_annotations_dir],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=False,
                      ret=False)

# Optional: convert the datasets into HDF5 files for faster loading.
# (The original notebook called `train_dataset.create_hdf5_dataset` for both
# files -- the second call must use `val_dataset`.)
#train_dataset.create_hdf5_dataset(file_path='train_dataset_icub.h5',
#                                  resize=False,
#                                  variable_image_size=True,
#                                  verbose=True)
#val_dataset.create_hdf5_dataset(file_path='val_dataset_icub.h5',
#                                resize=False,
#                                variable_image_size=True,
#                                verbose=True)

train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
# -

# +
# 3: Set the batch size.

#train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path='train_dataset_icub.h5')
#val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path='val_dataset_icub.h5')
#train_dataset_size = train_dataset.get_dataset_size()
#val_dataset_size = val_dataset.get_dataset_size()

batch_size = 2

# 4: Define the image processing chain (augmentation for constant-size inputs).

data_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),
                                                            random_contrast=(0.5, 1.8, 0.5),
                                                            random_saturation=(0.5, 1.8, 0.5),
                                                            random_hue=(18, 0.5),
                                                            random_flip=0.5,
                                                            random_translate=((0.03,0.5), (0.03,0.5), 0.5),
                                                            random_scale=(0.5, 2.0, 0.5),
                                                            n_trials_max=3,
                                                            clip_boxes=True,
                                                            overlap_criterion='area',
                                                            bounds_box_filter=(0.3, 1.0),
                                                            bounds_validator=(0.5, 1.0),
                                                            n_boxes_min=1,
                                                            background=(0,0,0))

# 5: Instantiate an encoder that can encode ground truth labels into the
#    format needed by the SSD loss function. It needs the spatial dimensions
#    of the model's predictor layers to create the anchor boxes.

predictor_sizes = [model.get_layer('classes4').output_shape[1:3],
                   model.get_layer('classes5').output_shape[1:3],
                   model.get_layer('classes6').output_shape[1:3],
                   model.get_layer('classes7').output_shape[1:3]]

ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                    img_width=img_width,
                                    n_classes=n_classes,
                                    predictor_sizes=predictor_sizes,
                                    scales=scales,
                                    aspect_ratios_global=aspect_ratios,
                                    two_boxes_for_ar1=two_boxes_for_ar1,
                                    steps=steps,
                                    offsets=offsets,
                                    clip_boxes=clip_boxes,
                                    variances=variances,
                                    matching_type='multi',
                                    pos_iou_threshold=0.5,
                                    neg_iou_limit=0.3,
                                    normalize_coords=normalize_coords)

# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.

train_generator = train_dataset.generate(batch_size=batch_size,
                                         shuffle=True,
                                         transformations=[data_augmentation_chain],
                                         label_encoder=ssd_input_encoder,
                                         returns={'processed_images',
                                                  'encoded_labels'},
                                         keep_images_without_gt=False)

val_generator = val_dataset.generate(batch_size=batch_size,
                                     shuffle=False,
                                     transformations=[],
                                     label_encoder=ssd_input_encoder,
                                     returns={'processed_images',
                                              'encoded_labels'},
                                     keep_images_without_gt=False)
# -

# ## 4. Set the remaining training parameters and train the model
#
# Callbacks: checkpointing of the best model, CSV logging (so the history
# survives a kernel crash), early stopping, and a step learning-rate
# schedule. When resuming a training, prefer loading the *full* model (to
# keep optimizer state) and set `initial_epoch` accordingly.

def lr_schedule(epoch):
    """Step learning-rate schedule: 1e-3 -> 1e-4 at epoch 10 -> 1e-5 at epoch 20."""
    if epoch < 10:
        return 0.001
    elif epoch < 20:
        return 0.0001
    else:
        return 0.00001

# +
# Define model callbacks.

# TODO: Set the filepath under which you want to save the weights.
model_checkpoint = ModelCheckpoint(filepath='ssd7_Instance_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='auto',
                                   period=1)

csv_logger = CSVLogger(filename='ssd7_training_log.csv',
                       separator=',',
                       append=True)

early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0.0,
                               patience=10,
                               verbose=1)

# Defined but intentionally not used below: the explicit `lr_schedule`
# drives the learning rate instead.
reduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',
                                         factor=0.2,
                                         patience=8,
                                         verbose=1,
                                         epsilon=0.001,
                                         cooldown=0,
                                         min_lr=0.00001)

learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
                                                verbose=1)

callbacks = [model_checkpoint,
             csv_logger,
             early_stopping,
             learning_rate_scheduler]
# -

# One epoch is 1,000 training steps here (an arbitrary choice). When
# resuming, `initial_epoch` is zero-based: after 13 finished epochs, resume
# with `initial_epoch = 13` and set `epochs` to the last epoch to run.

# +
# TODO: Set the epochs to train for.
# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
initial_epoch = 13
final_epoch = 15
steps_per_epoch = 1000

history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=steps_per_epoch,
                              epochs=final_epoch,
                              callbacks=callbacks,
                              validation_data=val_generator,
                              validation_steps=ceil(val_dataset_size/batch_size),
                              initial_epoch=initial_epoch)
# -

# Plot the training and validation loss to check convergence; stop training
# once the validation loss stops decreasing for a few epochs in a row.

plt.figure(figsize=(20,12))
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend(loc='upper right', prop={'size': 24});

# ### 5. Make predictions
#
# Predict on the validation set. Since the model was built in 'training'
# mode, its raw output must be decoded with `decode_detections()`
# (confidence thresholding + per-class NMS + top-k), the NumPy twin of the
# `DecodeDetections` layer.

# +
# 1: Set the generator for the predictions.

predict_generator = val_dataset.generate(batch_size=8,
                                         shuffle=True,
                                         transformations=[],
                                         label_encoder=None,
                                         returns={'processed_images',
                                                  'processed_labels',
                                                  'filenames'},
                                         keep_images_without_gt=False)
# -

# +
# Alternative input: load a single image from disk into a batch of one.
# NOTE(review): `skimage.data.imread` is a deprecated alias of
# `skimage.io.imread` in old scikit-image releases -- confirm the installed
# version, or switch to `skimage.io.imread`.
from skimage import data
import numpy as np
import matplotlib.image as mpimg

batch_images = np.zeros((1,480,640,3),int)
print(batch_images.shape)
batch_images[0] = data.imread('D:\\2nd_Semester\\CV\\Project\\part1\\part1\\mouse\\mouse10\\ROT3D\\day7\\left\\00007450.jpg')
#batch_images[0]=mpimg.imread('D:\\two.jpg')
# -

# +
# 2: Generate samples

batch_images, batch_labels, batch_filenames = next(predict_generator)

i = 0  # Which batch item to look at

print("Image:", batch_filenames[i])
print()
print("Ground truth boxes:\n")
print(batch_labels[i])
# -

# +
# 3: Make a prediction

y_pred = model.predict(batch_images)
y_pred.shape
# -

# +
# 4: Decode the raw prediction `y_pred`. A confidence threshold well above
#    the scoring default of 0.01 keeps only confident detections.

y_pred_decoded = decode_detections(y_pred,
                                   confidence_thresh=0.2,
                                   iou_threshold=0.25,
                                   top_k=1,
                                   normalize_coords=normalize_coords,
                                   img_height=img_height,
                                   img_width=img_width)

np.set_printoptions(precision=2, suppress=True, linewidth=90)
print("Predicted boxes:\n")
print('   class   conf xmin   ymin   xmax   ymax')
print(y_pred_decoded[i])
# -

# Draw the predicted boxes (with class name and confidence) onto the image;
# ground truth boxes are drawn in green for comparison.

# +
# 5: Draw the predicted boxes onto the image

plt.figure(figsize=(20,12))
plt.imshow(batch_images[0])

current_axis = plt.gca()

colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()  # Set the colors for the bounding boxes
classes = ['background','book1','book2','book3','book4','book5',
           'cellphone1','cellphone2','cellphone3','cellphone4','cellphone5',
           'mouse1','mouse2','mouse3','mouse4','mouse5',
           'ringbinder1','ringbinder2','ringbinder3','ringbinder4','ringbinder5']  # Class names instead of IDs

# Draw the ground truth boxes in green (omit the label for more clarity)
for box in batch_labels[i]:
    xmin = box[1]
    ymin = box[2]
    xmax = box[3]
    ymax = box[4]
    label = '{}'.format(classes[int(box[0])])
    current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2))
    #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})

# Draw the predicted boxes in blue
for box in y_pred_decoded[i]:
    xmin = box[-4]
    ymin = box[-3]
    xmax = box[-2]
    ymax = box[-1]
    color = colors[int(box[0])]
    label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
    current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
    current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
# -

# +
# Live webcam demo: run the detector on every captured frame, draw the
# detections, press 'q' to quit.
import numpy as np
import cv2

frames = np.zeros((1,480,640,3))  # Single-image batch buffer for the model
cap = cv2.VideoCapture(0)

colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255,255,255)
lineType = 2

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Placeholder rectangle shown while no key is pressed.
    x = 20
    y = 40
    w = 100
    h = 75

    frames[0] = frame
    y_pred = model.predict(frames)
    y_pred_decoded = decode_detections(y_pred,
                                       confidence_thresh=0.5,
                                       iou_threshold=0.25,
                                       top_k=1,
                                       normalize_coords=normalize_coords,
                                       img_height=img_height,
                                       img_width=img_width)
    np.set_printoptions(precision=2, suppress=True, linewidth=90)

    # Batch size is 1, so look at item 0. (The original indexed with a
    # stale `i` left over from an earlier cell.)
    for box in y_pred_decoded[0]:
        xmin = int(box[-4])
        ymin = int(box[-3])
        xmax = int(box[-2])
        ymax = int(box[-1])
        color = colors[int(box[0])]
        cv2.putText(frame, classes[int(box[0])], (xmin,ymin), font, fontScale, fontColor, lineType)
        # Fixed: the original passed (xmin, xmin) as the top-left corner.
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)

    # Our operations on the frame come here
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    else:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        # Display the resulting frame
        cv2.imshow('frame',frame)

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
ssd7_training_Instance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys import json from pathlib import Path from dateutil import parser import requests import fiona import pandas as pd import geopandas as gpd DIR = Path('..') sys.path.append(str(DIR)) import pyotp as otp DATA_DIR = DIR/'data/' # %load_ext autoreload # %autoreload 2 # - # # Palyground # + # setting the analysis date and time dt = parser.parse('25/Oct/2018 8:00:00 AM') in_gdf = gpd.read_file(str(DATA_DIR/'sample_locations.geojson')) # - in_gdf otp.route( locations_gdf = in_gdf.head(2), #a pair of locations in geodataframe fromat mode = 'TRANSIT,WALK', trip_name = 'test', date_time = dt, control_vars = { 'maxWalkDistance':'500', 'wheelchair':False, }, ) otp.service_area( in_gdf = in_gdf, id_field = 'stop_name', mode = "BICYCLE", breaks = [500, 1000], #in seconds date_time = dt, control_vars = {'maxWalkDistance':'400'}, ) od_set = gpd.read_file(str(DATA_DIR/'od_set.geojson')) od_set['location_name'] = 'location: '+ od_set.index.astype(str) od_set otp.od_matrix( origins = od_set.head(2), destinations = od_set.head(2), mode = "TRANSIT,WALK", origins_name = 'location_name', destinations_name = 'location_name', date_time = dt, control_vars = {'maxWalkDistance':'400'}, )
ipynb/example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pushnet # language: python # name: pushnet # --- from eval_utils import export_from_mlflow # + mlflow_uri='../mlflow' mlflow_experiment_name='pushnet' metrics=['test_acc_mean', 'test_acc_std', 'seconds_per_epoch_mean', 'seconds_per_epoch_std', ] results_df = export_from_mlflow(mlflow_uri=mlflow_uri, mlflow_experiment_name=mlflow_experiment_name, metrics=metrics, ) # - results_df.groupby(['dataset_name', 'model_name', 'model_parameters.variant']).first()[metrics]
src/evaluate_results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # ======================================================================
# Compute source power spectral density (PSD) of VectorView and OPM data
# ======================================================================
#
# Here we compute the resting state from raw for data recorded using
# a Neuromag VectorView system and a custom OPM system.
# The pipeline is meant to mostly follow the Brainstorm [1]_
# `OMEGA resting tutorial pipeline <bst_omega_>`_.
# The steps we use are:
#
# 1. Filtering: downsample heavily.
# 2. Artifact detection: use SSP for EOG and ECG.
# 3. Source localization: dSPM, depth weighting, cortically constrained.
# 4. Frequency: power spectral density (Welch), 4 sec window, 50% overlap.
# 5. Standardize: normalize by relative power for each source.
# :depth: 1
#
#
# Preprocessing
# -------------
#

# +
# Authors: <NAME> <<EMAIL>>
#          <NAME> <<EMAIL>>
#          <NAME> <<EMAIL>>
#
# License: BSD (3-clause)

import os.path as op

from mne.filter import next_fast_len

import mne

print(__doc__)

data_path = mne.datasets.opm.data_path()
subject = 'OPM_sample'

subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
bem_fname = op.join(subjects_dir, subject, 'bem',
                    subject + '-5120-5120-5120-bem-sol.fif')
src_fname = op.join(bem_dir, '%s-oct6-src.fif' % subject)
vv_fname = data_path + '/MEG/SQUID/SQUID_resting_state.fif'
vv_erm_fname = data_path + '/MEG/SQUID/SQUID_empty_room.fif'
vv_trans_fname = data_path + '/MEG/SQUID/SQUID-trans.fif'
opm_fname = data_path + '/MEG/OPM/OPM_resting_state_raw.fif'
opm_erm_fname = data_path + '/MEG/OPM/OPM_empty_room_raw.fif'
opm_trans_fname = None
opm_coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')
# -

# Load data, resample. We will store the raw objects in dicts with entries
# "vv" and "opm" to simplify housekeeping and simplify looping later.
#

# +
raws = dict()
raw_erms = dict()
new_sfreq = 90.  # Nyquist frequency (45 Hz) < line noise freq (50 Hz)
raws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error')  # ignore naming
raws['vv'].load_data().resample(new_sfreq)
raws['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error')
raw_erms['vv'].load_data().resample(new_sfreq)
raw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842']

raws['opm'] = mne.io.read_raw_fif(opm_fname)
raws['opm'].load_data().resample(new_sfreq)
raw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname)
raw_erms['opm'].load_data().resample(new_sfreq)
# Make sure our assumptions later hold
assert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq']
# -

# Do some minimal artifact rejection just for VectorView data
#

titles = dict(vv='VectorView', opm='OPM')
ssp_ecg, _ = mne.preprocessing.compute_proj_ecg(
    raws['vv'], tmin=-0.1, tmax=0.1, n_grad=1, n_mag=1)
raws['vv'].add_proj(ssp_ecg, remove_existing=True)
# due to how compute_proj_eog works, it keeps the old projectors, so
# the output contains both projector types (and also the original empty-room
# projectors)
ssp_ecg_eog, _ = mne.preprocessing.compute_proj_eog(
    raws['vv'], n_grad=1, n_mag=1, ch_name='MEG0112')
raws['vv'].add_proj(ssp_ecg_eog, remove_existing=True)
raw_erms['vv'].add_proj(ssp_ecg_eog)
fig = mne.viz.plot_projs_topomap(raws['vv'].info['projs'][-4:],
                                 info=raws['vv'].info)
fig.suptitle(titles['vv'])
fig.subplots_adjust(0.05, 0.05, 0.95, 0.85)

# Explore data
#

kinds = ('vv', 'opm')
n_fft = next_fast_len(int(round(4 * new_sfreq)))
print('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raws['vv'].info['sfreq']))
for kind in kinds:
    fig = raws[kind].plot_psd(n_fft=n_fft, proj=True)
    fig.suptitle(titles[kind])
    fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)

# Alignment and forward
# ---------------------
#

src = mne.read_source_spaces(src_fname)
# This line removes source-to-source distances that we will not need.
# We only do it here to save a bit of memory, in general this is not required.
del src[0]['dist'], src[1]['dist']
bem = mne.read_bem_solution(bem_fname)
fwd = dict()
trans = dict(vv=vv_trans_fname, opm=opm_trans_fname)
# check alignment and generate forward
with mne.use_coil_def(opm_coil_def_fname):
    for kind in kinds:
        # Only the VectorView recording has digitization points to display.
        dig = kind == 'vv'
        fig = mne.viz.plot_alignment(
            raws[kind].info, trans=trans[kind], subject=subject,
            subjects_dir=subjects_dir, dig=dig, coord_frame='mri',
            surfaces=('head', 'white'))
        mne.viz.set_3d_view(figure=fig, azimuth=0, elevation=90,
                            distance=0.6, focalpoint=(0., 0., 0.))
        fwd[kind] = mne.make_forward_solution(
            raws[kind].info, trans[kind], src, bem, eeg=False, verbose=True)
del trans, src, bem

# Compute and apply inverse to PSD estimated using multitaper + Welch.
# Group into frequency bands, then normalize each source point and sensor
# independently. This makes the value of each sensor point and source location
# in each frequency band the percentage of the PSD accounted for by that band.
#

# +
freq_bands = dict(
    delta=(2, 4), theta=(5, 7), alpha=(8, 12), beta=(15, 29), gamma=(30, 45))
topos = dict(vv=dict(), opm=dict())
stcs = dict(vv=dict(), opm=dict())

snr = 3.
lambda2 = 1. / snr ** 2
for kind in kinds:
    noise_cov = mne.compute_raw_covariance(raw_erms[kind])
    inverse_operator = mne.minimum_norm.make_inverse_operator(
        raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True)
    stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd(
        raws[kind], inverse_operator, lambda2=lambda2, n_fft=n_fft, dB=False,
        return_sensor=True, verbose=True)
    topo_norm = sensor_psd.data.sum(axis=1, keepdims=True)
    stc_norm = stc_psd.sum()  # same operation on MNE object, sum across freqs
    # Normalize each source point by the total power across freqs
    for band, limits in freq_bands.items():
        data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True)
        topos[kind][band] = mne.EvokedArray(
            100 * data / topo_norm, sensor_psd.info)
        stcs[kind][band] = \
            100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data
    del inverse_operator
del fwd, raws, raw_erms
# -

# Now we can make some plots of each frequency band. Note that the OPM head
# coverage is only over right motor cortex, so only localization
# of beta is likely to be worthwhile.
#
# Theta
# -----
#

# +
def plot_band(kind, band):
    """Plot activity within a frequency band on the subject's brain.

    `kind` is 'vv' or 'opm'; `band` is a key of `freq_bands`.
    Returns the sensor topomap figure and the source-estimate brain plot.
    """
    title = "%s %s\n(%d-%d Hz)" % ((titles[kind], band,) + freq_bands[band])
    # BUGFIX: capture the figure returned by plot_topomap. Previously the
    # function returned the stale module-level `fig` leaked from earlier
    # cells instead of the topomap it just drew.
    fig = topos[kind][band].plot_topomap(
        times=0., scalings=1., cbar_fmt='%0.1f', vmin=0, cmap='inferno',
        time_format=title)
    brain = stcs[kind][band].plot(
        subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both',
        time_label=title, title=title, colormap='inferno',
        clim=dict(kind='percent', lims=(70, 85, 99)))
    brain.show_view(dict(azimuth=0, elevation=0), roll=0)
    return fig, brain


fig_theta, brain_theta = plot_band('vv', 'theta')
# -

# Alpha
# -----
#

fig_alpha, brain_alpha = plot_band('vv', 'alpha')

# Beta
# ----
# Here we also show OPM data, which shows a profile similar to the VectorView
# data beneath the sensors.
#

fig_beta, brain_beta = plot_band('vv', 'beta')
fig_beta_opm, brain_beta_opm = plot_band('opm', 'beta')

# Gamma
# -----
#

fig_gamma, brain_gamma = plot_band('vv', 'gamma')

# References
# ----------
# .. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
#        Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
#        Computational Intelligence and Neuroscience, vol. 2011, Article ID
#        879716, 13 pages, 2011. doi:10.1155/2011/879716
#
dev/_downloads/6035dcef33422511928bd2247a3d092d/plot_source_power_spectrum_opm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D1-ModelTypes/student/W1D1_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="5Uu0L6pRg-JB" # # # NMA Model Types Tutorial 1: "What" models # # Welcome to Neuromatch Academy! # # Here are your objectives for this tutorial: # # - Load a dataset with spiking activity from hundreds of neurons and understand how it is organized # - Make plots to visualize characteristics of the spiking activity across the population # - Compute the distribution of "inter-spike intervals" (ISIs) for a single neuron # - Consider several formal models of this distribution's shape and fit them to the data "by hand" # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" id="BnELLgvM0Yjs" outputId="b3e8f69b-343d-4d9e-95e2-75b044f9e8e6" #@title Video: Intro from IPython.display import YouTubeVideo video = YouTubeVideo(id='6ft5nyDZIh0', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="YOCsVZYBhDMi" # ## Setup: Imports, helper functions, and data retreival # # Tutorial notebooks typically begin with several set-up steps that are hidden from view by default. # # **Important:** Even though the code is hidden, you still need to run it so that the rest of the notebook can work properly. Step through each cell, either by pressing the play button in the upper-left-hand corner or with a keyboard shortcut (`Cmd-Return` on a Mac, `Ctrl-Enter` otherwise). A number will appear inside the brackets (e.g. 
`[3]`) to tell you that the cell was executed and what order that happened in. # # If you are curious to see what is going on inside each cell, you can double click to expand. Once expanded, double-click the white space to the right of the editor to collapse again. # + cellView="form" colab={} colab_type="code" id="83AqE2hlg9H-" #@title Imports #@markdown Python requires you to explictly "import" libraries #@markdown before their functions are available to use. #@markdown We always do that at the beginning of each notebook or script. import functools import io import requests import ipywidgets as widgets import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator import numpy as np import scipy as sp from scipy.io import loadmat from scipy.optimize import curve_fit import scipy.stats as stats from IPython.display import display, Markdown # numpy print formatting np.set_printoptions(formatter={'float': '{:7.3f}'.format}) # + cellView="form" colab={} colab_type="code" id="velBtnwIM0Dl" #@title Helper functions #@markdown Most of the tutorials make use of helper functions #@markdown to simplify the code that you need to write. They are defined here. # Please don't edit these, or worry about understanding them now! def restrict_spike_times(spike_times, interval): """Given a spike_time dataset, restrict to spikes within given interval. Args: spike_times (sequence of np.ndarray): List or array of arrays, each inner array has spike times for a single neuron. interval (tuple): Min, max time values; keep min <= t < max. 
Returns: np.ndarray: like `spike_times`, but only within `interval` """ t_interval = (5, 15) # units are seconds after start of recording interval_spike_times = [] for spikes in spike_times: interval_mask = (spikes >= t_interval[0]) & (spikes < t_interval[1]) interval_spike_times.append(spikes[interval_mask]) return np.array(interval_spike_times, object) # + cellView="form" colab={} colab_type="code" id="FyJb8B5vFYND" #@title Figure parameters #@markdown This cell defines the way that figures will appear by default. # %matplotlib inline # %config InlineBackend.figure_format='retina' fig_w, fig_h = (6, 4) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) # + cellView="form" colab={} colab_type="code" id="9O9uom_44lAZ" #@title Data retrieval #@markdown This cell downloads the example dataset that we will use in this tutorial. r = requests.get('https://osf.io/sy5xt/download') if r.status_code != 200: print('oops') spike_times = np.load(io.BytesIO(r.content), allow_pickle=True)['spike_times'] # + [markdown] colab_type="text" id="E81PWKTGym7P" # --- # # ## Exploring the Steinmetz dataset # # In this tutorial we will explore the structure of a neuroscience dataset. # # We consider a subset of data from a study of [Steinmetz _et al._ (2019)](https://www.nature.com/articles/s41586-019-1787-x). In this study, Neuropixels probes were implanted in the brains of mice. Electrical potentials were measured by hundreds of electrodes along the length of each probe. Each electrode's measurements captured local variations in the electric field due to nearby spiking neurons. A spike sorting algorithm was used to infer spike times and cluster spikes according to common origin: a single cluster of sorted spikes is causally attributed to a single neuron. # # In particular, a single recording session of spike times and neuron assignments was loaded and assigned to `spike_times` in the preceding setup. # # Typically a dataset comes with some information about its structure. 
However, this information may be incomplete. You might also apply some transformations or "pre-processing" to create a working representation of the data of interest, which might go partly undocumented depending on the circumstances. In any case it is important to be able to use the available tools to investigate unfamiliar aspects of a data structure. # # Let's see what our data looks like... # + [markdown] colab_type="text" id="U3a_9c4sjQ7c" # ### Warming up with `spike_times` # + [markdown] colab_type="text" id="lHvAgKuMJGt3" # What is the Python type of our variable? # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="WsZrKJUFZ38z" outputId="a28511a7-0492-4990-d38c-bbe22c8197a1" type(spike_times) # + [markdown] colab_type="text" id="mdzX4_CUFp2U" # You should see `numpy.ndarray`, which means that it's a normal NumPy array. # # If you see an error message, it probably means that you did not execute the set-up cells at the top of the notebook. So go ahead and make sure to do that. # # Once everything is running properly, we can ask the next question about the dataset: what's its shape? # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="_dYl5pCrIlRa" outputId="b471286f-69ba-4ac5-e92b-bb719f873b5c" spike_times.shape # + [markdown] colab_type="text" id="B1LEO0d4Fz3f" # There are 734 entries in one dimension, and no other dimensions. What is the Python type of the first entry, and what is *its* shape? # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="VR63MAS91Dgn" outputId="87fb8741-1980-425d-928d-ade4b3964914" idx = 0 print( type(spike_times[idx]), spike_times[idx].shape, sep="\n", ) # + [markdown] colab_type="text" id="SNNE2OqLGSDM" # It's also a NumPy array with a 1D shape! Why didn't this show up as a second dimension in the shape of `spike_times`? That is, why not `spike_times.shape == (734, 826)`? # # To investigate, let's check another entry. 
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="34VOEiufG-Ec" outputId="6efb49dd-df39-4ef2-8b99-4f9430956213" idx = 321 print( type(spike_times[idx]), spike_times[idx].shape, sep="\n", ) # + [markdown] colab_type="text" id="225jJ8LgaV5W" # It's also a 1D NumPy array, but it has a different shape. Checking the NumPy types of the values in these arrays, and their first few elements, we see they are composed of floating point numbers (not another level of `np.ndarray`): # + colab={"base_uri": "https://localhost:8080/", "height": 191} colab_type="code" id="C5tROGLzaqeI" outputId="2c51bbcb-2d25-42b1-e2a6-e283b3ddcacb" i_neurons = [0, 321] i_print = slice(0, 5) for i in i_neurons: print( "Neuron {}:".format(i), spike_times[i].dtype, spike_times[i][i_print], "\n", sep="\n" ) # + [markdown] colab_type="text" id="KOFA2ntcZBiy" # Note that this time we've checked the NumPy `dtype` rather than the Python variable type. These two arrays contain floating point numbers ("floats") with 32 bits of precision. # # The basic picture is coming together: # - `spike_times` is 1D, its entries are NumPy arrays, and its length is the number of neurons (734): by indexing it, we select a subset of neurons. # - An array in `spike_times` is also 1D and corresponds to a single neuron; its entries are floating point numbers, and its length is the number of spikes attributed to that neuron. By indexing it, we select a subset of spike times for that neuron. # # Visually, you can think of the data structure as looking something like this: # # ``` # | . . . . . | # | . . . . . . . . | # | . . . | # | . . . . . . . 
| # ``` # # Before moving on, we'll calculate and store the number of neurons in the dataset and the number of spikes per neuron: # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="98RflSLUuIdx" outputId="41ccd1e2-07f7-4b03-db4f-d24c85e22a11" n_neurons = len(spike_times) total_spikes_per_neuron = [len(spike_times_i) for spike_times_i in spike_times] print(f"Number of neurons: {n_neurons}") print(f"Number of spikes for first five neurons: {total_spikes_per_neuron[:5]}") # + [markdown] colab_type="text" id="c3m0WSTACaSh" # If the second line in that cell confused you, it's called a "list comprehension", which is a shorthand way to write # # ```python # total_spikes_per_neuron = [] # for spike_times_i in spike_times: # total_spikes_per_neuron.append(len(spike_times_i) # ``` # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" id="whP8rNm-1IeF" outputId="eb68ee6a-175a-4446-8785-d3768eb80124" #@title Video: Exploring the dataset from IPython.display import YouTubeVideo video = YouTubeVideo(id='sHp98o22GHM', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="239jckGLuHb5" # ### Getting warmer: counting and plotting total spike counts # # As we've seen, the number of spikes over the entire recording is variable between neurons. More generally, some neurons tend to spike more than others in a given period. Lets explore what the distribution of spiking looks like across all the neurons in the dataset. # + [markdown] colab_type="text" id="zblGrgIVQgLk" # Are most neurons "loud" or "quiet", compared to the average? To see, we'll define bins of constant width in terms of total spikes and count the neurons that fall in each bin. This is known as a "histogram". # # You can plot a histogram with the matplotlib function `plt.hist`. 
If you just need to compute it, you can use the numpy function `np.histogram` instead. # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="jQtz2HtsEiwd" outputId="3273def8-5911-4337-dd64-726a6841aa6f" n_bins = 50 plt.hist(total_spikes_per_neuron, n_bins) plt.xlabel("Total spikes per neuron") plt.ylabel("Number of neurons") # + [markdown] colab_type="text" id="vrp7PWtZeG-H" # Let's see what percentage of neurons have a below-average spike count: # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="yWcnId0_FHb2" outputId="ccbfc69d-7cad-47d9-c84d-0c74f9a2223c" mean_spike_count = np.mean(total_spikes_per_neuron) frac_below_mean = (total_spikes_per_neuron < mean_spike_count).mean() print(f"{frac_below_mean:2.1%} of neurons are below the mean") # + [markdown] colab_type="text" id="_OH6T-ikImSJ" # We can also see this by adding the average spike count to the histogram plot: # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="r80VSqHuIx26" outputId="8c5e33a2-7bc1-4c84-b177-4aa4618a5925" plt.hist(total_spikes_per_neuron, n_bins) plt.xlabel("Total spikes per neuron") plt.ylabel("Number of neurons") plt.axvline(mean_spike_count, color="orange", label="Mean neuron") plt.legend() # + [markdown] colab_type="text" id="qwcMvOddf8lm" # This shows that the majority of neurons are relatively "quiet" compared to the mean, while a small number of neurons are exceptionally "loud": they must have spiked more often to reach a large count. # # ### Exercise: Comparing mean and median neurons # # If the mean neuron is more active than 68% of the population, what does that imply about the relationship between the mean neuron and the median neuron? # # *Exercise objective:* Reproduce the plot above, but add the median neuron. # # + colab={} colab_type="code" id="xITqNSatw7pP" # To complete the exercise, uncomment the code and fill in the missing parts (...) # median_spike_count = ... 
# Hint: Try the function np.median # plt.hist(...) # plt.axvline(..., color="orange", label="Mean neuron") # plt.axvline(..., color="limegreen", label="Median neuron") # plt.xlabel("Total spikes per neuron") # plt.ylabel("Number of neurons") # plt.legend() # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="text" id="-_Fc-lizmTfY" outputId="41c8bbf8-f79c-4490-ff8c-46ea1ab0d3a3" # **Example output:** # # ![Solution hint](https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D1-ModelTypes/static/W1D1_Tutorial1_Solution_7183720f_0.png) # # # + [markdown] colab_type="text" id="dAilqW_Bxuk6" # # *Bonus:* The median is the 50th percentile. What about other percentiles? Can you show the interquartile range on the histogram? # + [markdown] colab_type="text" id="qupPEXRjrsfi" # --- # # ## Visualizing neuronal spiking activity # + [markdown] colab_type="text" id="9ytWqlqIs95u" # #### Getting a subset of the data # # Now we'll visualize trains of spikes. Because the recordings are long, we will first define a short time interval and restrict the visualization to only the spikes in this interval. We defined a utility function, `restrict_spike_times`, to do this for you. If you call `help()` on the function, it will tell you a little bit about itself: # + colab={"base_uri": "https://localhost:8080/", "height": 243} colab_type="code" id="F7EeTMtLguVy" outputId="34507b8c-09f7-4f9b-8e55-a12517306c38" help(restrict_spike_times) # + colab={} colab_type="code" id="HHw20g3P4fCI" t_interval = 5, 15 # units are seconds after start of recording interval_spike_times = restrict_spike_times(spike_times, t_interval) # + [markdown] colab_type="text" id="gP_sc8L7zHqg" # Is this a representative interval? What fraction of the total spikes fall in this interval? 
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="4bA_Twv5fV05" outputId="e091ac3f-8a61-495c-b44f-a92e64fba215" original_counts = sum([len(spikes) for spikes in spike_times]) interval_counts = sum([len(spikes) for spikes in interval_spike_times]) frac_interval_spikes = interval_counts / original_counts print(f"{frac_interval_spikes:.2%} of the total spikes are in the interval") # + [markdown] colab_type="text" id="HCMu5XGZf_Lk" # How does this compare to the ratio between the interval duration and the experiment duration? # # We can approximate the experiment duration by taking the minimum and maximum spike time in the whole dataset. To do that, we "concatenate" all of the neurons into one array and then use `np.ptp` ("peak-to-peak") to get the difference between the maximum and minimum value: # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="zgX397FNj9zG" outputId="80cfe03e-258c-45d6-d983-2bd98240ac2b" spike_times_flat = np.concatenate(spike_times) experiment_duration = np.ptp(spike_times_flat) interval_duration = t_interval[1] - t_interval[0] frac_interval_time = interval_duration / experiment_duration print(f"{frac_interval_time:.2%} of the total time is in the interval") # + [markdown] colab_type="text" id="iOiCg3lIY6JG" # These two values are similar. This suggests the average spike rate of the neuronal population is not very different in this interval compared to the entire recording. # # ### Plotting spike trains and rasters # # Now that we have a representative subset, we're ready to plot the spikes, using the matplotlib `plt.eventplot` function. 
Let's look at a single neuron first: # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="HchIghkaNXKM" outputId="f019a7f7-4b11-49b2-bd28-c3a272f72112" neuron_idx = 1 plt.eventplot(interval_spike_times[neuron_idx], color=".2") plt.xlabel("Time (s)") plt.yticks([]) # + [markdown] colab_type="text" id="L282dtXQCO6w" # We can also plot multiple neurons. Here are three: # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="m6c_Yd_7yEPp" outputId="3daed3cc-b800-455c-be92-0092eca4d596" neuron_idx = [1, 11, 51] plt.eventplot(interval_spike_times[neuron_idx], color=".2") plt.xlabel("Time (s)") plt.yticks([]) # + [markdown] colab_type="text" id="1NJdg_TjyB9_" # This makes a "raster" plot, where the spikes from each neuron appear in a different row. # # Plotting a large number of neurons can give you a sense for the characteristics in the population. Let's show every 5th neuron that was recorded: # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="_b_wMUPmiPiS" outputId="45542ae2-f691-4a62-fe98-fc14dcd1aaf8" neuron_idx = np.arange(0, len(spike_times), 5) plt.eventplot(interval_spike_times[neuron_idx], color=".2") plt.xlabel("Time (s)") plt.yticks([]) # + [markdown] colab_type="text" id="YDWveM6NjaBG" # *Question*: How does the information in this plot relate to the histogram of total spike counts that you saw above? 
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" id="JkAoniu21pcm" outputId="ae745a71-a58e-490a-86d6-68ed059b6d23" #@title Video: Visualizing spiking activity from IPython.display import YouTubeVideo video = YouTubeVideo(id='4jNqXqr79o0', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="KO4SaTXByZly" # --- # # ## Inter-spike intervals and their distributions # + [markdown] colab_type="text" id="Pks6reFdeatu" # Given the ordered arrays of spike times for each neuron in `spike_times`, which we've just visualized, what can we ask next? # # Scientific questions are informed by existing models. So, what knowledge do we already have that can inform questions about this data? # # We know that there are physical constraints on neuron spiking. It is thermodynamically inevitable that a neuron must spend energy to drive the cellular machinery that produces a spike. A neuron's reserves of energy are not bottomless, but must be actively replenished by metabolism. Therefore neurons should have a refractory period: they can only fire as quickly as these processes can support, and there is a minimum delay between consecutive spikes of the same neuron. # # More generally, we can ask "how long does a neuron wait to spike again?" or "what is the longest a neuron will wait?" Can we transform spike times into something else, to address questions like these more directly? # # We can consider the inter-spike times (or interspike intervals: ISIs). These are simply the time differences between consecutive spikes of the same neuron. # # ### Exercise: Plot the distribution of ISIs for a single neuron # # *Exercise objective:* make a histogram, like we did for spike counts, to show the distribution of ISIs for one of the neurons in the dataset. # # Do this in three steps: # # 1. Extract the spike times for one of the neurons # 2. 
Compute the ISIs (the amount of time between spikes, or equivalently, the difference between adjacent spike times) # 3. Plot a histogram with the array of individual ISIs # + colab={} colab_type="code" id="eOyAEEiA0Vle" # To complete the exercise, uncomment the code and fill missing parts (...) # 1. Extract spike times for one neuron single_neuron_idx = 283 # single_neuron_spikes = ... # 2. Compute the ISIs # single_neuron_isis = ... # Hint: try the function np.diff # 3. Plot the histogram with the array n_bins = 50 # plt.hist(...) # plt.xlabel(...) # plt.ylabel(...) # plt.axvline(..., color="orange", label="Mean ISI") # plt.legend() # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 287} colab_type="text" id="WT6uQh560L24" outputId="6ab75e1f-3fa1-4086-b34d-bc8fcd0a0497" # **Example output:** # # ![Solution hint](https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D1-ModelTypes/static/W1D1_Tutorial1_Solution_4a35d3da_0.png) # # # + [markdown] colab_type="text" id="W8GmZDFV6JjP" # --- # # In general, the shorter ISIs are predominant, with counts decreasing rapidly (and smoothly, more or less) with increasing ISI. However, counts also rapidly decrease to zero with _decreasing_ ISI, below the maximum of the distribution (8-11 ms). The absence of these very low ISIs agrees with the refractory period hypothesis: the neuron cannot fire quickly enough to populate this region of the ISI distribution. # # Check the distributions of some other neurons. To resolve various features of the distributions, you might need to play with the value of `n_bins`. Using too few bins might smooth over interesting details, but if you use too many bins, the random variability will start to dominate. # # You might also want to restrict the range to see the shape of the distribution when focusing on relatively short or long ISIs. *Hint:* the third argument to `plt.hist` sets the interval to define bins over. 
# + [markdown] colab_type="text" id="bns9w278zVx6" # --- # # ## What is the functional form of an ISI distribution? # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" id="CloN1bOL2FSN" outputId="4be2e30f-c911-4a55-d68d-75440ed3805d" #@title Video: What models from IPython.display import YouTubeVideo video = YouTubeVideo(id='4NoqWMWC1ZY', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="AxGOkl-AfvFs" # The ISI histograms seem to follow continuous, monotonically decreasing functions above their maxima. The function is clearly non-linear. Could it belong to a single family of functions? # # To motivate the idea of using a mathematical function to explain physiological phenomena, let's define a few different function forms that we might expect the relationship to follow: exponential, inverse, and linear. # + colab={} colab_type="code" id="uaJcbGPv-1AX" def exponential(xs, scale, rate, x0): """A simple parametrized exponential function, applied element-wise. Args: xs (np.ndarray or float): Input(s) to the function. scale (float): Linear scaling factor. rate (float): Exponential growth (positive) or decay (negative) rate. x0 (float): Horizontal offset. """ ys = scale * np.exp(rate * (xs - x0)) return ys def inverse(xs, scale, x0): """A simple parametrized inverse function (`1/x`), applied element-wise. Args: xs (np.ndarray or float): Input(s) to the function. scale (float): Linear scaling factor. x0 (float): Horizontal offset. """ ys = scale / (xs - x0) return ys def linear(xs, slope, y0): """A simple linear function, applied element-wise. Args: xs (np.ndarray or float): Input(s) to the function. slope (float): Slope of the line. y0 (float): y-intercept of the line. 
""" ys = slope * xs + y0 return ys # + [markdown] colab_type="text" id="AQbn-rpageDC" # Here is an interactive demo where you can vary the parameters of these functions and see how well the resulting outputs correspond to the data. Adjust the parameters by moving the sliders and see how close you can get the lines to follow the falling curve of the histogram. This will give you a taste of what you're trying to do when you *fit a model* to data. # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 502, "referenced_widgets": ["8c8246eb6eb24d53afaf6389a200334b", "abebc3b5f8764ead80237960c1424ba9", "db5a66478d97400c921d30545c0577d6", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "618203fd3f604e5196b21542b51a0b18", "<KEY>", "<KEY>", "878af4319e974a5088c9511e01a9db3d", "<KEY>", "e078d284eea9481882d0db3a32a3a5ff", "7d183a1aeadf445ea3d7b1b9deaed702", "76ce09987d354707a2b8d8e600ceadbe", "3cd650db36c54f18b6d58a715da87088", "<KEY>", "0d8d0fc1735a45248a185e083f81cb6c", "d13ae5b465bd47378d014f2ac36feec8", "<KEY>", "10be374f49e24b9ab30a3cc656f69046", "2a7ece2ec06c4261a8c5221a4c0900f0"]} colab_type="code" id="NGIGUXtV9Y9v" outputId="6efc00b8-b5e2-4314-87f2-631602239fcc" #@title ISI functions explorer #@markdown When cells have sliders, there's hidden code that implements an interactive demo. #@markdown You don't need to worry about how the code works – but you do need to **run the cell** to enable the sliders. # Don't worry about understanding this code! It's to setup an interactive plot. 
single_neuron_idx = 283
single_neuron_spikes = spike_times[single_neuron_idx]
single_neuron_isis = np.diff(single_neuron_spikes)

# Histogram the ISIs once up front; the interactive callback below only
# redraws the candidate curves on top of these fixed counts.
counts, edges = np.histogram(
    single_neuron_isis, n_bins, (0, single_neuron_isis.max())
)

# Candidate functional forms (defined in an earlier cell) and the color
# used to draw each one.
functions = dict(
    exponential=exponential,
    inverse=inverse,
    linear=linear,
)

colors = dict(
    exponential="C1",
    inverse="C2",
    linear="C4",
)

@widgets.interact(
    exp_scale=widgets.FloatSlider(1000, min=0, max=20000, step=1000),
    exp_rate=widgets.FloatSlider(-10, min=-200, max=50, step=10),
    exp_x0=widgets.FloatSlider(0.1, min=-0.5, max=0.5, step=0.05),
    # NOTE(review): max was 2e2, below the slider's initial value of 1000,
    # which made the widget inconsistent at its starting position; the range
    # is widened so the default value fits inside it.
    inv_scale=widgets.FloatSlider(1000, min=0, max=2e4, step=100),
    inv_x0=widgets.FloatSlider(0, min=-1, max=1, step=0.1),
    lin_slope=widgets.FloatSlider(-1e5, min=-5e5, max=1e5, step=100),
    lin_y0=widgets.FloatSlider(10000, min=0, max=20000, step=1000),
)
def fit_plot(
    exp_scale=1000,
    exp_rate=-10,
    exp_x0=0.1,
    inv_scale=1000,
    inv_x0=0,
    lin_slope=-1e5,
    lin_y0=10000,  # aligned with the slider's initial value (was 2000)
):
    """Helper function for plotting function fits with interactive sliders.

    The arguments are the parameters of the three candidate functions
    (exponential, inverse, linear); their values are supplied by the
    sliders declared in the decorator above. The function draws the fixed
    ISI histogram and overlays one curve per candidate function.
    """
    func_params = dict(
        exponential=(exp_scale, exp_rate, exp_x0),
        inverse=(inv_scale, inv_x0),
        linear=(lin_slope, lin_y0),
    )
    f, ax = plt.subplots()
    ax.fill_between(edges[:-1], counts, step="post", alpha=.5)
    # Start just above 0 so inverse() never divides by zero when x0 == 0.
    xs = np.linspace(1e-10, edges.max())
    for name, function in functions.items():
        ys = function(xs, *func_params[name])
        ax.plot(xs, ys, lw=3, color=colors[name], label=name)
    ax.set(
        xlim=(edges.min(), edges.max()),
        ylim=(0, counts.max() * 1.1),
        xlabel="ISI (s)",
        ylabel="Number of spikes",
    )
    ax.legend()

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" id="nPmQp6pV12xt" outputId="f89a9a54-fc3a-49d0-bd47-14fb94cd5a5f"
#@title Video: Fitting models by hand
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='gZ3YcChh-CY', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video

# + [markdown] colab_type="text" id="rI0h02Scdt6g"
# ## Summary
#
# In this tutorial, we loaded some 
neural data and poked at it to understand how the dataset is organized. Then we made some basic plots to visualize (1) the average level of activity across the population and (2) the distribution of ISIs for an individual neuron. In the very last bit, we started to think about using mathematical formalisms to understand or explain some physiological phenomenon. # # This is the first step towards developing models that can tell us something about the brain. That's what we'll focus on in the next two tutorials.
tutorials/W1D1_ModelTypes/student/W1D1_Tutorial1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## I. Introduction to Python
#
# **Econometrics** is a powerful and essential study for solving real-world problems. The practical implementation and application of econometric methods and tools helps tremendously with understanding the concepts. Nowadays, a vast majority of people will have to deal with some sort of data analysis in their career. Learning how to use some serious data analysis software is an invaluable asset for anyone in economics, business administration, and related fields.
#
# Choosing a software package for learning econometrics could be a tough question to answer. Possibly the most important aspect is that it is widely used both in and outside academia. A large and active user community helps the software to remain up to date and increases the chances that somebody else has already solved the problem at hand. **Python** can be an ideal candidate for starting to learn econometrics and data analysis. It has a huge user base, especially in the fields of data science, machine learning, and artificial intelligence, where it arguably is the most popular software overall. Also, Python is completely free and available for all relevant operating systems.
#
# In this section, we provide a gentle introduction to Python, cover some of the basic knowledge of the software, demonstrate with examples, and provide examples for practice.
#
# ### Topics:
#
# 1. Working Directory
# 2. Python Objects
# 3. Modules
# 4. External Data
# 5. Base Graphics

# ### 1. Working Directory
#
# Similar to many statistical software packages, when we are working on a particular project with the software, we need to interact with different files, such as importing or exporting a data file, saving a generated figure as a graphic file, or storing regression tables as a text, spreadsheet, or LaTeX file. Whenever we provide Python with a file name, it can include the full path on the computer. The full (i.e. "absolute") path to a script file might be something like below on a Mac or Linux system.
#
# ```
# /Users/Econometric-with-Python/Introduction_to_Python.ipynb
# ```
#
# The path is provided for Unix-based operating systems using forward slashes. If you are a Windows user, you usually use back slashes instead of forward slashes, but the Unix style will also work in Python. On a Windows system, a valid path would be
#
# ```
# C:/Users/MyUserName/Desktop/Econometric-with-Python/Introduction_to_Python.ipynb
# ```
#
# If we do not provide any path, Python will use the current "working directory" for reading or writing files. After importing the module **os**, it can be obtained by the command *os.getcwd()*. To change the working directory, use the command *os.chdir(path)*. Relative paths are interpreted relative to the current working directory. For a neat file organization, best practice is to generate a directory for each project (say *MyEconProject*) with several sub-directories (say *PyScripts*, *data*, and *images*). At the beginning of the script, we can use the command *os.chdir()* to set the working directory for the project and afterwards refer to a data set in the respective sub-directory as **data/MyData.csv** and to a graphics file as **images/MyFigure.png**. 
# # Here is an example, # # ``` Python # # Loading the os module # import os # # # Check the current working directory # os.getcwd() # # # Change the working directory to desktop (Window) # os.chdir('C:/Users/MyUserName/Desktop') # Note: "MyUserName" should be the actual username of the machine # # # Change the working directory to desktop (Mac / Linus) # os.chdir('Users/Desktop') # # # Check to see if the working directory is changed # os.getcwd() # ``` # # #### Practice: # # Check your current working directory using the command *os.getcwd()*. Then, Change the working directory to **Desktop** using the command *os.chdir()*. Check to see if the working directory has changed. Again, using the command *os.chdir()* change back to the original directory and check afterward. # Loading the os Module # Check your current working directory # Change the working directory to the Desktop # Check to see if the directory has changed # Change the directory back to the original one # Check again to see if you are in the original directory # ### 2. Python Objects # # Python is an **Object Oriented Programming (OOP)** language, which relies on the concept of classes and objects. It is used to structure a software program into simple, reusable pieces of code blueprints (usually called classes), which are used to create individual instances of objects. Python can work with numbers, lists, arrays, texts, data sets, graphs, functions, and many objects of different types. This section covers the most important ones we will frequently encounter in econometric analysis. We begin with the built-in objects that are available with the standard distribution of Python, then introduce objects included in the modules **numpy** and **pandas**. # # #### Variables # # Just like many statistical software packages, we often want to store results of calculation to reuse them later. For this, we can assign result to a **variable**. 
A variable has a name and by this name we can access the assigned object. # # Here are some examples: # # ``` Python # # Assigning a value 5 to a variable x # x = 5 # print(f'x has a value of: {x}') # # # Assigning a value 10 to a variable y # y = 10 # print(f'y has a value of: {y}') # # # Assigning the value y divided by x to a varialbe z # z = y / x # print(f'z has a value of: {z}') # ``` # # #### Practice: # # Try to complete the following assignments and print their values. # # - Assign 3 + 4 to variable "a" # - Assign 3.14 to variable "b" # - Assign "Hello World" to variable "c" # Assign 3 + 4 to variable "a" # Assign 3.14 to variable "b" # Assign "Hello World" to variable "c" # #### Objects in Python # # Once you assigned different values to the variables, you might wonder what kind of objects we have dealth with so far. In fact, we can use the command "**type**" to identify the object type. # # Here are some examples: # # ``` Python # # Assigning a value 5 to a variable x # x = 5 # x_type = type(x) # print(f'x is a: {x_type}') # # # Assigning a value 2.5 to a variable y # y = 2.5 # y_type = type(y) # print(f'y is a: {y_type}') # # # Assigning the value "Python" to a varialbe z # z = "Python" # z_type = type(z) # print(f'z is a: {z_type}') # ``` # # The command **type** tells us that we have created integers (**int**), floating point numbers (**float**), and text object / string (**str**). The data type not only defines what values can be stored, but also the actions can be perform on these objects. For example, if we want to add an integer to a string, Python will return: # # ``` # TypeError: unsupported operand type(s) for +: 'int' and 'str' # ``` # # #### Practice: # # Try to use the **type** command to identify the data type of the following values, # # - 987654321 # - 3.1415926535 # - "Hello World" # Enter your code here! # Enter your code here! # Enter your code here! # Scalar data types like *int*, *float*, or *str* contain only one single value. 
A **Boolean** value, also called **logical** value, is another scalar data type that will become useful if you want to execute code only if one or more conditions are met. An object of type *bool* can only take one of two values: **True** or **False**. The easiest way to generate them is to state claims which are either true or false and let Python decide. **Table 1.1** is listing the main logical operators: # # #### Logical Operators # # **Table 1.1** # # | Operator | Description | Syntax | # | :---: | :--- | :---: | # | == | x is equal to y | x == y | # | < | x is less than y | x < y | # | <= | x is less than or equal to y | x <= y | # | > | x is greater than y | x > y | # | >= | x is greater than or equal to y | x >= y | # | != | x is NOT equal to y | x != y | # | not | NOT b (i.e. True, if b is False | not b | # | or | either a or b is True (or both) | a or b | # | and | both a and b are True | a and b | # # As we saw in previous examples, scalar types differ in what kind of data they can be used for: # # - int: whole numbers, for example 5 or 10000 # - float: numbers with a decimal point, for example 2.25 or 12345.00 # - str: any sequence of characters delimited by either single or double quotes, for example 'python' or "Hello World" # - bool: either **True** or **False** # # #### Collection of Objects # # For statistical calculations, we often need to work with data sets including many numbers or texts instead of scalars. The simplest way we can collect components (even components of different types) is called a **list** in Python terminlogy, which is similar to **vector** in R. To define a **list**, we can collect differetn values using square brackets [value1, value2, ...]. We can access a list entry by providing the position (starting at 0) within square brackets next to the variable name referencing the list. We can also access a range of values by using their starting position *i* and end position *j* with the syntex listname[i:(j+1)]. 
#
# Here are some examples:
#
# ``` Python
# # Assign a list of letters 'a' to 'f' to a variable "letters"
# letters = ['a', 'b', 'c', 'd', 'e', 'f']
#
# # Access the letter 'a' in the list
# letters[0]
#
# # Access the letter 'd' in the list
# letters[3]
#
# # Access the letters 'd' to 'f' in the list
# letters[3:6]
# ```
#
# A key characteristic of a **list** is the order of included components. The order allows us to access its components by a position. **Dictionaries** (dict), on the other hand, are unordered sets of components. We access components by their unique **key**.
#
# Here are some examples:
#
# ``` Python
# # Define and print a dict:
# x = ['Tom', 'Peter', 'Nancy']
# y = [19, 24, 20]
# z = [False, True, True]
# person_dict1 = dict(name = x, age = y, college = z)
# print(f'person_dict1: \n{person_dict1}\n')
#
# # Another way to define the dict:
# person_dict2 = {'name': x, 'age': y, 'college': z}
# print(f'person_dict2: \n{person_dict2}\n')
#
# # Check data type:
# print(f'data type: {type(person_dict1)}\n')
#
# # Access 'age':
# ages = person_dict1['age']
# print(f'ages: {ages}\n')
#
# # Access 'age' of Peter
# peter_age = person_dict1['age'][1]
# print(f"Peter's age: {peter_age}\n")
#
# # Add 2 years to Peter's age and change his college status
# person_dict1['age'][1] = person_dict1['age'][1] + 2
# person_dict1['college'][1] = False
# print(f'person_dict1: \n{person_dict1}\n')
#
# # Add a new variable 'income':
# person_dict1['income'] = [100, 250, 200]
#
# # Delete variable 'age':
# del person_dict1['age']
# print(f'person_dict1: \n{person_dict1}\n')
# ```
#
# There are many more important data types and we covered only the ones that are most relevant for the topics discussed in this document. **Table 1.2** summarizes these built-in data types plus a simple example in case you have to look them up later. 
#
# **Table 1.2**
#
# | Python Type | Data Type | Example |
# | :--- | :--- | :--- |
# | int | Integer | x = 10 |
# | float | Floating Point Number | x = 3.14 |
# | str | String | x = 'hello world' |
# | bool | Boolean | x = True |
# | list | List | x = [1, 3, 5, 7, 9] |
# | dict | Dict | x = {'A': [1, 2, 3], 'B': ['a', 'b', 'c']} |
#
# #### Practice:
#
# 1. Create the two lists and assign them to the variables x and y respectively.
#
# Variable: x
# Values: '1/1/2020', '1/2/2020', '1/3/2020'
#
# Variable: y
# Values: 62, 67, 58
#
# 2. Create a dictionary and assign the keys "Date" and "Temp" for x and y.
#
# 3. Print the temperature on 1/2/2020.
#
# 4. Change the temperature on 1/3/2020 to 60, then print the dict to check the value has updated.
#

# Create the lists here.

# Create the dict here.

# What is the temperature on 1/2/2020?

# Change the temperature on 1/3/2020 to 60

# ### 3. Modules
#
# **Modules** are Python files that contain functions and variables. We can access these modules and make use of their code to solve different problems. The standard distribution of Python already comes with a number of built-in modules. To make use of their commands we have to import these modules first.
#
# Here, we demonstrate how to import the **math** module and label it as an alias object. We can choose whatever name we want, but usually these aliases follow a naming convention, which can be found in the documentation of the module. After the import, functions and variables are accessed by the dot (.) syntax, which is related to the concept of object orientation. 
# # Here is an example: # # ``` Python # # Import the math module as an alias "m" # import math as m # # # Using the square root function in math module # x = m.sqrt(16) # print(f'Square Root of 16 is: {x}\n') # # # Using the pi variable in math module # y = m.pi # print(f'The value of pi is: {y}\n') # # # Using the Euler's number in math module # z = m.e # print(f"Euler's Number: {z}\n") # ``` # # The functionality of Python can also be extended relatively easily by advanced users. These is not only useful to those who are able and willing to do this, but also for a novice user who can easily make use of a wealth of extensions generated by a big and active community. Since these extensions are mostly programmed in Python, everybody can check and improve the code submitted by a user, so the quality control works very well. The Anaconda distribution of Python alreayd comes with a number of external modules, also called *packages*, that we need for data analyses. # # On top of the packages that come with the standard installation or Anaconda, there are countless packages available for download. If they meet certain quality criteria, they can be published on the offical "Python Package Index" (PyPI) servers at https://pypi.org/. Downloading and installing these packages is simple, which we can either run the command line or type "pip install modulenames". Of course, the installation only has to be done once per machine / user and needs an active internet connection. # # Here is an example to install a package using Juypter Notebook, # # ``` Python # # Install wooldridge package # # !pip install wooldridge # # # Import package from library # import wooldridge # ``` # # #### Practice: # # Try to install the following packages using the pip install command. 
# # - wooldridge (Data sets from Introductory Econometrics: A Modern Approach (6th ed, <NAME>) # - numpy (NumPy is the fundamental package for array computing with Python) # - pandas (Powerful data structures for data analysis, time series, and statistics) # - pandas_datareader (Data readers extracted from the pandas codebase, shoudl be compatible with recent pandas versions) # - statsmodels (Statistical computations and models for Python) # - matplotlib (Python plotting and data visualization package) # - scipy (SciPy, Scientific library for Python) # - patsy (A Python package for describing statistical mdoels and for building design matrices) # - linearmodels (Instrumental variable and linear panel models for Python) # # Install the packages here # Import the installed packages # #### Objects from Different Modules # # As we installed the packages in the last section, we would like to discuss a little bit about the objects in these modules. We begin with the **numpy** objects. # # #### Objects in numpy # # Before we start with numpy, make sure that you have the Anaconda distribution or install **numpy** as explained in the previous section. For more information about the module, see [Walt, Colbert, and Varoquaux (2011)](https://arxiv.org/pdf/1102.1523.pdf). It is standard to import the module udner the alias **np** when working with numpy, so the first line of code always is: # # ``` Python # import numpy as np # ``` # # The most important data type in **numpy** is the multidimensional array (**ndarry**). We first introduce the definition of this data type as well as the basics of accessing and manipulating arrays. Second, we will demonstrate functions and methods that become useful when working on econometric problems. # # To create a simple array, provide a **list** to the function **np.array**. We can also create a two dimenional array by providing multiple lists within square brackets. Instead of a two-dimensional array, we often call this data type a matrix. 
Matrices are important tool for econometric analyses. # # Note: Appendix D of Wooldridge (2019) introduces the basic concepts of matrix algebra. # # The syntax for defining a **numpy** array is: # # ``` Python # array1D = np.array(listname) # array2D = np.array([list1, list2, list3]) # ``` # # Within a provided list, the **numpy** array requires a homogenous data type. If we enter lists including elements of different type, numpy will convert them to a homogeneous data type. # # For example, *np.array(['a', 2])*, becomes an array of strings. # # Indexing one-dimensional arrays is similar to the procedure with the data type **list**. Two dimensional arrays are accessed by two comma separated values within the square brackets. The first number gives the row, the second number gives the column (starting at 0 for the first row or column. Just as with a **list**, accessing ranges of values with ":" excludes the upper limit. # # Here are some examples: # # ``` Python # # Import numpy module as an alias "np" # import numpy as np # # # Define an array in numpy: # nparray1D = np.array([100, 3, 32.4, 5.0]) # print(f'type(nparray1D): {type(nparray1D)}\n') # # # Define a matrix in numpy: # nparray2D = np.array([[1, 2, 3, 4], # [10, 20, 30, 40], # [3, 6, 9, 12]]) # # # Get the dimension of nparray2D # dim = nparray2D.shape # print(f'Dimension: {dim}\n') # # # Access elements by indices # third_elem = nparray1D[2] # print(f'Third Element: {thrid_elem}\n') # # # Access element in 2nd row and 3rd column in a matrix # second_third_elem = nparray2D[1, 2] # print(f'2nd Row and 3 Column Element: {second_third_elem}\n') # # # Access each row in the 2nd and 3rd column # second_to_third_elem = nparray2D[:, 1:3] # print(f'2nd and 3rd column in each row: {second_to_third_elem}\n') # # # Access elements by lists # first_third_elem = nparray1D[[0, 2]] # print(f'1st and 3rd elements in an array: {first_third_elem}\n') # # # Same with Boolean lists: # first_third_elem2 = boolarray[[True, 
False, True, False]] # print(f'1st and 3rd elements: {first_third_elem2}\n') # # k = np.array([[True, False, False, False], # [False, False, True, False], # [True, False, True, False]]) # # # 1st element in 1st row, 3rd element in 2nd row, ... # elem_by_index = nparray2D[k] # print(f'Element by index: {elem_by_index}\n') # ``` # # **numpy** has also some predefined and useful special cases of one and two-dimensional arrays. Here are some examples: # # ``` Python # # Array of integers defined by the arguments start, end, and sequence length: # sequence = np.linspace(0, 2, num = 11) # print(f'Sequence: \n{sequence}\n') # # # Sequence of integers starting at 0, ending at 5 - 1: # squence_int = np.arange(5) # print(f'Sequence: \n{sequence_int}\n') # # # Initialize array with each element set to zero: # zero_array = np.zeros((4, 3)) # print(f'Zero Array: \n{zero_array}\n') # # # Uninitialized array (filled with arbitrary nonsense elements): # empty_array = np.empty((2,3)) # print(f'Empty Array: \n{empty_array}/n') # ``` # # **Table 1.3** lists important functions and methods in **numpy**, which we can apply them to the data type ndarray, but they usually work for many built-in types too. Functions are often vectorized meaning that they are applied to each of the elements separately (in a very efficient way). Methods on an object referenced by **x** are invoked by using the **x.somemethod()** syntax. 
# # **Table 1.3 Important numpy Functions and Methods** # # | Functions / Methods | Description | # | :--- | :--- | # | add(x, y) or x + y | Elements-wise sum of all elements in x and y | # | subtract(x, y) or x - y | Elements-wise subtraction of all elements in x and y | # | divide(x, y) or x / y | Elements-wise division of all elements in x and y | # | multiply(x, y) or x * y | Elements-wise multiplication of all elements in x and y | # | exp(x) | Elements-wise exponential of all elements in x | # | sqrt(x) | Elements-wise square root of all elements in x | # | log(x) | Elements-wise natural log of all elements in x | # | linalg.inv(x) | Inverse of x | # | x.sum() | Sum of all elements in x | # | x.min() | Minimum of all elements in x | # | x.max() | Maximum of all elements in x | # | x.dot(y) or x@y | Matrix multiplication of x and y | # | x.transpose() or x.T | Transpose of x | # # **numpy** has a power matrix algebra system. Basic matrix algebra includes: # # - Matrix addition using the operator + as long as the matrices have the same dimensions. # - The operator * does not do matrix multiplication but rather element-wise multiplciation. # - Matrix multiplication is done with the operator **@** (or the **dot method**) as long as the dimensions of the matrices are compatible or "commutative", which means number of columns in the first matrix is the same as the number of rows in the second matrix. 
# - Transpose of a matrix **x**: as **x.T** # - Inverse of matrix **x**: as **linalg.inv(x)** # # Here are some examples: # # ```Python # # Define two 2D matrices in numpy: # matA = np.array([[4, 9, 8], # [2, 6, 3]]) # # matB = np.array([[1, 5, 2], # [6, 6, 0], # [4, 8, 3]]) # # # Use a numpy exponential function: # exp_A = np.exp(matA) # print(f'Exponential of Matrix A: \n{exp_A}\n') # # # Add an element in Matrix B to all elements in Matrix A # add_A = matA + matB[[0,1]] # same as np.add(matA, matB[[0,1]]) # print(f'New Matrix A: \n{add_A}\n') # # # Use the transpose method: # matA_tr = matA.transpose() # print(f'Matrix A Transpose: \n{matA_tr}\n') # # # Matrix algebra: matrix multiplication # matprod = matA.dot(matB) # same as matA @ matB # print(f'Multipication of Matrix A and B: \n{matprod}\n') # ``` # #### Practice: # # 1. Create a 3 x 3 matrix with any random numbers and assign it to matA # 2. Create a 2 x 3 matrix with any random numbers and assign it to matB # 3. Multiply matA to matB to see if it return any error. If you get an error, why is it so? # 4. Fix the error by tranposing matB, then multiply matA to matB again. # Create matA and matB # Multiply matA to matB # Transpose matB # Multiply matA to matB again # #### Objects in pandas # # The module **pandas** builds on top of data types introduced in previous sections and allows us to work with something we will encounter almost every time we discuss an econometric application: a **data frame**. A data frame is a structure that collects several variables and can be thought of as a rectangular shape with the rows representing the observational units and the columns representing the variables. A data frame can contain variables of different data types (for example a numerical list, a one-dimentional ndarray, str, and so on). Before we start working with **pandas**, make sure it is installed. 
The standard alias of this module is **pd**, so when working with **pandas**, the first line of code always is: # # ``` Python # import pandas as pd # ``` # # The most important data type in pandas is **DataFrame**, which we will often simply refer to as "data frame". One strength of pandas is the existence of a whole set of operations that work on the index of a **DataFrame**. The index contains information on the observational unit, like the person answering a questionnaire or the date of a stock price we want to work with. Accessing elements of a DataFrame object can be done in multiple ways: # # - Access columns / variable by name: # ``` # df['varname1] or df['[varname1', 'varname2']] # ``` # - Access rows / observations by integer position i to j: # ``` # df[i: (j+1)] (also works with the index names of df) # ``` # - Access variables and observations by names: # ``` # df.loc['rowname', 'colname'] # ``` # - Access variables and observations by row and column integer position i and j: # ``` # df.iloc[i, j] # ``` # # If we define a DataFrame by a combination of several DataFrames, they are automatically matched with their indices. 
# # Here are some examples: # # ``` Python # import numpy as np # import pandas as pd # # # Define a pandas DataFrame: # icecream_sales = np.array([30, 40, 35, 130, 120, 60]) # weather_coded = np.array([0, 1, 0, 1, 1, 0]) # customers = np.array([2000, 2100, 1500, 8000, 7200, 2000]) # df = pd.DataFrame({'icecream_sales': icecream_sales, # 'weather_code': weather_coded, # 'customers': customers}) # # # Define and assign an index (six ends of month starting in April, 2010) # # (details on generating indices are given in later section): # ourIndex = pd.date_range(start='04/2010', freq='M', periods=6) # df.set_index(ourIndex, inplace=True) # # # Print the DataFrame # print(f'Data Frame: \n{df}\n') # # # Access columns by variable names: # subset1 = df[['icecream_sales', 'customers']] # print(f'Subset 1: \n{subset1}\n') # # # Access second to fourth row: # subset2 = df[1:4] # same as df['2010-05-31':'2010-07-31] # print(f'Subset 2: \n{subset2}\n') # # # Access rows and columns by index and variable names: # subset3 = df.loc['2010-05-31', 'customers'] # same as df.iloc[1,2] # print(f'Subset 3: \n{subset3}\n') # # # Access rows and columns by index and variable integer positions: # subset4 = df.iloc[1:4, 0:2] # same as df.loc['2010-05-31':2010-07-31', ['icecream_sales', 'weather']] # print(f'Subset 4: \n{subset4}\n') # ``` # # Many economic variables of interest have a qualitative rather than quantitative interpretation. They only take a finite set of values and the outcomes don't necessarily have a numerical meaning. Instead, they represent qualitative information. Examples include gender, academic major, grade, marital status, state, product type or brand. In some of these examples, the order of the outcome has a natural interpretation (such as the grades), in others, it does not (such as state). # # As a specific example, suppose we have asked our customer to rate a product on a scale between 0 (= "bad"), 1 (= "okay"), and 2 (= "good"). 
We have stored the answers of our ten respondents in terms of numbers 0, 1, and 2 in a list. We could work directly with these numbers, but often, is convenient to use so-call data type **Categorical**. One advantage is that we can attach label to the outcomes. We extend a modified example, where the variable *weather* is coded and demonstrate how to assign meaningful labels. The example also includes some methods from **Table 1.4** below. # # **Table 1.4 Important pandas Methods** # # | pandas Methods | Description | # | :--- | :--- | # | df.head() | First 5 observations in df | # | df.tail() | Last 5 observations in df | # | df.describe() | Descriptive statistics of df | # | df.set_index(x) | Set the index of df as x | # | df['x'] or df.x | Access column x in df | # | df.iloc(i, j) | Access variables and observations in df by integer position | # | df.loc(names_i, names_j) | Access variables and observations in df by names | # | df['x'].shift(i) | Create a by *i* rows shifted variable of x | # | df['x'].diff(i) | Creates a variable that contains the *i*th difference of x | # | df.groupby('x').function() | Apply a function to subgroups of df according to x | # # Here are some examples: # # ``` Python # import numpy as np # import pandas as pd # # # Define a pandas DataFrame: # icecream_sales = np.array([30, 40, 35, 130, 120, 60]) # weather_coded = np.array([0, 1, 0, 1, 1, 0]) # customers = np.array([2000, 2100, 1500, 8000, 7200, 2000]) # df = pd.DataFrame({'icecream_sales': icecream_sales, # 'weather_code': weather_coded, # 'customers': customers}) # # # Define and assign an index (six ends of month starting in April, 2010) # # (details on generating indices are given in later section): # ourIndex = pd.date_range(start='04/2010', freq='M', periods=6) # df.set_index(ourIndex, inplace=True) # # # Include sales two months ago: # df['icecream_sales_lag2'] = df['icecream_sales'].shift(2) # print(f'Data frame with lag column: \n{df}\n') # # # Use a pandas.Categorical 
object to attach labels (0 = bad; 1 = good): # df['weather'] = pd.Categorical.from_codes(codes = df['weather_coded'], # categories = ['bad', 'good']) # print(f'Data frame with label column: \n{df}\n') # # # Mean sales for each weather category: # group_means = df.groupby('weather').mean() # print(f'Mean sales for each weather category: \n{group_means}\n') # ``` # # #### Practice: # # Use the starter code to create a DataFrame and answer the following question using the pandas function / method. # # 1. What is the score for <NAME>? # 2. Did <NAME> pass the exam? # 3. What is Vivian's last name? # 4. List the pass_exam status for Joyce, Thomas, Vivan, and Chris. # 5. What is the average score for those who passed the exam? # Define the DataFrame import numpy as np import pandas as pd first_name = np.array(['Jack', 'Joyce', 'Thomas', 'Vivian', 'Chris', 'Eric']) last_name = np.array(['Whopper', 'Peyton', 'Chan', 'Kama', 'Smith', 'Rosero']) pass_exam = np.array([True, True, True, False, True, False]) scores = np.array([80, 88, 92, 64, 75, 58]) student_df = pd.DataFrame({'first': first_name, 'last': last_name, 'pass': pass_exam, 'scores':scores}) # What is the score for <NAME>? # Did <NAME> pass the exam? # What is Vivian's last name? # List the pass_exam status for Joyce, Thomas, Vivan, and Chris. # What is the average score for those who passed the exam? # ### 4. External Data # # In previous sections, we entered all of our data manually in the script files. This is a very untypical way of getting data into our machine and we are introducing more useful alternatives in this section. These are based on the fact that many data sets are already stored somewhere else in data formats that Python can handle. # # #### Data Sets in the Examples # # We will reproduce many of hte examples from Wooldridge (2019). The companion web site for the textbook provides the sample data sets in different formats. 
# If you have an access code that came with the textbook, they can be downloaded free of charge. The Stata data sets are also made available online at the "Instructional Stata Datasets for Econometrics" collection from Boston College, maintained by <NAME>.
#
# Fortunately, we do not have to download each data set manually and import them by the functions discussed. Instead, we can use the external module **wooldridge**. It's not part of the Anaconda distribution and you have to install **wooldridge** as explained in the earlier section. When working with **wooldridge**, the first line of code always is:
#
# ``` Python
# import wooldridge as woo
# ```
#
# The data sets from this module are pandas data type. Here is an example:
#
# ``` Python
# # Load data from wooldridge module
# wage1 = woo.dataWoo('wage1')
#
# # Check the data type
# print(f'wage1 Data Type: \n{type(wage1)}\n')
#
# # Overview of the data set
# print(f'Overview of wage1: \n{wage1.head()}\n')
# ```
#
# Probably all software packages that handle data are capable of working with data stored as text files. This makes them a natural way to exchange data between different programs and users. Common file name extensions for such data files are *RAW*, *CSV*, or *TXT*. Most statistics and spreadsheet programs come with their own file format to save and load data. While it is basically always possible to exchange data via text files, it might be convenient to be able to directly read or write data in the native format of some other software.
#
# Fortunately, the **pandas** module provides the possibility for importing and exporting data from/to text files and many programs.
This includes, # # - Text file (TXT) with **read_table()** and **to_table()** # - Comma-separated values (CSV) with**read_csv()** and **to_csv()** # - MS Excel (XLS and XLSX) with **read_excel()** and **to_excel()** # - Stata (DTA) with **read_stata()** and **to_stata()** # - SAS (XPORT and SSD) with **read_sas()** and **to_sas()** # # Table 1.5 shows two flavors of a raw text file containing the same data. The file *sales.txt* contains a header with the variable names. In the file sales.csv, the columns are separated by a comma. # # **Table 1.5** # # | (a) sales.txt | (b) sales.csv | # | :--- | :--- | # | year product1 product2 product3 | | # | 2008 0 1 2 | 2008, 0, 1, 2 | # | 2009 3 2 4 | 2008, 3, 2, 4 | # | 2010 6 3 4 | 2008, 6, 3, 4 | # | 2011 9 5 2 | 2008, 9, 5, 2 | # | 2012 7 9 3 | 2008, 7, 9, 3 | # | 2013 8 6 2 | 2008, 8, 6, 2 | # # Text files for storing data come in different flavors, mainly differing in how the columns of the table are separated. The pandas commands **read_table()** and **read_csv** provides possibliities for reading many flavors of text files which are then stored as **DataFrame** object. # # Here are some examples: # # ``` Python # # Import pandas # import pandas as pd # # # Import csv with pandas: # file_path = 'data/sales.csv' # df1 = pd.read_csv(file_path, delimiter = ',', header = None, # names = ['year', 'product1', 'product2', 'product3']) # print(f' Data Frame from csv: \n{df1}\n') # # # Import txt with pandas: # file_path = 'data/sales.txt' # df2 = pd.read_table(file_path, delimiter = ' ') # print(f' Data Frame from txt: \n{df2}\n') # # # Add a row to df1 # df3 = df1.append({'year'}: 2014, 'product1': 10, 'product2': 8, 'product3': 2}, ignore_index = True) # print(f' Data Frame with added row: \n{df3}\n') # # # Export with pandas: # df3.to_csv('data/sales2.csv') # ``` # # The command **read_csv()** includes many optional arguments that can be added. 
Many of these arguments are detected automatrically by pandas, but you can also specify them explicitly. The most important arguments are: # # - **header**: Integer specifying the row that includes the variable naems. Can also be **None**. # - **sep**: Often columns are separated by a comman, i.e. **sep = ','** (default). Instead, an arbitrary other character can be given. **sep = ';'** might be another relevant example of a separator. # - **names**: If no header is specified, you can provide a list of variable names. # - **index_col**: The values in column **index_col** are used as an index. # # The last part of this section deals with importing data from other sources than local files on your machine. We will use an extension of **pandas** called "**pandas_datareader**", which makes it straightforward to query online databases. It is not part of the Anaconda distribution and you have to install **pandas_datareader** as explained in the earlier section. The first line of code always is: # # ``` Python # import pandas_datareader as pdr # ``` # # Here is an example to demonstrate the workflow of importing stock data of Ford Motor Company. All we have to do is specify start and end date and the data source, which is *Yahoo Finance* in this case. # # ``` Python # # Download data for 'F' (= Ford Motor Company) and define start and end: # tickers = ['F'] # source = 'yahoo' # start_date = '2014-01-01' # end_date = '2015-12-31' # # # Use pandas_datareader for the import: # f_data = pdr.data.DataReader(tickers, 'yahoo', start_date, end_date) # # # Print the imported data: # print(f'Head of Data: \n{f_data.head()}\n') # print(f'Tail of Data: \n{f_data.tail()}\n') # ``` # # #### Practice: # # Download the Dow Jones Index data from 2020-01-03 to 2020-06-30 with the starter code. Then complete the following task. # # 1. Print the maximum Volume and the average Close values. # 2. Print the data from 2020-01-01 to 2020-01-10. # 3. Print the data from 2020-06-21 to 2020-06-30. 
# 4. Export the data set to the data folder and name it as DJI.csv # + # Starter Code: Download Dow Jones Index Data import pandas_datareader as pdr tickers = '^DJI' source = 'stooq' start_date = '2020-01-03' end_date = '2020-06-30' dji_df = pdr.data.DataReader(tickers, source, start_date, end_date) dji_df.head() # + # Print the maximum Volume value # Print the average Close value # - # Print the data from 2020-01-01 to 2020-01-10 # Print the data from 2020-06-21 to 2020-06-30 # Export the data set to the data folder and name it as DJI.csv # ### Base Graphics with matplotlib # # The module **matplotlib** is a popular and versatile tool for producing all kinds of graphs in *Python*. Here, we discuss the overall base approach for producing graphs and the most important general types of graphs. For more information, see [Hunter (2007)](https://ieeexplore.ieee.org/document/4160265). Some specific graphs used for descriptive statistics will be introduced in the later section. # # Before we start producing our own graphs, make sure that we use the Anaconda distribution or install **matplotlib** as explained in the previous section. When working with **matplotlib**, the first line of code always is: # # ``` Python # import matplotlib.pyplot as plt # ``` # # #### Basic Graphs: # # One very general type is a two-way graph with an abscissa and an ordinate that typically represent two variable like *X* and *Y*. # # If we have data in two lists **x** and **y**, we can easily generate scatter plots, line plots, or similar two-way graphs. The command plot is capable of these types of graphs and we will see some of the more specialized uses later on. 
# # Here is an example: # # ``` Python # # Create data: # x = [1, 3, 4, 8, 11] # y = [0, 2, 7, 5, 8, 9] # # # Plot and save: # plt.plot(x, y, color = 'black') # plt.savefig('images/graphs-basics-a.pdf') # plt.close() # ``` # # The last two lines export the created plot as a PDF file to the folder *images* and reset the plot to create a completely new one. If the folder *images* does not exist yet we must create one first to execute the code. # # Two important arguments of the **plot** command are **linestyle** and **marker**. The argument **linestyle** takes the values '-' (default), '--', ':', and many more. The argument **marker** is empty by default, and can take '**o**', '**v**', and many more. # # The plot command can be used to create a **function plot**, i.e. function values *y = f(x)* are plotted against x. To plot a smooth function, the first step is to generate a fine grid of *x* values. In the following example, we choose **linspace** from **numpy** and control the number of *x* values with **num**. The plotting of the function works exactly as in the previous example. We choose the quadratic function and the standard normal density. # # ``` Python # import scipy.stats as stats # import numpy as np # import matplotlib.pyplot as plt # # # Support of quadratic function # # Creates an array with 100 equispaced elements from -3 to 2 # x1 = np.linspace(-3, 2, num = 100) # # # Function values for all these values # y1 = x1 ** 2 # # # Plot quadratic function # plt.plot(x1, y1, linestyle = '-', color = 'black') # plt.savefig('images/graphs-fuctions-a.pdf') # plt.close() # # # Same for normal density # x2 = np.linspace(-4, 4, num = 100) # y2 = stats.norm.pdf(x2) # # # Plot normal density function # plt.plot(x2, y2, linestyle = '-', color = 'black') # plt.savefig('images/graphs-functions-b.pdf') # plt.close() # ``` # # #### Customizing Graphs with Options # # As alreayd demonstrated in the examles, these plots can be adjusted very flexibly. 
# A few examples:
#
# - The width of the lines can be changed using the argument **linewidth** (default: linewidth = 1).
# - The size of the marker symbols can be changed using the argument **markersize** (default: markersize = 1).
# - The color of the lines and symbols can be changed using the argument **color**. It can be specified in several ways:
#     - By name: 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'white'.
#     - Gray scale by a string encoding a number between 0 (black) and 1 (white), for example **plt.plot(x1, y1, linestyle = '-', color = '0.4')**.
#     - By RGBA values provided by (r, g, b, a) with each letter representing a number between 0 and 1, for example **plt.plot(x1, y1, linestyle = '-', color = (0.9, 0.2, 0.1, 0.3))**. This is useful for fine-tuning colors.
#
# You can also add more elements to change the appearance of your plot:
# - A title can be added using **title('My Title')**.
# - The horizontal and vertical axis can be labeled using **xlabel('My x axis label')** and **ylabel('My y axis label')**.
# - The limits of the horizontal and vertical axis can be chosen using **xlim(min, max)** and **ylim(min, max)**, respectively.
#
# #### Overlaying Several Plots:
#
# Often we want to plot more than one set of variables or multiple graphical elements. This is an easy task, because each plot is added to the previous one by default. We are using the following example to demonstrate some of the options for plotting multiple graphical elements in the same graph.
# # ``` Python # import scipy.stats as stats # import numpy as np # import matplotlib.pyplot as plt # # # Support for all normal densities # x = np.linspace(-4, 4, num = 100) # # # Get different density evaluations # y1 = stats.norm.pdf(x, 0, 1) # y2 = stats.norm.pdf(x, 1, 0.5) # y3 = stats.norm.pdf(x, 0, 2) # # # Plot the density functions # plt.plot(x, y1, linestyle = '-', color = 'black', label = 'Standard Normal') # plt.plot(x, y2, linestyle = '--', color = '0.3', label = 'mu = 1, sigma = 0.5') # plt.plot(x, y3, linestyle = ':', color = '0.6', label = '$\mu = 0$, $\sigma = 2$') # plt.xlim(-3, 4) # plt.title('Normal Densities') # plt.ylabel('$\phi(x)$') # plt.xlabel('x') # plt.legend() # plt.savefig('images/graphs-building.pdf') # plt.close() # ``` # # In this example, we can also see some useful commands for adding elements to an existing graph. # # - axhline(y = value) adds a horizontal line at **y**. # - axvline(x = value) adds a vertical line at **x**. # - legend() addes a legend based on the string provided in each graphical element in label. **matplotlib** finds the best position for the legend in the graph. # # In the legend, but also everywhere within a graph (title, axis labels, etc ...) we can also use Greek letters, equations, and similar features in a relatively straightforward way. This is done using respective LaTeX command ($\ math symbols name$). # # #### Exporting to a File # # By default, a graph generated in one of the ways we discussed above will be displayed. *Python* offers the possibility to export the generated plots automatically using specific commands. Among the different graphics formats, the PNG (Portable Network Graphics) format is very useful for saving plots to use them in a word processor or similar program. For LaTeX users, PS, EPS, and SVG are available and PDF is very useful. 
# # Here is an example of the export syntax: # # ``` Python # plt.savefig('filepath/filename.format') # ``` # # To set the width and height of our graph in inches, we start our code with **plt.figure(figsize = (width, height))**. Below example demonstrates the complete procedure. # # ``` Python # import scipy.stats as stats # import numpy as np # import matplotlib.pyplot as plt # # # Support for all normal densities # x = np.linspace(-4, 4, num = 100) # # # Get different density evaluations # y1 = stats.norm.pdf(x, 0, 1) # y2 = stats.norm.pdf(x, 0, 3) # # # Plot the density functions (a) # plt.figure(figsize = (4, 6)) # plt.plot(x, y1, linestyle = '-', color = 'black') # plt.plot(x, y2, linestyle = '--', color = '0.3') # plt.savefig('images/graphs-export-a.pdf') # plt.close() # # # Plot the density functions (b) # plt.figure(figsize = (6, 4)) # plt.plot(x, y1, linestyle = '-', color = 'black') # plt.plot(x, y2, linestyle = '--', color = '0.3') # plt.savefig('images/graphs-export-b.pdf') # plt.close() # ``` # #### Practice: # # Plot two normal distribution functions on the same graph. One with mean = 5 and standard deviation = 2, the other one with mean = 3 and standard deviation = 5. Set the figure size 6 x 4, include title, x label, y label, and legend in the graph. # # Once finished, export the graph to the *images* folder and save it as "**normal-distribution.png**". # + # Enter your code here
.ipynb_checkpoints/Introduction to Python-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # AdaBoost Regressor

# NOTE(review): despite the notebook title, this fits an AdaBoost
# *classifier* on a binary Buy/Sell label, not a regressor.

# + outputHidden=false inputHidden=false
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

import warnings
warnings.filterwarnings("ignore")

# fix_yahoo_finance is used to fetch data
# NOTE(review): fix_yahoo_finance is deprecated; it lives on as the
# `yfinance` package -- consider migrating.
import fix_yahoo_finance as yf
yf.pdr_override()

# + outputHidden=false inputHidden=false
# input
symbol = 'AMD'
start = '2014-01-01'
end = '2019-01-01'

# Read data
dataset = yf.download(symbol,start,end)

# View Columns
dataset.head()

# + outputHidden=false inputHidden=false
# Feature engineering: intraday spreads, next-day movement labels, returns.
# Buy_Sell is 1 when tomorrow's adjusted close is above today's.
dataset['Open_Close'] = (dataset['Open'] - dataset['Adj Close'])/dataset['Open']
dataset['High_Low'] = (dataset['High'] - dataset['Low'])/dataset['Low']
dataset['Increase_Decrease'] = np.where(dataset['Volume'].shift(-1) > dataset['Volume'],1,0)
dataset['Buy_Sell_on_Open'] = np.where(dataset['Open'].shift(-1) > dataset['Open'],1,0)
dataset['Buy_Sell'] = np.where(dataset['Adj Close'].shift(-1) > dataset['Adj Close'],1,0)
dataset['Returns'] = dataset['Adj Close'].pct_change()
dataset = dataset.dropna()
dataset.head()

# + outputHidden=false inputHidden=false
X = dataset[['Open', 'High', 'Low', 'Volume']].values
y = dataset['Buy_Sell'].values

# + outputHidden=false inputHidden=false
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/4, random_state = 0)

# + outputHidden=false inputHidden=false
from sklearn.ensemble import AdaBoostClassifier
ada = AdaBoostClassifier(n_estimators=180, random_state=0)

# + outputHidden=false inputHidden=false
ada.fit(X_train, y_train)

# Compute the probabilities of obtaining the positive class
y_pred_proba = ada.predict_proba(X_test)[:,1]

# + outputHidden=false inputHidden=false
ada.feature_importances_

# + outputHidden=false inputHidden=false
ada.predict(X_test)

# + outputHidden=false inputHidden=false
# BUG FIX: the original scored on the full data set (X, y), which mixes
# training rows into the evaluation and overstates accuracy.
# Score on the held-out test split instead.
ada.score(X_test, y_test)

# + outputHidden=false inputHidden=false
from sklearn.metrics import roc_auc_score

# Evaluate test-set roc_auc_score
ada_roc_auc = roc_auc_score(y_test, y_pred_proba)

# Print roc_auc_score
print('ROC AUC score: {:.2f}'.format(ada_roc_auc))
Stock_Algorithms/AdaBoost_Regressor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sympy as sym import matplotlib.pyplot as plt # %matplotlib inline sym.init_printing() # # Caracterización de Sistemas en el Dominio del Tiempo # # ## Respuesta al impulso # # A continuación se discutirá sobre la respuesta $y(t)$ de un sistema $\mathcal{H}$, Lineal e Invariante en el Tiempo (LTI), ante una señal de entrada $x(t)$. # # Tenga en cuenta que la señal de entrada puede representarse como una integral: # # \begin{equation} # x(t) = \int_{-\infty}^{\infty} x(\tau) \cdot \delta(t-\tau) \; d \tau # \end{equation} # # Si se introduce esta representación de la señal de entrada en la transformación que realiza el sistema $\mathcal{H}$ para formar la salida $y(t) = \mathcal{H} \{ x(t) \}$, se obtiene: # # # \begin{equation} # y(t) = \mathcal{H} \left\{ \int_{-\infty}^{\infty} x(\tau) \cdot \delta(t-\tau) \; d \tau \right\} # \end{equation} # # # donde $\mathcal{H} \{ \cdot \}$ representa un operador que encuentra la salida del sistema. Como $x(\tau)$ puede verse como una constante ante la variable $t$, la expresión para $y(t)$ puede escribirse como: # # # \begin{equation} # y(t) = \int_{-\infty}^{\infty} x(\tau) \cdot \mathcal{H} \left\{ \delta(t-\tau) \right\} \; d \tau # \end{equation} # # Observe que $\mathcal{H} \{\cdot\}$ opera únicamente sobre el impulso de Dirac. # # La respuesta de un sistema ante una señal impulso de Dirac es conocida como la # [*respuesta al impulso*](https://en.wikipedia.org/wiki/Impulse_response). # # \begin{equation} # h(t) = \mathcal{H} \left\{ \delta(t) \right\} # \end{equation} # # Si el sistema es invariante en el tiempo, la respuesta a un impulso desplazado es $\mathcal{H} \left\{ \delta(t-\tau) \right\} = h(t-\tau)$. 
Así, para un sistema LTI se obtiene que: # # \begin{equation} # y(t) = \int_{-\infty}^{\infty} x(\tau) \cdot h(t-\tau) \; d \tau # \end{equation} # # # Esta operación es conocida como [*convolución*](https://en.wikipedia.org/wiki/Convolution). Se simboliza mediante $*$, de manera que la integral se represente como $y(t) = x(t) * h(t)$ o, para algunos autores, $y(t) = (x*h)(t)$. # # La respuesta $y(t)$ de un sistema LTI \mathcal{H} ante una entrada $x(t)$ se determina completamente a partir de su respuesta impulsional $h(t)$ como la convolución de la entrada $x(t)$ con la respuesta impulsional $h(t)$. # # Un sistema LTI con $x(t)$ como entrada y $y(t)$ como salida es descrito mediante la siguiente Ecuación Diferencial Ordinaria (ODE): # # # \begin{equation} # y(t) + \frac{d}{dt} y(t) = x(t) # \end{equation} # # Se desea encontrar la respuesta del sistema ante una entrada $x(t) = e^{- 2 t} \cdot \epsilon(t)$ # # ### Solución A # Primero, se va a resolver la ODE teniendo en cuenta que deben satisfacerse las condiciones iniciales $y(t)\big\vert_{t = 0-} = 0$ y $\frac{d}{dt}y(t)\big\vert_{t = 0-} = 0$ relacionadas con la causalidad. # # a. Defina la ODE en`SymPy` # + t = sym.symbols('t', real=True) x = sym.Function('x')(t) y = sym.Function('y')(t) ode = sym.Eq(y + y.diff(t), x) ode # - # Se resuelve la ODE para la entrada establecida y considerando que las constantes de integración deben garantiza las condiciones inicales. # ode.subs(x, xinput) # + xinput = sym.exp(-2*t)*sym.Heaviside(t) solution = sym.dsolve(ode.subs(x, xinput)) solution # + integration_constants = sym.solve( (solution.rhs.limit(t, 0, '-'), solution.rhs.diff(t).limit(t, 0, '-')), 'C1') y1 = solution.subs(integration_constants) y1 # - # Se grafica la señal de salida obtenida al resolver la ODE. plt.rcParams['figure.figsize'] = 7, 2 sym.plot(y1.rhs, (t,-1,10), ylabel=r'$y(t)$'); # ### Solución B # Ahora se va a resolver el problema a través de la respuesta impulsional. 
# # La respuesta impulsional $h(t)$ se encuentra al resolver la ODE para una señal impulso de Dirac como entrada, $x(t) = \delta(t)$. ode # + h = sym.Function('h')(t) solution2 = sym.dsolve(ode.subs(x, sym.DiracDelta(t)).subs(y, h)) integration_constants = sym.solve((solution2.rhs.limit( t, 0, '-'), solution2.rhs.diff(t).limit(t, 0, '-')), 'C1') h = solution2.subs(integration_constants) h # - # La respuesta impulsional se grafica. plt.rcParams['figure.figsize'] = 7, 2 sym.plot(h.rhs, (t,-1,10), ylabel=r'$h(t)$'); # Ahora se calcula la convolución $y(t) = x(t) * h(t)$ como: # # \begin{equation} # y(t) = \int_{-\infty}^{\infty} x(\tau) \cdot h(t-\tau) \; d \tau # \end{equation} # # Debido a que $h(t)$ y $x(t)$ son señales causales, la expresión de convolución se convierte en: # # \begin{equation} # y(t) = \int_{0}^{t} x(\tau) \cdot h(t - \tau) \; d\tau \; \forall t \geq 0 # \end{equation} # # # Observe que $y(t) = 0 \; \forall t<0$. h h.rhs h.lhs # + tau = sym.symbols('tau', real=True) y2 = sym.integrate(xinput.subs(t, tau) * h.rhs.subs(t, t-tau), (tau, 0, t)) y2 # - y1 # Se grafica la señal de salida obtenida mediante la convolución. plt.rcParams['figure.figsize'] = 7, 2 sym.plot(y2, (t,-1,10), ylabel=r'$y(t)$'); # Las señales obtenidas con los dos métodos deben ser iguales. Verifiquemos las dos señales. y1 y2 # Al comparar las dos señales graficamente se obtiene: plt.rcParams['figure.figsize'] = 7, 2 graf1 = sym.plot(y1.rhs, (t,-1,10), ylabel=r'$y(t)$', show=False, line_color='red',legend=True, label = 'ODE') graf2 = sym.plot(y2, (t,-1,10), show=False, legend=True, label = 'convolucion') graf1.extend(graf2) graf1.show()
.ipynb_checkpoints/03-1_LTI_Respuesta_impulsional-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Rule-based response generation: a saying is matched against rule patterns
# containing '?x' (single-word) and '?*x' (multi-word) placeholders, and the
# captured fragments are substituted into canned response templates.
import jieba
import random
import warnings
from itertools import chain
warnings.filterwarnings('ignore')


# +
def check_lanuage(string):
    """Return True if `string` contains at least one Chinese character."""
    # conclude if the language of string is Chinese:
    # \u4e00-\u9fa5 is the CJK Unified Ideographs range.
    for char in string:
        if char>='\u4e00' and char<='\u9fa5':
            return True
    return False


def conclude_pattern(string):
    """Return True if `string` is a placeholder token: '?x' or '?*x',
    where x is one or more alphabetic characters."""
    if(string.startswith('?')):
        if (len(string)>2):
            # case1: '?*word' multi-word wildcard; case2: '?word' single slot
            case1=string[1:].startswith('*') and all(char.encode('utf-8').isalpha() for char in string[2:])
            case2=all(char.encode('utf-8').isalpha() for char in string[1:])
            return case1 or case2
        if(len(string)==2):
            # exactly '?x'
            return all(char.encode('utf-8').isalpha() for char in string[1:])
    return False


def split_words(string):
    """Tokenise `string` into a word list.

    Non-Chinese text is split on whitespace. Chinese text is segmented with
    jieba, gluing '?', '*' and ASCII letters back together so placeholder
    tokens such as '?*X' survive segmentation as single tokens.
    """
    output_words=[]
    if not check_lanuage(string):
        output_words=string.split()
    else:
        temp_chars=''  # accumulator for a placeholder being reassembled
        for word in jieba.cut(string):
            if word=='?':
                temp_chars+=word
            elif word=='*':
                # '*' only extends a lone '?' prefix
                if len(temp_chars)==1:
                    temp_chars+=word
            elif word.encode('utf-8').isalpha():
                # letters extend a pending '?' / '?*' prefix
                if(len(temp_chars)>=1):
                    temp_chars+=word
            else:
                # ordinary word: flush any completed placeholder first
                if(len(temp_chars)>=2):
                    output_words.append(temp_chars)
                    temp_chars=''
                output_words.append(word)
        if(len(temp_chars)>=2):
            output_words.append(temp_chars)
    return output_words


# -

# +
def conclude_same(string1,string2):
    """Recursively check the two token sequences can line up, treating any
    placeholder at the head of `string1` as matching everything after it."""
    # NOTE(review): the base case compares against '' even when lists are
    # passed in ([] == '' is False) -- looks fragile; confirm callers never
    # exhaust string1 before hitting a placeholder or mismatch.
    if string1=='' and string2=='':
        return True
    if conclude_pattern(string1[0]):
        return True
    if string1[0]==string2[0]:
        return conclude_same(string1[1:],string2[1:])
    return False


def get_same_start_point(rule_string,saying_string):
    """Return the index in `saying_string` where the literal remainder of
    `rule_string` starts matching; None when no alignment exists.
    An empty rule consumes the whole saying."""
    if not rule_string:
        return len(saying_string)
    for index,char in enumerate(saying_string):
        if(char==rule_string[0]):
            if(conclude_same(rule_string[1:],saying_string[index+1:])):
                return index
    return None


def pattern_match_result(rule:list,saying:list):
    """Collect (placeholder, matched-words) pairs for a rule already known
    to match `saying` (validated via pattern_match_conclude)."""
    if not rule and not saying:
        return []
    string,rest=rule[0],rule[1:]
    if conclude_pattern(string):
        index=get_same_start_point(rest,saying)
        # normalise '?*x' to '?x' so keys line up with response templates
        string=string.replace('?*','?')
        return [(string,saying[:index])]+pattern_match_result(rule[1:],saying[index:])
    if string==saying[0]:
        return pattern_match_result(rule[1:],saying[1:])
    else:
        # NOTE(review): this branch implicitly returns None; callers guard
        # with pattern_match_conclude first, so it should be unreachable.
        print('not match')


def match_result_to_dict(result:list):
    """Convert match pairs into {placeholder: text}. Captures containing
    Chinese are joined without spaces, others with single spaces."""
    output_dict={}
    if not result:
        return output_dict
    _,wordlists=zip(*result)
    language=any(check_lanuage(word) for word in chain.from_iterable(wordlists))
    for key,word_list in result:
        output_dict[key]=' '.join(word_list) if not language else ''.join(word_list)
    return output_dict


def pattern_match_conclude(rule:list,saying:list):
    """Return True if `rule` (tokens + placeholders) fully matches `saying`."""
    if not rule and not saying:
        return True
    string,rest=rule[0],rule[1:]
    if conclude_pattern(string):
        index=get_same_start_point(rest,saying)
        if index!=None:
            # a single-word slot '?x' must consume exactly one word
            if '*' not in string and index!=1:return False
            return pattern_match_conclude(rule[1:],saying[index:])
        else:
            return False
    if string==saying[0]:
        return pattern_match_conclude(rule[1:],saying[1:])
    else:
        return False


def pattern_match(rule:list,saying:list):
    """Match `rule` against `saying`; return capture pairs, or [] when the
    rule does not apply."""
    if pattern_match_conclude(rule,saying):
        return pattern_match_result(rule,saying)
    #print("{} can't be pattern matched to {}".format(rule,saying))
    return []
# -

# Demonstration: mixed Chinese/English rules against sample sayings.
rule_string1=split_words('你?P今天?*X过的好吗??*Y')
print(rule_string1)
saying_string1=split_words('你忘记问今天朋友过的好吗?或许你是对的')
print(saying_string1)
print(match_result_to_dict(pattern_match(rule_string1,saying_string1)))
print('******************************************************')
rule_string2=split_words('你?*P今天?*X过的好吗??*Y')
print(rule_string2)
saying_string2=split_words('你忘记问今天朋友过的好吗?或许你是对的')
print(saying_string2)
print(match_result_to_dict(pattern_match(rule_string2,saying_string2)))
print('******************************************************')
rule_string3=split_words('?*x you ?*y')
print(rule_string3)
saying_string3=split_words('where are you from?')
print(saying_string3)
print(match_result_to_dict(pattern_match(rule_string3,saying_string3)))
print('******************************************************')
rule_string4=split_words('This is ?*x to ?y this ?*Z')
print(rule_string4)
saying_string4=split_words('This is my first time to face this annoying problem')
print(saying_string4)
print(match_result_to_dict(pattern_match(rule_string4,saying_string4)))

# Rule -> candidate response templates. Placeholders captured from the rule
# ('?x', '?y', ...) may be referenced in the templates.
rule_responses = {
    '?*x hello ?*y': ['How do you do', 'Please state your problem'],
    '?*x I want ?*y': ['what would it mean if you got ?y', 'Why do you want ?y', 'Suppose you got ?y soon'],
    '?*x if ?*y': ['Do you really think its likely that ?y', 'Do you wish that ?y', 'What do you think about ?y', 'Really-- if ?y'],
    '?*x no ?*y': ['why not?', 'You are being a negative', 'Are you saying \'No\' just to be negative?'],
    '?*x I was ?*y': ['Were you really', 'Perhaps I already knew you were ?y', 'Why do you tell me you were ?y now?'],
    '?*x I feel ?*y': ['Do you often feel ?y ?', 'What other feelings do you have?'],
    '?*x你好?*y': ['你好呀', '请告诉我你的问题'],
    '?*x我想?*y': ['你觉得?y有什么意义呢?', '为什么你想?y', '你可以想想你很快就可以?y了'],
    '?*x我想要?*y': ['?x想问你,你觉得?y有什么意义呢?', '为什么你想?y', '?x觉得... 你可以想想你很快就可以有?y了', '你看?x像?y不', '我看你就像?y'],
    '?*x喜欢?*y': ['喜欢?y的哪里?', '?y有什么好的呢?', '你想要?y吗?'],
    '?*x讨厌?*y': ['?y怎么会那么讨厌呢?', '讨厌?y的哪里?', '?y有什么不好呢?', '你不想要?y吗?'],}


def get_response_from_saying(saying:str,rule_responses:dict):
    """Pick the rule capturing the most placeholders for `saying`, render a
    randomly chosen response template for it, and return the text ('' when
    no rule matches)."""
    language=check_lanuage(saying)
    rules=list(rule_responses.keys())
    saying_list=split_words(saying)
    max_nums=0
    max_rule=rules[0]
    best_result=[]
    for rule in rules:
        rule_list=split_words(rule)
        result=pattern_match(rule_list,saying_list)
        if len(result)>max_nums:
            max_nums=len(result)
            max_rule=rule
            best_result=result
    if best_result:
        pattern_result=match_result_to_dict(best_result)
        best_response=random.choice(rule_responses[max_rule])
        # substitute captures into the template, joining per language
        return ' '.join([pattern_result.get(word,word) for word in split_words(best_response)]) if not language else ''.join([pattern_result.get(word,word) for word in split_words(best_response)])
    print('{} can\'t be pattern matched to any pattern of answer mode designed in advance'.format(saying))
    return ''


# Responses are random, so repeat a few times to show the variety.
for _ in range(5):
    print(get_response_from_saying(saying='为啥我想要学习',rule_responses=rule_responses))
print('******************************************************')
print(get_response_from_saying(saying='为啥我知道要学习',rule_responses=rule_responses))
print('******************************************************')
for _ in range(5):
    print(get_response_from_saying(saying='Do you know that I was little sad a moment ago',rule_responses=rule_responses))
rule_and_pattern_based_lanuage_generation/source_code/pattern_match_based_language_generation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
from sklearn.ensemble import IsolationForest
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from pyod.models.auto_encoder import AutoEncoder
from os import listdir
from os.path import isfile, join

import warnings
warnings.filterwarnings(action='ignore')
# -

# ### Undersampling, Autoencoder, Isolation Forest

# +
DATA_PATH = '../source_data/'
datasets = [
    'letter',
    'cardio',
    #'mnist',
    #'speech'
]
methods = [
    IsolationForest,
    AutoEncoder
]

# BUG FIX: `random_state` only takes effect (and, on scikit-learn >= 0.24,
# is only accepted) when shuffle=True.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=179)

# Hyper-parameter grids keyed by estimator class name.
# BUG FIXES vs. original:
#  * GridSearchCV requires every param value to be a *list* of candidates;
#    the AutoEncoder entry passed raw values ('verbose': 0) and a flat list
#    that would have been read as four alternative `hidden_neurons` values.
#  * The dict literal contained 'SOD' twice; the duplicate is removed
#    (both entries were identical, so behavior is unchanged).
params = {
    'ABOD': {
        'contamination': [0.05, 0.1, 0.15]
    },
    'LocalOutlierFactor': {
        'n_neighbors': [2, 5, 10, 20],
        'contamination': ['auto'],
        'novelty': [True]
    },
    'SOD': {
        'contamination': [0.05, 0.1, 0.15]
    },
    'LOCI': {
        'contamination': [0.05, 0.1, 0.15]
    },
    'KNN': {
        'contamination': [0.05, 0.1, 0.15]
    },
    'IsolationForest': {
    },
    'AutoEncoder': {
        'hidden_neurons': [[32, 16, 16, 32]],
        'verbose': [0]
    }
}


def get_y(method_name, y):
    """Re-encode 0/1 labels as -1/+1 for estimators that use that convention."""
    y_sym = y * 2 - 1
    if method_name in ['LocalOutlierFactor', 'IsolationForest']:
        return y_sym
    return y


for dataset in datasets:
    X = pd.read_csv(DATA_PATH + dataset + '_x.csv', header=None).values
    y = pd.read_csv(DATA_PATH + dataset + '_y.csv', header=None).values.ravel()
    print("dataset:", dataset)
    print("X shape:", X.shape)
    # BUG FIX: the original printed the raw fraction followed by '%';
    # multiply by 100 so the number really is a percentage.
    # (Assumes label 0 marks outliers, as the original print implied --
    # TODO confirm against the source data.)
    print(f"y shape: {y.size} ({round(100 * y[y == 0].size / y.size, 2)}% outliers)")
    preds_df = pd.DataFrame()
    for method in methods:
        preds = np.zeros(y.size)
        method_name = method.__name__
        print("method:", method_name)
        # Out-of-fold predictions: tune on train/val folds, score held-out fold.
        for train_val_index, test_index in skf.split(X, y):
            y_cur = get_y(method_name, y)
            X_train_val, X_test = X[train_val_index], X[test_index]
            y_train_val, y_test = y_cur[train_val_index], y_cur[test_index]
            clf = GridSearchCV(
                method(),
                params[method_name],
                scoring='f1',
                cv=4,
                refit=True
            )
            clf.fit(X_train_val, y_train_val)
            preds[test_index] = clf.decision_function(X_test)
        preds_df[method_name] = preds
    preds_df.to_csv(DATA_PATH + dataset + '_preds.csv')
    print()
# -

np.unique(y_train_val)
notebooks/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Introduction to Computer Programming # + [markdown] slideshow={"slide_type": "-"} tags=["remove-cell"] # **CS1302 Introduction to Computer Programming** # ___ # + [markdown] slideshow={"slide_type": "slide"} # ## Computer # + [markdown] slideshow={"slide_type": "subslide"} # ### What is a computer? # + [markdown] slideshow={"slide_type": "subslide"} # Is computer a calculator that is bigger and more advanced? # + [markdown] slideshow={"slide_type": "-"} # <center><figure> # <a title="Ccha23 / CC BY-SA (https://creativecommons.org/licenses/by-sa/4.0)" href="https://commons.wikimedia.org/wiki/File:Calculator_app.png"><img width="400" alt="Calculator app" src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/48/Calculator_app.png/512px-Calculator_app.png"></a> # <figcaption>A calculator on a computer.</figcaption> # </figure> # </center> # + [markdown] slideshow={"slide_type": "subslide"} # If computer is a calculator, then, is [abacus](https://en.wikipedia.org/wiki/Abacus) the first computer invented? # + [markdown] slideshow={"slide_type": "-"} # <center><figure> # <a title="Encyclopædia Britannica / Public domain" href="https://commons.wikimedia.org/wiki/File:Abacus_6.png"><img width="400" alt="Abacus 6" src="https://upload.wikimedia.org/wikipedia/commons/a/af/Abacus_6.png"></a> # <figcaption>Abacus - an ancient mechanical computing device.</figcaption> # </figure> # </center> # + [markdown] slideshow={"slide_type": "fragment"} # Is your [smartphone](https://en.wikipedia.org/wiki/Samsung_DeX) a computer? # What defines a computer? 
# + [markdown] slideshow={"slide_type": "fragment"} # - In addition to performing arithmetic calculations, a computer is designed in such a way that # - we can write different programs (in a process called **programming** or **software development**) # - for the computer to execute to perform different tasks. # + [markdown] slideshow={"slide_type": "slide"} # ### What is the architecture of a computer? # + [markdown] slideshow={"slide_type": "fragment"} # A computer contains three main hardware components: # - Input device # - Processing unit # - Output device # + [markdown] slideshow={"slide_type": "subslide"} # #### Peripherals # + [markdown] slideshow={"slide_type": "-"} # <center><figure> # <a title="Unsplash" href="https://unsplash.com/photos/gyRa86ExKTw"><img width="600" alt="Computer peripherals" src="https://images.unsplash.com/flagged/photo-1551954810-43cd6aef5b1f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=3580&q=80"></a> # <figcaption>Computer Peripherals.</figcaption> # </figure> # </center> # + [markdown] slideshow={"slide_type": "fragment"} # Input and output devices connected to a computer are called *peripherals*. # They allow users to interact with the computer in different ways. # + [markdown] slideshow={"slide_type": "subslide"} # **Exercise** Some examples of output devices are: # - Monitor # - Speaker # # Can you give an awesome example in the following box? # + [markdown] nbgrader={"grade": true, "grade_id": "cell-3ea9d712eccdf31c", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} slideshow={"slide_type": "-"} # - 3D printer available at [CityU](https://www.cityu.edu.hk/lib/create/3dprint.htm) # + [markdown] slideshow={"slide_type": "subslide"} # **Exercise** Some examples of input devices are: # - Keyboard # - Mouse # # Can you give an awesome example? 
# + [markdown] nbgrader={"grade": true, "grade_id": "cell-1c411172f0ed411b", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} slideshow={"slide_type": "-"} # - 3D scanner available at [CityU](https://www.cityu.edu.hk/lib/create/3dscan.htm) # + [markdown] slideshow={"slide_type": "subslide"} # **Exercise** Many devices are both input and output device. Can you give at least 3 examples? # + [markdown] nbgrader={"grade": true, "grade_id": "cell-e1982fbce01506b3", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} slideshow={"slide_type": "-"} # - hard disk # - CD/DVD Rom (writable) # - touch screen # + [markdown] slideshow={"slide_type": "slide"} # #### Central Processing Unit # + [markdown] slideshow={"slide_type": "-"} # <center><figure> # <a title="Unsplash" href="https://unsplash.com/photos/CKpBhTXvLis"><img width="600" alt="CPU" src="https://images.unsplash.com/photo-1555617981-dac3880eac6e?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1950&q=80"></a> # <figcaption>An Intel CPU.</figcaption> # </figure> # </center> # + [markdown] slideshow={"slide_type": "fragment"} # The brain of a computer is its processor unit, or the [**C**entral **P**rocesisng **U**nit (CPU)](https://en.wikipedia.org/wiki/Central_processing_unit). # It is located on the [*motherboard*](https://en.wikipedia.org/wiki/Motherboard) and connects to different peripherals using different [*connectors*](https://en.wikipedia.org/wiki/Category:Computer_connectors). # + [markdown] slideshow={"slide_type": "fragment"} # Two important components in the CPU are: # - **A**rithmetic and **L**ogic **U**nit (**ALU**): Performs arithmetics like a calculator (but for binary numbers) # - **C**ontrol **U**nit (**CU**): Directs the operations of the processor in executing a program. 
# + [markdown] slideshow={"slide_type": "subslide"} # Let's run a CPU Simulator below from a [GitHub project](https://github.com/pddring/cpu-simulator). # - Note that all values are zeros in the RAM (**R**andom **A**cess **M**emory) initially. # - Under Settings, click `Examples->Add two numbers`. Observe that the values in the RAM have changed. # - Click `Run` at the bottom right-hand corner. # + slideshow={"slide_type": "-"} language="html" # <iframe src="https://tools.withcode.uk/cpu" width="800" height="800"> # </iframe> # + [markdown] slideshow={"slide_type": "slide"} # ## Programming # + [markdown] slideshow={"slide_type": "subslide"} # ### What is programming? # + [markdown] slideshow={"slide_type": "fragment"} # Programming is the process of writing programs. # But what is a program? # + [markdown] nbgrader={"grade": false, "grade_id": "cell-7675978e85548d96", "locked": true, "schema_version": 3, "solution": false, "task": false} slideshow={"slide_type": "fragment"} # **Exercise** You have just seen a program written in [machine language](https://en.wikipedia.org/wiki/Machine_code). Where is it? # + [markdown] nbgrader={"grade": true, "grade_id": "cell-08298600f10cfc25", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} slideshow={"slide_type": "-"} # The first six lines of binary sequences in the RAM. The last line `Ends` the program. # + [markdown] slideshow={"slide_type": "fragment"} # - The CPU is capable of carrying out # - a set of instructions such as *addition*, *negation*, *Copy*, etc. # - some numbers stored in the RAM. # - Both the instructions and the numbers are represented as binary sequences. # - E.g., in Intel-based CPU, the command for addition is like **00000011 00000100** # + [markdown] slideshow={"slide_type": "subslide"} # ### Why computer uses binary representation? 
# + slideshow={"slide_type": "fragment"} language="html" # <iframe width="912" height="513" src="https://www.youtube.com/embed/Xpk67YzOn5w" allowfullscreen></iframe> # + [markdown] slideshow={"slide_type": "fragment"} # **Exercise** The first electronic computer, called [Electronic Numerical Integrator and Computer (ENIAC)](https://en.wikipedia.org/wiki/ENIAC), was programmed using binary circuitries, namely *switches* that can be either `On` or `Off`. # # <center> # <figure> # <a title="United States Army / Public domain" href="https://commons.wikimedia.org/wiki/File:Two_women_operating_ENIAC.gif"><img width="512" alt="Two women operating ENIAC" src="https://upload.wikimedia.org/wikipedia/commons/8/8c/Two_women_operating_ENIAC_%28full_resolution%29.jpg"></a> # <figcaption>Programmers controlling the switches of ENIAC.</figcaption> # </figure> # </center> # # However, it did not represent values efficiently in binary. 10 binary digits (bits) was used to represent a decimal number 0 to 9. # Indeed, how many decimals can be represented by 10 bits? # + nbgrader={"grade": true, "grade_id": "cell-1023083cd0aca779", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} slideshow={"slide_type": "-"} 2 ** 10 # because there are that many binary sequences of length 10. # + [markdown] slideshow={"slide_type": "fragment"} # As mentioned in the video, there are *International Standards* for representing characters: # - [ASCII](https://en.wikipedia.org/wiki/ASCII) (American Standard Code for Information Interchange) maps English letters and some other symbols to 8-bits (8 binary digits, also called a byte). E.g., `A` is `01000001`. # - [Unicode](https://en.wikipedia.org/wiki/Unicode) can also represent characters in different languages such as Chinese, Japanese...etc. 
# + [markdown] slideshow={"slide_type": "fragment"} # There are additional standards to represent numbers other than non-negative integers: # - [2's complement format](https://en.wikipedia.org/wiki/Two%27s_complement) for negative integers (e.g. -123) # - [IEEE floating point format](https://en.wikipedia.org/wiki/IEEE_754) for floating point numbers such as $1.23 x 10^{-4}$. # + [markdown] slideshow={"slide_type": "subslide"} # **Why define different standards?** # + [markdown] slideshow={"slide_type": "fragment"} # - Different standards have different benefits. ASCII requires less storage for a character, but it represents less characters. # - Although digits are also represented in ASCII, the 2's complement format is designed for arithmetic operations. # + [markdown] slideshow={"slide_type": "slide"} # ## Different generations of programming languages # + [markdown] slideshow={"slide_type": "fragment"} # Machine language is known as the **1st Generation Programming Language**. # + [markdown] slideshow={"slide_type": "fragment"} # **Are we going to start with machine language?** # Start with learning 2's complement and the binary codes for different instructions? # + [markdown] slideshow={"slide_type": "fragment"} # No. Programmers do not write machine codes directly because it is too hard to think in binary representations. # + [markdown] slideshow={"slide_type": "fragment"} # Instead, programmers write human-readable **mnemonics** such as **ADD**, **SUB**..., # called **Assembly language**, or the **2nd Generation Programming Language**. # + [markdown] slideshow={"slide_type": "fragment"} # <center> # <figure> # <a title="Swtpc6800 en:User:Swtpc6800 <NAME> / Public domain" href="https://commons.wikimedia.org/wiki/File:Motorola_6800_Assembly_Language.png"><img width="600" alt="Motorola 6800 Assembly Language" src="https://upload.wikimedia.org/wikipedia/commons/f/f3/Motorola_6800_Assembly_Language.png"></a> # <figcaption> # A Code written in an assembly language. 
# </figcaption> # </figure> # </center> # + [markdown] slideshow={"slide_type": "subslide"} # **Are you going to learn an assembly language?** # + [markdown] slideshow={"slide_type": "fragment"} # Both machine language and assembly language are low-level language which # - are difficult to write for complicated tasks (requiring many lines of code), and # - are platform specific: # - the sets of instructions and their binary codes can be different for different [types of CPUs](https://en.wikipedia.org/wiki/Comparison_of_CPU_microarchitectures), and # - different operating systems use [different assembly languages/styles](https://en.wikipedia.org/wiki/X86_assembly_language). # + [markdown] slideshow={"slide_type": "fragment"} # Anyone want to learn assembly languages, and write a program in many versions to support different platforms? # + [markdown] slideshow={"slide_type": "fragment"} # Probably for programmers who need to write fast or energy-efficient code such as # - a driver that controls a 3D graphics card, and # - a program that control a microprocessor with limited power supply. # + [markdown] slideshow={"slide_type": "fragment"} # But even in the above cases, there are often better alternatives. Play with the following microprocessor simulator: # - Click `CHOOSE A DEMO->LED`. # - Click `RUN SCRIPT` and observes the LED of the board. # - Run the demos `ASSEMBLY` and `MATH` respectively and compare their capabilities. 
# # + slideshow={"slide_type": "-"} language="html" # <iframe width="900", height="1000" src="https://micropython.org/unicorn/"></iframe> # + [markdown] slideshow={"slide_type": "slide"} # ## High-level Language # + slideshow={"slide_type": "fragment"} language="html" # <iframe width="912" height="513" src="https://www.youtube.com/embed/QdVFvsCWXrA" allowfullscreen></iframe> # + [markdown] slideshow={"slide_type": "subslide"} # Programmer nowadays use human-readable language known as the **3rd generation language (3GL)** or **high-level language.** # - Examples includes: C, C++, Java, JavaScript, Basic, Python, PHP, ... # + [markdown] slideshow={"slide_type": "slide"} # ### What is a high-level language? # + [markdown] slideshow={"slide_type": "fragment"} # - A code written in high-level language gets converted automatically to a low-level machine code for the desired platform. # - Hence, it *abstracts* away details that can be handled by the computer (low-level code) itself. # + [markdown] slideshow={"slide_type": "fragment"} # For instance, a programmer needs not care where a value should be physically stored if the computer can find a free location automatically to store the value. # + [markdown] slideshow={"slide_type": "fragment"} # Different high-level languages can have different implementations of the conversion processes: # - **Compilation** means converting a program well before executing of the program. E.g., C++ and Java programs are compiled. # - **Interpretation** means converting a program on-the-fly during the execution of a program. E.g., JavaScript and Python programs are often interpreted. # # Roughly speaking, compiled programs run faster but interpreted programs are more flexible and can be modified at run time. # (The [truth](https://finematics.com/compiled-vs-interpreted-programming-languages/) is indeed more complicated than required for this course.) 
# + [markdown] slideshow={"slide_type": "slide"} # ### What programming language will you learn? # + [markdown] slideshow={"slide_type": "fragment"} # You will learn to program using **Python**. The course covers: # - Basic topics including *values*, *variables*, *conditional*, *iterations*, *functions*, *composite data types*. # - Advanced topics that touch on functional and object-oriented programming. # - Engineering topics such as *numerical methods*, *optimizations*, and *machine learning*. # # See the [course homepage](https://canvas.cityu.edu.hk/courses/36768) for details. # + [markdown] slideshow={"slide_type": "subslide"} # **Why Python?** # + slideshow={"slide_type": "fragment"} language="html" # <iframe width="912" height="513" src="https://www.youtube.com/embed/Y8Tko2YC5hA?end=200" allowfullscreen></iframe> # + [markdown] slideshow={"slide_type": "fragment"} # In summary: # - Python is expressive and can get things done with fewer lines of code as compared to other languages. # - Python is one of the most commonly used languages. It has an extensive set of libraries for Mathematics, graphics, AI, Machine Learning, etc. # - Python is Free and Open Source, so you get to see everything and use it without restrictions. # - Python is portable. The same code runs in different platforms without modifications. # + [markdown] slideshow={"slide_type": "subslide"} # **How does a Python program look like?** # + slideshow={"slide_type": "fragment"} # for step-by-step execution using mytutor # %reload_ext mytutor # + slideshow={"slide_type": "-"} # %%mytutor -h 400 # The program here reads the cohort and reports which year the user is in # Assumption: Input is integer no greater than 2020 import datetime # load a library to tell the current year cohort = input("In which year did you join CityU? 
") year = datetime.datetime.now().year - int(cohort) + 1 print("So you are a year", year, "student.") # + [markdown] slideshow={"slide_type": "fragment"} # A Python program contains *statements* just like sentences in natural languages. # E.g., `cohort = input("In which year did you join CityU? ")` is a statement that gives some value a name called `cohort`. # + [markdown] slideshow={"slide_type": "fragment"} # For the purpose of computations, a statement often contains *expressions* that evaluate to certain values. # E.g., `input("In which year did you join CityU? ")` is an expression with the value equal to what you input to the prompt. # That value is then given the name `cohort`. # + [markdown] slideshow={"slide_type": "fragment"} # Expressions can be composed of: # - *Functions* such as `input`, `now`, and `int`, etc., which are like math functions the return some values based on its arguments, if any. # - *Literals* such as the string `"In which year did you join CityU? "` and the integer `1`. They are values you type out literally. # - *Variables* such as `cohort` and `year`, which are meaningful names to values. # + [markdown] slideshow={"slide_type": "fragment"} # To help others understand the code, there are also *comments* that start with `#`. # These are descriptions meant for human to read but not to be executed by the computer. # + [markdown] nbgrader={"grade": false, "grade_id": "cell-6c906df220a97280", "locked": true, "schema_version": 3, "solution": false, "task": false} slideshow={"slide_type": "subslide"} # **Exercise** What do you think the next generation programmimng should be? # + [markdown] nbgrader={"grade": true, "grade_id": "cell-205d445ede20461d", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} slideshow={"slide_type": "fragment"} # Perhaps programming using natural languages. Write programs that people enjoy reading, like [literate programming](https://www.youtube.com/watch?v=bTkXg2LZIMQ). 
# Indeed, Jupyter notebook is arguably a step towards this direction. See [nbdev](https://github.com/fastai/nbdev). # + language="html" # <iframe width="912" height="513" src="https://www.youtube.com/embed/bTkXg2LZIMQ" allowfullscreen></iframe>
Lecture1/Introduction to Computer Programming.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Although Basenji is unaware of the locations of known genes in the genome, we can go in afterwards and ask what a model predicts for those locations to interpret it as a gene expression prediction.
#
# To do this, you'll need
#  * Trained model
#  * Gene Transfer Format (GTF) gene annotations
#  * BigWig coverage tracks
#  * Gene sequences saved in my HDF5 format.
#
# First, make sure you have an hg19 FASTA file visible. If you have it already, put a symbolic link into the data directory. Otherwise, I have a machine learning friendly simplified version you can download in the next cell.

# +
import os
import subprocess

# Fetch the ML-friendly hg19 FASTA (plus its .fai index) only when the
# FASTA itself is missing.
hg19_cmds = [
    'curl -o data/hg19.ml.fa https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa',
    'curl -o data/hg19.ml.fa.fai https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa.fai',
]
if not os.path.isfile('data/hg19.ml.fa'):
    for cmd in hg19_cmds:
        subprocess.call(cmd, shell=True)
# -

# Next, let's grab a few CAGE datasets from FANTOM5 related to heart biology.
#
# These data were processed by
# 1. Aligning with Bowtie2 with very sensitive alignment parameters.
# 2. Distributing multi-mapping reads and estimating genomic coverage with bam_cov.py

# Download the three heart-related CAGE coverage tracks, keyed off the
# presence of the first one.
cage_cmds = [
    'curl -o data/CNhs11760.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs11760.bw',
    'curl -o data/CNhs12843.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs12843.bw',
    'curl -o data/CNhs12856.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs12856.bw',
]
if not os.path.isfile('data/CNhs11760.bw'):
    for cmd in cage_cmds:
        subprocess.call(cmd, shell=True)

# Then we'll write out these BigWig files and labels to a samples table.
# +
# Samples table: one row per BigWig coverage track
# (index, identifier, file path, clip level, summary statistic, description).
lines = [['index', 'identifier', 'file', 'clip', 'sum_stat', 'description']]
lines.append(['0', 'CNhs11760', 'data/CNhs11760.bw', '384', 'sum', 'aorta'])
lines.append(['1', 'CNhs12843', 'data/CNhs12843.bw', '384', 'sum', 'artery'])
lines.append(['2', 'CNhs12856', 'data/CNhs12856.bw', '384', 'sum', 'pulmonic_valve'])

# BUG FIX: write through a context manager so the file is closed even if a
# write raises, instead of manual open()/close().
with open('data/heart_wigs.txt', 'w') as samples_out:
    for line in lines:
        print('\t'.join(line), file=samples_out)
# -

# Predictions in the portion of the genome that we trained might inflate our accuracy, so we'll focus on chr9 genes, which have formed my typical test set. Then we use [basenji_hdf5_genes.py](https://github.com/calico/basenji/blob/master/bin/basenji_hdf5_genes.py) to create the file.
#
# The most relevant options are:
#
# | Option/Argument | Value | Note |
# |:---|:---|:---|
# | -g | data/human.hg19.genome | Genome assembly chromosome length to bound gene sequences. |
# | -l | 131072 | Sequence length. |
# | -c | 0.333 | Multiple genes per sequence are allowed, but the TSS must be in the middle 1/3 of the sequence. |
# | -p | 3 | Use 3 threads via |
# | -t | data/heart_wigs.txt | Save coverage values from this table of BigWig files. |
# | -w | 128 | Bin the coverage values at 128 bp resolution. |
# | fasta_file | data/hg19.ml.fa | Genome FASTA file for extracting sequences. |
# | gtf_file | data/gencode_chr9.gtf | Gene annotations in gene transfer format. |
# | hdf5_file | data/gencode_chr9.h5 | Gene sequence output HDF5 file. |
#
# (DOC FIX: the table previously said `-l 262144` and
# `data/gencode_chr9_l262k_w128.h5`, contradicting the command actually run
# below; both now match the command.)

# ! basenji_hdf5_genes.py -g data/human.hg19.genome -l 131072 -c 0.333 -p 3 -t data/heart_wigs.txt -w 128 data/hg19.ml.fa data/gencode_chr9.gtf data/gencode_chr9.h5

# Now, you can either train your own model in the [Train/test tutorial](https://github.com/calico/basenji/blob/master/tutorials/train_test.ipynb) or download one that I pre-trained.
# Download the pre-trained heart model (a TensorFlow checkpoint split into
# index, meta, and data shards) only when it is not already present.
if not os.path.isdir('models/heart'):
    os.mkdir('models/heart')
if not os.path.isfile('models/heart/model_best.tf.meta'):
    subprocess.call('curl -o models/heart/model_best.tf.index https://storage.googleapis.com/basenji_tutorial_data/model_best.tf.index', shell=True)
    subprocess.call('curl -o models/heart/model_best.tf.meta https://storage.googleapis.com/basenji_tutorial_data/model_best.tf.meta', shell=True)
    subprocess.call('curl -o models/heart/model_best.tf.data-00000-of-00001 https://storage.googleapis.com/basenji_tutorial_data/model_best.tf.data-00000-of-00001', shell=True)

# Finally, you can offer data/gencode_chr9.h5 and the model to [basenji_test_genes.py](https://github.com/calico/basenji/blob/master/bin/basenji_test_genes.py) to make gene expression predictions and benchmark them.
#
# The most relevant options are:
#
# | Option/Argument | Value | Note |
# |:---|:---|:---|
# | -o | output/gencode_chr9_test | Output directory. |
# | --rc | | Average the forward and reverse complement to form prediction. |
# | -s | | Make scatter plots, comparing predictions to experiment values. |
# | --table | | Print gene expression table. |
# | params_file | models/params_small.txt | Table of parameters to setup the model architecture and optimization. |
# | model_file | models/heart/model_best.tf | Trained saved model prefix. |
# | genes_hdf5_file | data/gencode_chr9.h5 | HDF5 file containing the gene sequences, annotations, and experiment values. |
#
# (DOC FIX: the table previously listed `models/gm12878_best.tf`,
# `data/gencode_chr9_test`, and `data/gencode_chr9_l262k_w128.h5`, none of
# which match the command actually run below; all three now agree with it.)

# ! basenji_test_genes.py -o output/gencode_chr9_test --rc -s --table models/params_small.txt models/heart/model_best.tf data/gencode_chr9.h5

# In the output directory *output/gencode_chr9_test/* are several tables and plots describing gene prediction accuracy. For example *gene_cors.txt* contains Spearman and Pearson correlations for predictions versus experimental measurements for all genes and nonzero genes.

# ! cat output/gencode_chr9_test/gene_cors.txt

# *gene_table.txt.gz* contains specific gene predictions and experimental measurements.

# ! gunzip -c output/gencode_chr9_test/gene_table.txt.gz | head

# And *gene_scatterX.pdf* plots gene predictions versus experimental measurements for each dataset indexed by *X*.

from IPython.display import IFrame
IFrame('output/gencode_chr9_test/gene_scatter0.pdf', width=600, height=500)
tutorials/archive/genes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import json
import numpy as np
from time import time
import pandas as pd
# BUG FIX: pandas.io.json.json_normalize was deprecated in pandas 1.0 and
# removed in 2.0; the public location is the top-level pandas namespace.
from pandas import json_normalize
import os
import concurrent.futures
import functools
import random

# +
# Load one slice of the Million Playlist Dataset and inspect its columns.
t0 = time()
with open('spotify_million_playlist_dataset/data/mpd.slice.0-999.json') as f:
    data = json.load(f)
names = pd.DataFrame(data['playlists']).columns
k = time() - t0
print('time elapsed {:0.2f} min'.format(k / 60))
# -

# NOTE: removed a stray `D.shape` cell here — `D` is not defined until the
# "Read playlists" section below, so the cell always raised NameError.


# +
# NOTE: this helper shadows the stdlib `threading` module; the name is kept
# so the (commented-out) executor code below still resolves it.
def threading(start_index, runs_per_worker, path_list):
    """Merge the playlists of one worker's chunk of slice files into
    jsons/merged_file.json.

    BUG FIX: the original body referenced an undefined variable ``file``
    (NameError on first call) and ignored all three parameters.  Each
    worker now reads its own contiguous slice of *path_list*.

    NOTE(review): appending one JSON array per worker to the same file does
    not produce a single valid JSON document — confirm the intended output
    format before re-enabling the thread pool.
    """
    print(start_index)
    result = []
    for file in path_list[start_index:start_index + runs_per_worker]:
        with open(f"spotify_million_playlist_dataset/data/{file}", 'r') as infile:
            data = json.load(infile)
        for el in data['playlists']:
            result.append(el)
    with open("jsons/merged_file.json", "a") as outfile:
        json.dump(result, outfile)


path_list = os.listdir(r"spotify_million_playlist_dataset/data/")[:3]
max_workers = 100
first_index = 0  # min: 0
end_index = 1000
runs_per_worker = int(end_index / max_workers)
# BUG FIX: the original rebound `start_index` from an int to this range;
# a distinct name keeps the two meanings separate.
start_indices = range(first_index, end_index, runs_per_worker)
# +
# t0 = time()
# with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
#     temp = functools.partial(threading,
#                              runs_per_worker=runs_per_worker,
#                              path_list=path_list
#                              )
#     executor.map(temp, start_indices)

# # Time diff
# print(f"Time taken: {time()-t0}")
# -

# ### Read playlists

t0 = time()
keys = [
    'pid',
    'name',
    'description',
    'num_artists',
    'num_albums',
    'num_tracks',
    'num_followers',
    'duration_ms',
    'collaborative',
    'tracks'
]
samp = 700
# assumes every sampled slice file holds exactly 1000 playlists — TODO confirm;
# rows for sampled files that fail the name filter below stay uninitialised.
arr = np.empty(shape=(samp * 1000, len(keys)), dtype=object)
path = 'spotify_million_playlist_dataset/data/'
filenames = os.listdir(path)
for i, filename in enumerate(random.sample(sorted(filenames), samp)):
    if filename.startswith("mpd.slice.") and filename.endswith(".json"):
        fullpath = os.sep.join((path, filename))
        # BUG FIX: use a context manager + json.load instead of manual
        # open()/read()/close(), so the handle is closed on error too.
        with open(fullpath) as f:
            mpd_slice = json.load(f)
        D = pd.DataFrame(mpd_slice['playlists'])[keys].to_numpy()
        arr[i * 1000:(i + 1) * 1000, :] = D
        print(filename, i)

# Time diff
print(f"Time taken: {(time()-t0)/60}")

arr[:10]

# ## Reading track_uri

# Small smoke test on the first two playlists of the slice loaded above.
lst = []
for playlist in data['playlists'][:2]:
    for track in playlist['tracks']:
        lst.append([track['track_uri'], playlist['pid']])

# +
# Full pass: collect (track_uri, pid) pairs from every slice file.
t0 = time()
samp = 100
lst = []
path = 'spotify_million_playlist_dataset/data/'
filenames = os.listdir(path)
for i, filename in enumerate(sorted(filenames)):
    if filename.startswith("mpd.slice.") and filename.endswith(".json"):
        fullpath = os.sep.join((path, filename))
        with open(fullpath) as f:
            mpd_slice = json.load(f)
        for playlist in mpd_slice['playlists']:
            for track in playlist['tracks']:
                lst.append([track['track_uri'], playlist['pid']])
        print(filename, i)

# Time diff
print(f"Time taken: {(time()-t0)/60}")
# -

len(lst)

lst_uri = [el[0] for el in lst if el[0]]

set_uri = set(lst_uri)

len(set_uri)
data-processing-notebooks/.ipynb_checkpoints/mpd-slice-combine-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.7.1 # language: julia # name: julia-1.7 # --- # + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"} # # 2022-02-28 Low Rank # # ## Last time # # * Solving least squares problems # * Geometry of the SVD # # ## Today # # * Reflection on algorithm choices # * Low-rank structure # * Primer on interpolation # + slideshow={"slide_type": "skip"} using LinearAlgebra using Plots default(linewidth=4, legendfontsize=12) function vander(x, k=nothing) if isnothing(k) k = length(x) end m = length(x) V = ones(m, k) for j in 2:k V[:, j] = V[:, j-1] .* x end V end function gram_schmidt_classical(A) m, n = size(A) Q = zeros(m, n) R = zeros(n, n) for j in 1:n v = A[:,j] R[1:j-1,j] = Q[:,1:j-1]' * v v -= Q[:,1:j-1] * R[1:j-1,j] R[j,j] = norm(v) Q[:,j] = v / R[j,j] end Q, R end function qr_householder(A) m, n = size(A) R = copy(A) V = [] # list of reflectors for j in 1:n v = copy(R[j:end, j]) v[1] += sign(v[1]) * norm(v) # <--- v = normalize(v) R[j:end,j:end] -= 2 * v * v' * R[j:end,j:end] push!(V, v) end V, R end function qr_chol(A) R = cholesky(A' * A).U Q = A / R Q, R end function qr_chol2(A) Q, R = qr_chol(A) Q, R1 = qr_chol(Q) Q, R1 * R end function peanut() theta = LinRange(0, 2*pi, 50) r = 1 .+ .4*sin.(3*theta) + .6*sin.(2*theta) r' .* [cos.(theta) sin.(theta)]' end function circle() theta = LinRange(0, 2*pi, 50) [cos.(theta) sin.(theta)]' end function Aplot(A) "Plot a transformation from X to Y" X = peanut() Y = A * X p = scatter(X[1,:], X[2,:], label="in") scatter!(p, Y[1,:], Y[2,:], label="out") X = circle() Y = A * X q = scatter(X[1,:], X[2,:], label="in") scatter!(q, Y[1,:], Y[2,:], label="out") plot(p, q, layout=2, aspect_ratio=:equal) end # + [markdown] slideshow={"slide_type": "slide"} # # Condition number via SVD # # $$ U \overbrace{\begin{bmatrix} \sigma_{\max} && \\ 
& \ddots & \\ && \sigma_{\min} \end{bmatrix}}^{\Sigma} V^T = A $$ # # \begin{align} # \lVert A \rVert &= \sigma_{\max} & # \kappa(A) &= \frac{\sigma_{\max}}{\sigma_{\min}} = \texttt{cond}(A) # \end{align} # + cell_style="split" A = randn(2,2) # nonsymmetric A = A + A' # + cell_style="split" @show svdvals(A) U, S, V = svd(A) @show norm(U - U') Aplot(A) # + [markdown] slideshow={"slide_type": "slide"} # # Example: autonomous vehicles # + [markdown] cell_style="split" # * Need to solve least squares problems in real time # * Weight/cost/size increase with compute # * What algorithm to choose? # * What precision to use? # + cell_style="split" slideshow={"slide_type": "fragment"} A = rand(5000, 500) A_32 = Float32.(A) @show cond(A) @time qr(A); # Householder; backward stable @time qr_chol(A); # Unstable @time qr(A_32); # + cell_style="split" slideshow={"slide_type": "fragment"} V = vander(LinRange(-1, 1, 20)) Q, R = qr(Float32.(V)) @show norm(Q' * Q - I) Q, R = qr_chol(V) @show norm(Q' * Q - I) # + [markdown] slideshow={"slide_type": "slide"} # # Best low rank approximation # # The SVD can be truncated to yield the best rank-$k$ approximation of a matrix. # + cell_style="split" n, k = 2, 1 A = randn(n, n) Aplot(A) # + cell_style="split" slideshow={"slide_type": "fragment"} U, S, V = svd(A) @show S[1:k+1] Uhat = U[:, 1:k] Shat = S[1:k] Vhat = V[:, 1:k] Ahat = Uhat * diagm(Shat) * Vhat' @show norm(Ahat) Aplot(Ahat - A) # + [markdown] slideshow={"slide_type": "slide"} # # Example: Galaxies # # Suppose we have two galaxies of size $n_1 = 100$ and $n_2 = 200$, each randomly distributed around their respective centers. 
# + galaxy(center, sigma, n) = reshape(center, 1, 3) .+ sigma*randn(n, 3) g1 = galaxy([0 0 0], 1, 100) g2 = galaxy([10 0 0], 1, 100) scatter(g1[:,1], g1[:,2], aspect_ratio=:equal) scatter!(g2[:,1], g2[:,2]) # + [markdown] slideshow={"slide_type": "slide"} # ## Forces between stars # # Consider the gravitational force from a star at position $x_2$ acting on a star at position $x_1$, # $$ F_{1,2} = G \frac{m_1 m_2}{\lVert x_2 - x_1 \rVert^3} (x_2 - x_1) $$ # where $m_1$ and $m_2$ are the masses of each star respectively. # - function gravity(g1, g2) m = size(g1, 1) n = size(g2, 1) F = zeros(3*m, n) for i in 0:m-1 for j in 1:n r = g2[j,:] - g1[1+i,:] F[1+3*i:3*(i+1),j] = r / norm(r)^3 end end F end gravity(g1, g2) # + [markdown] slideshow={"slide_type": "slide"} # # Spectrum # + cell_style="split" g1 = galaxy([0 0 0], 1, 500) g2 = galaxy([10 0 0], 1, 500) F = gravity(g1, g2) U, S, V = svd(F) scatter(S, yscale=:log10, ylims=(1e-10, 10), xlims=(0, 200)) # + cell_style="split" k = 20 Uhat = U[:,1:k] Shat = S[1:k] Vhat = V[:,1:k] Fhat = Uhat * diagm(Shat) * Vhat' @show norm(F) @show norm(F - Fhat) size(F) # + [markdown] slideshow={"slide_type": "slide"} # # What is interpolation? # # Given data $(x_i, y_i)$, find a (smooth?) function $f(x)$ such that $f(x_i) = y_i$. # + [markdown] cell_style="split" # ## Data in # # * direct field observations/measurement of a physical or social system # * numerically processed observations, perhaps by applying physical principles # * output from an expensive "exact" numerical computation # * output from an approximate numerical computation # + [markdown] cell_style="split" # ## Function out # # * Polynomials # * Piecewise polynomials (includes nearest-neighbor) # * Powers and exponentials # * Trigonometric functions (sine and cosine) # * Neural networks # + [markdown] slideshow={"slide_type": "fragment"} # Interpolation fits the data exactly! 
# + [markdown] slideshow={"slide_type": "slide"} # # Polynomial interpolation # # We've seen how we can fit a polynomial using Vandermonde matrices, one column per basis function and one row per observation. # # $$ \underbrace{\Bigg[ 1 \Bigg| x \Bigg| x^2 \Bigg| x^3 \Bigg]}_{A \in \mathbb R^{m\times n}} \Bigg[ \mathbf p \Bigg] = \Bigg[ \mathbf y \Bigg] $$ # # It's possible to find a unique polynomial $\mathbf p$ when which of the following are true? # 1. $m \le n$ # 2. $m = n$ # 3. $m \ge n$ # + [markdown] slideshow={"slide_type": "slide"} # # Polynomial interpolation with a Vandermonde matrix # + x = LinRange(-1.5, 2, 4) y = sin.(x) A = vander(x) p = A \ y scatter(x, y) s = LinRange(-3, 3, 50) plot!(s, [sin.(s) vander(s, length(p)) * p]) # + [markdown] slideshow={"slide_type": "slide"} # ## Vandermonde matrices can be ill-conditioned # + slideshow={"slide_type": ""} A = vander(LinRange(-1, 1, 30)) cond(A) # + [markdown] slideshow={"slide_type": "fragment"} # 1. It's because of the points $x$? # 2. It's because of the basis functions $\{ 1, x, x^2, x^3, \dotsc \}$?
slides/2022-02-28-low-rank.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # All at odds - part 1

# It won't come as a shock to anyone that this is not the only blog on this particular subject. There are many blogs, YouTube channels, etc. on the subject of data science, and specifically data science in sport and sports betting. It is interesting, when looking at these other resources, how different people take different routes, one assumes (or at least hopes) with the same destination in mind. To date, I have not seen another blog that uses the same starting point as I am about to, though there is a very good chance that there is one out there somewhere.

# Historically, the only place to go to put a bet on sports was a bookmaker; in fact, until the late 1990s that would have meant visiting a shop for most people. At a bookmaker, you could place a back bet on a particular outcome and that was it. Now, when you visit a bookmaker you have a huge range of different options available to you, especially in sports like football, where you can bet on everything from the outcome of the match to the number of corners a particular side will have. That is not the only thing that changed in sports betting, because around 2000 betting exchanges were introduced. Exchanges are effectively peer-to-peer betting services where the exchange provides the infrastructure to allow people to exchange bets. Your exchange bet could be against another individual, a syndicate, an organisation, or even a bookmaker; you just don't know, as you'll never see the other side of the bet. The other thing that changed with the advent of betting exchanges was that you can now bet on something not happening, i.e. you can bet against a particular team winning rather than just backing them to win.
# My journey into the analysis of sports betting will begin with looking at the accuracy of the odds at the beginning of a particular event. The odds offered by individual bookmakers tend to match closely those offered by other bookmakers and those offered at the exchanges. This needs to be the case, otherwise there would be the potential for bettors to exploit differences in prices between different bookmakers and exchanges. That technique is known as arbitrage betting. The price that is available just before the commencement of a particular event is probably as close to the real probability of a particular outcome for that event as we can get, for two very good reasons. First, where the price is being set by pressure from an exchange, we have the "wisdom of the crowd" coming into play: there could be thousands of individuals looking for value, which will drive the price to a balance point of maximum value. Second, where the price is being driven by pressure from bookmakers, their experience will come into play. This knowledge over many, many years — plus, no doubt, the data scientists that they employ to calculate real probability — means that the price at that particular point in time will be as close to real as possible.

# Therefore, my opening question is . . .

# # How accurate are the starting odds set for a market in a particular event?

# I will be using the starting prices from a bookmaker for this particular exercise, but that is only due to the availability of the data and is not a reflection of the perceived accuracy of that data. Should data from betting exchanges for the same events be collected, there would be very little difference between that data and the data we have used. The first analysis will be to look at match odds data, i.e. Home win, Away win and Draw, in Premier League football matches between 2010 and 2021.
# The analysis is undertaken with the data used in the previous set downloaded from # # **[FootballData.co.uk](https://www.football-data.co.uk/data.php)** # # First with the English Premiership between 2010 and 2021, then we can look at other leagues. # + # load dependencies and data import pandas as pd import numpy as np import glob import matplotlib.pyplot as plt import seaborn as sns from pandas.plotting import scatter_matrix #load all csv files and append to a single dataframe path = '../../../GitHub/England-EPL' all_files = glob.glob(path + "/*.csv") li = [] for filename in all_files: df = pd.read_csv(filename, index_col=None, header=0) li.append(df) footdata = pd.concat(li, axis=0, ignore_index=True) footdata.rename(columns={'B365H': 'Home odds', 'B365A': 'Away odds', 'B365D': 'Draw odds'}, inplace=True, errors='raise') #check that the data is loaded and see how it looks footdata # + # to make the dataframe more manageable and readable let's select only the columns we want to analyse footdata_odds = footdata[['FTR','Home odds','Away odds','Draw odds']] footdata_odds # + # there are some NaN values in the dataframe. Here are the counts per column footdata_odds.isna().sum() # - # We will initially look at this data via a Boxplot. Boxplot is similar to a candle chart for anyone who has done any technical charting. What a Boxplot does is give us a Box showing where the majority of the data is while showing outliers as lines and dots. In this way, we can see where the majority of the data sits. # The main Box in a Boxplot shows all data from the Q1 or 25th percentile to the Q3 or 75th percentile. Using a Boxplot we can see how closely grouped the data or in this case the starting odds are. The lines and dots above and below these levels will show how spread out the rest of the data is. With this technique, we will be able to get a good picture of where the starting odds are in relation to each outcome and thus an idea of the accuracy of the odds. 
# We will look at 3 Boxplots - one for each of the 3 possible outcomes. To do this we will split the odds dataset we just created into 3. # + #create a table with a subset of the data for Home win, Away win and Draw footdata_odds_h = footdata_odds.loc[footdata_odds['FTR'] == "H"] footdata_odds_a = footdata_odds.loc[footdata_odds['FTR'] == "A"] footdata_odds_d = footdata_odds.loc[footdata_odds['FTR'] == "D"] # + #plot the boxplot for games that ended a home win. Nice and big, so we can get a good look at the data. footdata_odds_h.plot(kind='box', figsize=(12,15),title=('Starting odds for home wins'),grid='TRUE') # - # Now we know what a Boxplot looks like then let's have a more detailed explanation of what the elements are. Looking first at the box, the horizontal line in the middle represents the 50% point of the data numbers, the median. From the median line to the bottom of the box is 50% to 25% of data points, while the top part of the box is 50% to 75% of data points. The box, i.e. 25% to 75% of data points is known as the interquartile range. # The lines emanating from the top and bottom of the boxes are known as whiskers and represent the top (75%+) and bottom (25%-) of the data points. The bar across the whiskers is set at (1.5 * Interquartile Range) and the dots above and below these are the Outliers. The Outliers will generally cover approx 0.7% of data values, i.e. 0.35% highest and lowest values in the range. # With the above in mind, how to interpret the Boxplot. The obvious feature is that for Home wins the vast majority of Home odds fall below those of the Away win and the Draw. This is the first area of further identification that we have identified. Before we do though, let's have a look at the same plots for Away wins and Draws as the outcome. # + #let's plot the boxplot for games that ended as an away win. 
footdata_odds_a.plot(kind='box',figsize=(12,15),title=('Starting odds for away wins'),grid='TRUE') # + #let'In the Boxplots for Away wins and Draws the overlap of odds is much more pronounced than for the Home wins we looked at earlier. It would be fair to say at this stage that for Away wins and Draws odds alone would not be a sufficient distinguishing factor to develop any conclusions from. Another variable would be required to provide clearer results. That leaves us with the further analysis of the Home win odds in relation to Home win outcomes.s plot the boxplot for games that ended a home win. Nice and big, so we can see the outcome. footdata_odds_d.plot(kind='box',figsize=(12,15),title=('Starting odds for draws'),grid='TRUE') # - # In the Boxplots for Away wins and Draws the overlap of odds is much more pronounced than for the Home wins we looked at earlier. It would be fair to say at this stage that for Away wins and Draws odds alone would not be a sufficient distinguishing factor to develop any conclusions from. Another variable would be required to provide clearer results. That leaves us with the further analysis of the Home win odds in relation to Home win outcomes. # That will be our starting point for part 2 of this series on using starting odds for analysis of outcome.
_notebooks/2021-11-24-All at odds - Part 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # FloPy # # ## Plotting SWR Process Results # # This notebook demonstrates the use of the `SwrObs` and `SwrStage`, `SwrBudget`, `SwrFlow`, and `SwrExchange`, `SwrStructure`, classes to read binary SWR Process observation, stage, budget, reach to reach flows, reach-aquifer exchange, and structure files. It demonstrates these capabilities by loading these binary file types and showing examples of plotting SWR Process data. An example showing how the simulated water surface profile at a selected time along a selection of reaches can be plotted is also presented. # + from IPython.display import Image import os import sys import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # run installed version of flopy or add local path try: import flopy except: fpth = os.path.abspath(os.path.join('..', '..')) sys.path.append(fpth) import flopy print(sys.version) print('numpy version: {}'.format(np.__version__)) print('matplotlib version: {}'.format(mpl.__version__)) print('flopy version: {}'.format(flopy.__version__)) # + #Set the paths datapth = os.path.join('..', 'data', 'swr_test') # SWR Process binary files files = ('SWR004.obs', 'SWR004.vel', 'SWR004.str', 'SWR004.stg', 'SWR004.flow') # - # ### Load SWR Process observations # # Create an instance of the `SwrObs` class and load the observation data. 
# + sobj = flopy.utils.SwrObs(os.path.join(datapth, files[0])) ts = sobj.get_data() # - # #### Plot the data from the binary SWR Process observation file # + fig = plt.figure(figsize=(6, 12)) ax1 = fig.add_subplot(3, 1, 1) ax1.semilogx(ts['totim']/3600., -ts['OBS1'], label='OBS1') ax1.semilogx(ts['totim']/3600., -ts['OBS2'], label='OBS2') ax1.semilogx(ts['totim']/3600., -ts['OBS9'], label='OBS3') ax1.set_ylabel('Flow, in cubic meters per second') ax1.legend() ax = fig.add_subplot(3, 1, 2, sharex=ax1) ax.semilogx(ts['totim']/3600., -ts['OBS4'], label='OBS4') ax.semilogx(ts['totim']/3600., -ts['OBS5'], label='OBS5') ax.set_ylabel('Flow, in cubic meters per second') ax.legend() ax = fig.add_subplot(3, 1, 3, sharex=ax1) ax.semilogx(ts['totim']/3600., ts['OBS6'], label='OBS6') ax.semilogx(ts['totim']/3600., ts['OBS7'], label='OBS7') ax.set_xlim(1, 100) ax.set_ylabel('Stage, in meters') ax.set_xlabel('Time, in hours') ax.legend(); # - # ### Load the same data from the individual binary SWR Process files # # Load discharge data from the flow file. The flow file contains the simulated flow between connected reaches for each connection in the model. sobj = flopy.utils.SwrFlow(os.path.join(datapth, files[1])) times = np.array(sobj.get_times())/3600. obs1 = sobj.get_ts(irec=1, iconn=0) obs2 = sobj.get_ts(irec=14, iconn=13) obs4 = sobj.get_ts(irec=4, iconn=3) obs5 = sobj.get_ts(irec=5, iconn=4) # Load discharge data from the structure file. The structure file contains the simulated structure flow for each reach with a structure. sobj = flopy.utils.SwrStructure(os.path.join(datapth, files[2])) obs3 = sobj.get_ts(irec=17, istr=0) # Load stage data from the stage file. The flow file contains the simulated stage for each reach in the model. sobj = flopy.utils.SwrStage(os.path.join(datapth, files[3])) obs6 = sobj.get_ts(irec=13) # Load budget data from the budget file. The budget file contains the simulated budget for each reach group in the model. 
The budget file also contains the stage data for each reach group. In this case the number of reach groups equals the number of reaches in the model. sobj = flopy.utils.SwrBudget(os.path.join(datapth, files[4])) obs7 = sobj.get_ts(irec=17) # #### Plot the data loaded from the individual binary SWR Process files. # # Note that the plots are identical to the plots generated from the binary SWR observation data. # + fig = plt.figure(figsize=(6, 12)) ax1 = fig.add_subplot(3, 1, 1) ax1.semilogx(times, obs1['flow'], label='OBS1') ax1.semilogx(times, obs2['flow'], label='OBS2') ax1.semilogx(times, -obs3['strflow'], label='OBS3') ax1.set_ylabel('Flow, in cubic meters per second') ax1.legend() ax = fig.add_subplot(3, 1, 2, sharex=ax1) ax.semilogx(times, obs4['flow'], label='OBS4') ax.semilogx(times, obs5['flow'], label='OBS5') ax.set_ylabel('Flow, in cubic meters per second') ax.legend() ax = fig.add_subplot(3, 1, 3, sharex=ax1) ax.semilogx(times, obs6['stage'], label='OBS6') ax.semilogx(times, obs7['stage'], label='OBS7') ax.set_xlim(1, 100) ax.set_ylabel('Stage, in meters') ax.set_xlabel('Time, in hours') ax.legend(); # - # ### Plot simulated water surface profiles # # Simulated water surface profiles can be created using the `ModelCrossSection` class. # # Several things that we need in addition to the stage data include reach lengths and bottom elevations. We load these data from an existing file. sd = np.genfromtxt(os.path.join(datapth, 'SWR004.dis.ref'), names=True) # The contents of the file are shown in the cell below. fc = open(os.path.join(datapth, 'SWR004.dis.ref')).readlines() fc # Create an instance of the `SwrStage` class for SWR Process stage data. sobj = flopy.utils.SwrStage(os.path.join(datapth, files[3])) # Create a selection condition (`iprof`) that can be used to extract data for the reaches of interest (reaches 0, 1, and 8 through 17). 
Use this selection condition to extract reach lengths (from `sd['RLEN']`) and the bottom elevation (from `sd['BELEV']`) for the reaches of interest. The selection condition will also be used to extract the stage data for reaches of interest. iprof = sd['IRCH'] > 0 iprof[2:8] = False dx = np.extract(iprof, sd['RLEN']) belev = np.extract(iprof, sd['BELEV']) # Create a fake model instance so that the `ModelCrossSection` class can be used. ml = flopy.modflow.Modflow() dis = flopy.modflow.ModflowDis(ml, nrow=1, ncol=dx.shape[0], delr=dx, top=4.5, botm=belev.reshape(1,1,12)) # Create an array with the x position at the downstream end of each reach, which will be used to color the plots below each reach. x = np.cumsum(dx) # Plot simulated water surface profiles for 8 times. fig = plt.figure(figsize=(12, 12)) for idx, v in enumerate([19, 29, 34, 39, 44, 49, 54, 59]): ax = fig.add_subplot(4, 2, idx+1) s = sobj.get_data(idx=v) stage = np.extract(iprof, s['stage']) xs = flopy.plot.PlotCrossSection(model=ml, line={'Row': 0}) xs.plot_fill_between(stage.reshape(1,1,12), colors=['none', 'blue'], ax=ax, edgecolors='none') linecollection = xs.plot_grid(ax=ax, zorder=10) ax.fill_between(np.append(0., x), y1=np.append(belev[0], belev), y2=-0.5, facecolor='0.5', edgecolor='none', step='pre') ax.set_title('{} hours'.format(times[v])) ax.set_ylim(-0.5, 4.5) # ## Summary # # This notebook demonstrates flopy functionality for reading binary output generated by the SWR Process. Binary files that can be read include observations, stages, budgets, flow, reach-aquifer exchanges, and structure data. The binary stage data can also be used to create water-surface profiles. # # Hope this gets you started!
examples/Notebooks/flopy3_LoadSWRBinaryData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:dsi] # language: python # name: conda-env-dsi-py # --- # Importing the required libraries: # + import numpy as np import pandas as pd from math import sqrt import pickle import os import datetime import seaborn as sns import matplotlib.pyplot as plt ## for machine learning from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error from tensorflow import keras from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense from keras.layers import Dropout from keras.layers import Masking # - # ## Preparing the sine wave function # + x_axis = np.arange(-50*np.pi, 50*np.pi, 0.1) y_axis = np.sin(x_axis) df = pd.DataFrame({"x_axis": x_axis, "y_axis":y_axis}) df.head() # - sns.lineplot(data=df, x="x_axis", y="y_axis"); # + df_eq_model = df.copy() train_size = int(len(df_eq_model)* 0.9) test_size = len(df_eq_model) - train_size train, test = df_eq_model.iloc[0:train_size], df_eq_model.iloc[train_size:len(df_eq_model)] print(train.shape, test.shape) # - train.head(2) test.head(2) # + from sklearn.preprocessing import RobustScaler f_columns = ["x_axis"] f_transformer = RobustScaler() y_transformer = RobustScaler() f_transformer = f_transformer.fit(train[f_columns].to_numpy()) y_transformer = y_transformer.fit(train[["y_axis"]]) # - f_transformer.get_params() # + train.loc[:,f_columns] = f_transformer.transform(train[f_columns].to_numpy()) train["y_axis"] = y_transformer.transform(train[["y_axis"]]) test.loc[:,f_columns] = f_transformer.transform(test[f_columns].to_numpy()) test["y_axis"] = y_transformer.transform(test[["y_axis"]]) # - # ### Preparing the data for LSTM def create_dataset(X, y, time_steps = 1): Xs, ys = [], [] for i in range(len(X)- time_steps): v = X.iloc[i: 
(i + time_steps)].to_numpy() Xs.append(v) ys.append(y.iloc[i+time_steps]) return np.array(Xs), np.array(ys) # + TIME_STEPS = 60 X_train, y_train = create_dataset(train, train["y_axis"], time_steps = TIME_STEPS) X_test, y_test = create_dataset(test, test["y_axis"], time_steps= TIME_STEPS) # + print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) # - X_train[0][0] # ## Model Architecture # + model = keras.Sequential() # Adding bi-directional layer model.add( keras.layers.Bidirectional( keras.layers.LSTM( units=64, input_shape = (X_train.shape[1], X_train.shape[2]) ) ) ) # Adding dropout layer to regularize complexities model.add(keras.layers.Dropout(rate = 0.2)) # Add output layer model.add(keras.layers.Dense(units = 1)) # - # Compiling the model model.compile(loss = "mean_squared_error", optimizer = "adam") history = model.fit( X_train, y_train, epochs = 20, batch_size = 32, validation_split = 0.33, shuffle = False # As it is time-series ) plt.plot(history.history["loss"], label = "train") plt.plot(history.history["val_loss"], label = "validation") plt.legend() y_pred = model.predict(X_test) y_train_inv = y_transformer.inverse_transform(y_train.reshape(1,-1)) y_test_inv = y_transformer.inverse_transform(y_test.reshape(1,-1)) y_pred_inv = y_transformer.inverse_transform(y_pred) # + plt.figure(figsize = (14,10)) plt.plot(y_test_inv.flatten(), marker = '.', label = "True") plt.plot(y_pred_inv.flatten(), marker = '.', label = "Predicted") plt.legend() # - print("Hello World!")
codes/P01_sin_simple.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="P9bJCDjdlgG6" colab_type="text" # # **Spit some [tensor] flow** # # Practice makes perfect # # `Let's get this over with` # # + id="aQwc0re5mFld" colab_type="code" outputId="66d225fb-4584-4b8a-db85-4a37c6416174" colab={"base_uri": "https://localhost:8080/", "height": 34} import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf print(tf.__version__) # + id="EwuI22MhLLkX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 210} outputId="44e183ab-227e-490e-f471-1fd35756f5f3" # !wget --passive-ftp --prefer-family=ipv4 https://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip # + id="o5pydz9SLlH3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a3016107-ef2c-4be2-fa81-c22c846bdfd8" # !ls # + id="RUx7hLU8Lnft" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 140} outputId="f7c6eeb8-a0f3-4d8c-d889-aa56c8ad9a34" # !unzip Bike-Sharing-Dataset.zip # + id="q1FvalUpL0W-" colab_type="code" colab={} # !rm Bike-Sharing-Dataset.zip # + id="dyhE95izL7B9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a957ff11-b671-4689-96e5-e9653a77229f" # !ls # + id="bqM_AfKtMDF2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 308} outputId="08a5da1a-f9a6-4df2-9477-715304af4a1a" data = pd.read_csv('hour.csv') data.head() # + id="qN0pF0BWMbhF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 308} outputId="2c364b4f-edec-419d-c6eb-d9ece4fcebb9" categoricals = ['weathersit', 'season', 'mnth', 'hr', 'weekday'] for col in categoricals: dummies = pd.get_dummies(data[col], prefix=col, drop_first=False) data = pd.concat([data, dummies], axis = 
1) data = data.drop(categoricals, axis=1) data.head() # + id="jA5fzpFMNYYt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 223} outputId="fd459233-9e6b-4441-8f9c-ef533e4ae35c" drop_cols = "instant,dteday,workingday,atemp".split(",") data = data.drop(drop_cols, axis=1) data.head() # + id="d9eaZkHIQGnV" colab_type="code" colab={} from sklearn.preprocessing import StandardScaler numericals = ['temp', 'hum', 'windspeed', 'registered', 'cnt','casual'] scaler = StandardScaler() data[numericals] = scaler.fit_transform(data[numericals]) # + id="ca-MZAHDO0P2" colab_type="code" colab={} targets = ['cnt', 'casual', 'registered'] y = data[targets] # + id="4H6GFnM2PL0d" colab_type="code" colab={} X = data.drop(targets, axis=1) # + id="9jk6WqizOzKd" colab_type="code" colab={} # TRAIN TEST SPLIT from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2) # + id="7hQGKjLvBT6z" colab_type="code" outputId="14471373-2827-4033-aee8-5e8a111894f3" colab={"base_uri": "https://localhost:8080/", "height": 87} print(X_train.shape) print(y_train.shape) print(X_test.shape) print(y_test.shape) # + id="FoFPHGhBqyVm" colab_type="code" colab={} N, D = X_train.shape # + id="xzXQWizgRsx8" colab_type="code" colab={} Y = y_train.shape[1] # + id="yl4qttcnRIgU" colab_type="code" colab={} from tensorflow.keras.layers import Input, Dense, Dropout from tensorflow.keras.models import Model from tensorflow.keras.optimizers import SGD, Adam # + id="R4OLY0_wRMwI" colab_type="code" colab={} i_layer = Input(shape=(D,)) h_layer = Dense(10, activation='relu')(i_layer) h_layer = Dense(10, activation='relu')(h_layer) o_layer = Dense(Y, activation='relu')(h_layer) model = Model(i_layer, o_layer) # + id="bYip2sQaNiNI" colab_type="code" colab={} model.compile( optimizer='adam', loss='mse') # + id="tEsZnuikNwv3" colab_type="code" outputId="15cd7219-960f-4be3-c2d7-555f0f02507c" colab={"base_uri": "https://localhost:8080/", 
"height": 1000} report = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs = 100) # + id="MSj0kifiSDfW" colab_type="code" outputId="e9cc7c16-17e5-40d6-9bd3-182d10f53cf4" colab={"base_uri": "https://localhost:8080/", "height": 282} plt.plot(report.history['loss'], label="loss") plt.plot(report.history['val_loss'], label="validation_loss") plt.legend()
Tensorflow_2X_Notebooks/Demo68_ANNforBikeSharing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CVR Registret # # This code cleans the Danish register of company information. Specifically, the open lists of companies available here: https://www.sktst.dk/aktuelt/skatteoplysninger-for-selskaber/ import pandas as pd import numpy as np import os def percentile(n): ''' Example: df.groupby(year)[var].agg([percentile(10), percentile(90)]).plot() ''' def percentile_(x): return np.percentile(x, n) percentile_.__name__ = f'P{n:2d}' return percentile_ # # Read in data assert os.path.isdir('Raw'), f'There must be a subfolder "./Raw" in the current folder (where the raw data should be)' files = os.listdir('./Raw') files = [f for f in files if f.startswith('skatteliste-') and f.endswith('.csv')] assert len(files)>0, f'No files of the type "skatteliste-*.csv" found in ./Raw' years = np.sort([int(f[12:16]) for f in files]) years dd = [] for yr in years: print(yr) if yr == 2015: encoding = 'latin' # in 2015, the encoding is different, for some reason else: encoding = 'utf' d_ = pd.read_csv(f'./Raw/skatteliste-{yr}.csv', encoding=encoding, na_values='\xa0') d_['year'] = yr dd.append(d_) # Concatenate into a single dataframe dat = pd.concat(dd) del dd # + dat['dummy_co2'] = dat['Kode = K = Kulbrinte'] == 'K' del dat['Kode = K = Kulbrinte'] dat['taxed_by_co2'] = dat['Kulbrinte/tonnage'] == 'Kulbrintebeskattet' dat['taxed_by_tonnage'] = dat['Kulbrinte/tonnage'] == 'Tonnagebeskattet' del dat['Kulbrinte/tonnage'] # - del dat['Unnamed: 20'] cols_category = ['Skattepligtsbestemmelse', 'Selskabstype'] for c in cols_category: dat[c] = dat[c].astype('category') # Much of this relates to the `kulbrinteskat` (carbo-hydrates tax). 
dict_ren = {'Administrationsselskabets CVR-nr.':'parent_cvr', 'Indkomstår':'income_year', 'CVR-nr.':'cvr', 'Navn':'name', 'SE-nr.':'se_nr', 'Selskabstype':'company_type', 'Skattepligtic indkomst':'income', 'Underskud':'deficit', 'Skattepligtig indkomst':'taxable_income', 'Skattepligtsbestemmelse':'taxation_category', 'Selskabsskat':'tax', 'Skattepligtig Kulbrinteindkosmt':'taxable_ch_income', 'kulbrinte_skat':'ch_tax', 'Underskud fratrukket i selskabsindkomst jf. KB':'deficit_subtracted_from_ch_tax', 'Selskabsindkomst jf. KB':'tax_cf_ch', 'SE nr. Kulbrinte':'se_nr_ch', 'Selskabsskat af KB':'tax_of_ch', 'Kulbrinteskat':'ch_tax', 'Skattepligtig Kulbrinteindkomst':'taxable_ch_income'} dat.rename(columns=dict_ren, inplace=True) dat['date_ajour'] = pd.to_datetime(dat['Dato ajour']) del dat['Dato ajour'] I = dat.parent_cvr == 0.0 # in 2014, all missings are coded as 0.0 for some reason dat.loc[I, 'parent_cvr'] = np.nan ch_vars = ['taxable_ch_income', 'ch_tax', 'Underskud fratrukket i kulbrinteindkomst', 'tax_cf_ch', 'deficit_subtracted_from_ch_tax', 'tax_of_ch', 'se_nr_ch', 'dummy_co2', 'taxed_by_co2', 'taxed_by_tonnage'] # + dat['net_inc']= dat.taxable_income - dat.deficit I = dat.taxable_income.notnull() & dat.deficit.isnull() dat.loc[I, 'net_inc'] = dat.loc[I, 'taxable_income'] I = dat.taxable_income.isnull() & dat.deficit.notnull() if I.any(): dat.loc[I, 'net_inc'] = -dat.loc[I, 'deficit'] else: print('(none found here)') # - # ### Categorize firms based on the name # # We do not observe much about firms, but their names contain (imperfect/imprecise) info about them. 
dat.name = dat.name.str.lower() dat['dum_as'] = dat.name.str.contains('a/s') dat['dum_aps'] = dat.name.str.contains('aps') dat['dum_ivs'] = dat.name.str.contains('ivs') dat['dum_ab'] = dat.name.str.contains('a/b') dat['dum_realestate'] = dat.name.str.contains('ejendom') dat['dum_holding'] = dat.name.str.contains('holding') dat['dum_invest'] = dat.name.str.contains('invest') dat['dum_consult'] = dat.name.str.contains('consult') dat['dum_service'] = dat.name.str.contains('service') dat['dum_dot_dk'] = dat.name.str.contains('.dk') dat['dum_doctor'] = dat.name.str.contains('læge') dat['dum_carpenter'] = dat.name.str.contains('tømrer') dat['dum_transport'] = dat.name.str.contains('transport') | dat.name.str.contains('lastvogn') dat['dum_plumbing'] = dat.name.str.contains('vvs') | dat.name.str.contains('kloak') dat['dum_import'] = dat.name.str.contains('import') dat['dum_masonry'] = dat.name.str.contains('murer') dat['dum_nielsen'] = dat.name.str.contains('nielsen') dat['dum_sorensen'] = dat.name.str.contains('sorensen') cc = ['autocenter', 'automob', 'automester', 'autoværk', 'autoclean', 'autolak', 'autoophug', 'autoteknik', 'autodele', 'autogården', 'biler'] c = cc[0] I = dat.name.str.contains(c) for c in cc[1:]: I = I | dat.name.str.contains(c) dat['dum_cars'] = I == True cols_dum = [c for c in dat.columns if c[:4] == 'dum_'] # ### `dat['cat']`: firm category # A neat categorical variable # # **Warning:** Firms can have multiple dummies switched on: then only the last one is kept. 
dat['cat'] = np.nan for c in cols_dum[4:]: name = c[4:] dat.loc[dat[c], 'cat'] = name dat['cat'] = dat['cat'].astype('category') ax = dat.groupby('cat').net_inc.mean().plot(kind='bar'); ax.set_ylabel('Firm revenue minus losses'); ax.set_xlabel('Firm name contains'); ax = dat.groupby('cat').tax.mean().plot(kind='bar'); ax.set_ylabel('Tax Payment'); ax.set_xlabel('Firm name contains'); # # Export for Analysis Dataset # income makes sense if we delete the very large firms (over 10 mio net earnings before taxes ) I = np.abs(dat.net_inc) < 10_000_000 dat[I].net_inc.hist(bins=100); # ## Sample Selection I = (dat.parent_cvr.isnull()) dat[I].groupby('year').net_inc.sum().plot(marker='o') I = ((dat.company_type == 'Enkeltstående selskab') # no conglomorates & (dat.parent_cvr.isnull()) # not a daughter & (np.abs(dat.net_inc) < 10_000_000)) # net inc before tax below 10 mio DKK print(f'Selecting {I.mean():5.2%} of obs. ({I.sum():,d} obs.)') d = dat[I].sort_values(['cvr', 'year']) # ### Making the sample balanced tmp = d.groupby(['cvr', 'year']).net_inc.count().groupby(level='cvr').max() cvr_drop = tmp[tmp == 2].index.values print(f'Dropping {len(cvr_drop)} CVR numbers') d = d.loc[d.cvr.isin(cvr_drop) == False, :].copy() # + d['num_years'] = d.groupby('cvr').net_inc.transform('count') # select a balanced sample I = d.num_years == d.year.unique().size print(f'Selecting {I.mean():5.2%} of obs. ({I.sum():,d} obs.)') d = d[I].copy() # - # ### Add anonymized firm ID variable # # The `CVR` number is too directly related to the firm. Instead, we create `firmid` as `0, 1, ..., N`. 
# Map each CVR number to a dense anonymized id 0..N-1.
c_ = d.cvr.drop_duplicates().reset_index(drop=True)
c_.index.name = 'firmid'  # the positional index becomes the anonymized firm id
d = pd.merge(d, c_.reset_index(), on='cvr', how='left')
del d['cvr']

# make sure the dataset is ready to do first differencing
d.sort_values(['firmid', 'year'], inplace=True)

# ### Save dataset

cols = ['firmid', 'year', 'taxable_income', 'deficit', 'tax', 'net_inc', 'cat'] + cols_dum
d_ = d[cols].copy()
for c in cols_dum:
    d_[c] = d_[c].astype(int)  # booleans -> 0/1 for the exported CSV
d_.to_csv('cvr_extract.csv', index=False)

stop  # stop execution here (deliberate NameError). The code below is not working

# # Daughter companies
#
# **Unfinished code Below!**
# * **Goal:** analyzing what happens when a company gets a daughter/subsidiary.
# * To do this, we link via the `parent_cvr` variable.
# * I have not finished the code but it should be possible to proceed based on the code below

I = dat.parent_cvr.notnull()
daughters = dat.loc[I, ['parent_cvr', 'year', 'taxable_income', 'deficit']]
daughters = daughters.query('parent_cvr != 0.0')  # presumably 0.0 is a placeholder, not a real parent — verify
d_ = daughters.groupby('parent_cvr').sum()
d_['num_daughters'] = daughters.groupby('parent_cvr').taxable_income.size()

# NOTE(review): d_ sums *all* columns (including 'year') across years, yet is
# merged on ['parent_cvr', 'year'] below — looks inconsistent; confirm intent.
I = dat.parent_cvr.isnull()
d = pd.merge(dat[I], d_,
             left_on=['cvr', 'year'], right_on=['parent_cvr', 'year'],
             how='left', suffixes=('', '_daughter'))
del d['parent_cvr']  # these rows are only for parent companies

d.taxable_income.describe()

I = np.abs(d.taxable_income) < 1000000
d[I].taxable_income.hist(bins=100)

d = d.set_index(['cvr', 'year']).sort_index()

for c in ch_vars:
    del d[c]

I = np.abs(d.net_inc) < 1000000
d.loc[I, 'net_inc'].hist(bins=100);

d[I].groupby('year').net_inc.agg(['mean', percentile(10), percentile(50), percentile(90)]).plot(marker='o');

d.num_daughters.fillna(0, inplace=True)  # parents without any daughter get 0
d.num_daughters = d.num_daughters.astype(int)

# Changes in the number of daughter companies within a firm
d['diff_num_daughters'] = d.groupby(level=['cvr']).num_daughters.diff()
d.diff_num_daughters.value_counts()

min_diff = d.groupby(level='cvr').diff_num_daughters.min()
max_diff = d.groupby(level='cvr').diff_num_daughters.max()

# +
# find CVR numbers where there is growth at least at some point
cvrs = max_diff[max_diff == 1.0].index.values
idx = d.loc[d.diff_num_daughters == 1.0, []]  # empty column list: we only need the index
idx.reset_index().year.value_counts()

# create event variable, "t"
d.loc[idx.index, 't'] = 1  # event occurs at period t = 0
# -

def create_event_time(x: np.ndarray) -> np.ndarray:
    '''Return event time relative to the first event in x.

    ARGS:
        x: array that is == 1 when an entry occurs
    RETURNS:
        array of NaNs if no event ever occurs, otherwise an
        array like [-1, 0, 1, 2] (if t == 1 in the second period)
    '''
    N = len(x)
    for i in range(N):
        if x[i] == 1:
            # BUGFIX: the original fell through to 'if i < len(x)', which is
            # always true after the loop, so firms without any event received
            # a bogus event time instead of NaNs; returning here makes the
            # no-event branch reachable.
            return np.arange(N) - i
    # we went through the whole array without finding a single event
    # BUGFIX: the original called np.nan((N,)) — np.nan is a float, not
    # callable, which raised TypeError. np.full builds the NaN array.
    return np.full(N, np.nan)

d['t_'] = d.groupby(level='cvr').t.transform(lambda x: create_event_time(x.values))

# let's look at some of the rows where an entry occurred
d.loc[cvrs[:2]][['num_daughters', 't','t_', 'net_inc']]

del d['t']
d.rename(columns={'t_':'t'}, inplace=True)

I = (np.abs(d.net_inc) < 1000000) & (d.t > -5) & (d.net_inc != 0.0)
ax = d[I].groupby('t').net_inc.agg([percentile(10), percentile(50), percentile(90)]).plot(marker='o')
ax.axvline(-0.5, color='gray', linestyle=':');  # mark the boundary just before the event period

dat[(dat.tax < 0)]
CVR/materialize.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Machine Learning Engineer Nanodegree
# ## Unsupervised Learning
# ## Project: Creating Customer Segments

# ## Getting Started
#
# In this project, a dataset containing data on various customers' annual spending amounts (reported in monetary units) of diverse product categories for internal structure are analyzed. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer.
#
# The dataset for this project can be found on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the features `'Channel'` and `'Region'` will be excluded in the analysis — with focus instead on the six product categories recorded for customers.

# +
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames

# Import supplementary visualizations code visuals.py
import visuals as vs

# Pretty display for notebooks
# %matplotlib inline

# Load the wholesale customers dataset
try:
    data = pd.read_csv("customers.csv")
    data.drop(['Region', 'Channel'], axis = 1, inplace = True)
    print("Wholesale customers dataset has {} samples with {} features each.".format(*data.shape))
except Exception:
    # BUGFIX: a bare 'except:' also swallows SystemExit/KeyboardInterrupt;
    # 'except Exception' keeps the best-effort message without hiding those.
    print("Dataset could not be loaded. Is the dataset missing?")
# -

# ## Data Exploration
# In this section, the data are explored through visualizations and code to understand how each feature is related to the others.
# A statistical description of the dataset, the relevance of each feature are observed and a few sample data points from the dataset are tracked through the course of this project.
#
# The dataset is composed of six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**.

# Display a description of the dataset (count, mean, std, quartiles per product category)
display(data.describe())

# ### Implementation: Selecting Samples
# To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, three random indices sampled from the data will represent the customers to track.

# +
# Three row indices chosen from the dataset
# NOTE(review): these are hand-picked constants, not random draws as the text above says.
indices = [0, 380, 418]

# DataFrame of the chosen samples; reset_index renumbers them 0..2
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print("Chosen samples of wholesale customers dataset:")
display(samples)
# -

# ### Question 1
# Consider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers.
#
# * What kind of establishment (customer) could each of the three samples you've chosen represent?
#
# **Hint:** Examples of establishments include places like markets, cafes, delis, wholesale retailers, among many others. Avoid using names for establishments, such as saying *"McDonalds"* when describing a sample customer as a restaurant. You can use the mean values for reference to compare your samples with. The mean values are as follows:
#
# * Fresh: 12000.2977
# * Milk: 5796.2
# * Grocery: 3071.9
# * Detergents_paper: 2881.4
# * Delicatessen: 1524.8
#
# Knowing this, how do your samples compare? Does that help in driving your insight into what kind of establishments they might be?
# **Answer:** On comparing the total purchase cost of each product category for the sample customers with the respective mean values, it can be assumed that
# - The first customer may be from a market, as the total purchase cost of all products excluding frozen products is higher than or close to the respective mean values.
# - The second customer may be from a restaurant, as the total purchase cost of fresh and frozen products is higher than the respective mean values.
# - The third customer may be from a cafe, as the total purchase cost of milk and grocery is higher than the respective mean values. Also, the purchase cost of detergents paper is higher than the mean value, as a cafe may require more detergents paper.

# ### Implementation: Feature Relevance
# One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? This can be determined quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then scoring how well that model can predict the removed feature.
# +
# Make a copy of the DataFrame, dropping the 'Detergents_Paper' feature
new_data = data.drop(['Detergents_Paper'], axis=1)

# Split the data into training and testing sets (0.25) using the given feature as the target.
# BUGFIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(new_data,
                                                    pd.DataFrame(data.Detergents_Paper),
                                                    test_size=0.25, random_state=0)

# Fitting a decision tree regressor to the training set
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=0).fit(X_train, y_train)

# Predicting score (R^2) using the testing set
score = regressor.score(X_test, y_test)
score
# -

# ### Question 2
#
# * Which feature did you attempt to predict?
# * What was the reported prediction score?
# * Is this feature necessary for identifying customers' spending habits?
#
# **Hint:** The coefficient of determination, `R^2`, is scored between 0 and 1, with 1 being a perfect fit. A negative `R^2` implies the model fails to fit the data. If you get a low score for a particular feature, that lends us to believe that that feature point is hard to predict using the other features, thereby making it an important feature to consider when considering relevance.

# **Answer:** I tried predicting every feature, in that `Detergents_Paper`, which resulted in the prediction score of 0.72 is the highest. Hence, this feature may not be necessary for identifying customer's spending habits.

# ### Visualize Feature Distributions
# To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If the feature, we attempted to predict above, is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others.
# Conversely, if that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data.

# Scatter matrix for each pair of features in the data (KDE on the diagonal)
pd.plotting.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');

# +
# Feature correlations shown as an annotated heatmap
import seaborn
seaborn.heatmap(data.corr(), annot = True)

# +
# Visualizing the dataset distribution (one distribution plot per feature)
# NOTE(review): seaborn.distplot is deprecated in seaborn >= 0.11 (use histplot/displot).
import matplotlib.pyplot as plt
for x in data.keys():
    plt.figure()
    seaborn.distplot(data[x], axlabel = x)

# +
# Normality test (Shapiro-Wilk test)
# Reference: https://machinelearningmastery.com/a-gentle-introduction-to-normality-tests-in-python/
from scipy.stats import shapiro

alpha = 0.05  # significance level: reject normality when p <= alpha
for x in data.keys():
    stat, p = shapiro(data[x])
    print("\033[1m"+ x + ':\033[0m')  # ANSI escapes render the feature name in bold
    print('Statistics=%.3f, p=%.6f' % (stat, p))
    if p > alpha:
        print('Sample looks Gaussian\n')
    else:
        print('Sample does not look Gaussian\n')
# -

# ### Question 3
# * Using the scatter matrix as a reference, discuss the distribution of the dataset, specifically talk about the normality, outliers, large number of data points near 0 among others. If you need to separate out some of the plots individually to further accentuate your point, you may do so as well.
# * Are there any pairs of features which exhibit some degree of correlation?
# * Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict?
# * How is the data for those features distributed?
#
# **Hint:** Is the data normally distributed? Where do most of the data points lie? You can use [corr()](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.corr.html) to get the feature correlations and then visualize them using a [heatmap](http://seaborn.pydata.org/generated/seaborn.heatmap.html) (the data that would be fed into the heatmap would be the correlation values, for eg: `data.corr()`) to gain further insight.
# **Answer:** From the scatter matrix, it can be perceived that `Milk & Grocery`, `Milk & Detergents_Paper`, and `Grocery & Detergents_Paper` show an approximately linear relationship. To confirm this, I've plotted a heatmap of the feature correlation values, and it is evident that `Detergents_Paper and Grocery` exhibit a high correlation with a value of 0.92; next come `Milk and Grocery` and `Milk and Detergents_Paper`. Thus, it confirms the relevance of the feature I attempted to predict. In order to understand the distribution of the dataset, I've plotted histograms for all the features, from which it can be observed that the distributions of all the features are skewed right, where mean > median. This is further confirmed by the Shapiro-Wilk normality test: based on the p-values, the test showed that the data for all features are not normally distributed, which may be due to the presence of many outliers. Therefore, the data need preprocessing to create a better representation.

# ## Data Preprocessing
# Creating a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results from the analysis are significant and meaningful.

# ### Implementation: Feature Scaling
# If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most [often appropriate](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics) to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a [Box-Cox test](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm.
# + # Data scaling using the natural logarithm log_data = np.log(data) # Scaling the sample data using the natural logarithm log_samples = np.log(samples) # Scatter matrix for each pair of newly-transformed features pd.plotting.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde'); # - # ### Observation # After applying a natural logarithm scaling to the data, the distribution of each feature is much more normal. # Display the log-transformed sample data display(log_samples) # ### Implementation: Outlier Detection # Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identfying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): An *outlier step* is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal. # # In the code block below, you will need to implement the following: # - Assign the value of the 25th percentile for the given feature to `Q1`. Use `np.percentile` for this. # - Assign the value of the 75th percentile for the given feature to `Q3`. Again, use `np.percentile`. # - Assign the calculation of an outlier step for the given feature to `step`. # - Optionally remove data points from the dataset by adding indices to the `outliers` list. # # **NOTE:** If you choose to remove any outliers, ensure that the sample data does not contain any of these points! # Once you have performed this implementation, the dataset will be stored in the variable `good_data`. 
# + # For each feature find the data points with extreme high or low values for feature in log_data.keys(): # Calculate Q1 (25th percentile of the data) for the given feature Q1 = np.percentile(log_data[feature], 25) # Calculate Q3 (75th percentile of the data) for the given feature Q3 = np.percentile(log_data[feature], 75) # Use the interquartile range to calculate an outlier step (1.5 times the interquartile range) step = 1.5*(Q3 - Q1) # Display the outliers print("Data points considered outliers for the feature '{}':".format(feature)) display(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]) # OPTIONAL: Select the indices for data points you wish to remove outliers = [66, 95, 128, 338, 154, 356, 75, 161, 109, 142, 187, 233] # Outliers removal good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True) # - # ### Question 4 # * Are there any data points considered outliers for more than one feature based on the definition above? # * Should these data points be removed from the dataset? # * If any data points were added to the `outliers` list to be removed, explain why. # # ** Hint: ** If you have datapoints that are outliers in multiple categories think about why that may be and if they warrant removal. Also note how k-means is affected by outliers and whether or not this plays a factor in your analysis of whether or not to remove them. # **Answer:** # - There are five data pints, [154, 65, 66, 128, 75] that are considered outliers for more than one feature. # - I think these data points should not be removed only due to the fact that it is predicted as outliers by Tukey's method for more than one feature in the dataset. It may contain critical information and even a small change may matter a lot and may change the entire result. In general, outliers exist due to incorrectly entered or measured data [[Ref]](https://www.theanalysisfactor.com/outliers-to-drop-or-not-to-drop/). 
It should only be removed, if they are either demonstrable data entry errors or else physically impossible. Otherwise, methods have to be adjusted, not the data [[Ref]](https://www.quora.com/What-is-a-good-criterion-for-removing-outliers/answer/Peter-Flom). In the given dataset, it is not definite which datapoints are incorrectly entered. But, I assume that since the purchase is from the wholesale distributor, the monetary unit will not be in single digit. Therefore, from the list of predicted outliers, [66, 95, 128, 338, 154, 356, 75, 161, 109, 142, 187, 233] these data points contain single digit monetary unit purchase. Hence, I'm dropping these datapoints. Moreover, this list contains four datapoints from the outliers list that are predicted for more than one feature. # - Outliers have great effect in K-means result. As this algorithm calculates the centroid of the cluster based on the mean value of the datapoints in the cluster, it is very sensitive to outliers. Therefore, either the outliers should be removed after careful analysis of data, to ensure no loss of critical information or K-medians should be used, which is less sensitive to outliers. # ## Feature Transformation # In this section we will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers. # ### Implementation: PCA # # Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the good_data to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the explained variance ratio of each dimension — how much variance within the data is explained by that dimension alone. 
# Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data.

# +
from sklearn.decomposition import PCA

# Apply PCA by fitting the good data with the same number of dimensions as features
pca = PCA().fit(good_data)

# Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)

# Generate PCA results plot (explained variance ratio + feature weights per dimension)
pca_results = vs.pca_results(good_data, pca)
# -

# ### Question 5
#
# * How much variance in the data is explained* **in total** *by the first and second principal component?
# * How much variance in the data is explained by the first four principal components?
# * Using the visualization provided above, talk about each dimension and the cumulative variance explained by each, stressing upon which features are well represented by each dimension(both in terms of positive and negative variance explained). Discuss what the first four dimensions best represent in terms of customer spending.
#
# **Hint:** A positive increase in a specific dimension corresponds with an *increase* of the *positive-weighted* features and a *decrease* of the *negative-weighted* features. The rate of increase or decrease is based on the individual feature weights.

# **Answer:**
# - The first and second principal component explains the total variance of 0.73.
# - The first four principal components explain the total variance of 0.9366.
# - From the visualization,
#     - The first principal component is best represented by three features, Milk, Grocery and Detergents_Paper. All these exhibit high positive variance, in which Detergents_paper shows the highest.
#     - The second principal component is best represented by Fresh, Frozen and Delicatessen. All these features exhibit high negative variance with Fresh being the highest.
#     - The third principal component is best represented by Fresh, with highest variance (-ve), Frozen and Delicatessen with high positive variance.
# - The fourth principal component has the highest variance (-ve) in Frozen. Next to it comes the Delicatessen with higher positive variance and finally the Detergents_Paper with high negative variance. # ### Observation # Checking how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points. # Display sample log-data after having a PCA transformation applied display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values)) # ### Implementation: Dimensionality Reduction # When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the cumulative explained variance ratio is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a signifiant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards. # + # Apply PCA by fitting the good data with only two dimensions pca = PCA(n_components = 2).fit(good_data) # Transform the good data using the PCA fit above reduced_data = pca.transform(good_data) # Transform log_samples using the PCA fit above pca_samples = pca.transform(log_samples) # DataFrame for the reduced data reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2']) # - # ### Observation # Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions. 
# Display sample log-data after applying PCA transformation in two dimensions display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2'])) # ## Visualizing a Biplot # A biplot is a scatterplot where each data point is represented by its scores along the principal components. The axes are the principal components (in this case `Dimension 1` and `Dimension 2`). In addition, the biplot shows the projection of the original features along the components. A biplot can help us interpret the reduced dimensions of the data, and discover relationships between the principal components and original features. # Create a biplot vs.biplot(good_data, reduced_data, pca) # ### Observation # # Once we have the original feature projections (in red), it is easier to interpret the relative position of each data point in the scatterplot. For instance, a point the lower right corner of the figure will likely correspond to a customer that spends a lot on `'Milk'`, `'Grocery'` and `'Detergents_Paper'`, but not so much on the other product categories. # # From the biplot, which of the original features are most strongly correlated with the first component? What about those that are associated with the second component? Do these observations agree with the pca_results plot you obtained earlier? # ## Clustering # # In this section, either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm can be used to identify the various customer segments hidden in the data. Then, specific data points from the clusters are recovered to understand their significance by transforming them back into their original dimension and scale. # ### Question 6 # # * What are the advantages to using a K-Means clustering algorithm? # * What are the advantages to using a Gaussian Mixture Model clustering algorithm? # * Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why? 
# ** Hint: ** Think about the differences between hard clustering and soft clustering and which would be appropriate for our dataset.

# **Answer:**
#
# **Advantages of K-Means clustering**
# - It is fast, robust and computationally efficient.
# - It gives the best results when the clusters are well separated from each other and non-uniform.
#
# **Advantages of GMM clustering**
# - Unlike K-Means, which is a hard clustering model, GMM is a soft clustering model, where each data point has a membership probability for every cluster.
# - It doesn't assume clusters to be of any particular geometry, and hence it works very well with a uniform distribution of the dataset.
#
# From the above biplot, it can be observed that the distribution of the data is almost uniform and the data points will overlap with more than one cluster. Therefore, a soft clustering model like the Gaussian Mixture Model would be more appropriate for this problem.

# ### Implementation: Creating Clusters
# Depending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known *a priori*, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's *silhouette coefficient*. The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the *mean* silhouette coefficient provides for a simple scoring method of a given clustering.
# +
# BUGFIX: sklearn.mixture.GMM was deprecated in scikit-learn 0.18 and removed
# in 0.20; GaussianMixture is its replacement (same fit/predict API, cluster
# centers in .means_). NOTE(review): GaussianMixture defaults to
# covariance_type='full' while the old GMM defaulted to 'diag', so the printed
# scores can differ slightly from runs of the original code.
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score

clusters_no = [2, 3, 4, 5, 6]
for n in clusters_no:
    # Applying Gaussian Mixture Model to the reduced data
    clusterer = GaussianMixture(n_components=n).fit(reduced_data)

    # Predicting the cluster for each data point
    preds = clusterer.predict(reduced_data)

    # Finding the cluster centers
    centers = clusterer.means_

    # Predicting the cluster for each transformed sample data point
    sample_preds = clusterer.predict(pca_samples)

    # Calculating the mean silhouette coefficient for the number of clusters chosen
    score = silhouette_score(reduced_data, preds)
    print("The Silhouette_score for " + str(n) + " clusters is " + str(score))
# -

# ### Question 7
#
# * Report the silhouette score for several cluster numbers you tried.
# * Of these, which number of clusters has the best silhouette score?

# **Answer:**
#
# - The Silhouette_score for 2 clusters is 0.427279334424
# - The Silhouette_score for 3 clusters is 0.399245758866
# - The Silhouette_score for 4 clusters is 0.312101354457
# - The Silhouette_score for 5 clusters is 0.286384567387
# - The Silhouette_score for 6 clusters is 0.319146853364
#
# Model with 2 clusters has the best silhouette_score.

# ### Cluster Visualization
# Once the optimal number of clusters for the clustering algorithm is chosen using the scoring metric above, we can now visualize the results by executing the code block below.

# Display the results of the clustering from implementation
vs.cluster_results(reduced_data, preds, centers, pca_samples)

# ### Implementation: Data Recovery
# Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the averages of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to the average customer of that segment.
# Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.

# +
# Inverse transform the centers (from PCA space back to log space)
log_centers = pca.inverse_transform(centers)

# Exponentiate the centers (from log space back to original monetary units)
true_centers = np.exp(log_centers)

# Display the true centers, one row per customer segment
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
display(data.describe())
# -

# ### Question 8
#
# * Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project(specifically looking at the mean values for the various feature points). What set of establishments could each of the customer segments represent?
#
# **Hint:** A customer who is assigned to `'Cluster X'` should best identify with the establishments represented by the feature set of `'Segment X'`. Think about what each segment represents in terms their values for the feature points chosen. Reference these values with the mean values to get some perspective into what kind of establishment they represent.

# **Answer:** Since mean is sensitive to outliers, comparing median of the dataset with the total purchase cost of each product category for the representative data points would be more appropriate.
#
# - In Segment 0, the total purchase cost of Fresh and Frozen products are higher than the respective median values. Thus, this segment may represent restaurant. On the other hand, the values of all these features are lower than the respective mean values. Hence, it is difficult to represent the establishment, on comparing with mean values.
# - In Segment 1, the total purchase cost of Milk, Grocery, Delicatessen and Detergents_Paper are higher than the respective median values.
# Thus, this segment may represent market. Moreover, same features exhibit values higher than the respective mean values. Thus, for this segment, comparing the total purchase cost with both mean and median values results in same establishment.

# ### Question 9
#
# * For each sample point, which customer segment from* **Question 8** *best represents it?
# * Are the predictions for each sample point consistent with this?*
#
# Run the code block below to find which cluster each sample point is predicted to be.

# Display the predictions: cluster label for each of the three tracked samples
for i, pred in enumerate(sample_preds):
    print("Sample point", i, "predicted to be in Cluster", pred)

# **Answer:**
# - Sample point 2 and 0 belong to cluster 1, which represents market. My previous assumption on sample point 0 establishment is correct. Whereas, I've assumed sample point 2 to be of cafe but it is predicted as market.
# - Sample point 1 is predicted to be in cluster 0, which represents restaurant. This prediction is same as my assumption.

# ## Conclusion
# In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the ***customer segments***, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which *segment* that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the ***customer segments*** to a hidden variable present in the data, to see whether the clustering identified certain relationships.

# ### Question 10
# Companies will often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week.
However, the distributor will only make this change in delivery service for customers that react positively. # # * How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service?* # # **Hint:** Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most? # **Answer:** Using A/B tests, the wholesale distributor can determine whether the change in delivery service from 5 days a week to 3 days a week would impact the customer positively or not. For this, the test has to be carried out for both the segments separately. The steps for A/B test are as follows # - Take some sample data points that are close to the cluster center. # - Split those data points into two equal half. # - For the first half, get the customer feedback for 3 days a week delivery service and for the second half, get the customer feedback for 5 days a week delivery service. # - Analyze the feedback to determine whether the customers react positively or not and assign the delivery frequency accordingly. The result of these samples can be considered as a representative for the entire cluster. # ### Question 11 # Additional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a ***customer segment*** it best identifies with (depending on the clustering algorithm applied), we can consider *'customer segment'* as an **engineered feature** for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. Knowing these estimates, the wholesale distributor wants to classify each new customer to a ***customer segment*** to determine the most appropriate delivery service. 
# * How can the wholesale distributor label the new customers using only their estimated product spending and the **customer segment** data? # # **Hint:** A supervised learner could be used to train on the original customers. What would be the target variable? # **Answer:** The wholesale distributor can train the supervised learning algorithms like logistic regression, Neural Network, SVM, Decision trees on the customer dataset with input features as annual spending on each product category and the customer segment data as labels, to label new customers. As there are two segments/labels, it will be a binary classification problem. # ### Visualizing Underlying Distributions # # At the beginning of this project, it was discussed that the `'Channel'` and `'Region'` features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the `'Channel'` feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset. # # Run the code block below to see how each data point is labeled either `'HoReCa'` (Hotel/Restaurant/Cafe) or `'Retail'` the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling. # Display the clustering results based on 'Channel' data vs.channel_results(reduced_data, outliers, pca_samples) vs.cluster_results(reduced_data, preds, centers, pca_samples) # ### Question 12 # # * How well does the clustering algorithm and number of clusters you've chosen compare to this underlying distribution of Hotel/Restaurant/Cafe customers to Retailer customers? # * Are there customer segments that would be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes' by this distribution? # * Would you consider these classifications as consistent with your previous definition of the customer segments? 
# **Answer:**
# - The 2 clusters I chose almost match the clusters of the customer channel data. Cluster 0 aligns with Hotel/Restaurant/Cafe customers, whereas cluster 1 aligns with Retailer customers.
# - In the above plot, customer segments on the extreme right would be classified as purely 'Retailer', whereas on the extreme left they would be classified as purely 'Hotel/Restaurant/Cafe'.
# - I've guessed cluster 0 to be restaurant and it is consistent with the underlying distribution. For cluster 1, I assumed market. In channel data, it is labeled as Retailer. Moreover, a market is similar to a retailer. Hence, it is also consistent with my previous definition of the customer segment.
customer_segments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# <h1>MANDATORY PACKAGES</h1>

import xarray
import os
# %matplotlib inline

# <H1>GRIDDED DATA</H1>
# Download a gridded netCDF file with the motu-client and open it as an xarray Dataset:

filename = 'global-analysis-forecast-phy-001-024.nc'
# We presume the file to open lives in the current working directory;
# set a different path otherwise.
working_dir = os.getcwd()
os.chdir(working_dir)
dataset = xarray.open_dataset(filename)
dataset

# <H1>SELECT VARIABLE & SET ITS COORDINATES</H1>
# Temperature depends on time, lat, lon and depth; look at its variation with
# depth & time by selecting a single position (nearest grid point is used):

profile = dataset['thetao'].sel(longitude=-170.0, latitude=29.90, method='nearest')

# Closest coordinates found in the mesh:
profile.coords

# Temperature values at the two available times and the only available depth:
profile.values.tolist()

# Look for more information in google by searching: xarray Nearest neighbor lookups

# <h1>TIME SERIES PLOT</h1>
profile.plot.line(x='time', color='purple', marker='o')
PythonNotebooks/workshops/201903_Edinburgh/notebooks/5_GridedData_TimeSeries.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# <h1> Preprocessing using tf.transform and Dataflow </h1>
#
# This notebook illustrates:
# <ol>
# <li> Creating datasets for Machine Learning using tf.transform and Dataflow
# </ol>
# <p>
# While Pandas is fine for experimenting, for operationalization of your workflow, it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam also allows for streaming.

# Apache Beam only works in Python 2 at the moment, so we're going to switch to the Python 2 kernel. In the above menu, click the dropdown arrow and select `python2`. ![image.png](attachment:image.png)

# Then activate a Python 2 environment and install Apache Beam. Only specific combinations of TensorFlow/Beam are supported by tf.transform, so make sure to get a combo that is:
# * TFT 0.8.0
# * TF 1.8 or higher
# * Apache Beam [GCP] 2.5.0 or higher

# + language="bash"
# source activate py2env
# pip uninstall -y google-cloud-dataflow
# conda install -y pytz==2018.4
# pip install apache-beam[gcp] tensorflow_transform==0.8.0

# + language="bash"
# pip freeze | grep -e 'flow\|beam'
# -

# You need to restart your kernel to register the new installs before running the below cells.

import tensorflow as tf
import apache_beam as beam
print(tf.__version__)

# change these to try this notebook out
# NOTE: the two "REPLACE WITH" comments were swapped in the original —
# the first constant is the bucket, the second is the project id.
BUCKET = 'cloud-training-demos-ml'   # REPLACE WITH YOUR BUCKET NAME
PROJECT = 'cloud-training-demos'     # REPLACE WITH YOUR PROJECT ID
REGION = 'us-central1'

import os
# Export the configuration so the %%bash cells below can read it.
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION

# !gcloud config set project $PROJECT

# + language="bash"
# Create the bucket if it does not already exist.
# if ! gsutil ls | grep -q gs://${BUCKET}/; then
#   gsutil mb -l ${REGION} gs://${BUCKET}
# fi
# -

# <h2> Save the query from earlier </h2>
#
# The data is natality data (record of births in the US). My goal is to predict the baby's weight given a number of factors about the pregnancy and the baby's mother. Later, we will want to split the data into training and eval datasets. The hash of the year-month will be used for that.

query="""
SELECT
  weight_pounds,
  is_male,
  mother_age,
  mother_race,
  plurality,
  gestation_weeks,
  mother_married,
  ever_born,
  cigarette_use,
  alcohol_use,
  FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
  publicdata.samples.natality
WHERE year > 2000
"""

import google.datalab.bigquery as bq
# Sanity-check the query on a small sample before launching the full pipeline.
df = bq.Query(query + " LIMIT 100").execute().result().to_dataframe()
df.head()

# <h2> Create ML dataset using tf.transform and Dataflow </h2>
# <p>
# Let's use Cloud Dataflow to read in the BigQuery data and write it out as CSV files. Along the way, let's use tf.transform to do scaling and transforming. Using tf.transform allows us to save the metadata to ensure that the appropriate transformations get carried out during prediction as well.
# <p>
# Note that after you launch this, the notebook won't show you progress. Go to the GCP webconsole to the Dataflow section and monitor the running job. It took about <b>30 minutes</b> for me.
If you wish to continue without doing this step, you can copy my preprocessed output: # <pre> # gsutil -m cp -r gs://cloud-training-demos/babyweight/preproc_tft gs://your-bucket/ # </pre> # %writefile requirements.txt tensorflow-transform==0.8.0 # + import datetime import apache_beam as beam import tensorflow_transform as tft from tensorflow_transform.beam import impl as beam_impl def preprocess_tft(inputs): import copy import numpy as np def center(x): return x - tft.mean(x) result = copy.copy(inputs) # shallow copy result['mother_age_tft'] = center(inputs['mother_age']) result['gestation_weeks_centered'] = tft.scale_to_0_1(inputs['gestation_weeks']) result['mother_race_tft'] = tft.string_to_int(inputs['mother_race']) return result #return inputs def cleanup(rowdict): import copy, hashlib CSV_COLUMNS = 'weight_pounds,is_male,mother_age,mother_race,plurality,gestation_weeks,mother_married,cigarette_use,alcohol_use'.split(',') STR_COLUMNS = 'key,is_male,mother_race,mother_married,cigarette_use,alcohol_use'.split(',') FLT_COLUMNS = 'weight_pounds,mother_age,plurality,gestation_weeks'.split(',') # add any missing columns, and correct the types def tofloat(value, ifnot): try: return float(value) except (ValueError, TypeError): return ifnot result = { k : str(rowdict[k]) if k in rowdict else 'None' for k in STR_COLUMNS } result.update({ k : tofloat(rowdict[k], -99) if k in rowdict else -99 for k in FLT_COLUMNS }) # modify opaque numeric race code into human-readable data races = dict(zip([1,2,3,4,5,6,7,18,28,39,48], ['White', 'Black', 'American Indian', 'Chinese', 'Japanese', 'Hawaiian', 'Filipino', 'Asian Indian', 'Korean', 'Samaon', 'Vietnamese'])) if 'mother_race' in rowdict and rowdict['mother_race'] in races: result['mother_race'] = races[rowdict['mother_race']] else: result['mother_race'] = 'Unknown' # cleanup: write out only the data we that we want to train on if result['weight_pounds'] > 0 and result['mother_age'] > 0 and result['gestation_weeks'] > 0 and 
result['plurality'] > 0: data = ','.join([str(result[k]) for k in CSV_COLUMNS]) result['key'] = hashlib.sha224(data).hexdigest() yield result def preprocess(query, in_test_mode): import os import os.path import tempfile import tensorflow as tf from apache_beam.io import tfrecordio from tensorflow_transform.coders import example_proto_coder from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.tf_metadata import dataset_schema from tensorflow_transform.beam.tft_beam_io import transform_fn_io job_name = 'preprocess-babyweight-features' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S') if in_test_mode: import shutil print('Launching local job ... hang on') OUTPUT_DIR = './preproc_tft' shutil.rmtree(OUTPUT_DIR, ignore_errors=True) else: print('Launching Dataflow job {} ... hang on'.format(job_name)) OUTPUT_DIR = 'gs://{0}/babyweight/preproc_tft/'.format(BUCKET) import subprocess subprocess.call('gsutil rm -r {}'.format(OUTPUT_DIR).split()) options = { 'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'), 'temp_location': os.path.join(OUTPUT_DIR, 'tmp'), 'job_name': job_name, 'project': PROJECT, 'max_num_workers': 24, 'teardown_policy': 'TEARDOWN_ALWAYS', 'no_save_main_session': True, 'requirements_file': 'requirements.txt' } opts = beam.pipeline.PipelineOptions(flags=[], **options) if in_test_mode: RUNNER = 'DirectRunner' else: RUNNER = 'DataflowRunner' # set up metadata raw_data_schema = { colname : dataset_schema.ColumnSchema(tf.string, [], dataset_schema.FixedColumnRepresentation()) for colname in 'key,is_male,mother_race,mother_married,cigarette_use,alcohol_use'.split(',') } raw_data_schema.update({ colname : dataset_schema.ColumnSchema(tf.float32, [], dataset_schema.FixedColumnRepresentation()) for colname in 'weight_pounds,mother_age,plurality,gestation_weeks'.split(',') }) raw_data_metadata = dataset_metadata.DatasetMetadata(dataset_schema.Schema(raw_data_schema)) def read_rawdata(p, step, test_mode): if step 
== 'train': selquery = 'SELECT * FROM ({}) WHERE MOD(ABS(hashmonth),4) < 3'.format(query) else: selquery = 'SELECT * FROM ({}) WHERE MOD(ABS(hashmonth),4) = 3'.format(query) if in_test_mode: selquery = selquery + ' LIMIT 100' #print('Processing {} data from {}'.format(step, selquery)) return (p | '{}_read'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query=selquery, use_standard_sql=True)) | '{}_cleanup'.format(step) >> beam.FlatMap(cleanup) ) # run Beam with beam.Pipeline(RUNNER, options=opts) as p: with beam_impl.Context(temp_dir=os.path.join(OUTPUT_DIR, 'tmp')): # analyze and transform training raw_data = read_rawdata(p, 'train', in_test_mode) raw_dataset = (raw_data, raw_data_metadata) transformed_dataset, transform_fn = ( raw_dataset | beam_impl.AnalyzeAndTransformDataset(preprocess_tft)) transformed_data, transformed_metadata = transformed_dataset _ = transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord( os.path.join(OUTPUT_DIR, 'train'), coder=example_proto_coder.ExampleProtoCoder( transformed_metadata.schema)) # transform eval data raw_test_data = read_rawdata(p, 'eval', in_test_mode) raw_test_dataset = (raw_test_data, raw_data_metadata) transformed_test_dataset = ( (raw_test_dataset, transform_fn) | beam_impl.TransformDataset()) transformed_test_data, _ = transformed_test_dataset _ = transformed_test_data | 'WriteTestData' >> tfrecordio.WriteToTFRecord( os.path.join(OUTPUT_DIR, 'eval'), coder=example_proto_coder.ExampleProtoCoder( transformed_metadata.schema)) _ = (transform_fn | 'WriteTransformFn' >> transform_fn_io.WriteTransformFn(os.path.join(OUTPUT_DIR, 'metadata'))) job = p.run() if in_test_mode: job.wait_until_finish() print("Done!") preprocess(query, in_test_mode=False) # - # %bash gsutil ls gs://${BUCKET}/babyweight/preproc_tft/*-00000* # Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
courses/machine_learning/deepdive/06_structured/4_preproc_tft.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

import matplotlib.pyplot as plt
import pandas as pd
import networkx as nx
import numpy as np
from tqdm import tqdm
from CIoTS import CausalTSGenerator, draw_graph, VAR, pc_chen_modified, partial_corr_test, evaluate_edges

dimensions = 4
max_p = 8
incoming_edges = 3


def _run_comparison(dimensions, max_p, incoming_edges,
                    n_runs=20, data_length=10000, alpha=0.05):
    """Compare Chen's PC variant against a thresholded VAR on random causal series.

    Generates ``n_runs`` random causal time series, recovers a causal graph with
    each method, and boxplots the edge-level f1 scores.

    Bug fix vs. the original copy-pasted cells: each series is now scored
    against the graph of the generator that produced it.  Previously the
    generator variable was overwritten in the generation loop, so every
    series was evaluated against the graph of the *last* generator only.
    It also removes the 4x duplication of the experiment cell.
    """
    # Keep (true_graph, series) pairs together so scoring uses the right graph.
    experiments = []
    for _ in tqdm(range(n_runs)):
        generator = CausalTSGenerator(dimensions=dimensions, max_p=max_p,
                                      data_length=data_length,
                                      incoming_edges=incoming_edges)
        ts = generator.generate()
        experiments.append((generator.graph, ts))

    var_scores, chen_scores = [], []
    for true_graph, ts in tqdm(experiments):
        var = VAR(max_p)
        var.fit(ts)
        A = np.abs(var.params[1:])
        # Keep only coefficients more than one std above the mean magnitude.
        var_graph = var.to_graph(threshold=np.mean(A) + np.std(A))
        chen_graph = pc_chen_modified(partial_corr_test, ts, max_p, alpha=alpha)
        var_scores.append(evaluate_edges(true_graph, var_graph)['f1-score'])
        chen_scores.append(evaluate_edges(true_graph, chen_graph)['f1-score'])

    plt.boxplot([chen_scores, var_scores], labels=['Chen', 'VAR'])
    plt.show()


# +
_run_comparison(dimensions=4, max_p=5, incoming_edges=2)

# +
_run_comparison(dimensions=3, max_p=8, incoming_edges=3)

# +
_run_comparison(dimensions=8, max_p=3, incoming_edges=3)

# +
_run_comparison(dimensions=3, max_p=4, incoming_edges=2)
# -
Compare VAR vs. Chen.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h2> Build the daily-deaths time series (2015 through 30 June 2020) for the
# 7,357 Italian municipalities covered by ISTAT, out of 7,904 in total.

import pandas as pd

# Load the per-municipality ISTAT death records.
daily_deaths = pd.read_csv('csv/decessi_istat_7357comuni.csv')
daily_deaths.head()

# To sum the `Totale` column per value of `DATA`, the `DATA` column must be
# parsed as datetime so the records can be aggregated into a time series
# running from 1 January 2015 to 30 June 2020 with one nationwide total per day.
daily_deaths['DATA'] = pd.to_datetime(daily_deaths['DATA'])
daily_deaths.info()

# The geographic identifier columns are not needed for the national series.
daily_deaths.drop(['REG', 'NOME_REGIONE', 'PROV', 'NOME_PROVINCIA', 'COD_PROVCOM', 'NOME_COMUNE'], axis=1, inplace=True)
daily_deaths.head()

# Aggregate: one row per calendar day, summing deaths over all municipalities.
daily_deaths = daily_deaths.groupby('DATA').sum()
daily_deaths.head()

daily_deaths.to_csv('csv/serie_storica.csv')
Modulo 2 - Analisi dei dati per i 7.357 comuni/Creation of time series of daily deaths (7.357 municipalities).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# <a href="https://colab.research.google.com/github/reemaratnani/startup_project_stripe_payment/blob/master/DermDetect.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# DermDetect: classify erythemato-squamous disease from the dermatology dataset.

import pandas as pd
import numpy as np

# Load the dermatology dataset (34 feature columns plus the target 'class').
data = pd.read_csv("derm.csv")
data.head()

# Inspect and remove rows with missing values.
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()

import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline

# Class distribution of the target.
sns.countplot(data['class'])

# There are 6 classes of erythemato-squamous disease.

# Pairwise feature correlations.
corr = data.corr()
plt.figure(figsize=(12,12))
sns.heatmap(corr, linewidths=.5, cmap='viridis')

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()

#The Class code column is the value that needs to be predicted from the analysis.
#Hence we will have to split X and y (features and labels) based on this information.
X = data.iloc[:,0:34] # all rows, all the features and no labels
y = data.iloc[:,-1] # all rows, label only

scaled_data = scaler.fit_transform(X)
scaled_data
# NOTE(review): scaled_data is computed here but never used downstream —
# PCA, the train/test split and all models run on the unscaled X.
# Confirm whether scaling was intended to feed the rest of the pipeline.

from sklearn.decomposition import PCA
pca = PCA()
pca.fit_transform(X)

pca.get_covariance()

# Fraction of variance explained by each principal component.
explained_variance=pca.explained_variance_ratio_
explained_variance

with plt.style.context('dark_background'):
    plt.figure(figsize=(8, 6))
    plt.bar(range(34), explained_variance, alpha=0.5, align='center', label='Explained Variance representation')
    plt.ylabel('Explained variance ratio')
    plt.xlabel('Principal components')
    plt.legend(loc='best')
    plt.tight_layout()

# The above graph is a clear indication that the first component has the
# largest explained variance ratio in this dataset.

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
X_train.shape

from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix

# Hyperparameter grid for the SVM (RBF kernel by default).
param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001]}

grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2)
grid.fit(X_train, y_train)

# Persist the fitted grid-search model, then reload it for prediction.
import pickle
with open('model.pkl', 'wb') as f:
    pickle.dump(grid, f)

with open('model.pkl', 'rb') as f:
    clf = pickle.load(f)

grid_predictions = clf.predict(X_test)

print(confusion_matrix(y_test, grid_predictions))

print(classification_report(y_test, grid_predictions))

from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier

# Sweep n_estimators for a random-forest regressor and record the R^2 score.
# NOTE(review): a *regressor* is scored on integer class labels here —
# presumably a classifier was intended; confirm.
model = RandomForestRegressor()
estimators = np.arange(10, 300, 10)
scores = []
for n in estimators:
    model.set_params(n_estimators=n)
    model.fit(X_train, y_train)
    scores.append(model.score(X_test, y_test))
print(scores)

plt.title("Random Forest Regressor - Effect of n_estimators")
plt.xlabel("n_estimator")
plt.ylabel("score")
plt.plot(estimators, scores)

from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
# Sweep n_estimators for AdaBoost with 5-fold cross-validation.
# NOTE(review): `clf` is rebound here, shadowing the pickled GridSearchCV
# model loaded above; each `score` is an array of 5 fold scores.
estimators = np.arange(100, 2000, 200)
scores = []
for n in estimators:
    clf = AdaBoostClassifier(learning_rate = 0.1,n_estimators = n,random_state=42)
    clf_check = clf.fit(X_train,y_train)
    score = cross_val_score(clf, X_train, y_train, cv=5)
    scores.append(score)
    print('The Estimator {} has a score of {}'.format(n,score))

plt.title("AdaBoost Classifier - Effect of n_estimators")
plt.xlabel("n_estimator")
plt.ylabel("score")
plt.plot(estimators, scores)
DermDetect.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.9 64-bit ('env')
#     name: python389jvsc74a57bd0e514e8fb66f3ffe4f97051a3575f23813373bb532cd5a243c27d050192f765fb
# ---

import numpy as np
import matplotlib.pyplot as plt
from get_data import plot_sample
from skimage.io import imshow
from skimage.io import imread
from skimage.transform import resize
from skimage.color import rgb2gray
from keras.models import load_model

# Load the trained facial-keypoint network.
keypoint_model = load_model('model.h5')

# Read the captured frame and bring it to the network's expected format:
# greyscale, 96x96, batch shape (1, 96, 96, 1).
frame = imread('images\capture.jpg')
frame = rgb2gray(frame)
frame = resize(frame, (96,96))
sample = np.array(frame)
network_input = np.reshape(sample, (1, 96, 96, 1))

# Predict keypoint coordinates and overlay them on the frame.
predicted_keypoints = keypoint_model.predict(network_input)

fig, axis = plt.subplots()
plot_sample(sample, predicted_keypoints[0], axis, "Prediction on captured image")
model_testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment 2
# ### Author: <NAME>

# +
# 1. Describe the class of strings matched by the following regular expressions.
#    (No code is needed; just describe what the following regular expressions do/match.)
# a. [a-zA-Z]+
# b. [A-Z][a-z]*
# c. p[aeiou]{,2}t
#
# d. \d+(\.\d+)?
# e. ([^aeiou][aeiou][^aeiou])*
# f. \w+|[^\w\s]+
# -

import nltk, re, pprint

# +
test = "The companies, Moderna and Pfizer, revealed details about how participants are being selected and monitored, the conditions under which the trials could be stopped early if there were problems, and the evidence researchers will use to determine whether people who got the vaccines were protected from Covid-19."
# Took the test data from NY Times blog

# a.
nltk.re_show(r'[a-zA-Z]+', test)
# Ans: It matches anything made of alphabetic characters, whether upper case or lower case.

# +
# b.
nltk.re_show(r'[A-Z][a-z]*', test)
# Ans: All capitalized words are matched, i.e. words whose first letter is capital.

# +
# c.
nltk.re_show(r'p[aeiou]{,2}t', test)
# Ans: Words starting with p and ending in t, with either 0, 1 or 2 vowels between them.
# The example I chose had no such word, so see new examples below:
nltk.re_show(r'p[aeiou]{,2}t', "pant")
nltk.re_show(r'p[aeiou]{,2}t', "pat")
nltk.re_show(r'p[aeiou]{,2}t', "pout")
nltk.re_show(r'p[aeiou]{,2}t', "pan")
nltk.re_show(r'p[aeiou]{,2}t', "pt")

# +
# d.
nltk.re_show(r'\d+(\.\d+)?', test)
# Ans: It will return any number - whole, fraction or integer. Anything on the number line.

# +
# e.
nltk.re_show(r'([^aeiou][aeiou][^aeiou])*', test)
# Ans: Zero or more repetitions of a consonant-vowel-consonant sequence.

# +
# f.
nltk.re_show(r'\w+|[^\w\s]+', test)
# Ans: All alphanumeric tokens, plus runs of punctuation / non-whitespace characters.
# -

# ----

# +
# 2. Rewrite the following loop as a list comprehension:
# sent = ['This', 'is', 'an', 'introduction', 'class']
# result = []
# for word in sent:
#     word_len = (word, len(word))
#     result.append(word_len)
# result

# +
# Running the sample code:
sent = ['This', 'is', 'an', 'introduction', 'class']
result = []
for word in sent:
    word_len = (word, len(word))
    result.append(word_len)
result

# +
# Re-writing it as a list comprehension:
sent = ['This', 'is', 'an', 'introduction', 'class']
print([(w, len(w)) for w in sent])
# -

# ----

# +
# 3. Read in some text from your own document on your local disk, tokenize it, and
#    print the list of all wh-word types that occur (who, which, what, and so on).

# +
# The text in the file is the same as what I used above for the regex check
''' "The companies, Moderna and Pfizer, revealed details about how participants are being selected and monitored, the conditions under which the trials could be stopped early if there were problems, and the evidence researchers will use to determine whether people who got the vaccines were protected from Covid-19." '''
# -

file = open('test.txt')
file

from nltk import word_tokenize
from nltk.corpus import gutenberg

raw = file.read()
tokens = word_tokenize(raw)

# +
# Method 1: Using the method startswith()
print([wh for wh in tokens if wh.lower().startswith('wh')])

# +
# Method 2: Using a regex expression
print([wh for wh in tokens if re.findall('^[Ww]h', wh)])
# -

# ----

# +
# 4. Create your own file consisting of words and (made up) frequencies, where each line
#    consists of a word, the space character, and a positive integer, e.g. fuzzy 53.
#    Read the file into a Python list using open(filename).readlines().
#    Next, break each line into its two fields using split(), and convert the number into
#    an integer using int(). The result should be a list of the form: [['fuzzy', 53], ...].

# +
# Content of the file que4.txt is as follows:
''' hello 5 tanay 5 nlp 3 goodbye 7 friday 6 '''
# -

que4 = open('que4.txt', encoding = "utf-8").readlines()

print([[w, int(i)] for w, i in (rows.split() for rows in que4)])

# +
# Oje's doubt:
myfile2= open('que4_v2.txt','r')
raw1 = myfile2.read()
fields = raw1.split()

# +
# Fixed Oje's code:
result1 = []  # Creates an empty list called result1
x=0  # Initiate with a dummy index
# This loop walks over the tokens, appending strings as new [word] entries
# and attaching each number (as int) to the most recent word entry.
for i in fields:
    if i.isalpha():
        result1.append([i])
    else:
        result1[x].append(int(i))
        x = x+1
result1

# +
# Alyssa's Code:
[s.strip() for s in que4]
# -

# ----

# +
# 5. Readability measures are used to score the reading difficulty of a text, for the
#    purposes of selecting texts of appropriate difficulty for language learners.
#    Let μw be the average number of letters per word, and μs the average number of words
#    per sentence, in a given text. The Automated Readability Index (ARI) of the text is
#    defined to be: 4.71 μw + 0.5 μs - 21.43. Compute the ARI score for each section of
#    the Brown Corpus (News, Editorial, ... Humor).
# -

from nltk.corpus import brown

def ARI(category):
    # ARI = 4.71 * (letters per word) + 0.5 * (words per sentence) - 21.43
    words = brown.words(categories = category)
    sents = brown.sents(categories = category)
    μw = sum(len(w) for w in words)/len(words)
    μs = sum(len(s) for s in sents)/len(sents)
    return (4.71 * μw + 0.5 * μs - 21.43)

for category in brown.categories():
    print("The category is {0:<15} and its corresponding ARI is {1}".
          format(category, round(ARI(category),2)))

# ----

# +
# 6. Use the Porter Stemmer to normalize some tokenized text (see below), calling the
#    stemmer on each word. Do the same thing with the Lancaster Stemmer and describe
#    any difference you observe by using these two stemmers.
# -

porter = nltk.PorterStemmer()
lancaster = nltk.LancasterStemmer()

text = 'Technologies based on NLP are becoming increasingly widespread. For example, phones and handheld computers support predictive text and handwriting recognition; web search engines give access to information locked up in unstructured text; machine translation allows us to retrieve texts written in Chinese and read them in Spanish; text analysis enables us to detect sentiment in tweets and blogs. By providing more natural human-machine interfaces, and more sophisticated access to stored information, language processing has come to play a central role in the multilingual information society'

tokens = word_tokenize(text)

print([porter.stem(t) for t in tokens])

print([lancaster.stem(t) for t in tokens])

# **Explanation:** The Porter algorithm is the less aggressive one. The stems it produces
# are somewhat intuitive and understandable. The Lancaster algorithm, on the other hand,
# is very aggressive because it chops words strictly, which makes the output confusing.
# With Lancaster, the stems become non-relatable to some extent.

# ----

# +
# 7. Obtain raw texts from two or more genres and compute their respective reading
#    difficulty scores as in the earlier exercise on reading difficulty. Compare the
#    reading difficulties for ABC Rural News ("rural.txt") and ABC Science News
#    ("science.txt") (nltk.corpus.abc).
# -

from nltk.corpus import abc
from nltk import word_tokenize, sent_tokenize

# +
def ARI(category):
    # Same ARI formula as above, but applied to raw text: tokenize into
    # words, and into sentences-of-words, before averaging.
    words = word_tokenize(category)
    sents = [nltk.word_tokenize(sent) for sent in nltk.sent_tokenize(category)]
    μw = sum(len(w) for w in words)/len(words)
    μs = sum(len(s) for s in sents)/len(sents)
    return 4.71 * μw + 0.5 * μs - 21.43

print("ABC Rural News has an ARI of {0:>7}". format(round(ARI(abc.raw("rural.txt")),2)))
print("ABC Science News has an ARI of {0:>5}". format(round(ARI(abc.raw("science.txt")),2)))
# -

# ----

# +
# 8. Rewrite the following nested loop as a nested list comprehension:
# words = ['attribution', 'confabulation', 'elocution',
#          'sequoia', 'tenacious', 'unidirectional']
# vsequences = set()
# for word in words:
#     vowels = []
#     for char in word:
#         if char in 'aeiou':
#             vowels.append(char)
#     vsequences.add(''.join(vowels))
# sorted(vsequences)
# -

words = ['attribution', 'confabulation', 'elocution', 'sequoia', 'tenacious', 'unidirectional']
vsequences = set()
for word in words:
    vowels = []
    for char in word:
        if char in 'aeiou':
            vowels.append(char)
    vsequences.add(''.join(vowels))
sorted(vsequences)

# +
# Re-writing it as a list comprehension:
# Method 1: Using a generic for loop for vowels and then on the words array
words = ['attribution', 'confabulation', 'elocution', 'sequoia', 'tenacious', 'unidirectional']
sorted(set([''.join([w for w in word if w in 'aeiou']) for word in words]))

# +
# Re-writing it as a list comprehension:
# Method 2: Using regex to strip non-vowels and then a generator over the words array
sorted(re.sub('[^aeiou]', '', w) for w in words)
# -

# ----
# ----

# +
# 9.Try to refer the following sample code to print the following sentences in a formatted way.(Hint: you should use str.format() method in print() and for loop;For more information, please read the textbook section 3.9 in chapter 3)

# output should look like:
'''
The Tragedie of Hamlet was written by <NAME> in 1599
Leaves of Grass was written by <NAME> in 1855
Emma was written by <NAME> in 1816

# sample code:
template = 'Lee wants a {} right now'
menu = ['sandwich', 'spam fritter', 'pancake']
for snack in menu:
    print(template.format(snack))
'''
# -

# Run the sample code as given.
template = 'Lee wants a {} right now'
menu = ['sandwich', 'spam fritter', 'pancake']
for snack in menu:
    print(template.format(snack))

# +
# How will it work for the given sample: pad each field to a fixed width.
menu = ['sandwich', 'spam fritter', 'pancake']
for i in range(len(menu)):
    print("{0:<11} {1:<12} {2}".format('Lee wants a', menu[i], 'right now'))

# +
# How will it work for the given question: iterate the three parallel
# lists together and left-align the title and author columns.
books = ['The Tragedie of Hamlet', 'Leaves of Grass', 'Emma']
author = ['<NAME>', '<NAME>', '<NAME>']
year = [1599, 1855, 1816]

for i in range(len(books)):
    print("{0:<22} was written by {1:<19} in {2}".format(books[i], author[i], year[i]))
# -

# ----

# +
# 10. Define the variable quote to contain the list ['Action', 'speaks', 'louder', 'than', 'words']. Process this list using a for loop, and store the length of each word in a new list lengths. Hint: begin by assigning the empty list to lengths, using lengths = []. Then each time through the loop, use append() to add another length value to the list. Then do the same thing using a list comprehension.
# -

quote = ['Action', 'speaks', 'louder', 'than', 'words']

# First with an explicit for loop, as the exercise asks.
lengths = []
for word in quote:
    lengths.append(len(word))
lengths

# Then the same computation as a list comprehension.
# (The earlier version, [[len(i) for i in x.split()] for x in quote],
# wrapped each length in a one-element sub-list such as [6]; every entry
# of `lengths` should instead be a plain int.)
lengths = [len(word) for word in quote]
lengths

# +
# Also printing the arrays: quote and lengths using string formatting
for i in range(len(quote)):
    print("Length of the word {0:<6} is {1}".format(quote[i], lengths[i]))
# -

# ----
Assignments/Assignment_2_Tanay.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="limiting-remove" # # Mixed Data Features # # In previous lectures, we've used neural networks to perform "basic" classification, image classification, and text classification. In each of these, we used a single pipeline that we custom built for the data we needed to work with, usually enclosed within a `keras.models.Sequential()` model. This worked fine for those situations, but what happens when we have *multiple* complex data components that we would like to use in our models? For example, maybe we would like to do classification in a dataset containing both: # # 1. Pictures (images) and captions (text). # 2. Descriptions (text) and timestamps (numbers). # 3. Article bodies (text) and titles (different text). # # Of course, it's possible to come up with various ad hoc ways to smoosh pieces of data together, but this isn't usually the right approach. A better thing to do is to create a more flexible model that accepts and appropriately processes different kinds of inputs. In this lecture, we'll learn how to do this. # # ### Key Tools # # - Labeled data sets. # - The Keras Functional API (alternative to Sequential API). # # ### Useful References # # - I consulted [this tutorial](https://keras.io/guides/functional_api/) on the Functional API while preparing these lecture notes. 
# + id="billion-print" import numpy as np import pandas as pd import tensorflow as tf import re import string from tensorflow.keras import layers from tensorflow.keras import losses from tensorflow import keras # requires update to tensorflow 2.4 # >>> conda activate PIC16B # >>> pip install tensorflow==2.4 from tensorflow.keras.layers.experimental.preprocessing import TextVectorization from tensorflow.keras.layers.experimental.preprocessing import StringLookup from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder # for embedding viz import plotly.express as px import plotly.io as pio pio.templates.default = "plotly_white" # + [markdown] id="sunrise-accreditation" # Our data set for this lecture contains various information about various musical tracks produced between the years 1950 and 2019. You can find it on Kaggle [here](https://www.kaggle.com/saurabhshahane/music-dataset-1950-to-2019). The data was originally published in the following data publication: # # > <NAME>; <NAME>; <NAME>; <NAME> (2020), “Music Dataset: Lyrics and Metadata from 1950 to 2019”, Mendeley Data, V3, doi: 10.17632/3t9vbwxgr5.3 # + id="resident-persian" url = "https://raw.githubusercontent.com/PhilChodrow/PIC16B/master/datasets/tcc_ceds_music.csv" df = pd.read_csv(url) # + [markdown] id="attractive-thing" # Let's take a quick look: # + colab={"base_uri": "https://localhost:8080/", "height": 629} id="lasting-baltimore" outputId="ad08b24f-3c8f-4150-d198-c55b7e9565d9" df.head() # + [markdown] id="unavailable-heating" # We have a number of columns here! We'll focus on a few subsets in particular: # # The `genre` describes the overall genre of the track. 
It has seven values: # + colab={"base_uri": "https://localhost:8080/"} id="defensive-backing" outputId="f3df13de-6583-4a84-a1cb-ab8ed637d13e" df.groupby("genre").size() # + [markdown] id="coral-doubt" # There data also includes # - The complete `lyrics` of the track (if it has any) # - The normalized `age` of the track (1.0 corresponds to tracks released in 1950, 0.0 corresponds to more recent tracks) # - A selection of columns that give the track numerical scores reflecting various attributes and topics, such as "danceability," "loudness," and "acousticness." # # There are also other columns in the data, but these are the ones we're going to focus on for this analysis. We are going to subset things a bit: let's focus on only a few genres, and on only relatively recent tracks. # + id="reverse-winner" df = df[df["release_date"] > 2000] genres = ["blues", "hip hop", "country"] df = df[df["genre"].apply(lambda x: x in genres)] # + [markdown] id="impossible-weekly" # Next, let's do a categorical encoding of the `genre` column. # + id="annual-seeking" le = LabelEncoder() df["genre"] = le.fit_transform(df["genre"]) num_genres = len(df["genre"].unique()) # + [markdown] id="experienced-sunset" # ## Create a Dataset # # We're now ready to create a TensorFlow `Dataset`. This is going to be a little more involved than last time, because we need to distinguish between different kinds of model inputs. # # Here's a list of all the scalar "score" columns in the data frame. # + id="minus-triangle" scalars = ['dating', 'violence', 'world/life', 'night/time', 'shake the audience', 'family/gospel', 'romantic', 'communication', 'obscene', 'music', 'movement/places', 'light/visual perceptions', 'family/spiritual', 'like/girls', 'sadness', 'feelings', 'danceability', 'loudness', 'acousticness', 'instrumentalness', 'valence', 'energy'] # + [markdown] id="reasonable-symposium" # Because we have multiple inputs, we are going to construct our `Dataset` from a tuple of dictionaries. 
The first dictionary is going to specify the different components in the predictor data, while the second dictionary is going to specify the different components of the target data. # + id="upper-carter" data = tf.data.Dataset.from_tensor_slices( ( { "lyrics" : df[["lyrics"]], "scalars" : df[scalars]}, { "genre" : df[["genre"]] } ) ) # + [markdown] id="valid-miami" # Next, we are going to perform a train/test/validation split. For each of the three split `Datasets`, we are going to *batch* them into small chunks of data. This helps with training runtime later. # + colab={"base_uri": "https://localhost:8080/"} id="ordered-density" outputId="901de6fe-5179-4aae-b0a8-3c8b912ed52c" data = data.shuffle(buffer_size = len(data)) train_size = int(0.7*len(data)) val_size = int(0.1*len(data)) train = data.take(train_size).batch(20) val = data.skip(train_size).take(val_size).batch(20) test = data.skip(train_size + val_size).batch(20) len(train), len(val), len(test) # + [markdown] id="growing-democracy" # Each of the numbers above should be multipled by the batch size to give the total number of rows in each `Dataset`. # # The code below sets up exactly the same text preprocessing that we did [last lecture](https://nbviewer.jupyter.org/github/PhilChodrow/PIC16B/blob/master/lectures/tf/tf-3.ipynb). # + id="minimal-function" size_vocabulary = 2000 def standardization(input_data): lowercase = tf.strings.lower(input_data) no_punctuation = tf.strings.regex_replace(lowercase, '[%s]' % re.escape(string.punctuation),'') return no_punctuation vectorize_layer = TextVectorization( standardize=standardization, max_tokens=size_vocabulary, # only consider this many words output_mode='int', output_sequence_length=500) lyrics = train.map(lambda x, y: x["lyrics"]) vectorize_layer.adapt(lyrics) # + [markdown] id="stopped-scholar" # ## The Keras Functional API # # In previousl lectures, we used the `Sequential` API for constructing models. 
For example, we wrote code like this: # # ```python # model = tf.keras.Sequential([ # layers.Embedding(max_tokens, output_dim = 3), # layers.Dropout(0.2), # layers.GlobalAveragePooling1D(), # layers.Dropout(0.2), # layers.Dense(len(categories))] # ) # ``` # # This model is designed to accept a single kind of input (in this case text) and spit out a single output. However, in our case we have two distinct *kinds* of input: the song lyrics and the scalar scores. It wouldn't really make much sense to pass the scalar scores through a text vectorization or embedding layer. For this reason, we need to move beyond the `Sequential` API and instead constructing our models using a somewhat more manual approach, referred to as the `Functional` API. # + [markdown] id="great-gambling" # ### Inputs # # Start by specifying the two kinds of `keras.Input` for our model. You should have one input for each qualitatively distinct "kind" of predictor data. All the parameters here are important: # # - `shape` should describe the shape of a single item of data. For example, the `lyrics` column contains just one entry for each song, so the shape is `(1,)` (a tuple of length 1). On the other hand, there are `len(scalars) = 22` distinct columns of scalar scores. # - the `name` should be some descriptive name that you're able to remember for later. # - The `dtype` specifies the kind of data contained in each of the input tensors. # + id="cooperative-confidence" # inputs lyrics_input = keras.Input( shape = (1,), name = "lyrics", dtype = "string" ) scalars_input = keras.Input( shape = (len(scalars),), name = "scalars", dtype = "float64" ) # + [markdown] id="chronic-carbon" # ## Hidden Layers # # First, let's write a pipeline for the lyrics. This pipeline is pretty much the same as the one we used earlier for text classification -- we're just building it differently. 
# + id="comprehensive-overall" # layers for processing the lyrics, pretty much the same as from our lecture # on text classification lyrics_features = vectorize_layer(lyrics_input) lyrics_features = layers.Embedding(size_vocabulary, 3, name = "embedding")(lyrics_features) lyrics_features = layers.Dropout(0.2)(lyrics_features) lyrics_features = layers.GlobalAveragePooling1D()(lyrics_features) lyrics_features = layers.Dropout(0.2)(lyrics_features) lyrics_features = layers.Dense(32)(lyrics_features) # + [markdown] id="accredited-canvas" # Next, let's write a pipeline for the scalars. We don't need to do anything fancy with them, so instead of messing with `Embeddings` and the like, we're just going to pass them through a `Dense` layer. # + id="opposed-scene" scalar_features = layers.Dense(32)(scalars_input) # + [markdown] id="accurate-omega" # Here's simultaneously the most important and most boring part of the whole model: we are going to `concatenate` the output of the `lyrics` pipeline with the output of the `scalar` pipeline: # + id="conscious-mapping" main = layers.concatenate([lyrics_features, scalar_features], axis = 1) # + [markdown] id="pleased-fighter" # Finally, let's pass the consolidated set of computed features through a few more `Dense` layers. Remember that the very last `Dense` layer should have a number of outputs equal to the number of classes in the data. # # **Observe that the output layer has a name, and that this name matches the key corresponding to the target data in the `Datasets` we will pass to the model.** This is how TensorFlow knows which part of our data set to compare against the outputs! # + id="fleet-society" main = layers.Dense(32)(main) output = layers.Dense(num_genres, name = "genre")(main) # + [markdown] id="cognitive-sleeve" # So far, we haven't actually created a model yet -- just a bunch of interrelated layers. We create the model by specifying the input(s) and output. 
# + id="accredited-publication" model = keras.Model( inputs = [lyrics_input, scalars_input], outputs = output ) # + [markdown] id="authentic-conversion" # The model summary provides one good way to look at the structure of the model, but it can be difficult to read when there are multiple inputs: # + colab={"base_uri": "https://localhost:8080/"} id="homeless-terrain" outputId="7b57a043-5365-4ef4-faed-c269d8b0b1be" model.summary() # + [markdown] id="lined-blade" # Something a bit more visually attractive can be obtained by using the `plot_model` function: # + colab={"base_uri": "https://localhost:8080/", "height": 953} id="outdoor-prague" outputId="469ea2db-f3e2-4c50-fc51-e732459cddee" keras.utils.plot_model(model) # + [markdown] id="executed-capability" # Now we're ready to train and evaluate our model as usual! As always, we need to compile: # + id="reported-glass" model.compile(optimizer = "adam", loss = losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'] ) # + [markdown] id="distinct-premium" # And now we train! # + id="developing-bread" history = model.fit(train, validation_data=val, epochs = 50, verbose = 0) # + [markdown] id="O5Aqe0Yb1zG0" # Let's take a look at how our training process went: # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="fixed-questionnaire" outputId="29ef9631-f2d6-44bc-9b31-2efede1a2d86" from matplotlib import pyplot as plt plt.plot(history.history["accuracy"], label = "training") plt.plot(history.history["val_accuracy"], label = "validation") plt.legend() # + [markdown] id="U2P4vbD018Ed" # Well, that looks promising! How do we do on unseen test data? # + colab={"base_uri": "https://localhost:8080/"} id="e41Wp2eC1s9m" outputId="e034fd06-6401-46b4-fde2-cd018a69d76e" model.evaluate(test) # + [markdown] id="cloDFeKs16nC" # Not bad! A good exercise, which we "leave to the reader," is to compare this performance to that of a simpler model which uses only the lyrics or only the scalar scores. 
# + [markdown] id="cognitive-amateur" # ## Visualizing Embeddings # # As usual, it's fun to take a look at the embedding learned by our model. Provided that we gave our embedding layer a name, the same approach as last time works just fine: # + id="blond-holly" weights = model.get_layer('embedding').get_weights()[0] # get the weights from the embedding layer vocab = vectorize_layer.get_vocabulary() # get the vocabulary from our data prep for later from sklearn.decomposition import PCA pca = PCA(n_components=2) weights = pca.fit_transform(weights) embedding_df = pd.DataFrame({ 'word' : vocab, 'x0' : weights[:,0], 'x1' : weights[:,1] }) # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="continuing-fluid" outputId="9db512d7-24ba-4c86-d708-889fccedcc22" import plotly.express as px fig = px.scatter(embedding_df, x = "x0", y = "x1", size = list(np.ones(len(embedding_df))), size_max = 2, hover_name = "word") fig.show() # + [markdown] id="lpOUCqRM2RCu" # Recalling the genres that we were attempting to classify, are you able to recognize any interpretable patterns in the learned word embedding?
lectures/tf/tf_4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: python-env
#     language: python
#     name: python-env
# ---

from cv2 import cv2
import itertools
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import time
from IPython.display import clear_output


def open_img(path):
    """Read an image file and return it as an RGB numpy array."""
    # OpenCV loads images in BGR channel order; convert for display libraries.
    bgr = cv2.imread(path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)


def show_img(img, colormode='gray'):
    """Display an image (grayscale or RGB) without axes."""
    # Rank < 3 means a single-channel image: PIL mode 'L' plus the 'gray'
    # colormap; otherwise treat it as RGB with matplotlib's default colormap.
    is_gray = len(img.shape) < 3
    colormap = 'gray' if is_gray else None
    colormode = 'L' if is_gray else 'RGB'
    pil_img = Image.fromarray(np.uint8(img), colormode)
    plt.imshow(pil_img, colormap)
    plt.axis('off')
    plt.show()


img = open_img('../test_imgs/orange_blasco.png')
print('IMAGE SHAPE', img.shape)
show_img(img)

# Denoise first so the clustering below is not fitted to sensor noise.
dst = cv2.fastNlMeansDenoisingColored(img, None, 5, 5, 4, 10)
show_img(dst)

# Stop k-means after 100 iterations or once centers move by less than 0.2.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)

# One row per pixel, three float32 colour components per row.
pixel_values = np.float32(dst.reshape((-1, 3)))

k = 3
_, labels, centers = cv2.kmeans(pixel_values, k, None, criteria, 10,
                                cv2.KMEANS_RANDOM_CENTERS)

# +
# Back to 8-bit colour values and a flat label vector.
centers = np.uint8(centers)
labels = labels.flatten()
# -

# Replace every pixel with the colour of its cluster centre, then restore
# the original image dimensions.
segmented_image = centers[labels]
segmented_image = segmented_image.reshape(dst.shape)

# show the image
plt.imshow(segmented_image)
plt.show()
blasco/Segmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Final Project # <NAME> <br> # November 23, 2020 # + # Import NLTK to load training data import re import nltk nltk.download("tagsets") nltk.download("brown") nltk.download("punkt") nltk.download("averaged_perceptron_tagger") from nltk import sent_tokenize, word_tokenize, pos_tag # Import my modules from hmm import HMM from blstm import * from tagging_results import TaggingResults import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix # %matplotlib inline # + # CAN LOAD PRE-TRAINED MODELS HERE def enpickle(obj, file): import pickle with open(file, 'wb') as out_file: pickle.dump(obj, out_file) def unpickle(file): import pickle with open(file, 'rb') as in_file: pickle_dict = pickle.load(in_file, encoding='bytes') return pickle_dict #hmm = unpickle("models/hmm") #summ = unpickle("models/blstm_train_summ") #net = torch.load("models/blstm") # - # ## Step 2: Generative Model train_size, test_size = 30000, 5000 # actual test set may be < test_size train = nltk.corpus.brown.tagged_sents(tagset="universal")[:train_size] test = nltk.corpus.brown.tagged_sents(tagset="universal")[train_size:train_size+test_size] hmm = HMM() # %time hmm.fit(train) # %time test_sents, y_true, y_predict = hmm.split_predict(test) # time this # + # Calculate accuracy of HMM on both datasets hmm_train = TaggingResults(hmm, train, hmm.vocab, hmm.tagset) hmm_test = TaggingResults(hmm, test, hmm.vocab, hmm.tagset) hmm_train.get_accuracy() hmm_test.get_accuracy() # - # ## Step 3: Discriminative Model # + untagged_train = nltk.corpus.brown.sents()[:train_size] # %time embedder = train_fasttext(untagged_train) # Also store the vocabulary for later net_vocab = pd.Index(sorted(set(sum(untagged_train, [])))) # + train_set = 
POSDataset(train, embedder, hmm.tagset) test_set = POSDataset(test, embedder, hmm.tagset) # Network is loaded onto a GPU by default # (to run on cpu, remove .cuda() and set use_cuda below to False) net = BLSTM(100, len(hmm.tagset), 128, 2, 0.2) # - net.cuda() # %time summ = train_BLSTM(net, train_set, test_set, \ # num_epochs=40, batch_size=16, use_cuda=True, \ # print_every=1, train_summ=None, \ # opt_params={'lr': 0.1, 'momentum': 0.8}) # + net.cpu() # %time test_sents, y_true, y_predict = net.split_predict(test_set) # time this plot_train_summ(summ) # Calculate accuracy of BiLSTM on both datasets net_train = TaggingResults(net, train, net_vocab, hmm.tagset, embedder) net_test = TaggingResults(net, test, net_vocab, hmm.tagset, embedder) net_train.get_accuracy() net_test.get_accuracy() # + # STORE MODELS HERE ONCE TRAINED #enpickle(hmm, "models/hmm") #enpickle(summ, "models/blstm_train_summ") #torch.save(net, "models/blstm") # - # # Step 4: Application to Artificial Data # Generate an artificial tagged dataset to test on np.random.seed(0) # %time artificial = hmm.generate(5000) # + # Calculate the accuracy on the artifical dataset hmm_art = TaggingResults(hmm, artificial, hmm.vocab, hmm.tagset) net_art = TaggingResults(net, artificial, net_vocab, hmm.tagset, embedder) hmm_art.get_accuracy() net_art.get_accuracy() # - # Confusion matrices, conditioned on true label print("\nConfusion matrix for HMM on generated data:") print("- entry i,j = Pr(y_predict = j | y_true = i)\n") pd.DataFrame(confusion_matrix(hmm_art.res.y_true, hmm_art.res.y_predict, normalize='true'), index=hmm.tagset, columns=hmm.tagset).round(2) print("\nConfusion matrix for BiLSTM on generated data:\n") pd.DataFrame(confusion_matrix(net_art.res.y_true, net_art.res.y_predict, normalize='true'), index=hmm.tagset, columns=hmm.tagset).round(2) hmm_art.get_sent(17) net_art.get_sent(17) # # Step 5: Application to Real Data # ### Trump Tweets # + def convert_tweet(tweet): """Given a string (which may 
contain multiple sentences), splits into sentences, tokenizes each, and automatically tags with NLTK. Also removes hyperlinks and @ references.""" to_remove = r"http\S*\b|@|#" tweet = re.sub(to_remove, " ", tweet) tweet_sents = sent_tokenize(tweet) tag = lambda sent: pos_tag(word_tokenize(sent), tagset='universal') tweet_data = list(map(tag, tweet_sents)) return tweet_data # Compile a compatible training set from the downloaded tweets all_months = [pd.read_json(f"data/trump{i}.json", encoding="utf8") for i in range(8, 11)] trump_tweets = pd.concat(all_months, axis=0) trump = sum(list(map(convert_tweet, trump_tweets.text)), []) # + # Calculate the accuracy on Moby-Dick hmm_trump = TaggingResults(hmm, trump, hmm.vocab, hmm.tagset) net_trump = TaggingResults(net, trump, net_vocab, hmm.tagset, embedder) hmm_trump.get_accuracy() net_trump.get_accuracy() # - # Confusion matrices, conditioned on true label print("\nConfusion matrix for HMM on Trump data:\n") pd.DataFrame(confusion_matrix(hmm_trump.res.y_true, hmm_trump.res.y_predict, normalize='true'), index=hmm.tagset, columns=hmm.tagset).round(2) print("\nConfusion matrix for BiLSTM on Trump data:\n") pd.DataFrame(confusion_matrix(net_trump.res.y_true, net_trump.res.y_predict, normalize='true'), index=hmm.tagset, columns=hmm.tagset).round(2) hmm_trump.get_sent(9) net_trump.get_sent(9)
Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # FIFA 19 Exploratory Data Analysis # + # import necessary libraries # %matplotlib inline import pandas as pd import numpy as np import scipy import matplotlib.pyplot as plt # - # load the data df = pd.read_csv('cleaned data/fifa19data_clean_no_outliers.csv') df # correlation between overall rating and value plt.plot(df['Overall'],df['Value'],'o') # sorted list of players by rating sorted_overall_rating=df.sort_values('Overall',ascending=False).reset_index(drop=True) sorted_overall_rating # sorted list of players by potential rating who are 21 or younger younger_than_21 = df['Age']<21 potential_younger_than_21=df[younger_than_21] sorted_potential_younger_than_21=potential_younger_than_21.sort_values('Potential',ascending=False).reset_index(drop=True) sorted_potential_younger_than_21 # club who spends the most on wages club_with_most_wages = df.groupby(by='Club')['Wage'].sum() club_with_most_wages.sort_values(ascending=False).head(1) # club with the highest rated players (average rating of top 10 players) df.groupby(by='Club')['Overall'].apply(lambda grp:grp.nlargest(10).mean()).sort_values(ascending=False).head(1) # club with the highest potential (average rating of players' with the top 10 potential at each club) df.groupby(by='Club')['Potential'].apply(lambda grp:grp.nlargest(10).mean()).sort_values(ascending=False).head(1) # club with the highest potential young players (players under 21) sorted_potential_younger_than_21.groupby(by='Club')['Potential'].apply(lambda grp:grp.nlargest(10).mean()).sort_values(ascending=False).head(1) # each country's average rating of top 10 outfield players and the top rated GK no_Goalie = df['Position']!='GK' df_No_Goalie=df[no_Goalie] df_No_Goalie.groupby(by='Nationality')['Overall'].apply(lambda 
grp:grp.nlargest(10).mean()) only_Goalie = df['Position']=='GK' df_Only_Goalie = df[only_Goalie] df_Only_Goalie.groupby(by='Nationality')['Overall'].max() # average rating of top 5 players in each position df.groupby(by='Position')['Overall'].apply(lambda grp:grp.nlargest(5).mean()).sort_values(ascending=False) # potential vs age plt.xlabel('Age') plt.ylabel('Average Potential') plt.plot(df.groupby('Age')['Potential'].mean().round()) # overall rating vs age plt.xlabel('Age') plt.ylabel('Average Overall Rating') plt.plot(df.groupby('Age')['Overall'].mean().round()) # Is overall rating a good indicator of value? plt.plot(df['Overall'], df['Value'],'o') # Which player is potentially undervalued? Relationship between potential rating and current value... # (-ve correlation maybe?) plt.plot(df['Potential'], df['Value'],'o') # + # As seen from the above graph, there are a lot more people with high potential but with very low salary. # - # Identify undervalued players...so those players with the largest overall rating per million euro # (overall rating / value) df.sort_values(by=['Value','Overall'],ascending=[True,False]) # Identify players who have the potential to be of good value... df.sort_values(by=['Value','Potential'],ascending=[True,False]) # young players with the largest potential rating per million euro potential_younger_than_21.sort_values(by=['Value','Potential','Age'],ascending=[True,False,True])
Code/Data Prep/Week 3 - EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Run Daily Space-Time LISA # * Software dependencies import tools import pandas as pd import numpy as np import pysal as ps import multiprocessing as mp from sqlalchemy import create_engine import geopandas as gpd # * Data dependencies # + db_link ='/Users/dani/AAA/LargeData/adam_cell_phone/a10.db' shp_link = '../data/a10/a10.shp' # To be created in the process: ashp_link = '/Users/dani/Desktop/a10_agd_maxp.shp' engine = create_engine('sqlite:///'+db_link) # - # * Read data in # + # %%time a10 = pd.read_sql_query('SELECT gridcode, date_time, trafficerlang ' 'FROM data ', engine, parse_dates=['date_time']) months = a10['date_time'].apply(lambda x: str(x.year) + '-' + str(x.month)) hours = a10['date_time'].apply(lambda x: str(x.hour)) order = ps.open(shp_link.replace('.shp', '.dbf')).by_col('GRIDCODE') areas = pd.Series([poly.area for poly in ps.open(shp_link)], \ index=order) areas = areas * 1e-6 # Sq. Km # - # * MaxP # # This step removes an area with no data and joins very small polygons to adjacent ones with density as similar as possible. This is performed through an aggregation using the Max-P algorithm # + shp = gpd.read_file(shp_link).set_index('GRIDCODE') overall = a10.groupby('gridcode').mean() overall['area (Km2)'] = areas overall['erldens'] = overall['trafficerlang'] / overall['area (Km2)'] overall = gpd.GeoDataFrame(overall, geometry=shp['geometry'], crs=shp.crs)\ .dropna() # W wmxp = ps.queen_from_shapefile(shp_link, idVariable='GRIDCODE') wmxp.transform = 'R' wmxp.transform = 'O' # Polygon `49116` does not have data. Remove. 
wmxp = ps.w_subset(wmxp, [i for i in wmxp.id_order if i!=49116]) # Information matrix with hourly average day x = a10.assign(hour=hours).groupby(['gridcode', 'hour'])\ .mean()['trafficerlang']\ .unstack()\ .reindex(wmxp.id_order) # Areas for the MaxP mxp_a = overall.loc[wmxp.id_order, 'area (Km2)'].values # - # %%time np.random.seed(1234) mxp = ps.Maxp(wmxp, x.values, 0.05, mxp_a, initial=1000) labels = pd.Series(mxp.area2region).apply(lambda x: 'a'+str(x)) # * Aggregate polygons # + aggd = overall.groupby(labels).sum() aggd['erldens'] = aggd['trafficerlang'] / aggd['area (Km2)'] ag_geo = overall.groupby(labels)['geometry'].apply(lambda x: x.unary_union) aggd_shp = gpd.GeoDataFrame(aggd, geometry=ag_geo, crs=overall.crs) aggd_shp.reset_index().to_file(ashp_link) ag_a10 = a10.assign(hour=hours, month=months)\ .set_index('gridcode')\ .assign(labels=labels)\ .groupby(['month', 'hour', 'labels', 'date_time'])[['trafficerlang']].sum()\ .reset_index() # - # * $ST-W$ # + # W aw = ps.queen_from_shapefile(ashp_link, idVariable='index') aw.transform = 'R' aw.transform = 'O' # Space-Time W ats = ag_a10['hour'].unique().shape[0] # %time astw = tools.w_stitch_single(aw, ats) astw.transform = 'R' # - # * Expand areas aareas = aggd_shp.reset_index().set_index('index') astw_index = pd.Series(astw.id_order, \ index=[i.split('-')[1] for i in astw.id_order], \ name='astw_index') astareas = aareas.reset_index()\ .join(astw_index, on='index')\ .drop('index', axis=1)\ .set_index('astw_index')\ [['area (Km2)']] # * Reshape for daily runs daily = ag_a10.drop('month', axis=1)\ .assign(h_gc=ag_a10['hour']+'-'+ag_a10['labels'])\ .join(astareas, on='h_gc')\ .assign(date=ag_a10['date_time'].apply(lambda x: str(x.date())))\ .set_index(['date', 'hour', 'labels']) daily['erldens'] = daily['trafficerlang'] / daily['area (Km2)'] # * Run in parallel # + permutations = 1 g = daily.groupby(level='date') tasks = [(i, astw, astareas, permutations, id) for id, i in g] #pool = mp.Pool(mp.cpu_count()) # 
%time tasks = map(tools.child_lisa, tasks)
lisa_clusters = pd.concat(tasks, axis=1)
#lisa_clusters.to_csv('../data/lisa_clusters_%ip.csv'%permutations)
code/run_daily.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.1 64-bit (''bpt'': conda)' # language: python # name: python39164bitbptconda7805b3f5d58e4b658b79cb94739371e6 # --- # # Predict Sex # # This notebook goes through a simple binary classification example, explaining general library functionality along the way. # Within this notebook we make use of data downloaded from Release 2.0.1 of the the ABCD Study (https://abcdstudy.org/). # This dataset is openly available to researchers (after signing a data use agreement) and is particularly well suited # towards performing neuroimaging based ML given the large sample size of the study. # # Within this notebook we will be performing binary classification predicting sex assigned at birth from tabular ROI structural MRI data. # # ## Load Data # + import BPt as bp import pandas as pd import os from warnings import simplefilter from sklearn.exceptions import ConvergenceWarning simplefilter("ignore", category=ConvergenceWarning) # - def load_from_rds(names, eventname='baseline_year_1_arm_1'): data = pd.read_csv('data/nda_rds_201.csv', usecols=['src_subject_id', 'eventname'] + names, na_values=['777', 999, '999', 777]) data = data.loc[data[data['eventname'] == eventname].index] data = data.set_index('src_subject_id') data = data.drop('eventname', axis=1) # Obsificate subject ID for public example data.index = list(range(len(data))) # Return as pandas DataFrame cast to BPt Dataset return bp.Dataset(data) # This way we can look at all column available all_cols = list(pd.read_csv('data/nda_rds_201.csv', nrows=0)) # We can search through all column to find which columns we actually want to load. We will start with the brain imaging features. 
# + feat_keys = {'thick': 'smri_thick_cort.destrieux_g.', 'sulc': 'smri_sulc_cort.destrieux_g.', 'area': 'smri_area_cort.destrieux_g.', 'subcort': 'smri_vol_subcort.aseg_'} feat_cols = {key: [c for c in all_cols if feat_keys[key] in c] for key in feat_keys} all_cols = sum(feat_cols.values(), []) # For example feat_cols['thick'][:10] # - # We also need our target variable, in this case sex. # # Let's load household income too as a non input, i.e., a variable we won't use directly as input. target = 'sex' non_inputs = ['household.income'] data = load_from_rds(all_cols + [target] + non_inputs ) data.verbose = 1 data # Next we need to tell the dataset a few things about sex, namely that it is a binary variable, and that it is our target variable. data.to_binary('sex', inplace=True) data.set_target('sex', inplace=True) data['target'] # We need to do something simillar for household income, tell it that it is a categorical variable, and has role non input data = data.ordinalize('household.income').set_role('household.income', 'non input') # Let's look at some NaN info data.nan_info() # What happens now if we drop any subjects with more than 1% of their loaded columns with NaN values data = data.drop_subjects_by_nan(threshold=.01) data.nan_info() # That greatly reduces the number of remaining missing values we have. Next, let's consider outlier filtering as... data.skew().sort_values() # We don't even care about these measurements data = data.drop_cols(exclusions='aseg_wm.hypointensities') data = data.filter_outliers_by_std(n_std=10) data.plot('target') # Note we have some missing data in the target variable, we can drop these. data = data.drop_nan_subjects('target') # Let's lastly split our data in a train test split. 
train_data, test_data = data.test_split(size=.2, random_state=2) train_data # ## Evaluating Models # # We will start by evaluating some different choices of pipelines / models on just our training data ps = bp.ProblemSpec(scorer=['roc_auc'], n_jobs=16) ps model_pipeline = bp.ModelPipeline(model=bp.Model('dt')) model_pipeline.print_all() # We can see that the their are a few default values, specifically we have a set of default imputers, one for replacing all float variables with the mean value, and one for replacing all categorical / binary variables (if any, otherwise ignored) with the median values. # # Next, we have a just standard scaler, which scales all features to have mean 0, std of 1. # # Then, we have our decision tree. # # Lastly, we have no param_search specified. # Now that we have an initial model, we are ready to use the Evaluate function # + results = bp.evaluate(pipeline=model_pipeline, dataset=train_data, problem_spec=ps, cv=5) results # - # Let's try a just linear model now (Logistic Regression, since binary), updating our model within our model_pipeline first. # + model_pipeline.model = bp.Model('linear') results = bp.evaluate(pipeline=model_pipeline, dataset=train_data, problem_spec=ps, cv=5) results # - # ## Examing Evaluation Results # The returned BPtEvaluator, which we store in variable results, has a bunch of different extra functionality built in for further examining the results of the evaluation. We will explore some of those functions here. # # We can look fisrt at for example the raw predictions made: # + preds = results.get_preds_dfs() # Just first fold preds[0] # - # Sometimes it can be useful to look at predictions made as resitricted to only a group of subjects. Here's where we can use that household income information. 
# See how these values are coded train_data.encoders['household.income'] # + # First we want to get just the subset of subjects # from let's say the first fold and just under 50K fold_preds = preds[0] val_subjs = fold_preds.index vs = bp.ValueSubset('household.income', '[<50K]', decode_values=True) # Specify the intersection of those subsets of subjects subjs = bp.Intersection([val_subjs, vs]) # Get the specific subject values subset_subjects = train_data.get_subjects(subjs) subset_preds = fold_preds.loc[subset_subjects] subset_preds # - # Now let's say we want to look at roc auc on just this subset from sklearn.metrics import roc_auc_score roc_auc_score(subset_preds['y_true'], subset_preds['predict_proba_1']) # One thing to note about post-stratify predictions by a group is that it is just a diagnostic tool. For example if we found that a sub group did much worse, it lets us know about the problem, but doesn't address it. # # That said, the above code may be useful for getting more famillar with the different internal saved attributes of the BPtEvaluator, but is it the easiest way to get this breakdown? No. Actually their is a dedicated function to breaking down results by a subset, let's check it out. subsets = results.subset_by(group='household.income', dataset=train_data) list(subsets) subsets['[<50K]'] # Each of these objects can be treated the same as the main BPtEvaluator object, except with essentially a subset of validation subjects. I.e., let's look at the roc_auc we calculated vs. the saved one here for fold 0. subsets['[<50K]'].scores['roc_auc'][0] # What if we wanted to say plot a confusion matrix? Well it seems like scikit-learn has a method dedicated to that, let's see if we can use it. # # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html # # So for this function we need a trained estimator and then the validation X and y, let's grab those for just the first fold. 
# + from sklearn.metrics import plot_confusion_matrix fold = 0 estimator = results.estimators[fold] X, y = train_data.get_Xy(ps=results.ps, subjects=results.val_subjects[fold]) plot_confusion_matrix(estimator, X, y) # - # How would this change if we wanted to just plot the confusion matrix for that subset of subjects we looked at before? We just need to specify a different set of subjects, which we already calculated, so... # + X, y = train_data.get_Xy(ps=results.ps, subjects=subjs) plot_confusion_matrix(estimator, X, y) # - # Or of course we could just the the Subset evaluator. # We can also look at feature importances as averaged across all 5 folds. results.get_fis(mean=True).sort_values() # ## LinearResidualizer # # What we find here is a bit trivial. Basically just boys have bigger brains than girls ... That said, this is just an example. What if we say residualize in a nested way for intracranial volume? # # + from BPt.extensions import LinearResidualizer resid = LinearResidualizer(to_resid_df=data[['smri_vol_subcort.aseg_intracranialvolume']]) resid_scaler = bp.Scaler(resid, scope='float') resid_pipeline = bp.ModelPipeline(scalers=[bp.Scaler('robust'), resid_scaler], model=bp.Model('linear')) resid_pipeline.print_all() # + results = bp.evaluate(pipeline=model_pipeline, dataset=train_data, problem_spec=ps, cv=5) results # - results.get_fis(mean=True).sort_values() # Notably residualizing is far from perfect..., it is interesting though how the results change when we add the residualization. # # ## Default Pipelines # # # We can just ignore the issue for now, and explore some different pipelines (not residualizing anymore). We are going to just try some off the shelf default pipelines for simplicity. 
# + from BPt.default.pipelines import pipelines # Look at choices print(list(pipelines)) # Look at the pipeline pipelines['elastic_pipe'] # - results = bp.evaluate(pipeline=pipelines['elastic_pipe'], dataset=train_data, problem_spec=ps, cv=5) results # Look at the pipeline pipelines['lgbm_pipe'] results = bp.evaluate(pipeline=pipelines['lgbm_pipe'], dataset=train_data, problem_spec=ps, scorer=['roc_auc', 'balanced_accuracy'], cv=5) results # ## Employ Test Set # # How about now applying the test set, which can give us another estimate of generalizibility. One good strategy is re-train one model on the full training set, then we can apply it to the testing set, using say the elastic net based model. # + from sklearn.metrics import roc_auc_score # Get as a sklearn-style estimator estimator = pipelines['elastic_pipe'].build(train_data, ps) # Get train data X_train, y_train = train_data.get_Xy(ps) # Fit on train data estimator.fit(X_train, y_train) # Get test data X_test, y_test = test_data.get_Xy(ps) # Get test predictions test_preds = estimator.predict_proba(X_test) # Get roc_auc_score roc_auc_score(y_test, test_preds[:, 1])
doc/source/user_guide/sex.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 15403, "status": "ok", "timestamp": 1630850135776, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="azRo8Wmm2azU" outputId="f22c3217-bb9c-49be-f434-40304450c366" from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 8402, "status": "ok", "timestamp": 1630850017862, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="-JQoyE1x2joS" outputId="413c6671-c62d-45b7-8bbb-4bbb5797b5f5" # HuggingFace transformers 설치 # !pip install transformers # + executionInfo={"elapsed": 4919, "status": "ok", "timestamp": 1630850024436, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="P8rLDCcE2kpN" import pandas as pd import torch from torch.nn import functional as F from torch.utils.data import DataLoader, Dataset from transformers import AutoTokenizer, AutoModel, AdamW import numpy as np from tqdm.notebook import tqdm # + executionInfo={"elapsed": 255, "status": "ok", "timestamp": 1630850026277, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="r4RVS9do2mcy" # GPU 사용 device = torch.device("cuda") # + executionInfo={"elapsed": 2, "status": "ok", "timestamp": 1630850027353, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="SoIcUqhQ2n9q" class TrainDataset(Dataset): def __init__(self, dataset): self.tokenizer = AutoTokenizer.from_pretrained("beomi/KcELECTRA-base") self.sentences = 
[str([i[0]]) for i in dataset] self.labels = [np.int32(i[1]) for i in dataset] def __len__(self): return (len(self.labels)) def __getitem__(self, i): text = self.sentences[i] y = self.labels[i] inputs = self.tokenizer( text, return_tensors='pt', truncation=True, max_length=64, pad_to_max_length=True, add_special_tokens=True ) input_ids = inputs['input_ids'][0] attention_mask = inputs['attention_mask'][0] return input_ids, attention_mask, y # + executionInfo={"elapsed": 3054, "status": "ok", "timestamp": 1630850253831, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="LAiR9raU2sBL" from torch import nn model = AutoModel.from_pretrained("beomi/KcELECTRA-base", num_labels=363) # model.classifier = torch.nn.Sequential( # nn.Linear(768, 768, bias=True), # nn.Dropout(p=0.1, inplace=False), # nn.Linear(768, 7, bias=True)) model = model.to(device) # 한번 실행해보기 # text, attention_mask, y = train_dataset[0] # model(text.unsqueeze(0).to(device), attention_mask=attention_mask.unsqueeze(0).to(device)) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 235, "status": "ok", "timestamp": 1630850256443, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="0Rrcq2Ax2xiw" outputId="35966cde-a375-4ad1-b992-700d4a4dd817" # 모델 레이어 보기 model # + executionInfo={"elapsed": 262, "status": "ok", "timestamp": 1630827200456, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="a2Q4bdYN2y3W" # batch_size = 32 # epochs = 30 # + executionInfo={"elapsed": 1, "status": "ok", "timestamp": 1630827200703, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="71exC31E21Rb" # optimizer = AdamW(model.parameters(), lr=3e-5) # + colab={"base_uri": "https://localhost:8080/", "height": 370} executionInfo={"elapsed": 632, "status": 
"error", "timestamp": 1630850260305, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="hq7mk8dB3AcF" outputId="4b2c9c85-6d9d-425a-e057-29d1fd849ec0" model.load_state_dict(torch.load('/content/drive/MyDrive/Colab Notebooks/model.pt')) model.eval() # + executionInfo={"elapsed": 243, "status": "ok", "timestamp": 1630827217154, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="inA6NaUZ3Cka" def predict(sentence): data = [sentence, '0'] dataset_another = [data] logits = 0 another_test = TrainDataset(dataset_another) test_dataloader = torch.utils.data.DataLoader(another_test) model.eval() for input_ids_batch, attention_masks_batch, y_batch in test_dataloader: y_batch = y_batch.long().to(device) out = model(input_ids_batch.to(device), attention_mask=attention_masks_batch.to(device))[0] out = out[:, -1, :] for i in out: logits = i logits = logits.detach().cpu().numpy() logits = np.argmax(logits) return logits # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2939, "status": "ok", "timestamp": 1630827237422, "user": {"displayName": "\ubc15\ud574\ubbf8", "photoUrl": "", "userId": "17919108415319245320"}, "user_tz": -540} id="y2Sy2W7v3HgR" outputId="a82149bb-34e9-4c5e-d693-cc6708f5b9ca" predict("나한테 어울리는 복지 줘") # + id="7VYiPhlt4IX7"
ai/KcELECTRAchatbot/Service.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches import numpy as np # %matplotlib inline def exponential_mechanism(data, domain, quality_function, eps, bulk=False, for_sparse=False): """Exponential Mechanism exponential_mechanism ( data , domain , quality function , privacy parameter ) :param data: list or array of values :param domain: list of possible results :param quality_function: function which get as input the data and a domain element and 'qualifies' it :param eps: privacy parameter :param bulk: in case that we can reduce run-time by evaluating the quality of the whole domain in bulk, the procedure will be given a 'bulk' quality function. meaning that instead of one domain element the quality function get the whole domain as input :param for_sparse: in cases that the domain is a very spared one, namely a big percent of the domain has quality 0, there is a special procedure called sparse_domain. That procedure needs, beside that result from the given mechanism, the total weight of the domain whose quality is more than 0. If that is the case Exponential-Mechanism will return also the P DF before the normalization. 
:return: an element of domain with approximately maximum value of quality function """ # calculate a list of probabilities for each element in the domain D # probability of element d in domain proportional to exp(eps*quality(data,d)/2) if bulk: qualified_domain = quality_function(data, domain) domain_pdf = [np.exp(eps * q / 2) for q in qualified_domain] else: domain_pdf = [np.exp(eps * quality_function(data, d) / 2) for d in domain] total_value = float(sum(domain_pdf)) domain_pdf = [d / total_value for d in domain_pdf] normalizer = sum(domain_pdf) # for debugging and other reasons: check that domain_cdf indeed defines a distribution # use the uniform distribution (from 0 to 1) to pick an elements by the CDF if abs(normalizer - 1) > 0.001: raise ValueError('ERR: exponential_mechanism, sum(domain_pdf) != 1.') # accumulate elements to get the CDF of the exponential distribution domain_cdf = np.cumsum(domain_pdf).tolist() # pick a uniformly random value on the CDF pick = np.random.uniform() # return the index corresponding to the pick # take the min between the index and len(D)-1 to prevent returning index out of bound result = domain[min(np.searchsorted(domain_cdf, pick), len(domain)-1)] # in exponential_mechanism_sparse we need also the total_sum value if for_sparse: return result, total_value return result def generate_labeled(size, dimension, trials=1000, pvals=0.5): ps = pvals*dimension sample = np.array([np.random.binomial(t,pvals,size) for t in [trials]*dimension]) sample = np.column_stack((sample)) sample = sample - tuple(sample[:,i].min() for i in range(dimension)) mins = [min(sample[:,i]) for i in range(dimension)] maxs = [max(sample[:,i]) for i in range(dimension)] thresholds = [np.random.uniform(int(mins[i]/3+2*maxs[i]/3), int(mins[i]/3+2*maxs[i]/3), 1) for i in range(dimension)] positives = np.array([x for x in sample if all(x[i] <= thresholds[i] for i in range(dimension))]) negatives = np.array([x for x in sample if any(x[i] > thresholds[i] for i in 
range(dimension))]) return positives, negatives # + def aar(data, domain, dimension, margins_size=0, beta=0.1, eps=0.5, delta=0.1, t=0.4, test=False): def q(data, x): if min(data) <= x <= max(data): return min(data[data <= x].shape[0], data[data >= x].shape[0]) else: return 0 picks = np.zeros(dimension) d = data if not margins_size: margins_size = 150*int(np.log(100*domain**2)) if test: interiors = [np.array(1) for _ in range(dimension)] margins = [np.array(1) for _ in range(dimension)] for i in range(dimension): noisy_margin = int(np.ceil(np.random.laplace(margins_size, margins_size*t, 1))) margin = d[np.argpartition(d[:,i],(-1)*noisy_margin)][(-1)*noisy_margin:] tnoise = int(t**2 * noisy_margin) interior = margin[np.argpartition(margin[:,i],tnoise)][:tnoise] pick = exponential_mechanism(interior[:,i], np.arange(0,domain), q, eps) new_d = np.array([x for x in d if x[i] < pick]) d = new_d picks[i] = pick if test: margins[i] = margin interiors[i] = interior if test: return picks, margins, interiors else: return picks # - def false_negatives(positives, points): return sum(1 for x in positives if any(x[i] > points[i] for i in range(len(points)))) / positives.shape[0]
.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Implement an algorithm to find the kth to last element of a singly linked list class Node: def __init__(self, value): self.value = value self.next = None class LinkedList: def __init__(self, head): self.head = head def insert_node(self, value): n = self.head while(n.next is not None): n = n.next n.next = Node(value) # Recursive solution
Python/LinkedList/return_kth_to_last.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # 10.3. Computing the autocorrelation of a time series import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import io import requests import zipfile url = ('https://github.com/ipython-books/' 'cookbook-2nd-data/blob/master/' 'babies.zip?raw=true') r = io.BytesIO(requests.get(url).content) zipfile.ZipFile(r).extractall('babies') # %ls babies files = [file for file in os.listdir('babies') if file.startswith('yob')] years = np.array(sorted([int(file[3:7]) for file in files])) data = {year: pd.read_csv('babies/yob%d.txt' % year, index_col=0, header=None, names=['First name', 'Gender', 'Number']) for year in years} # + podoc={"output_text": "Output"} data[2016].tail() # - def get_value(name, gender, year): """Return the number of babies born a given year, with a given gender and a given name.""" dy = data[year] try: return dy[dy['Gender'] == gender] \ ['Number'][name] except KeyError: return 0 def get_evolution(name, gender): """Return the evolution of a baby name over the years.""" return np.array([get_value(name, gender, year) for year in years]) def autocorr(x): result = np.correlate(x, x, mode='full') return result[result.size // 2:] def autocorr_name(name, gender, color, axes=None): x = get_evolution(name, gender) z = autocorr(x) # Evolution of the name. axes[0].plot(years, x, '-o' + color, label=name) axes[0].set_title("Baby names") axes[0].legend() # Autocorrelation. axes[1].plot(z / float(z.max()), '-' + color, label=name) axes[1].legend() axes[1].set_title("Autocorrelation") # + podoc={"output_text": "<matplotlib.figure.Figure at 0xcae9048>"} fig, axes = plt.subplots(1, 2, figsize=(12, 4)) autocorr_name('Olivia', 'F', 'k', axes=axes) autocorr_name('Maria', 'F', 'y', axes=axes) # -
chapter10_signal/03_autocorrelation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="7k4czsKzJGF-" colab_type="code" colab={} import pandas as pd import numpy as np # + id="40ZrnptJSiqo" colab_type="code" outputId="f1ccfcdc-9dfe-4ee5-f955-9da42d38173e" executionInfo={"status": "ok", "timestamp": 1590669885932, "user_tz": 180, "elapsed": 1156, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 417} apps = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Projetos Autonomos/Machine Learn/Recomendador de app na APPLE STORE/AppleStore.csv') apps_descricao = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Projetos Autonomos/Machine Learn/Recomendador de app na APPLE STORE/appleStore_description.csv') apps.head() # + [markdown] id="SfhvFvx2UxUv" colab_type="text" # *appleStore.csv* # # # "id": ID do aplicativo # # "track_name": nome do aplicativo # # "size_bytes": tamanho (em bytes) # # "currency": tipo de moeda # # "preço": valor do preço # # "ratingcounttot": contagens de classificação do usuário (para todas as versões) # # "ratingcountver": contagens de classificação do usuário (para versão atual) # # "user_rating": valor médio da classificação do usuário (para todas as versões) # # "userratingver": valor médio da classificação do usuário (para versão atual) # # "ver": código da versão mais recente # # "cont_rating": classificação do conteúdo # # "prime_genre": gênero principal # # "sup_devices.num": Número de dispositivos de suporte # # "ipadSc_urls.num": número de capturas de tela exibidas para exibição # # "lang.num": número de idiomas suportados # # "vpp_lic": licenciamento baseado em dispositivo Vpp ativado # + [markdown] id="J4mAtpc5VT4X" colab_type="text" # 
# *appleStore_description.csv* # # id : App ID # # track_name: Application name # # size_bytes: Memory size (in Bytes) # # app_desc: Application description # + [markdown] id="9mMI601Xs1kK" colab_type="text" # # Filtrando dados # + id="fBLAYqniVUsh" colab_type="code" outputId="3127f82f-ed1c-473b-eab7-28d7992621ae" executionInfo={"status": "ok", "timestamp": 1590669885933, "user_tz": 180, "elapsed": 1141, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} apps = apps.drop(columns = ['sup_devices.num','ipadSc_urls.num','lang.num','vpp_lic','Unnamed: 0']) apps.head() # + id="rxM8s3Z8WZGD" colab_type="code" outputId="f9b6084e-4ee9-42e7-ed3e-c287fa7fdd38" executionInfo={"status": "ok", "timestamp": 1590669885934, "user_tz": 180, "elapsed": 1128, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} apps.columns = ['ID', 'nome','tamanho_bytes','moeda','preco', 'avaliacoes','avaliacoes_ultima_versao','nota', 'nota_versao','versao','classificacao','genero'] apps.head() # Rever depois a coluna nota e media_avaliacoes tem algo de errado # + id="8pzjHzSzq4dO" colab_type="code" outputId="59fb30c1-ebb9-40d4-f269-3e99b0857663" executionInfo={"status": "ok", "timestamp": 1590669885935, "user_tz": 180, "elapsed": 1112, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} apps_descricao.head() # + id="2c6CvmClrT-i" colab_type="code" outputId="ea2ae54f-b840-4dc8-cf24-69aa67a716d3" executionInfo={"status": "ok", "timestamp": 1590669886273, "user_tz": 
180, "elapsed": 1434, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} apps_descricao.columns = ['ID','nome','tamanho_bytes', 'descricao'] # Como tamanho_bytes e nome já existe em app, vou dropar no app, e deixar a informação apenas nesse. o ID necessita ficar em ambos apps = apps.drop(columns = ['nome','tamanho_bytes']) apps.head() # + id="juWWB-1ascN0" colab_type="code" outputId="b170244a-d0ce-4b2f-818d-0db9fbd4021a" executionInfo={"status": "ok", "timestamp": 1590669886274, "user_tz": 180, "elapsed": 1419, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} apps_descricao.head() # + [markdown] id="i-C5axg4s7Tt" colab_type="text" # Transforma bytes para megabytes(kb para mb) # + id="mGT7Pl8PtDHN" colab_type="code" outputId="32c5d6d3-d8ea-44a3-cbf4-9a1b3ce4860d" executionInfo={"status": "ok", "timestamp": 1590669886275, "user_tz": 180, "elapsed": 1406, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} # 1 megabytes = 1kb * 1e-6 apps_descricao['tamanho_megabytes'] = apps_descricao['tamanho_bytes'] * 1e-6 apps_descricao = apps_descricao.drop(columns = ['tamanho_bytes']) apps_descricao.head() # + id="CcTFRlT9u2oi" colab_type="code" outputId="4c1cd30d-88aa-4d3f-dcc6-3bd417a04456" executionInfo={"status": "ok", "timestamp": 1590669886276, "user_tz": 180, "elapsed": 1392, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": 
"12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} apps.head() # + id="atPgda5XwGeK" colab_type="code" outputId="af892446-98e5-4867-b8dd-9a16a0321c33" executionInfo={"status": "ok", "timestamp": 1590669886277, "user_tz": 180, "elapsed": 1376, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 195} gratuito = apps['preco'] == 0.0 apps['gratuito'] = gratuito apps.tail() # + id="9AsOKekVTkFH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 343} outputId="abfab812-7392-4772-ea2b-e2e751964dda" executionInfo={"status": "ok", "timestamp": 1590670301074, "user_tz": 180, "elapsed": 627, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} apps_descricao = apps_descricao.drop(columns = ['ID'],errors='ignore') apps = apps.drop(columns = ['ID','moeda'],errors='ignore') apps_descricao['preco'] = apps['preco'] apps_descricao['nota'] = apps['nota'] apps_descricao.head(10) # + [markdown] id="vYFtNnNPypFK" colab_type="text" # # Classificações:. 
# # * Genero # * Rentaveis # * Top gratuitos # * Melhores notas # * Melhores preços # * Tamanho do arquivo # + [markdown] id="H8LyqnWQzqBy" colab_type="text" # # Genero # * Filtrar pelo genero # * Se o usuario quer gratuito ou não # * Classificar em ordem pela nota # + id="JRMw8YOXuAAn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="25604b34-0eef-4edd-d32a-800fe227c445" executionInfo={"status": "ok", "timestamp": 1590670325926, "user_tz": 180, "elapsed": 597, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} generos = apps.genero.unique().tolist() generos # + id="HWXNL2sQsexG" colab_type="code" outputId="9083a412-9145-41bc-dda3-c863bb5c6dce" executionInfo={"status": "ok", "timestamp": 1590670327111, "user_tz": 180, "elapsed": 577, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} colab={"base_uri": "https://localhost:8080/", "height": 284} apps.describe() # + id="a1x2sRc3O8Lg" colab_type="code" colab={} def achar_valores(df): index = df.index.tolist() return apps_descricao.iloc[index] # + id="UW7lRUPPz30G" colab_type="code" colab={} def recomendacao_por_genero(genero_escolhido,app_gratuito = None,apps = apps, k = 10): apps_famosos = apps.query('avaliacoes >= avaliacoes.mean()') if (app_gratuito): apps_famosos = apps_famosos.query('gratuito == True') apps_famosos = apps_famosos.query(f'genero == "{genero_escolhido}"') apps_recomendados = apps_famosos.sort_values('nota', ascending = False).head(k) return achar_valores(apps_recomendados) # + id="MAUQx32rMWWZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 491} outputId="3049b801-cbc8-4e0e-a3e9-c15361f5d191" executionInfo={"status": "ok", "timestamp": 1590671254143, "user_tz": 180, "elapsed": 612, "user": 
{"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} # Pedi o genero dos jogos, indiferente se é pago ou gratuito e quero uma sample de 15 amostras recomendacao_por_genero(generos[0], k = 15) # + id="mUkNCGAQWgZD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 343} outputId="20cf47fe-3f2e-45e7-f9ad-9fbf6ac5ffac" executionInfo={"status": "ok", "timestamp": 1590671259805, "user_tz": 180, "elapsed": 634, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} recomendacao_por_genero(generos[2]) # + id="_Y9h4Yv9WVi_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="09e4429a-fd19-41de-8f0f-cf27716fa637" executionInfo={"status": "ok", "timestamp": 1590671333298, "user_tz": 180, "elapsed": 602, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} recomendacao_por_genero(generos[2],app_gratuito = True, k = 5) # + [markdown] id="z2RJL5VdZtEY" colab_type="text" # # Rentaveis # # * Mais votos # * Ter um grande numero de avaliações # * Ter um grande numero de avaliações na ultima versão # * Nota da ultima versão tem que ser boa # * Ordenar por nota total # # + [markdown] id="wjqoauvZb13t" colab_type="text" # Celulas anteriores foram rodadas novamente por questão de organização # + id="KbnTjqIOd4c0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="89cc35b8-3101-49a8-b9df-655f8d2c28aa" executionInfo={"status": "ok", "timestamp": 1590672484883, "user_tz": 180, "elapsed": 655, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": 
"12255835580949425627"}} apps.head(3) # + id="Rt6kPDqEd4tb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="b72499cd-be68-4914-9fe5-33a0b7ff6ff0" executionInfo={"status": "ok", "timestamp": 1590672485783, "user_tz": 180, "elapsed": 635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} apps_descricao.head(3) # + id="q7lTIIpWeYz0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="538dc5a0-1afd-4d2f-edc1-e309f254c5d8" executionInfo={"status": "ok", "timestamp": 1590672599179, "user_tz": 180, "elapsed": 634, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} apps.describe() # + colab_type="code" outputId="6c11e15a-17c3-4d5b-c7fe-33828c453d4f" executionInfo={"status": "ok", "timestamp": 1590671910174, "user_tz": 180, "elapsed": 752, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} id="o9GIbDeAbxU7" colab={"base_uri": "https://localhost:8080/", "height": 403} generos = apps.genero.unique().tolist() generos # + colab_type="code" id="hEeqesqtb0NV" colab={} def achar_valores(df): index = df.index.tolist() return apps_descricao.iloc[index] # + colab_type="code" id="sECPjI9rdCZq" colab={} def recomendacao_rentaveis(genero_escolhido = None,app_gratuito = None,apps = apps, k = 10): apps_famosos = apps.query('avaliacoes >= avaliacoes.mean()') apps_famosos = apps.query('avaliacoes_ultima_versao >= avaliacoes_ultima_versao.std() and nota_versao >= nota_versao.std()') if (app_gratuito): apps_famosos = apps_famosos.query('gratuito == True') if (genero_escolhido): apps_famosos = apps_famosos.query(f'genero == "{genero_escolhido}"') 
apps_recomendados = apps_famosos.sort_values('nota', ascending = False).head(k) return achar_valores(apps_recomendados) #return apps_recomendados # + id="vkJSBancdb7j" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 639} outputId="6779fa5f-2e39-497a-94ed-2338db0e3aa4" executionInfo={"status": "ok", "timestamp": 1590673147292, "user_tz": 180, "elapsed": 925, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} recomendacao_rentaveis(genero_escolhido = 0, k = 20) # + id="BelIBMxxfSQT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 639} outputId="05560670-ac88-4220-f9f7-ad0e25801a29" executionInfo={"status": "ok", "timestamp": 1590673175688, "user_tz": 180, "elapsed": 793, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} recomendacao_rentaveis(app_gratuito = True, k = 20) # + id="Kw7H4jsFg65X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 77} outputId="f3d44d0e-144b-4909-c965-c8371d6ac5cc" executionInfo={"status": "ok", "timestamp": 1590673358973, "user_tz": 180, "elapsed": 685, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgViXuMODwoV89IoexbPI224IK1--RBLwo7MffLFg=s64", "userId": "12255835580949425627"}} recomendacao_rentaveis(genero_escolhido = 'Finance', k = 20) # Apenas 1 app atende os requisitos # + [markdown] id="6GGOBFVlhuRG" colab_type="text" # # Melhores preços # + id="3uqLEeoLhxqL" colab_type="code" colab={} # + [markdown] id="6dO0_z4Uh3V2" colab_type="text" # # Filtrar pelo tamanho do arquivo # + id="CzUOU6pRh64a" colab_type="code" colab={}
Recomendador de aplicativos APPLE STORE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="_xGwQdv44OkO" # # 머신 러닝 교과서 3판 # + [markdown] id="ZDRcmK8K4OkU" # # 12장 - 다층 인공 신경망을 밑바닥부터 구현 # + [markdown] id="5zzcJG584OkV" # **아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.** # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch12/ch12.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch12/ch12.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # </table> # + [markdown] id="Rh7AAS9i4OkV" # ### 목차 # + [markdown] id="zIfT2P-D4OkV" # - 인공 신경망으로 복잡한 함수 모델링 # - 단일층 신경망 요약 # - 다층 신경망 구조 # - 정방향 계산으로 신경망 활성화 출력 계산 # - 손글씨 숫자 분류 # - MNIST 데이터셋 구하기 # - 다층 퍼셉트론 구현 # - 인공 신경망 훈련 # - 로지스틱 비용 함수 계산 # - 역전파 알고리즘 이해 # - 역전파 알고리즘으로 신경망 훈련 # - 신경망의 수렴 # - 신경망 구현에 관한 몇 가지 첨언 # - 요약 # + [markdown] id="v0ItJ7A04OkW" # <br> # <br> # + id="zvyRuFKr4OkW" from IPython.display import Image # + [markdown] id="ORtCZtmt4OkW" # # 인공 신경망으로 복잡한 함수 모델링 # + [markdown] id="zZ1axgpt4OkX" # ... 
# + [markdown] id="agpO4o5J4OkX" # ## 단일층 신경망 요약 # + colab={"base_uri": "https://localhost:8080/", "height": 304} id="1k-XISPU4OkX" outputId="ee44a915-89d8-4607-b3da-96db2b6877fc" Image(url='https://git.io/JLdrS', width=600) # + [markdown] id="OWCJE5nw4OkY" # <br> # <br> # + [markdown] id="fhENAYu74OkY" # ## 다층 신경망 구조 # + colab={"base_uri": "https://localhost:8080/", "height": 367} id="umM0VuIx4OkZ" outputId="5294b335-984d-4efe-c184-0bdd827d1707" Image(url='https://git.io/JLdrx', width=600) # + colab={"base_uri": "https://localhost:8080/", "height": 365} id="HoN06CDB4OkZ" outputId="656ad38d-3f0d-4b93-cc86-5780e4ce9afa" Image(url='https://git.io/JLdrp', width=500) # + [markdown] id="o7Tzuk9m4OkZ" # <br> # <br> # + [markdown] id="xDU-ewSp4Oka" # ## 정방향 계산으로 신경망 활성화 출력 계산 # + colab={"base_uri": "https://localhost:8080/", "height": 355} id="REJ94FgH4Oka" outputId="219718f4-47bd-419c-e04a-f2704c641978" Image(url='https://git.io/JLdoe', width=500) # + [markdown] id="qMOAw-WC4Okb" # 사이킷런을 사용해 MNIST 데이터를 적재하려면 다음 코드의 주석을 해제하고 실행하세요. # + colab={"base_uri": "https://localhost:8080/", "height": 70} id="Conf1Omr4Okb" outputId="91046040-93d2-4061-c74c-5046a0ab54ca" """ from sklearn.datasets import fetch_openml from sklearn.model_selection import train_test_split X, y = fetch_openml('mnist_784', version=1, return_X_y=True) y = y.astype(int) X = ((X / 255.) - .5) * 2 X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=10000, random_state=123, stratify=y) """ # + [markdown] id="j3zsvP8b4Okb" # <br> # <br> # + [markdown] id="srqaPOBL4Okb" # # 손글씨 숫자 분류 # + [markdown] id="fQ9VEg904Okc" # ... # + [markdown] id="dZQsVb2G4Okc" # ## MNIST 데이터셋 구하기 # + [markdown] id="vm_CQgZY4Okc" # MNIST 데이터셋은 http://yann.lecun.com/exdb/mnist/에 공개되어 있으며 다음 네 부분으로 구성되어 있습니다. 
# # - 훈련 세트 이미지: train-images-idx3-ubyte.gz(9.9MB, 압축 해제 후 47MB, 60,000개 샘플) # - 훈련 세트 레이블: train-labels-idx1-ubyte.gz(29KB, 압축 해제 후 60KB, 60,000개 레이블) # - 테스트 세트 이미지: t10k-images-idx3-ubyte.gz(1.6MB, 압축 해제 후 7.8MB, 10,000개 샘플) # - 테스트 세트 레이블: t10k-labels-idx1-ubyte.gz(5KB, 압축 해제 후 10KB, 10,000개 레이블) # # 이 절에서는 MNIST 데이터 중 일부만 사용합니다. 따라서 훈련 데이터셋의 이미지와 레이블만 다운로드합니다. # # 파일을 다운로드한 후에 다음 코드 셀을 실행하면 파일 압축을 풀 수 있습니다. # + colab={"base_uri": "https://localhost:8080/"} id="953BW0QA4Okd" outputId="46674b19-4e3c-4b12-ee21-1d693b38d75e" # 코랩을 사용할 때는 다음 코드를 실행하세요. # !wget https://github.com/rickiepark/python-machine-learning-book-3rd-edition/raw/master/ch12/train-images-idx3-ubyte.gz # !wget https://github.com/rickiepark/python-machine-learning-book-3rd-edition/raw/master/ch12/train-labels-idx1-ubyte.gz # !wget https://github.com/rickiepark/python-machine-learning-book-3rd-edition/raw/master/ch12/t10k-images-idx3-ubyte.gz # !wget https://github.com/rickiepark/python-machine-learning-book-3rd-edition/raw/master/ch12/t10k-labels-idx1-ubyte.gz # + id="gAOPi8444Okd" # MNIST 데이터 압축을 푸는 코드 import sys import gzip import shutil import os if (sys.version_info > (3, 0)): writemode = 'wb' else: writemode = 'w' zipped_mnist = [f for f in os.listdir() if f.endswith('ubyte.gz')] for z in zipped_mnist: with gzip.GzipFile(z, mode='rb') as decompressed, open(z[:-3], writemode) as outfile: outfile.write(decompressed.read()) # + [markdown] id="ocRxdefS4Okd" # ---- # # 위 코드 셀을 실행할 때 에러가 발생할 경우: # # 위 코드 셀을 실행할 때 문제가 있다면 터미널에서 Unix/Linux gzip 명령을 사용해 파일의 압축을 푸는 것이 좋습니다. 예를 들어 MNIST 다운로드 디렉토리에서 다음 명령을 실행합니다. # # gzip *ubyte.gz -d # # 또는 마이크로소프트 윈도우를 사용한다면 선호하는 압축 프로그램을 사용할 수 있습니다. 이미지는 바이트 형태로 저장되어 있으므로 다음에 나오는 함수를 사용해 넘파이 배열로 읽어 MLP 모델을 훈련합니다. # # gzip을 사용하지 않는다면 만들어진 파일 이름이 다음과 같은지 확인하세요. 
def load_mnist(path, kind='train'):
    """Load MNIST images and labels stored in IDX format under `path`.

    Parameters
    ----------
    path : str
        Directory containing the unzipped ``<kind>-labels-idx1-ubyte``
        and ``<kind>-images-idx3-ubyte`` files.
    kind : str (default: 'train')
        File-name prefix, 'train' or 't10k'.

    Returns
    -------
    images : ndarray, shape = (n_samples, 784)
        Pixel values rescaled from [0, 255] to [-1, 1].
    labels : ndarray, shape = (n_samples,)
        Digit labels.

    Raises
    ------
    ValueError
        If a file does not carry the expected IDX magic number
        (previously the magic was read but never checked, so a wrong or
        corrupt file was loaded silently as garbage).
    """
    labels_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)
    images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)

    with open(labels_path, 'rb') as lbpath:
        magic, n = struct.unpack('>II', lbpath.read(8))
        # 2049 (0x00000801) is the IDX1 magic number for label files.
        if magic != 2049:
            raise ValueError('Invalid magic number %d in label file %s'
                             % (magic, labels_path))
        labels = np.fromfile(lbpath, dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16))
        # 2051 (0x00000803) is the IDX3 magic number for image files.
        if magic != 2051:
            raise ValueError('Invalid magic number %d in image file %s'
                             % (magic, images_path))
        # One flattened 28*28 = 784-pixel row per label.
        images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
        # Rescale pixel values from [0, 255] to [-1, 1].
        images = ((images / 255.) - .5) * 2

    return images, labels
id="HUnoHgFM4Okg" # 숫자 7 샘플 25개를 그립니다: # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="BavCchb_4Okh" outputId="6ca814bc-3783-445b-f15c-e395f3a1f6e3" fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,) ax = ax.flatten() for i in range(25): img = X_train[y_train == 7][i].reshape(28, 28) ax[i].imshow(img, cmap='Greys') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('images/12_6.png', dpi=300) plt.show() # + id="Ia7lqEr64Okh" import numpy as np np.savez_compressed('mnist_scaled.npz', X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test) # + colab={"base_uri": "https://localhost:8080/"} id="l0lcuihY4Oki" outputId="38fc248c-bb28-47c2-9a8c-56cd501ca3f9" mnist = np.load('mnist_scaled.npz') mnist.files # + colab={"base_uri": "https://localhost:8080/"} id="kL_pNeYW4Oki" outputId="430cae3b-13c6-4064-feba-02f64c3dfbab" X_train, y_train, X_test, y_test = [mnist[f] for f in ['X_train', 'y_train', 'X_test', 'y_test']] del mnist X_train.shape # + [markdown] id="yo4f_UJ64Oki" # <br> # <br> # + [markdown] id="wvMEd6qy4Oki" # ## 다층 퍼셉트론 구현 # + id="ps0ZyJjU4Oki" import numpy as np import sys class NeuralNetMLP(object): """피드포워드 신경망 / 다층 퍼셉트론 분류기 매개변수 ------------ n_hidden : int (기본값: 30) 은닉 유닛 개수 l2 : float (기본값: 0.) L2 규제의 람다 값 l2=0이면 규제 없음. 
(기본값) epochs : int (기본값: 100) 훈련 세트를 반복할 횟수 eta : float (기본값: 0.001) 학습률 shuffle : bool (기본값: True) 에포크마다 훈련 세트를 섞을지 여부 True이면 데이터를 섞어 순서를 바꿉니다 minibatch_size : int (기본값: 1) 미니 배치의 훈련 샘플 개수 seed : int (기본값: None) 가중치와 데이터 셔플링을 위한 난수 초깃값 속성 ----------- eval_ : dict 훈련 에포크마다 비용, 훈련 정확도, 검증 정확도를 수집하기 위한 딕셔너리 """ def __init__(self, n_hidden=30, l2=0., epochs=100, eta=0.001, shuffle=True, minibatch_size=1, seed=None): self.random = np.random.RandomState(seed) self.n_hidden = n_hidden self.l2 = l2 self.epochs = epochs self.eta = eta self.shuffle = shuffle self.minibatch_size = minibatch_size def _onehot(self, y, n_classes): """레이블을 원-핫 방식으로 인코딩합니다 매개변수 ------------ y : 배열, 크기 = [n_samples] 타깃 값. n_classes : int 클래스 개수 반환값 ----------- onehot : 배열, 크기 = (n_samples, n_labels) """ onehot = np.zeros((n_classes, y.shape[0])) for idx, val in enumerate(y.astype(int)): onehot[val, idx] = 1. return onehot.T def _sigmoid(self, z): """로지스틱 함수(시그모이드)를 계산합니다""" return 1. / (1. + np.exp(-np.clip(z, -250, 250))) def _forward(self, X): """정방향 계산을 수행합니다""" # 단계 1: 은닉층의 최종 입력 # [n_samples, n_features] dot [n_features, n_hidden] # -> [n_samples, n_hidden] z_h = np.dot(X, self.w_h) + self.b_h # 단계 2: 은닉층의 활성화 출력 a_h = self._sigmoid(z_h) # 단계 3: 출력층의 최종 입력 # [n_samples, n_hidden] dot [n_hidden, n_classlabels] # -> [n_samples, n_classlabels] z_out = np.dot(a_h, self.w_out) + self.b_out # 단계 4: 출력층의 활성화 출력 a_out = self._sigmoid(z_out) return z_h, a_h, z_out, a_out def _compute_cost(self, y_enc, output): """비용 함수를 계산합니다 매개변수 ---------- y_enc : 배열, 크기 = (n_samples, n_labels) 원-핫 인코딩된 클래스 레이블 output : 배열, 크기 = [n_samples, n_output_units] 출력층의 활성화 출력 (정방향 계산) 반환값 --------- cost : float 규제가 포함된 비용 """ L2_term = (self.l2 * (np.sum(self.w_h ** 2.) + np.sum(self.w_out ** 2.))) term1 = -y_enc * (np.log(output)) term2 = (1. - y_enc) * np.log(1. - output) cost = np.sum(term1 - term2) + L2_term # 다른 데이터셋에서는 극단적인 (0 또는 1에 가까운) 활성화 값이 나올 수 있습니다. # 파이썬과 넘파이의 수치 연산이 불안정하기 때문에 "ZeroDivisionError"가 발생할 수 있습니다. 
# 즉, log(0)을 평가하는 경우입니다. # 이 문제를 해결하기 위해 로그 함수에 전달되는 활성화 값에 작은 상수를 더합니다. # # 예를 들어: # # term1 = -y_enc * (np.log(output + 1e-5)) # term2 = (1. - y_enc) * np.log(1. - output + 1e-5) return cost def predict(self, X): """클래스 레이블을 예측합니다 매개변수 ----------- X : 배열, 크기 = [n_samples, n_features] 원본 특성의 입력층 반환값: ---------- y_pred : 배열, 크기 = [n_samples] 예측된 클래스 레이블 """ z_h, a_h, z_out, a_out = self._forward(X) y_pred = np.argmax(z_out, axis=1) return y_pred def fit(self, X_train, y_train, X_valid, y_valid): """훈련 데이터에서 가중치를 학습합니다 매개변수 ----------- X_train : 배열, 크기 = [n_samples, n_features] 원본 특성의 입력층 y_train : 배열, 크기 = [n_samples] 타깃 클래스 레이블 X_valid : 배열, 크기 = [n_samples, n_features] 훈련하는 동안 검증에 사용할 샘플 특성 y_valid : 배열, 크기 = [n_samples] 훈련하는 동안 검증에 사용할 샘플 레이블 반환값: ---------- self """ n_output = np.unique(y_train).shape[0] # number of class labels n_features = X_train.shape[1] ######################## # 가중치 초기화 ######################## # 입력층 -> 은닉층 사이의 가중치 self.b_h = np.zeros(self.n_hidden) self.w_h = self.random.normal(loc=0.0, scale=0.1, size=(n_features, self.n_hidden)) # 은닉층 -> 출력층 사이의 가중치 self.b_out = np.zeros(n_output) self.w_out = self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden, n_output)) epoch_strlen = len(str(self.epochs)) # 출력 포맷을 위해 self.eval_ = {'cost': [], 'train_acc': [], 'valid_acc': []} y_train_enc = self._onehot(y_train, n_output) # 훈련 에포크를 반복합니다 for i in range(self.epochs): # 미니 배치로 반복합니다 indices = np.arange(X_train.shape[0]) if self.shuffle: self.random.shuffle(indices) for start_idx in range(0, indices.shape[0] - self.minibatch_size + 1, self.minibatch_size): batch_idx = indices[start_idx:start_idx + self.minibatch_size] # 정방향 계산 z_h, a_h, z_out, a_out = self._forward(X_train[batch_idx]) ################## # 역전파 ################## # [n_examples, n_classlabels] delta_out = a_out - y_train_enc[batch_idx] # [n_examples, n_hidden] sigmoid_derivative_h = a_h * (1. 
- a_h) # [n_examples, n_classlabels] dot [n_classlabels, n_hidden] # -> [n_examples, n_hidden] delta_h = (np.dot(delta_out, self.w_out.T) * sigmoid_derivative_h) # [n_features, n_examples] dot [n_examples, n_hidden] # -> [n_features, n_hidden] grad_w_h = np.dot(X_train[batch_idx].T, delta_h) grad_b_h = np.sum(delta_h, axis=0) # [n_hidden, n_examples] dot [n_examples, n_classlabels] # -> [n_hidden, n_classlabels] grad_w_out = np.dot(a_h.T, delta_out) grad_b_out = np.sum(delta_out, axis=0) # 규제와 가중치 업데이트 delta_w_h = (grad_w_h + self.l2*self.w_h) delta_b_h = grad_b_h # 편향은 규제하지 않습니다 self.w_h -= self.eta * delta_w_h self.b_h -= self.eta * delta_b_h delta_w_out = (grad_w_out + self.l2*self.w_out) delta_b_out = grad_b_out # 편향은 규제하지 않습니다 self.w_out -= self.eta * delta_w_out self.b_out -= self.eta * delta_b_out ############# # 평가 ############# # 훈련하는 동안 에포크마다 평가합니다 z_h, a_h, z_out, a_out = self._forward(X_train) cost = self._compute_cost(y_enc=y_train_enc, output=a_out) y_train_pred = self.predict(X_train) y_valid_pred = self.predict(X_valid) train_acc = ((np.sum(y_train == y_train_pred)).astype(np.float) / X_train.shape[0]) valid_acc = ((np.sum(y_valid == y_valid_pred)).astype(np.float) / X_valid.shape[0]) sys.stderr.write('\r%0*d/%d | 비용: %.2f ' '| 훈련/검증 정확도: %.2f%%/%.2f%% ' % (epoch_strlen, i+1, self.epochs, cost, train_acc*100, valid_acc*100)) sys.stderr.flush() self.eval_['cost'].append(cost) self.eval_['train_acc'].append(train_acc) self.eval_['valid_acc'].append(valid_acc) return self # + id="-vdALhZX4Okj" n_epochs = 200 # + colab={"base_uri": "https://localhost:8080/"} id="hNzZGfqF4Okk" outputId="5fe7c06c-fb09-49dd-fe45-0bc0d4e0f9fc" nn = NeuralNetMLP(n_hidden=100, l2=0.01, epochs=n_epochs, eta=0.0005, minibatch_size=100, shuffle=True, seed=1) nn.fit(X_train=X_train[:55000], y_train=y_train[:55000], X_valid=X_train[55000:], y_valid=y_train[55000:]) # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="8YRZV-vn4Okk" 
outputId="e237383e-4c17-4de0-af26-bd4e03d9a46c" import matplotlib.pyplot as plt plt.plot(range(nn.epochs), nn.eval_['cost']) plt.ylabel('Cost') plt.xlabel('Epochs') # plt.savefig('images/12_07.png', dpi=300) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="z4O1gYie4Okl" outputId="ceb1dbf1-efa4-4ed7-a88e-3bcf4a80b8a1" plt.plot(range(nn.epochs), nn.eval_['train_acc'], label='Training') plt.plot(range(nn.epochs), nn.eval_['valid_acc'], label='Validation', linestyle='--') plt.ylabel('Accuracy') plt.xlabel('Epochs') plt.legend(loc='lower right') # plt.savefig('images/12_08.png', dpi=300) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="zscbDz7F4Okl" outputId="e6dabaaa-66a1-47e3-99ca-3cb62c6fbb5c" y_test_pred = nn.predict(X_test) acc = (np.sum(y_test == y_test_pred) .astype(np.float) / X_test.shape[0]) print('테스트 정확도: %.2f%%' % (acc * 100)) # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="bOSkex4s4Okl" outputId="f7430491-15ef-4383-f522-53318292c229" miscl_img = X_test[y_test != y_test_pred][:25] correct_lab = y_test[y_test != y_test_pred][:25] miscl_lab = y_test_pred[y_test != y_test_pred][:25] fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True) ax = ax.flatten() for i in range(25): img = miscl_img[i].reshape(28, 28) ax[i].imshow(img, cmap='Greys', interpolation='nearest') ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i])) ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('images/12_09.png', dpi=300) plt.show() # + [markdown] id="j3Wd15HC4Okl" # <br> # <br> # + [markdown] id="h8bE0O6b4Okl" # # 인공 신경망 훈련 # + [markdown] id="2k-FpEok4Okl" # ... 
# + [markdown] id="R4si-7664Okl" # ## 로지스틱 비용 함수 계산 # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="GAakQltn4Okl" outputId="e7cfa32a-4fb6-4ab0-fd86-614bab71275a" Image(url='https://git.io/JLdov', width=300) # + [markdown] id="LN03rNNR4Okm" # <br> # <br> # + [markdown] id="ETGepTEw4Okm" # ## 역전파 알고리즘 이해 # + [markdown] id="pFPsW5UE4Okm" # ... # + [markdown] id="iovHPtie4Okm" # ## 역전파 알고리즘으로 신경망 훈련 # + colab={"base_uri": "https://localhost:8080/", "height": 236} id="kkqR0QCI4Okm" outputId="a8f54fa5-729b-4ee3-8eac-67300cfdfbd0" Image(url='https://git.io/JLdoa', width=400) # + colab={"base_uri": "https://localhost:8080/", "height": 405} id="Bj3Colmn4Okm" outputId="14beb699-040b-409b-a643-6546f6c0c58a" Image(url='https://git.io/JLdoz', width=500) # + [markdown] id="yCWIr16M4Okm" # <br> # <br> # + [markdown] id="_t9nD_3j4Okn" # # 신경망의 수렴 # + colab={"base_uri": "https://localhost:8080/", "height": 335} id="X5M8ZRzm4Okn" outputId="20d9c34c-5731-45ba-c126-1873e454f314" Image(url='https://git.io/JLdoK', width=500) # + [markdown] id="hwvf3FQX4Okn" # <br> # <br> # + [markdown] id="8dwCxPtm4Okn" # ... # + [markdown] id="zKoOueKo4Okn" # # 요약 # + [markdown] id="o2TWIlZV4Okn" # ...
ch12/ch12.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3fe3b97a-aabd-44da-bb3c-2369f8ec97d8", "showTitle": false, "title": ""} # ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e80a457b-faf8-4afd-a342-baf270f15703", "showTitle": false, "title": ""} # # 7. Clinical NER Chunk Merger v3.0 # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f308f18c-26b1-48dd-92a4-0a7c11c2c3b7", "showTitle": false, "title": ""} import os import json import string import numpy as np import pandas as pd import sparknlp import sparknlp_jsl from sparknlp.base import * from sparknlp.util import * from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.pretrained import ResourceDownloader from pyspark.sql import functions as F from pyspark.ml import Pipeline, PipelineModel pd.set_option('max_colwidth', 100) pd.set_option('display.max_columns', 100) pd.set_option('display.expand_frame_repr', False) print('sparknlp_jsl.version : ',sparknlp_jsl.version()) spark spark # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0ec5ede9-94cc-4700-85ce-7d1380fc2917", "showTitle": false, "title": ""} # Sample data data_chunk_merge = spark.createDataFrame([ (1,"""A 63 years old man presents to the hospital with a history of recurrent infections that include cellulitis, pneumonias, and upper respiratory tract infections. He reports subjective fevers at home along with unintentional weight loss and occasional night sweats. The patient has a remote history of arthritis, which was diagnosed approximately 20 years ago and treated intermittently with methotrexate (MTX) and prednisone. 
On physical exam, he is found to be febrile at 102°F, rather cachectic, pale, and have hepatosplenomegaly. Several swollen joints that are tender to palpation and have decreased range of motion are also present. His laboratory values show pancytopenia with the most severe deficiency in neutrophils. """)]).toDF("id","text") data_chunk_merge.show(truncate=50) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1b4454be-d41a-4dc2-9673-347414d105da", "showTitle": false, "title": ""} # Annotator that transforms a text column from dataframe into an Annotation ready for NLP documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") # Sentence Detector annotator, processes various sentences per line sentenceDetector = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentence") # Tokenizer splits words in a relevant format for NLP tokenizer = Tokenizer()\ .setInputCols(["sentence"])\ .setOutputCol("token") # Clinical word embeddings trained on PubMED dataset word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentence", "token"])\ .setOutputCol("embeddings") # NER model trained on i2b2 (sampled from MIMIC) dataset clinical_ner = MedicalNerModel.pretrained("ner_deid_large", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("clinical_ner") clinical_ner_converter = NerConverter() \ .setInputCols(["sentence", "token", "clinical_ner"]) \ .setOutputCol("clinical_ner_chunk") # Cancer Genetics NER bionlp_ner = MedicalNerModel.pretrained("ner_bionlp", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("bionlp_ner") bionlp_ner_converter = NerConverter() \ .setInputCols(["sentence", "token", "bionlp_ner"]) \ .setOutputCol("bionlp_ner_chunk") # merge ner_chunks by prioritizing the overlapping indices (chunks with longer lengths and highest information will be kept frome ach ner 
model) chunk_merger_1 = ChunkMergeApproach()\ .setInputCols('clinical_ner_chunk', "bionlp_ner_chunk")\ .setOutputCol('clinical_bionlp_ner_chunk') # internal clinical NER (general terms) jsl_ner = MedicalNerModel.pretrained("ner_jsl", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("jsl_ner") jsl_ner_converter = NerConverter() \ .setInputCols(["sentence", "token", "jsl_ner"]) \ .setOutputCol("jsl_ner_chunk") # merge ner_chunks by prioritizing the overlapping indices (chunks with longer lengths and highest information will be kept frome ach ner model) chunk_merger_2 = ChunkMergeApproach()\ .setInputCols('clinical_bionlp_ner_chunk', "jsl_ner_chunk")\ .setOutputCol('final_ner_chunk') # merge ner_chunks regardess of overlapping indices # only works with 2.7 and later chunk_merger_NonOverlapped = ChunkMergeApproach()\ .setInputCols('clinical_bionlp_ner_chunk', "jsl_ner_chunk")\ .setOutputCol('nonOverlapped_ner_chunk')\ .setMergeOverlapping(False) nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, clinical_ner, clinical_ner_converter, bionlp_ner, bionlp_ner_converter, chunk_merger_1, jsl_ner, jsl_ner_converter, chunk_merger_2, chunk_merger_NonOverlapped]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = nlpPipeline.fit(empty_data) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ac267add-888f-4d25-b158-84b975c7afc7", "showTitle": false, "title": ""} merged_data = model.transform(data_chunk_merge).cache() # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3a8a20ee-09c1-45c4-95b1-18066c2f7b1f", "showTitle": false, "title": ""} from pyspark.sql import functions as F result_df = merged_data.select('id',F.explode('final_ner_chunk').alias("cols")) \ .select('id',F.expr("cols.begin").alias("begin"), F.expr("cols.end").alias("end"), F.expr("cols.result").alias("chunk"), F.expr("cols.metadata.entity").alias("entity")) result_df.show(50, 
truncate=100) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0c3964ca-2b8f-4733-9b5b-3686202e8277", "showTitle": false, "title": ""} # ## NonOverlapped Chunk # # all the entities from each ner model will be returned one by one # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f4984755-4573-4eb6-a6df-e506ff3f4e21", "showTitle": false, "title": ""} from pyspark.sql import functions as F result_df2 = merged_data.select('id',F.explode('nonOverlapped_ner_chunk').alias("cols")) \ .select('id',F.expr("cols.begin").alias("begin"), F.expr("cols.end").alias("end"), F.expr("cols.result").alias("chunk"), F.expr("cols.metadata.entity").alias("entity")) result_df2.show(50, truncate=100) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "07c67a4e-6121-47b0-b310-644c41f5406c", "showTitle": false, "title": ""} # End Of Notebook #7
tutorials/Certification_Trainings/Healthcare/databricks_notebooks/7.Clinical_NER_Chunk_Merger_v3.0.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# This notebook is aimed at summarizing the REPET output at different levels of
# the nomenclature and to plot this out. Parts of it are really slow.
#
# This notebook was only designed for the purpose of analyzing the Pst-104E
# genome. No guarantees it works in any other situation.

# %matplotlib inline
import pandas as pd
import os
import re
from Bio import SeqIO
import pysam
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio import SearchIO
from pybedtools import BedTool
import numpy as np
import pybedtools
import multiprocessing
import re  # NOTE(review): duplicate of the earlier `import re` (harmless)
import time
import matplotlib.pyplot as plt


def ID_filter_gff(_feature, _id):
    """Parse the top-level ID out of the 9th gff column of a REPET gff file.

    A specific regex is used for each feature type in column 2; the feature
    type is derived as '_'.join(_feature.split("_")[-2:]).
    Relies on the module-level variable ``genome`` ending with either
    'p_ctg' or 'h_ctg' and adapts the search pattern accordingly
    (h contigs carry one extra '_' field in the contig id).
    """
    _type = '_'.join(_feature.split("_")[-2:])
    if _type == 'REPET_TEs':
        if genome.endswith('p_ctg'):
            TE_pattern = r'ID=[A-Z,a-z,0-9,-]*_[A-Z,a-z,0-9]*_[0-9]*_([^;| ]*)'
        elif genome.endswith('h_ctg'):
            TE_pattern = r'ID=[A-Z,a-z,0-9,-]*_[A-Z,a-z,0-9]*_[0-9]*_[0-9]*_([^;| ]*)'
        TE_prog = re.compile(TE_pattern)
        TE_match = TE_prog.search(_id)
        try:
            return TE_match.group(1)
        except AttributeError:
            # no match: print the offending attribute string for inspection
            # (falls through and returns None)
            print(_id)
    if _type == 'REPET_SSRs':
        if genome.endswith('p_ctg'):
            SSR_pattern = 'ID=[A-Z,a-z,0-9,-]*_[A-Z,a-z,0-9]*_[0-9]*_([A-Z,a-z,0-9,-]*)'
        elif genome.endswith('h_ctg'):
            SSR_pattern = 'ID=[A-Z,a-z,0-9,-]*_[A-Z,a-z,0-9]*_[0-9]*_[0-9]*_([A-Z,a-z,0-9,-]*)'
        SSR_prog = re.compile(SSR_pattern)
        SSR_match = SSR_prog.search(_id)
        return SSR_match.group(1)
    if _type == 'REPET_tblastx' or _type == 'REPET_blastx':
        if genome.endswith('p_ctg'):
            blast_prog = re.compile(r'ID=[A-Z,a-z,0-9,-]*_[A-Z,a-z,0-9]*_[0-9]*_([^;| ]*)')
        elif genome.endswith('h_ctg'):
            blast_prog = re.compile(r'ID=[A-Z,a-z,0-9,-]*_[A-Z,a-z,0-9]*_[0-9]*_[0-9]*_([^;| ]*)')
        #blast_prog = re.compile(blast_pattern)
        blast_match = blast_prog.search(_id)
        return blast_match.group(1)


def blast_hit_gff(_feature, _row8, _id):
    """Parse the blast hit for REPET_TEs from the new 'ID' column.

    If no blast hit is available, returns the Pastec id translated through
    ``three_letter_dict``. If the feature already is a blast result, simply
    parses the blast hit out of ``_id``. SSR features get the literal 'SSR'.
    !!!Requires the module-level ``three_letter_dict`` to be defined
    previously!!!  The feature type is derived as
    '_'.join(_feature.split("_")[-2:]).
    """
    _type = '_'.join(_feature.split("_")[-2:])
    if _type == 'REPET_TEs':
        #split the pastec_cat into the first three letter code
        #the splitting of the 'ID' column needs to be done differently
        #depending on the h or p contigs
        #h contigs contain one additional '_' in the contig id
        pastec_cat = _id.split('_')[0]
        if 'TE_BLR' in _row8:
            #hit_list = [x.split(';')[3] for x in _row8]
            blast_hit_pattern = r'TE_BLR\w*: (\S*)[ |;]'
            blast_hit_prog = re.compile(blast_hit_pattern)
            TE_match = blast_hit_prog.findall(_row8)
            first_sub_class = ':'.join(TE_match[0][:-1].split(':')[1:])
            # only trust the blast classification if ALL hits agree on it
            if len([x for x in TE_match if first_sub_class in x]) == len(TE_match):
                if ';' in first_sub_class:
                    return first_sub_class.split(';')[0]
                else:
                    return first_sub_class
            #fix this here to include the three letter code of the first bit of
            #the ID similar to the blast hits e.g. ClassI:?:? and so on.
            #a dict might be the easiest here.
            else:
                return three_letter_dict[pastec_cat]
        else:
            return three_letter_dict[pastec_cat]
    if _type == 'REPET_SSRs':
        return 'SSR'
        # NOTE(review): the next line is unreachable dead code and references
        # an undefined name (SSR_match); kept verbatim for review.
        return SSR_match.group(1)
    if _type == 'REPET_tblastx' or _type == 'REPET_blastx':
        return ':'.join(_id.split(':')[1:])


def TE_classification_filter(_id, level = 0):
    """Pull the Class (level 0), Order (level 1) or Superfamily (level 2)
    out of a 'Class:Order:Superfamily' id string.

    If the id has no ':' separators (e.g. 'SSR' or 'noCat') it is returned
    unchanged; a '?' component maps to 'noCat'. Returns None for an
    unrecognized Class at level 0.
    """
    if len(_id.split(':')) == 1:
        return _id
    if level == 0:
        _class = _id.split(':')[0]
        if _class == 'ClassI':
            return 'Retrotransposon'
        if _class == 'ClassII':
            return 'DNA_transposon'
    elif level == 1:
        _order = _id.split(':')[1]
        if _order == '?':
            return 'noCat'
        else:
            return _order
    elif level == 2:
        _superfamily = _id.split(':')[2]
        if _superfamily == '?':
            return 'noCat'
        else:
            return _superfamily
    else:
        print('Something wrong! Check if level is 0, 1 or 2')


# #### This needs to be updated here according to genome (either p_ctg or h_ctg)

# +
source_dir = '/home/benjamin/genome_assembly/PST79/FALCON/p_assemblies/v9_1/032017_assembly'
genome = 'Pst_104E_v12_p_ctg'
out_dir = '/home/benjamin/genome_assembly/PST79/FALCON/p_assemblies/v9_1/Pst_104E_v12/TE_analysis'
# -

if not os.path.exists(out_dir):
    os.mkdir(out_dir)

#remove all commenting lines from the initial repet file
# !grep -v "^#" {source_dir}/{genome}.REPET.gff > {out_dir}/{genome}.REPET.gff

p_repet_gff = pd.read_csv(out_dir+'/'+genome+'.REPET.gff', sep='\t', header = None)

# #### This needs to be updated here according to genome

#This needs to be updated here according to genome
TE_post_analysis_p = '/home/benjamin/genome_assembly/PST79/FALCON/p_assemblies/v9_1/REPET/Pst79_p/Pst79_p_full_annotate/postanalysis/'
# column header of the REPET annotStatsPerTE.tab postanalysis output
TE_post_analysis_p_header = 'TE length covg frags fullLgthFrags copies fullLgthCopies meanId sdId minId q25Id medId q75Id maxId meanLgth sdLgth minLgth q25Lgth medLgth q75Lgth maxLgth meanLgthPerc sdLgthPerc minLgthPerc q25LgthPerc medLgthPerc q75LgthPerc maxLgthPerc'.split(' ')
TE_post_analysis_p_header = [x for x in TE_post_analysis_p_header if x != '']

# !ls {TE_post_analysis_p}

# #### This needs to be updated here according to genome

# +
#this needs to be fixed up to pick the proper summary table
p_repet_summary_df = pd.read_csv(TE_post_analysis_p+'/'+'Pst79p_anno_chr_allTEs_nr_noSSR_join_path.annotStatsPerTE.tab' ,\
                                 names = TE_post_analysis_p_header, header=None, sep='\t', skiprows=1 )
#check if I can filter the tab files for removing all TEs that are on the 2000 plus contigs
#remove tRNAs TEs with infernal
# the three-letter Wicker-style code is the leading '_'-separated field of the TE name
p_repet_summary_df['Code'] = p_repet_summary_df['TE'].apply(lambda x: x.split('_')[0])
code_keys = p_repet_summary_df['Code'].unique()
code_keys.sort()
# human-readable classification, positionally matched to the sorted code_keys
code_long = ['DNA_transposon Helitron', 'DNA_transposon Helitron', 'DNA_transposon Helitron', 'DNA_transposon Maverick',\
 'DNA_transposon TIR', 'DNA_transposon TIR', 'DNA_transposon TIR', 'DNA_transposon TIR', 'DNA_transposon noCat',\
 'DNA_transposon MITE','DNA_transposon MITE', 'Potential Host Gene', 'Retrotransposon LINE', 'Retrotransposon LINE',\
 'Retrotransposon LINE','Retrotransposon LTR','Retrotransposon LTR', 'Retrotransposon LTR', 'Retrotransposon LTR', 'Retrotransposon PLE', \
 'Retrotransposon SINE', 'Retrotransposon SINE', 'Retrotransposon noCat', 'Retrotransposon LARD',\
 'Retrotransposon LARD', 'Retrotransposon TRIM', 'Retrotransposon TRIM', 'Retrotransposon noCat', \
 'Retrotransposon DIRS','Retrotransposon DIRS','Retrotransposon DIRS','Retrotransposon DIRS',\
 'noCat', 'noCat']
if len(code_keys) != len(code_long):
    print('Check the code_long list, because different length of keys and values!\n\n')
else:
    print('Check the code dict anyway')
code_dict = dict(zip(code_keys, code_long))
print(code_dict)
# -

p_repet_summary_df['Code long'] = p_repet_summary_df['Code'].apply(lambda x: code_dict[x])

# summed coverage/copies and mean length per long classification code
p_repet_summary_sum_df = pd.pivot_table(p_repet_summary_df, values=['covg', 'copies'], index='Code long', aggfunc=np.sum)
p_repet_summary_mean_df = pd.pivot_table(p_repet_summary_df, values='length', index='Code long', aggfunc=np.mean)
pd.concat([p_repet_summary_sum_df,p_repet_summary_mean_df], axis=1 )

# #### This needs to be updated here according to genome

# +
#now filter the gff dataframe to delete all the high coverage contigs
#This might would have to be fixed as well.
# (continued) ...if we don't delete it, the files should already be filtered.
# keep only contigs listed in the "smaller 2000" whitelist
contigs_smaller_2000 = pd.read_csv('/home/benjamin/genome_assembly/PST79/FALCON/p_assemblies/v9_1/032017_assembly/pcontig_smaller_2000.txt',\
                                   header=None)[0].tolist()
p_repet_gff = pd.read_csv(out_dir+'/'+genome+'.REPET.gff', sep='\t', header = None)
p_repet_gff_filtered = p_repet_gff[p_repet_gff[0].isin(contigs_smaller_2000)].reset_index(drop=True)
# -

#filter out potential host genes
p_repet_gff_filtered = p_repet_gff_filtered[~p_repet_gff_filtered[8].str.contains("Potential")]

# parse the top-level element ID out of column 8 (see ID_filter_gff above)
p_repet_gff_filtered['ID'] = p_repet_gff_filtered.apply(lambda row: ID_filter_gff(row[1], row[8]), axis=1)

# +
#re-generate the code dict using the gff ID as Code keys
code_keys_gff = p_repet_gff_filtered[p_repet_gff_filtered[1].str.contains('REPET_TE')]['ID'].unique()
code_keys_gff = list({x.split('_')[0] for x in code_keys_gff})
code_keys_gff.sort()
#remove Potential host genes from long code list as those were filtered out previously.
code_long.remove('Potential Host Gene')
if len(code_keys_gff) != len(code_long):
    print("Go and check something is wrong at the code key stage!")
code_dict = dict(zip(code_keys_gff, code_long))
# -

# +
# Build three_letter_dict: maps each three-letter Wicker-style code (e.g.
# 'RLX', 'DTX') to a 'Class:Order:Superfamily' string. First letter encodes
# the class (R = ClassI retro, D = ClassII DNA), second letter the order.
three_letter_code = list({x for x in code_keys_gff})
three_letter_code.sort()
three_letter_values = []
for x in three_letter_code:
    # composite categories get fixed mappings
    if 'MITE' in x:
        _value = "ClassII:MITE:?"
        three_letter_values.append(_value)
        continue
    if 'LARD' in x:
        _value = 'ClassI:LARD:?'
        three_letter_values.append(_value)
        continue
    if 'TRIM' in x:
        _value = 'ClassI:TRIM:?'
        three_letter_values.append(_value)
        continue
    _value =''
    if x[0] == 'D':
        _value = _value + 'ClassII:'
    if x[0] == 'R':
        _value = _value + 'ClassI:'
    if x[0] != 'D' and x[0] != 'R':
        # unrecognized class letter -> no classification at all
        _value = 'noCat'
        three_letter_values.append(_value)
        continue
    # second letter: order within the class
    if x[1] == 'T':
        _value = _value + 'TIR:?'
    if x[1] == 'H':
        _value = _value + 'Helitron:?'
    if x[1] == 'M':
        _value = _value + 'Maverick:?'
    if x[0:2] == 'DY':
        _value = _value + ':Crypton:?'
    if x[1] == 'X':
        _value = _value + '?:?'
    if x[1] == 'I':
        _value = _value + 'LINE:?'
    if x[1] == 'L':
        _value = _value + 'LTR:?'
    if x[1] == 'P':
        _value = _value + 'Penelope:?'
    if x[1] == 'S':
        _value = _value + 'SINE:?'
    if x[0:2] == 'RY':
        _value = _value + 'DIRS:?'
    three_letter_values.append(_value)
if len(three_letter_code) == len(three_letter_values):
    print("Aas")
three_letter_dict = dict(zip(three_letter_code, three_letter_values))
# -

three_letter_dict

# annotate every feature with its blast-derived Class:Order:Superfamily
p_repet_gff_filtered['Class:Order:Superfamily'] = p_repet_gff_filtered.apply(lambda row: blast_hit_gff(row[1], row[8], row['ID']), axis=1)

# +
#generate a dict that can be used to rename the Class:Order:Superfamily column
#considering that partial matches ([2] == match_part) might contain different
#IDs even though they are the same TE only partial.
_tmp_subset = p_repet_gff_filtered[~p_repet_gff_filtered[1].str.contains('SSR')].loc[:, 'ID':].sort_values(by=['ID','Class:Order:Superfamily'])\
    .drop_duplicates(subset='ID', keep ='last')
TE_COS_dict = dict(zip(_tmp_subset.loc[:, 'ID'], _tmp_subset.loc[:, 'Class:Order:Superfamily' ]))
_tmp_subset = p_repet_gff_filtered[p_repet_gff_filtered[1].str.contains('SSR')].loc[:, 'ID':].sort_values(by=['ID','Class:Order:Superfamily'])\
    .drop_duplicates(subset='ID', keep ='last')
_tmp_dict = dict(zip(_tmp_subset.loc[:, 'ID'], _tmp_subset.loc[:, 'Class:Order:Superfamily' ]))
TE_COS_dict.update(_tmp_dict)
#remove all backslashes from the values as this will conflict with the output later on
for x in TE_COS_dict.keys():
    if '/' in TE_COS_dict[x]:
        value = TE_COS_dict[x]
        print(value)
        TE_COS_dict[x] = value.replace('/','_')
        print(TE_COS_dict[x])
# -

p_repet_gff_filtered.to_csv(out_dir+'/'+genome+'.REPET.long.df', sep='\t', header = None, index=None)

# overwrite the per-row classification with the unified per-ID classification
p_repet_gff_filtered['Class:Order:Superfamily'] = p_repet_gff_filtered['ID'].apply(lambda x: TE_COS_dict[x])

print('These are the unique Class:Order:Superfamily classifiers of this dataframe:')
print(p_repet_gff_filtered['Class:Order:Superfamily'].unique())

#have a rough summary of the coverage, not considering overlaps
# rough coverage summary (does NOT account for overlapping annotations)
p_repet_gff_filtered.drop_duplicates(subset=[3,4,'ID'], inplace =True)
p_repet_gff_filtered['Length'] = p_repet_gff_filtered[4] - p_repet_gff_filtered[3]
# split the combined classifier into its three hierarchy levels
p_repet_gff_filtered['Class'] = p_repet_gff_filtered.apply(lambda row: TE_classification_filter(row['Class:Order:Superfamily'], 0), axis=1)
p_repet_gff_filtered['Order'] = p_repet_gff_filtered.apply(lambda row: TE_classification_filter(row['Class:Order:Superfamily'], 1), axis=1)
p_repet_gff_filtered['Superfamily'] = p_repet_gff_filtered.apply(lambda row: TE_classification_filter(row['Class:Order:Superfamily'], 2), axis=1)
p_repet_gff_len_COS = p_repet_gff_filtered.groupby(by=['Class','Order','Superfamily'])['Length'].sum()
p_repet_gff_len_S = p_repet_gff_filtered.groupby(by=['Class:Order:Superfamily'])['Length'].sum()
print("This is the summary of overlapping coverage according to Class, Order, Superfamily")
print(p_repet_gff_len_COS)
print("This is the summary of overlapping coverage according to Superfamily")
print(p_repet_gff_len_S)

# +
num_unique_TEs = len(p_repet_gff_filtered[~p_repet_gff_filtered[1].str.contains('SSR')]['ID'].unique())
num_unique_TE_super = len(p_repet_gff_filtered[~p_repet_gff_filtered[1].str.contains('SSR')]['Class:Order:Superfamily'].unique())
print('This is the number of unique TEs: %i\nThis is the number of unique TE superfamilies: %i' % (num_unique_TEs, num_unique_TE_super))
# -

p_repet_gff_filtered.groupby(by=['Class','Order','Superfamily'])['Length'].count()

p_repet_gff_filtered.groupby(by=['Class:Order:Superfamily'])['Length'].count()

p_repet_gff_filtered.to_csv(out_dir+'/'+genome+'.REPET.long_v2.df', sep='\t', header = None, index=None)

#make new gff files where the ID column is the superfamily level
p_repet_gff_superfamily = p_repet_gff_filtered.iloc[:,:]
p_repet_gff_superfamily[8] = p_repet_gff_superfamily['Class:Order:Superfamily']
p_repet_gff_superfamily.iloc[:,0:9].to_csv(out_dir+'/'+genome+'.REPET.superfamily.gff', sep='\t', header = None, index=None,columns=None)

#make new gff file where the ID column is the TE level
p_repet_gff_TE = p_repet_gff_filtered.iloc[:,:]
p_repet_gff_TE[8] = p_repet_gff_TE['ID']
p_repet_gff_TE.iloc[:,0:9].to_csv(out_dir+'/'+genome+'.REPET.TE.gff', sep='\t', header = None, index=None,columns=None)

#generate the directory structure to save category-specific coverage files
os.chdir(out_dir)
TE_types = ['Retrotransposon', 'DNA_transposon', 'noCat', 'SSR']
TE_path = [os.path.join(out_dir, x) for x in TE_types]
TE_path_dict = dict(zip(TE_types, TE_path))
for TE_type in TE_types:
    new_path = os.path.join(out_dir, TE_type)
    if not os.path.exists(new_path):
        os.mkdir(new_path)


# subset the id and save in the category-specific folder,
# then compute per-base genome coverage for the subset
def subset_id(_id, bed_object, repet_prefix):
    """Write all features matching ``_id`` to a category folder as gff and
    compute their per-base genome coverage (.cov) with bedtools.

    Relies on module-level ``TE_path_dict`` and ``p_genome_file``.
    NOTE(review): if ``_id`` matches none of the four categories,
    ``out_path`` is undefined and this raises UnboundLocalError.
    """
    #ClassI are retrotransposons from blast
    if 'ClassI:' in _id:
        out_path = TE_path_dict['Retrotransposon']
    #ClassII are DNA transposons
    elif 'ClassII' in _id:
        out_path = TE_path_dict['DNA_transposon']
    #the rest with '_' should be REPET_TEs
    elif _id == 'noCat':
        out_path = TE_path_dict['noCat']
    #everything without '_' at the end should be SSR
    elif _id == 'SSR':
        out_path = TE_path_dict['SSR']
    out_fn = out_path+'/'+repet_prefix+'.'+_id+'.gff'
    result = bed_object.filter(id_filter, _id).saveas(out_fn)
    cov_fn = out_fn.replace('gff','cov')
    # dz=True -> one line per covered base (0-based position, depth)
    cov = result.genome_coverage(dz=True,g=p_genome_file)
    cov.saveas(cov_fn)
    #_len = len(pd.read_csv(cov_fn, header=None, sep='\t'))
    #_dict[_id] = _len
    #return pybedtools.BedTool(result.fn)


# Next, we create a function to pass only features for a particular
# featuretype.
# This is similar to a "grep" operation when applied to every
# feature in a BedTool
def id_filter(feature, _id):
    """Return True iff the feature's 9th gff column equals ``_id``."""
    if feature[8] == _id:
        return True
    return False


# +
repet_prefix_TE = genome+'.REPET.TE'
repet_prefix_S = genome+'.REPET.superfamily'
p_genome_file = genome+'.genome_file'
genome_df = pd.read_csv(p_genome_file, sep='\t', header=None,names=['contig', 'length'])
genome_size = genome_df['length'].sum()
# -

#pull in the classification gff, make classification array, loop over array to save all the cov_dataframes
RE_TE_gff = pybedtools.BedTool(out_dir+'/'+genome+'.REPET.TE.gff')
g_TE = RE_TE_gff.remove_invalid().saveas(out_dir+'/'+genome+'.REPET.TE.bedobject')

#use the blast filtered dataframe as well
RE_S_gff = pybedtools.BedTool(out_dir+'/'+genome+'.REPET.superfamily.gff')
g_S = RE_S_gff.remove_invalid().saveas(out_dir+'/'+genome+'.REPET.superfamily.bedobject')

#use simple loop to loop over the bedcov genome coverage per classification.
#Keep track if everything is already done.
jobs = []  # NOTE(review): unused leftover, presumably from a multiprocessing attempt
bed_file = g_S
superfamilies = p_repet_gff_superfamily['Class:Order:Superfamily'].unique()
for superfamily in superfamilies:
    subset_id(superfamily, bed_file, repet_prefix_S)
    print('Doing %s' % superfamily)

# +
cur_dir = os.path.abspath(os.path.curdir)
#this captures all REPET classifications at the superfamily level
class_cov_files = []
for dirpath, dirname, filenames in os.walk(cur_dir, topdown=True):
    if dirpath == cur_dir:
        continue
    cov_files = [dirpath +'/'+x for x in os.listdir(dirpath) if x.endswith('.cov') and repet_prefix_S in x]
    for file in cov_files:
        class_cov_files.append(file)

#make a large summary dataframe from all the cov files
df_list =[]
class_cov_files.sort()
for file in class_cov_files:
    print(file)
    tmp_df = pd.read_csv(file, sep='\t', header = None)
    # the superfamily label is encoded in the file name
    tmp_df["Class:Order:Superfamily"] = file.split('.')[-2]
    tmp_df.drop_duplicates(inplace=True) #drop all the duplicates meaning same position in the genome and same superfamily
    df_list.append(tmp_df)
    print(file.split('.')[-2])
df_REPET_classification = pd.concat(df_list)
df_REPET_classification.to_csv(out_dir+'/'+ repet_prefix_S +'.cov', sep='\t', header =None, index=None)
# count of covered bases per superfamily / per contig+superfamily
cov_per_superfamily = df_REPET_classification.pivot_table(values=1, columns= "Class:Order:Superfamily", aggfunc='count')
cov_per_contig_per_superfamily = df_REPET_classification.groupby([0, "Class:Order:Superfamily"])[1].count()
# -

cov_all_TEs = df_REPET_classification.drop_duplicates([0,1]) #this gets rid of the overlap between different TE families and classes
cov_all_TEs = len(cov_all_TEs)

# +
#make superfamily df and add columns for Class, Order and Superfamily
cov_per_superfamily_df = cov_per_superfamily.append(pd.DataFrame.from_dict({'cov_all_TEs': cov_all_TEs}, orient='index'))
cov_per_superfamily_df.rename(columns={0: 'bp'}, inplace=True)
cov_per_superfamily_df['%'] = cov_per_superfamily_df['bp']/genome_size*100
cov_per_superfamily_df['Class:Order:Superfamily'] = cov_per_superfamily_df.index
cov_per_superfamily_df['Class'] = cov_per_superfamily_df.apply(lambda row: TE_classification_filter(row['Class:Order:Superfamily'], 0), axis=1)
cov_per_superfamily_df['Order'] = cov_per_superfamily_df.apply(lambda row: TE_classification_filter(row['Class:Order:Superfamily'], 1), axis=1)
cov_per_superfamily_df['Superfamily'] = cov_per_superfamily_df.apply(lambda row: TE_classification_filter(row['Class:Order:Superfamily'], 2), axis=1)
cov_per_superfamily_df.to_csv(out_dir+'/'+genome+'.REPET.summary.tab', sep='\t')
# -

# +
#consider combining these cov data frames into classes and orders as well and
#simply using those as id column, drop duplicates
#use those as real coverage analysis at those levels
# -

# +
cur_dir = os.path.abspath(os.path.curdir)
#this captures all REPET classifications at the superfamily level
class_cov_files = []
for dirpath, dirname, filenames in os.walk(cur_dir, topdown=True):
    if dirpath == cur_dir:
        continue
    cov_files = [dirpath +'/'+x for x in os.listdir(dirpath) if x.endswith('.cov') and repet_prefix_S in x]
    for file in cov_files:
        class_cov_files.append(file)

#make a large summary dataframe from all the cov files, aggregated at Class level
df_list =[]
class_cov_files.sort()
for file in class_cov_files:
    tmp_df = pd.read_csv(file, sep='\t', header = None)
    tmp_df["Class"] = file.split('.')[-2].split(':')[0] #parse out the Class from the file name
    tmp_df.drop_duplicates(inplace=True) #drop all the duplicates meaning same position in the genome and same Class
    df_list.append(tmp_df)
    print(file.split('.')[-2].split(':')[0])
df_REPET_classification_class = pd.concat(df_list)
df_REPET_classification_class.drop_duplicates(inplace=True)
df_REPET_classification_class.to_csv(out_dir+'/'+ repet_prefix_S.replace('superfamily', 'Class') +'.cov', sep='\t', header =None, index=None)
cov_per_class = df_REPET_classification_class.pivot_table(values=1, columns= "Class", aggfunc='count')
cov_per_contig_per_class = df_REPET_classification_class.groupby([0, "Class"])[1].count()
# -

# +
#this parsing of the order is necessary to eliminate any overlap at the order
#level (see the drop_duplicates call in the loop below)
cur_dir = os.path.abspath(os.path.curdir) #this caputures all REPET classifications add the superfamily level class_cov_files = [] for dirpath, dirname, filenames in os.walk(cur_dir, topdown=True): if dirpath == cur_dir: continue cov_files = [dirpath +'/'+x for x in os.listdir(dirpath) if x.endswith('.cov') and repet_prefix_S in x] for file in cov_files: class_cov_files.append(file) #make a large summary dataframe from all the cov files where the last df_list =[] class_cov_files.sort() for file in class_cov_files: tmp_df = pd.read_csv(file, sep='\t', header = None) if ':' in file: tmp_df["Order"] = ':'.join(file.split('.')[-2].split(':')[0:2]) #parse out the order from the file name print(':'.join(file.split('.')[-2].split(':')[0:2])) else: tmp_df["Order"] = file.split('.')[-2].split(':')[0] print(file.split('.')[-2].split(':')[0]) tmp_df.drop_duplicates(inplace=True) #drop all the duplicates meaning same position in the genome and same Order df_list.append(tmp_df) df_REPET_orderification_order = pd.concat(df_list) df_REPET_orderification_order.drop_duplicates(inplace=True) df_REPET_orderification_order.to_csv(out_dir+'/'+ repet_prefix_S.replace('superfamily', 'Order') +'.cov', sep='\t', header =None, index=None) cov_per_order = df_REPET_orderification_order.pivot_table(values=1, columns= "Order", aggfunc='count') cov_per_contig_per_order = df_REPET_orderification_order.groupby([0, "Order"])[1].count() # + cov_per_order_df = cov_per_order.append(pd.DataFrame.from_dict({'cov_all_TEs': cov_all_TEs}, orient='index')) cov_per_order_df.rename(columns={0: 'bp'}, inplace=True) cov_per_order_df['%'] = round(cov_per_order_df['bp']/genome_size*100, 3) # + cov_per_class_df = cov_per_class.append(pd.DataFrame.from_dict({'Total RE coverage': cov_all_TEs}, orient='index')) cov_per_class_df.rename(columns={0: 'bp'}, inplace=True) cov_per_class_df['%'] = round(cov_per_class_df['bp']/genome_size*100, 3) cov_per_class_df.sort_values('%', inplace=True) # - plt.style.available # + 
# Summary figure: three horizontal bar charts — overall RE categories,
# ClassI (retrotransposon) superfamilies, ClassII (DNA transposon)
# superfamilies — each as % genome coverage.
plt.style.use('seaborn-talk')
fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, ncols=1, figsize=(12,15))
fig.suptitle("Repetitive elements in P. striformis f. sp. tritici %s" % genome, fontsize=14, fontweight = 'bold')
#color cycle from color blind people
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00']
#plot the overall genome coverage by repetitive element category
cov_per_class_df.plot(kind='barh', y='%', ax=ax0, color='r')
ax0.set_xlim([-5,60])
ax0.legend().set_visible(False)
ax0.set_yticklabels(list(cov_per_class_df.index),fontsize=10, fontweight='bold')
ax0.set_ylabel(ylabel='RE categories', fontsize=14, fontweight='bold')
#plot class I
classI_df = classI_df = cov_per_superfamily_df[cov_per_superfamily_df['Class'] == 'Retrotransposon'].sort_values('%')
#pick out the colors to do color matching on the order level
tmp_cn = len(classI_df['Order'].unique())
tmp_colors = CB_color_cycle[0:tmp_cn]
tmp_col_dict = dict(zip(classI_df['Order'].unique(), tmp_colors))
classI_df['Color'] = classI_df['Order'].apply(lambda x: tmp_col_dict[x])
classI_df.plot(kind='barh', y = '%', ax=ax1, color=classI_df['Color'])
ax1.set_xlim([-2,25])
ax1.legend().set_visible(False)
ax1.set_yticklabels(list(classI_df.index),fontsize=10, fontweight='bold')
ax1.set_ylabel(ylabel='Class:Order:Superfamily', fontsize=14, fontweight='bold')
ax1.set_title('ClassI: Retrotransposons', fontsize=14, fontweight='bold')
#add tick labels (value annotations at a fixed x position next to each bar)
for p, value in zip(ax1.patches, classI_df['%']):
    ax1.annotate('{0:.3f}'.format(value), (18,p.get_y() * 1.005),fontsize=10, fontweight='bold' )
#plot class II
classII_df = classII_df = cov_per_superfamily_df[cov_per_superfamily_df['Class'] == 'DNA_transposon'].sort_values('%')
#pick out the colors to do color matching on the order level
tmp_cn = len(classII_df['Order'].unique())
tmp_colors = CB_color_cycle[0:tmp_cn]
tmp_col_dict = dict(zip(classII_df['Order'].unique(), tmp_colors))
classII_df['Color'] = classII_df['Order'].apply(lambda x: tmp_col_dict[x])
#plot the class II out
classII_df.plot(kind='barh', y = '%', ax=ax2, color=classII_df['Color'])
ax2.set_xlim([-2,25])
ax2.legend().set_visible(False)
ax2.set_yticklabels(list(classII_df.index),fontsize=10, fontweight='bold')
ax2.set_ylabel(ylabel='Class:Order:Superfamily', fontsize=14, fontweight='bold')
ax2.set_title('ClassII: DNA transposons', fontsize=14, fontweight='bold')
ax2.set_xlabel('% genome coverage', fontsize=14, fontweight='bold')
#add tick labels
for p, value in zip(ax2.patches, classII_df['%']):
    ax2.annotate('{0:.3f}'.format(value), (18 ,p.get_y() * 1.005),fontsize=10, fontweight='bold' )
fig.savefig(genome+'.REPET_summary.seaborn-talk.png', dpi=600, bbox_inches="tight")
# -

# ## Thats how far we got ##
notebooks/Pst_104E_v12_TE_filtering_and_summary_p_contigs_submission_21092017.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="4jeRQpKWoLEx" colab_type="code" colab={} # ! git clone https://github.com/mmarouen/marabou % cd marabou import os os.environ['PYTHONPATH'] += ":/content/marabou/marabou/train/" os.environ['MARABOU_HOME'] = "/content/marabou" # ! pip install opencv-python # + id="A7uLeXlah8jC" colab_type="code" colab={} # ! python marabou/train/src/scripts/train_fashion_classifier.py
marabou/train/src/scripts/fasion_classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
#'''
#Demonstrates GRAPPA reconstruction of undersampled data.
#See function grappa_basic.py for a simpler example.
#
#Pre-requisites:
# 1) This Python script needs to be able to access a listening gadgetron.
#    On the Virtual Machine, gadgetron is installed and the user just needs
#    to type 'gadgetron' in a terminal window.
#    On standalone systems, the user will need to have installed ISMRMRD
#    and gadgetron code.
#
# 2) An input data file from a GRAPPA MRI acquisition in the ISMRMRD format.
#    Example GRAPPA datasets:
#    a) 'meas_MID00108_FID57249_test_2D_2x.dat' is
#       available from https://www.ccppetmr.ac.uk/downloads
#       This is in the manufacturer's raw data format and needs to be
#       converted to ISMRMRD format using 'siemens_to_ismrmrd'.
#       This executable is installed on the Virtual Machine.
#
#    b) A simulated ISMRMRD h5 file is available as default
#
#Usage:
#  grappa_detail.py [--help | options]
#
#Options:
#  -f <file>, --file=<file>    raw data file
#                              [default: simulated_MR_2D_cartesian_Grappa2.h5]
#  -p <path>, --path=<path>    path to data files, defaults to data/examples/MR
#                              subfolder of SIRF root folder
#'''

## CCP PETMR Synergistic Image Reconstruction Framework (SIRF).
## Copyright 2015 - 2017 Rutherford Appleton Laboratory STFC.
## Copyright 2015 - 2017 University College London.
## Copyright 2015 - 2017 Physikalisch-Technische Bundesanstalt.
##
## This is software developed for the Collaborative Computational
## Project in Positron Emission Tomography and Magnetic Resonance imaging
## (http://www.ccppetmr.ac.uk/).
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##     http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

__version__ = '0.1.0'
from docopt import docopt

# import engine module
from sirf.Gadgetron import *

# process command-line options
data_file = 'simulated_MR_2D_cartesian_Grappa2.h5'
data_path = petmr_data_path('mr')

from pUtilities import show_3D_array

# Acquisitions will be read from this HDF file
input_file = existing_filepath(data_path, data_file)

# Initially we create a container that points to the h5 file. Data is
# not read from file until the gadgetron is called using
# the 'process' method.

# Create an acquisition container of type sirf.Gadgetron.AcquisitionData
print('---\n reading in file %s...' % input_file)
acq_data = AcquisitionData(input_file)

# Pre-process this input data using three preparation gadgets
# from gadgetron.
# List gadgets to use (not all may be required for this test data).
prep_gadgets = ['NoiseAdjustGadget', 'AsymmetricEchoAdjustROGadget', \
     'RemoveROOversamplingGadget' ]

# Call gadgetron by using the 'process' method. This runs the gadgets
# specified in prep_gadgets, returning an instance
# of an mGadgetron.AcquisitionsContainer
preprocessed_data = acq_data.process(prep_gadgets)

# Extract sorted k-space, permute dimensions and display
acq_array = preprocessed_data.as_array(0)
[ns,nc,nro] = preprocessed_data.dimensions() # [nx ncoil ny]
# reorder to (coil, samples, readouts) for the 3-D display helper
acq_array = numpy.transpose(acq_array,(1,0,2))
title = 'Acquisition data (magnitude)'
# %matplotlib inline
show_3D_array(acq_array, power = 0.2, \
              suptitle = title, title_size = 16, \
              xlabel = 'samples', ylabel = 'readouts', label = 'coil')

# Perform reconstruction of the preprocessed data.

# 1) Create a recon object for the desired reconstruction.
# In this demo, the recon object is created using the class
# Reconstructor(). A simpler class is available in the SIRF code
# for a GRAPPA reconstruction:
#   recon = CartesianGRAPPAReconstructor()
recon_gadgets = ['AcquisitionAccumulateTriggerGadget',
                 'BucketToBufferGadget',
                 'GenericReconCartesianReferencePrepGadget',
                 'GRAPPA:GenericReconCartesianGrappaGadget',
                 'GenericReconFieldOfViewAdjustmentGadget',
                 'GenericReconImageArrayScalingGadget',
                 'ImageArraySplitGadget'
                 ]
recon = Reconstructor(recon_gadgets)

# 2) The GRAPPA gadget can compute G-factors in addition to
# reconstructed images. We can set a gadget property as below if the gadget
# has been identified with a label. In the above list of recon_gadgets,
# the 4th is labelled 'GRAPPA' and we can use this label as below:
recon.set_gadget_property('GRAPPA', 'send_out_gfactor', True)

# If the chain had been set using
# recon = CartesianGRAPPAReconstructor(), an alternative method
# would be available:
#   recon.compute_gfactors(True)

# 3) set the reconstruction input to be the data we just preprocessed.
recon.set_input(preprocessed_data)

# 4) Run the reconstruction using 'process' to call gadgetron.
print('---\n reconstructing...\n')
recon.process()

# Output

# Reconstructed data sits in memory. We need to first get data
# for both the reconstructed images and g-factors, before extracting the
# data as Python arrays.

# Get image and gfactor data as objects of type mGadgetron.ImageData
# (Note this syntax may change in the future with the addition of a
# method '.get_gfactor'.)
image_data = recon.get_output('image')
gfact_data = recon.get_output('gfactor')

# Return as Python matrices the data pointed to by the containers.
# Note the image data is complex.
image_as_3D_array = image_data.as_array()
maxv = numpy.amax(abs(image_as_3D_array))
title = 'Reconstructed image data (magnitude)'
show_3D_array(abs(image_as_3D_array), \
              suptitle = title, title_size = 16, \
              xlabel = 'samples', ylabel = 'readouts', label = 'slice', \
              scale = (0, maxv))

gfactor_as_3D_array = gfact_data.as_array()
maxv = numpy.amax(abs(gfactor_as_3D_array))
title = 'G-factor data (magnitude)'
show_3D_array(abs(gfactor_as_3D_array), suptitle = title, title_size = 16, \
              xlabel = 'samples', ylabel = 'readouts', label = 'slice', \
              scale = (0, maxv))
# -
notebooks/MR/Old_notebooks/Gadgetron/grappa_detail.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import matplotlib.dates as mdates # %matplotlib inline import datetime import statsmodels.tsa.api as smt # - # # Load and View Data # + def load_data(): return pd.read_csv('Forecast_Dataset_Monthly_2.csv') sales_data = load_data() # - sales_data.info() sales_data.head() sales_data['Date'] = sales_data['Date'].apply(lambda x: pd.to_datetime(str(x), format='%Y%m')) sales_data.head() # + [markdown] heading_collapsed=true # # EDA # + hidden=true # Duration of dataset def sales_duration(data): data.date = pd.to_datetime(data.Date) number_of_days = data.Date.max() - data.Date.min() number_of_years = number_of_days.days / 365 print(number_of_days.days, 'days') print(number_of_years, 'years') sales_duration(sales_data) # + hidden=true def sales_per_day(): fig, ax = plt.subplots(figsize=(7,4)) plt.hist(sales_data.Sales, color='mediumblue') ax.set(xlabel = "Sales Per day", ylabel = "Count", title = "Distrobution of Sales Per Day") sales_per_day() # + hidden=true def sales_per_store(): by_store = sales_data.groupby('TTYName')['Sales'].sum().reset_index() fig, ax = plt.subplots(figsize=(20,5)) sns.barplot(by_store.TTYName, by_store.Sales, color='mediumblue') ax.set(xlabel = "TTYName ID", ylabel = "Number of Sales", title = "Total Sales Per TTYName") sns.despine() sales_per_store()
Models/01_data_cleaning_and_eda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import twint import json import nest_asyncio nest_asyncio.apply() #__import__('IPython').embed() c = twint.Config() c.Search = "neo4j OR \"graph database\" OR \"graph databases\" OR graphdb OR graphconnect OR @neoquestions OR @Neo4jDE OR @Neo4jFr OR neotechnology" c.Store_json = True #c.Custom["user"] = ["", "tweet", "user_id", "username", "hashtags", "mentions"] c.User_full = True c.Output = "tweets.json" c.Since = "2021-4-20" c.Hide_output = True twint.run.Search(c)
getTweets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from astropy.io import fits import astropy.io.ascii # separate to not overwrite namespace from astropy.table import Column from astropy import units as u from scipy import optimize from os.path import expanduser # from ROOT import TRolke # %pylab inline # #%matplotlib inline # - home = expanduser("~") gc_dir = home + "/Dropbox/GalacticCenter/" # + erg2TeV = (u.erg).to(u.TeV) print(erg2TeV) pylab.rcParams['figure.figsize'] = (12.0, 6.0) #matplotlib.rcParams['figure.figsize'] = (12.0, 6.0) # - # <h2> Define functions for extracting points, fitting, and plotting </h2> # + # define our line fitting function fitfunc = lambda p, x: p[0] + p[1] * (x) errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err # define our () fitting function fitfuncECPL = lambda p, x: p[0] + p[1] * np.log(x) - (x) / p[2] # np.log is natural log errfuncECPL = lambda p, x, y, err: (np.log(y) - fitfuncECPL(p, x)) / (err) fitfuncECPL_CF = lambda N0, gamma, beta, E: N0 + gamma*E - 1.*np.exp(E) / beta #these are just copied from http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/python_tutorial.html f = lambda E, N0, E0, gamma: N0*(E/E0)**(-1.*gamma) ferr = lambda E, F, N0, N0err, E0, cov_gg: \ F*np.sqrt(N0err**2/N0**2 + ((np.log(E/E0))**2)*cov_gg) f_ecpl = lambda E,N0,E0,gamma,beta: N0*(E/E0)**(-1.*gamma)*np.exp(-1.*E/beta) ferr_ecpl = lambda E, F, N0, N0err, E0, cov_gg, b, cov_bb: \ F*np.sqrt(N0err**2/N0**2 + ((np.log(E/E0))**2) * cov_gg + (E/E0)**2 / b**4 * cov_bb) def plotPythonFit(energy, flux, flux_err_arg, color, name, ECPL=False, E0=1., ls="--"):#, power=0.): """fit flux points to a curve then plot by default it's labeled in units of TeV, and flux / m^2 fluxes are multiplied by E^power """ logx = np.log(energy/E0) logy = np.log(flux) #logyerr = np.log(flux_err) if 
isinstance(flux_err_arg, tuple): flux_err = (flux_err_arg[1] + flux_err_arg[0]) / 2 else: flux_err = flux_err_arg logyerr = flux_err / flux if ECPL: pinit = [-26, -2.25, 10] out = optimize.leastsq(errfuncECPL, pinit, args=(energy/E0, flux, flux_err / flux), full_output=1) else: pinit = [-26, -2.25] # nb ln out = optimize.leastsq(errfunc, pinit, args=(logx, logy, logyerr), full_output=1) # end if else ECPL pfinal = out[0] covar = out[1] print("parameters:") print(pfinal) print("covariance matrix") print(covar) N0 = np.exp(pfinal[0]) gamma = pfinal[1] E = np.linspace(energy[0], energy[-1], num=100) if ECPL: beta = pfinal[2] F = f_ecpl(E, N0, E0, -1.*gamma, beta) chi2 = np.sum((flux - f_ecpl(energy, N0, E0, -1.*gamma, beta))**2/flux_err**2) / (len(energy) - 3) print("chi^2: "+str(chi2)+'\n') beta_err = np.sqrt( covar[2][2] ) * chi2 #* N0 gamma_err = np.sqrt( covar[0][0] ) * chi2 N0_err = np.sqrt( covar[1][1] ) * N0 * chi2 cov_gg = gamma_err**2 cov_bb = beta_err**2 Ferr = ferr_ecpl(E, F, N0, N0_err, E0, cov_gg, beta, cov_bb) fitTitle = (name + ' - N0: {0:.2e} +- {2:.2e}, '\ 'G: {1:.2f} +- {3:.2f}, '\ 'Ec: {4:.2f} +- {5:.2f}, ' 'E0: {6:.0f}').format(float(N0), float(gamma), float(N0_err), float(gamma_err), float(beta), float(beta_err), float(E0)) else: F = f(E, N0, E0, -1.*gamma) chi2 = np.sum((flux - f(energy, N0, E0, -1.*gamma))**2 / flux_err**2) / (len(energy) - 2) print(chi2) gamma_err = np.sqrt( covar[0][0] ) * chi2 N0_err = np.sqrt( covar[1][1] ) * N0 * chi2 cov_gg = gamma_err**2 Ferr = ferr(E, F, N0, N0_err, E0, cov_gg) fitTitle = (name + ' - N0= {0:.2e} +- {2:.2e}, '\ 'gamma= {1:.2f} +- {3:.2f}, '\ 'E0: {4:.2f}').format(float(N0), float(gamma), float(N0_err), float(gamma_err), float(E0)) plt.plot(E, F * (E)**power, color=color, ls=ls, marker="", label=fitTitle) plt.fill_between(E, (E)**power*(F+Ferr), (E)**power*(F-Ferr), color=color, alpha='0.25') plt.loglog(nonposy="clip") plt.errorbar(energy, flux*energy**power, flux_err_arg*energy**power, color=color, ls='', 
marker='_') # ,label=name plt.loglog(nonposy="clip") plt.xlabel("Energy [TeV]") # end plotPythonFit # + def extract_points(filename): """extracts points from prepared spectral file""" with open(filename) as infile: # make dict for line in iter(infile): line = line.split() try: float(line[0]) and float(line[3]) and float(line[4]) and float(line[5]) H_energy = np.append(H_energy, float(line[0])) H_flux = np.append(H_flux, float(line[3])) H_err_up = np.append(H_err_up, float(line[4])) H_err_dwn = np.append(H_err_dwn, float(line[5])) except ValueError: continue # end extract SgrA spectral points from file # + def extract_spectral_points_from_log(filename): """takes in filename of standard root log file and returns array of tuples representing spectral points""" verbose = False points_filename = filename.replace('_stage6', '_spectral-points') points_file = open(points_filename, 'w') fitparams = [0., 0., 0., 0.] # norm, index, norm_err, index_err #handle, ulfilename = mkstemp() #UL_file = os.fdopen(handle, 'w') #UL_file = open(filename.replace('stage6', 'ULs')) with open(filename) as infile: foundspec = False specover = False for line in infile: if foundspec and not specover: if line[0] == '+': # this signifies a point ls = line.split() newline = ' '.join(ls[1:]) + '\n' if verbose: print(line) print(newline) points_file.write(newline) else: try: float(line[0]) #UL_file.write(line + '\n') except ValueError: specover = True elif specover: ls = line.split() if ls[0] == '1' and ls[1] == "Norm": fitparams[0] = float(ls[2]) fitparams[2] = float(ls[3]) # err elif ls[0] == '2' and ls[1] == "Index": fitparams[1] = float(ls[2]) fitparams[3] = float(ls[3]) # err print(fitparams) break #elif line == " Bin Energy error Flux error Non Noff Nexcess RawOff Alpha Sig Low Edge High Edge": else: ls = line.split() if len(ls) >= 3 and ls[0] == "Bin" and ls[1] == "Energy" and ls[2] == "error": foundspec = True points_file.close() flux_points = np.genfromtxt(points_filename) #UL_array = 
np.genfromtxt(ulfilename) #UL_file.close() return flux_points, fitparams #, UL_points # end extract_spectral_points_from_log # - # # HESS Points # + # SgrA spectral points and errors with open(gc_dir+"spectralPoints/HESS_SgrAstar_SpectralPoints_TeV-cm2.txt") as infile: H_energy = np.array([]) H_flux = np.array([]) H_err_up = np.array([]) H_err_dwn = np.array([]) for line in iter(infile): line = line.split() try: float(line[0]) and float(line[3]) and float(line[4]) and float(line[5]) H_energy = np.append(H_energy, float(line[0])) H_flux = np.append(H_flux, float(line[3])) H_err_up = np.append(H_err_up, float(line[4])) H_err_dwn = np.append(H_err_dwn, float(line[5])) except ValueError: continue # end extract SgrA spectral points from file #"/spectralPoints/HESS_Diffuse_SpectralPoints_Bins.txt" # - print(H_err_dwn) # + VEGAS_Points = """ 2.499 6.62e-09 5.64e-10 3.96 1.61e-09 1.01e-10 6.273 4.9e-10 3.7e-11 9.935 1.49e-10 1.43e-11 15.73 4.3e-11 5.43e-12 24.87 6.44e-12 1.55e-12 35.37 5.29e-13 5.86e-13""" V_Points_fine = astropy.io.ascii.read(VEGAS_Points) # convert from m^-2 to cm^-2 V_Points_fine['col2'] *= 1e-4 V_Points_fine['col3'] *= 1e-4 V_Points_fine # - power = 0. 
plotPythonFit(V_Points_fine['col1'], V_Points_fine['col2'], V_Points_fine['col3'], "blue", "HESS", ECPL=True, E0=1.25) plt.errorbar(V_Points_fine['col1'], V_Points_fine['col2'] * V_Points_fine['col1']**power, yerr = V_Points_fine['col3'] * V_Points_fine['col1']**power, label = "VERITAS 2016 Paper", ls="", marker="+", color="red") plt.ylim(ymin=1e-17) plt.ylabel(r" dN/dE [TeV m$^{-2}$ s$^{-1}$]") # Combine # + # updated VEGAS points # highest energy bins # VEGAS_Points = """ 2.498 5.71e-09 4.03e-10 3.96 1.7e-09 1.08e-10 6.276 5.48e-10 4.15e-11 9.946 1.69e-10 1.69e-11 15.76 6.07e-11 7.63e-12 24.98 6.81e-12 2.67e-12 39.6 2.39e-13 7.91e-13 """ VEGAS_Points = """ 2.498 6.82e-09 5.97e-10 3.959 1.69e-09 1.08e-10 6.274 5.42e-10 4.14e-11 9.943 1.67e-10 1.69e-11 15.76 6.00e-11 7.63e-12 24.98 1.02e-11 2.59e-12 39.59 1.03e-12 7.56e-13 """ V_Points = astropy.io.ascii.read(VEGAS_Points) # convert from m^-2 to cm^-2 V_Points['col2'] *= 1e-4 #* erg2TeV V_Points['col3'] *= 1e-4 #* erg2TeV V_Points # + Andy_Points = ''' 2.813 3.44e-13 4.52e-14 3.541 2.17e-13 2.43e-14 4.458 1.23e-13 1.37e-14 5.613 5.13e-14 7.4e-15 7.066 2.72e-14 4.3e-15 8.896 1.27e-14 2.48e-15 12.49 5.8e-15 8.86e-16 19.8 1.44e-15 3.36e-16 31.39 1.22e-16 8.14e-17''' A_Points = astropy.io.ascii.read(Andy_Points) A_Points['col2'] *= 1 / erg2TeV A_Points['col3'] *= 1 / erg2TeV print(log10(A_Points['col1']*erg2TeV)) #A_Points # + print(And_p) print(A_Points) print(1.114e-15*(17.3929)**2) # + power = 2. 
# my VEGAS points #plotPythonFit(V_Points['col1'], V_Points['col2'], V_Points['col3'], "M2016", "red", ECPL=True, E0=1.0) plt.errorbar(V_Points['col1'], V_Points['col2'] * V_Points['col1']**power, yerr = V_Points['col3'] * V_Points['col1']**power, label = "Matt Buchovecky 2016 Update", ls="", marker="+", color="red") # HESS points plotPythonFit(H_energy[:-3], H_flux[:-3], (H_err_dwn[:-3],H_err_up[:-3]), "blue", "HESS", ECPL=True) plt.errorbar(H_energy[:-3], H_energy[:-3]**2*H_flux[:-3], yerr=H_energy[:-3]**2*(H_err_dwn[:-3], H_err_up[:-3]), marker="+", ls="", color="blue", label="HESS") plt.errorbar(H_energy[-3:], H_energy[-3:]**2*H_flux[-3:], yerr=(H_energy[-3:]**2*H_err_up[-3:], H_energy[-3:]**2*H_err_dwn[-3:]), marker="_", ls="", uplims=True, color="blue") # Andy's points plt.errorbar(A_Points['col1'], A_Points['col2'] * A_Points['col1']**power, yerr = A_Points['col3'] * A_Points['col1']**power, label = "VERITAS 2016 Paper", ls="", marker="+", color="gray") # plot format and save plt.title("Sgr A* Spectrum ") plt.loglog(nonposy="clip") plt.ylim(ymin=1e-15) plt.legend(loc="best") plt.xlabel("Energy [TeV]") plt.ylabel(r"E$^2$ dN/dE [TeV cm$^{-2}$ s$^{-1}$]") plt.savefig(gc_dir+"/plots/spectra/SgrA_spectra_HESSoverlay_wAndy_ECPL.png") # + power = 2. 
# pulled #plt.plot(And_p[:,0], And_p[:,1]*And_p[:,0]**(power-2.)*erg2TeV, label="Andy 2016 - pulled from paper", ls="", marker="+") # A_Points[:,2]*And_p[:,0]**(power-2.)*erg2TeV, # sent #plt.errorbar(A_Points['col1'], A_Points['col2']*1e4*A_Points['col1']**(power)*erg2TeV, yerr=A_Points['col3']*1e4*A_Points['col1']**(power)*erg2TeV, label="VERITAS 2016 Paper", ls="", marker="_") #print(A_Points) #plt.errorbar(H_energy[:-3], H_energy[:-3]**2*H_flux[:-3]*1e4, yerr=H_energy[:-3]**2*(H_err_dwn[:-3], H_err_up[:-3])*1e4, marker="_", ls="", label="HESS - points sent to me") #/erg2TeV #plt.errorbar(V5[0], V5[1], V5[2], marker='+', label='V5', ls='') #plt.errorbar(V6[0], V6[1], V6[2], marker='+', label='V6', ls='') plt.errorbar(allOff[0], allOff[1], allOff[2], marker='+', label='My 2016 analysis', ls='') plt.errorbar(mine_rl[0], mine_rl[1], mine_rl[2], marker='+', label='My analsyis w/ Andys runlist', ls='') msb_nd = np.asarray(msb) plt.errorbar(msb_nd[0], msb[1], msb[2], marker='+', label='My 2016 analysis w/ diff spectral binning', ls='') plt.plot(A_c_a[:,0], A_c_a[:,1]*1e4*erg2TeV, label='Andys results', ls='', marker='+') #plt.plot(A_c_m[:,0], A_c_m[:,1]*1e4*erg2TeV, label='matt runlist', ls='', marker='+') plt.title("Sgr A* Spectrum ") plt.loglog() #plt.ylim(ymin=1e-15) plt.legend(loc="best") plt.xlabel("Energy [TeV]") plt.ylabel(r"E$^2$ dN/dE [TeV m$^{-2}$ s$^{-1}$]") # - # <h3> Diffuse / Sgr B2 </h3> # + diffuse_points = np.genfromtxt(gc_dir+"/spectralPoints/HESS_diffuse_spectrum_points_transpose.csv") diffuse_points *= 1e3 # to go from cm^2 to m^2, and account for factor of 10 on plot diffuse_points[0] /= 1e3 # not needed anymore diffuse_err_up = diffuse_points[2] - diffuse_points[1] diffuse_err_down = diffuse_points[1] - diffuse_points[3] diffuse_points[2] = diffuse_err_down diffuse_points[3] = diffuse_err_up print(diffuse_points) #np.savetxt(home+"/Downloads/HESS_diffuse_spectrum_E2flux_TeV-m2.csv", diffuse_points, delimiter='\t') # + power = 2. 
mult_factor = 10 # to put diffuse and Sgr B2 closer # transpose so each variable is a list diffuse_points = np.genfromtxt(gc_dir+"/spectralPoints/HESS_diffuse_spectrum_E2flux_TeV-m2.csv") diffuse_points *= mult_factor # to put it closer for comparison diffuse_points[0] /= mult_factor # values are E^2 * flux SgrB2_points = np.genfromtxt(gc_dir+"/spectralPoints/SgrB2_spectral_flux_TeV-m2.txt") #SgrB2_points *= 1e-4 # #SgrB2_points[:,0] *= 1e4 # don't want to adjust energy # values are just flux plt.errorbar(diffuse_points[0], diffuse_points[1]*diffuse_points[0]**(power-2.), yerr=(diffuse_points[2]*diffuse_points[0]**(power-2.), diffuse_points[3]*diffuse_points[0]**(power-2.)), marker='+', ls='', color='red', label='HESS Diffuse') plt.errorbar(SgrB2_points[:,0], SgrB2_points[:,1]*SgrB2_points[:,0]**power, yerr=SgrB2_points[:,2]*SgrB2_points[:,0]**power, marker='_', ls='', color='blue', label='SgrB2') plotPythonFit(diffuse_points[0], diffuse_points[1]/diffuse_points[0]**2, (diffuse_points[2]/diffuse_points[0]**2,diffuse_points[3]/diffuse_points[0]**2), name="HESS Diffuse", color='red', ls='') plotPythonFit(SgrB2_points[:,0], SgrB2_points[:,1], SgrB2_points[:,2], name='SgrB2', color='blue', ls='') E_SgrB2 = np.linspace(SgrB2_points[0,0], SgrB2_points[-1,0], 100) flux_SgrB2 = 3.522e-9 * np.power(E_SgrB2, -1.932+power) plt.plot(E_SgrB2, flux_SgrB2, color='blue', ls='-', marker='', label="Sgr B2: N0=3.522e-9+-1.178e-9 G=-1.932+-0.1672") E_diffuse = np.linspace(diffuse_points[0,0], diffuse_points[0,-1], 100) flux_diffuse = 1.92e-8 * np.power(E_diffuse, -2.32+power) plt.plot(E_diffuse, flux_diffuse*mult_factor, color='red', ls='-', marker='', label="HESS: N0=(1.92+-0.08stat+-0.28sys)e-8 G=-2.32+-0.05stat+-0.11sys") plt.title("Sgr B2 / Diffuse Spectrum") plt.loglog() #plt.ylim(ymin=3e-14) plt.legend(loc="best") plt.xlabel("Energy [TeV]") plt.ylabel(r"E$^2$ dN/dE [TeV m$^{-2}$ s$^{-1}$]") plt.savefig(gc_dir+"/plots/spectra/SgrB2_diffuse_spectra_HESSoverlay.png") # - # 
<h2>G0.9+0.1</h2> # + power = 0. # all energies in TeV G09_points_M2016 = astropy.io.ascii.read(gc_dir+"/spectralPoints/G09+01_allOff_flux_TeV-m2.txt") # need to fix HESS points, to give actual size of error bars G09_points_HESS = astropy.io.ascii.read(gc_dir+"/spectralPoints/G09+01_HESS_2005_flux_TeV-cm2.csv") G09_points_Andy = astropy.io.ascii.read(gc_dir+"spectralPoints/G09+01_Andy_email_flux_m2.txt") #G09_points_Andy = astropy.io.ascii.read(gc_dir+"spectralPoints/G09+01_Andy_spectral_points_E2-ergs.txt") # convert cm^-2 to m^-2 G09_points_HESS['col2'] *= 1e4 G09_points_HESS['col3'] *= 1e4 G09_points_HESS['col4'] *= 1e4 G09_points_Andy['col2'] *= 1e4 G09_points_Andy['col3'] *= 1e4 print(G09_points_HESS) #plt.errorbar(G09_points_M2016['col1'], G09_points_M2016['col2'], G09_points_M2016['col3'], # label='M2016', ls='', marker='_') #plt.errorbar(G09_points_HESS['col1'], G09_points_HESS['col2'], # (G09_points_HESS['col2']-G09_points_HESS['col4'], G09_points_HESS['col3']-G09_points_HESS['col2']), # label="HESS", ls='', marker='_') #plt.errorbar() #plt.errorbar(G09_points_Andy['col1'], G09_points_Andy['col2']/erg2TeV, G09_points_Andy['col3']/erg2TeV, label="Andy", ls='') plotPythonFit(G09_points_HESS['col1'], G09_points_HESS['col2'], (G09_points_HESS['col2']-G09_points_HESS['col4'], G09_points_HESS['col3']-G09_points_HESS['col2']), color='red', name='HESS') plotPythonFit(G09_points_M2016['col1'], G09_points_M2016['col2'], G09_points_M2016['col3'], color='blue', name='M2016') plotPythonFit(G09_points_Andy['col1'], G09_points_Andy['col2']*1e-4/erg2TeV, G09_points_Andy['col3']*1e-4/erg2TeV, name="Andy", color='green') plt.title("G0.9+0.1 Spectrum ") plt.loglog(nonposy="clip") plt.legend(loc="best") #plt.xlabel("Energy [TeV]") plt.xlim(xmin=0.15, xmax=20) # think this is just dN/dE plt.ylabel(r"dN/dE [TeV m$^{-2}$ s$^{-1}$]") plt.savefig(gc_dir+"/plots/spectra/G09_spectra_HESSoverlay_wAndy.png") # - plotPythonFit(G09_points_M2016['col1'], G09_points_M2016['col2'], 
G09_points_M2016['col3'], 'blue', name='M2016') # <h2> Disp 5t / LZA / Crab Validation # + #def power = 2. plt.rcParams["figure.figsize"] = (16, 9) plt.ylabel(r"E^2 dN/dE [TeV m$^{-2}$ s$^{-1}$]") crab_dir = home + "/Dropbox/VEGAS/Crab" logfile = home + "/Dropbox/VEGAS/NERSC/validation/stage6/Crab_validation_V5_medium_rc6_stage6.txt" # disp 5t sza_points = np.genfromtxt(crab_dir+"/spectralPoints/spectral_points_Crab_SZA.txt") lza_points = np.genfromtxt(crab_dir+"/spectralPoints/spectral_points_Crab_LZA.txt") # fit parameters from VEGAS output sza_params = [3.133e-7, -2.427, 1.470e-8, 0.04705] # norm, index, norm_err, index_err lza_params = [3.157e-7, -2.525, 1.584e-8, 0.04649] flux_sza = sza_params[0] * np.power(E_sza, sza_params[1]+power) flux_lza = lza_params[0] * np.power(E_lza, lza_params[1]+power) # standard analysis standard_points, params_std = extract_spectral_points_from_log(logfile) E_std = standard_points[:,1] Epow = np.power(E_std, power) # e.g. E^2 dN/dE y_std = standard_points[:,3] * Epow yerr_std = standard_points[:,4] * Epow std_label = ("SZA-std - N0={0:.2e} +- {1:.2e} gamma={2:.2f} +- {3:.2f}") std_label = std_label.format(params_std[0], params_std[2], params_std[1], params_std[3]) plt.errorbar(E_std, y_std, yerr_std, ls='', color='red', label=std_label) E = np.linspace(E_std[0], E_std[-1], num=100) plt.plot(E, params_std[0]*np.power(E, params_std[1]+power), color='red', ls='-') flux_upper = (params_std[0]+params_std[2])*np.power(E, params_std[1]+params_std[3]+power) flux_lower = (params_std[0]-params_std[2])*np.power(E, params_std[1]-params_std[3]+power) plt.fill_between(E, flux_upper, flux_lower, color='red', alpha='0.25') plt.title("Crab Spectrum, Disp 5t vs standard") plt.xlabel("Energy [TeV]") plt.xlim(sza_points[0,0]/1.5, lza_points[-1,0]*1.5) plt.ylim(2e-8, 1e-6) plt.ylabel(r"E^"+str(power)+"dN/dE [TeV m$^{-2}$ s$^{-1}$]") plt.loglog() E_sza = np.linspace(sza_points[0,0], sza_points[-1,0], 100) E_lza = np.linspace(lza_points[0,0], 
lza_points[-1,0], 100) plotPythonFit(sza_points[:,0], sza_points[:,1], sza_points[:,2], name='SZA-disp5t', color='blue') plotPythonFit(lza_points[:,0], lza_points[:,1], lza_points[:,2], name='LZA-disp5t', color='green') #plotPythonFit(standard_points[:,1], standard_points[:,3], standard_points[:,4], name='standard', color='red') #plt.plot(E_sza, flux_sza, color='blue', ls='-', marker='', label="SZA: N0=3.133e-7+-1.47e-8 G=-2.427+-0.04705") #plt.plot(E_lza, flux_lza, color='green', ls='-', marker='', label="LZA: N0=3.157e-7+-1.584e-8 G=-2.525+-0.04649") #plt.fill_between(E, (E)**power*(sza_params[+Ferr), (E)**power*(F-Ferr), color=color, alpha='0.25') plt.legend(loc="best") plt.savefig(home+"/Dropbox/VEGAS/Crab/plots/Crab_disp5t_SZAvLZA_spectrum_E"+str(power)+"dNdE.png") # add upper limit # :,0 gives energy - then flux, error #plt.errorbar(sza_points[:,0], # sza_points[:,1]*sza_points[:,0]**power, # sza_points[:,2]*sza_points[:,0]**power, # label='SZA', ls='', color='blue', marker='_') #plt.errorbar(lza_points[:,0], # lza_points[:,1]*lza_points[:,0]**power, # lza_points[:,2]*lza_points[:,0]**power, # label='LZA', ls='', color='green', marker='_') # - # <h2> Plot multiple spectra from log file </h2> # # + from matplotlib import pyplot as plt # %matplotlib inline def plot_all_epochs(cuts): """""" epochs = ('V4', 'V5', 'V6') logdir = home + "/Dropbox/VEGAS/NERSC/validation/stage6" plotdir = home + "/Dropbox/VEGAS/NERSC/validation/plots" plt.clf() plt.loglog() plt.title("Crab spectrum: " + cuts + " cuts") plt.xlabel("Energy (TeV)") plt.ylabel("Flux [g's/m^2/TeV/s]") for epoch in epochs: base = "Crab_validation_" + epoch + '_' + cuts + "_rc6" fn = logdir + "/" + base + "_stage6.txt" print(fn) flux_points, fitparams = extract_spectral_points_from_log(fn) label = "Norm: " + str(fitparams[0]) + " Index: " + str(fitparams[1]) bins = flux_points[:,0].astype(np.int) energy = flux_points[:,1] #energyerr = flux_points[:,2] flux = flux_points[:, 3] fluxerr = flux_points[:, 4] 
plot = plt.errorbar(energy, flux, fluxerr, ls='', label=label) # loop over epochs plt.legend(loc='best', ncol=1) plotname = plotdir + "/Crab_validation_rc6_" + cuts + ".png" plt.savefig(plotname) # plot_all_epochs # + all_cuts = ('medium', 'hard', 'soft', 'loose') for cut in all_cuts: plot_all_epochs(cut) # - # <h1> TESTING </h1> # + import collections from collections import namedtuple from tempfile import mkstemp import os flux_point = collections.namedtuple('flux_point', " bin energy energyerr flux fluxerr Non Noff Nexcess RawOff alpha sig eLow eHigh") #file = open() #points_file = os.open(pfilename) #handle, pfilename = mkstemp() #points_file = os.fdopen(handle, 'w') #points_file.seek(0) #points_file.read() #flux_points = np.genfromtxt(open(points_file)) #flux_points = np.genfromtxt(pfilename) #points_file.delete() # #print(flux_points) #print(UL_points) # + # define our (line) fitting function fitfunc = lambda p, x: p[0] + p[1] * (x) errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err # define our (line) fitting function fitfuncECPL = lambda p, x: p[0] + p[1] * np.log(x) - (x) / p[2] errfuncECPL = lambda p, x, y, err: (np.log(y) - fitfuncECPL(p, x)) / (err) fitfuncECPL_CF = lambda N0, gamma, beta, E: N0 + gamma * E -1.*np.exp(E) / beta #these are just copied from http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/python_tutorial.html f = lambda E,N0,E0,gamma: N0*(E/E0)**(-1.*gamma) ferr = lambda E,F,N0,N0err,E0,cov_gg: F*np.sqrt(N0err**2/N0**2 + ((np.log(E/E0))**2)*cov_gg) f_ecpl = lambda E,N0,E0,gamma,beta: N0*(E/E0)**(-1.*gamma)*np.exp(-1.*E/beta) ferr_ecpl = lambda E,F,N0,N0err,E0,cov_gg,b,cov_bb: F*np.sqrt(N0err**2/N0**2 \ + ((np.log(E/E0))**2) * cov_gg \ + (E/E0)**2 / b**4 * cov_bb) def plotPythonFit_ralph(energy, flux, fluxEr, color, cuts='nocuts', ECPL = False, E0 = 1): logx = np.log(energy/E0) logy = np.log(flux) logyerr = fluxEr / flux if ECPL: pinit = [-26, -2.25, 10] out = optimize.leastsq(errfuncECPL, pinit, args=(energy/E0, flux, fluxEr / 
flux), full_output=1) # # print out # # pinit = [out[0][0], -1.*out[0][1], out[0][2]] # # print pinit # out = optimize.curve_fit(fitfuncECPL_CF, logx, logy, # p0=pinit, # sigma=fluxEr, # bounds = ([-30.,-3.,1.],[-25.,-2.,1e2])) # absolute_sigma = True, # print out # out = optimize.leastsq(errfuncECPL, pinit, # args=(energy, flux, fluxEr / flux), # full_output=1) else: pinit = [-26, -2.25] # nb ln out = optimize.leastsq(errfunc, pinit, args=(logx, logy, logyerr), full_output=1) pfinal = out[0] covar = out[1] print (pfinal) print (np.diag(covar)) N0 = np.exp(pfinal[0]) gamma = pfinal[1] E = np.linspace(energy[0],energy[-1],num=100) if ECPL: beta = pfinal[2] F = f_ecpl(E,N0,E0, -1. * gamma, beta) chi2 = np.sum((flux - f_ecpl(energy,N0,E0, -1. * gamma, beta))**2/fluxEr**2) / (len(energy) - 3) print(chi2) beta_err = np.sqrt( covar[2][2] ) * chi2 #* N0 gamma_err = np.sqrt( covar[0][0] ) * chi2 N0_err = np.sqrt( covar[1][1] ) * N0 * chi2 cov_gg = gamma_err**2 cov_bb = beta_err**2 Ferr = ferr_ecpl(E,F,N0,N0_err,E0,cov_gg,beta,cov_bb) fitTitle = (cuts + ' - N0: {0:.2e} +- {2:.2e}, '\ 'G: {1:.2f} +- {3:.2f}, '\ 'Ec: {4:.2f} +- {5:.2f}, ' 'E0: {6:.0f}').format(float(N0), float(gamma), float(N0_err), float(gamma_err), float(beta), float(beta_err), float(E0)) else: F = f(E,N0,E0, -1. * gamma) chi2 = np.sum((flux - f(energy, N0, E0, -1. * gamma))**2/fluxEr**2) / (len(energy) - 2) print (chi2) gamma_err = np.sqrt( covar[0][0] ) * chi2 N0_err = np.sqrt( covar[1][1] ) * N0 * chi2 cov_gg = gamma_err**2 Ferr = ferr(E,F,N0,N0_err,E0,cov_gg) fitTitle = (cuts + ' - N0: {0:.2e} +- {2:.2e}, '\ 'gamma: {1:.2f} +- {3:.2f}, '\ 'E0: {4:.2f}').format(float(N0), float(gamma), float(N0_err), float(gamma_err), float(E0)) plt.plot(E, F * (E)**power, color=color, ls="--", marker="", label = fitTitle) plt.fill_between(E, (E)**power*(F+Ferr), (E)**power*(F-Ferr), color=color, alpha='0.25') # + power = 2. 
err_bar_red = sqrt(2) # projection for doubled dataset # my VEGAS points #plotPythonFit(V_Points['col1'], V_Points['col2'], V_Points['col3'], name="M2016", color="gray", ECPL=True, E0=1.0) plt.errorbar(V_Points['col1'], V_Points['col2'] * V_Points['col1']**power, yerr = V_Points['col3'] * V_Points['col1']**power, label = "<NAME> 2016 Update", ls="", marker="_", color="gray") # HESS points plt.errorbar(H_energy[:-3], H_energy[:-3]**2*H_flux[:-3], yerr=H_energy[:-3]**2*(H_err_dwn[:-3], H_err_up[:-3]), marker="+", ls="", color="blue", label="HESS") plt.errorbar(H_energy[-3:], H_energy[-3:]**2*H_flux[-3:], yerr=(H_energy[-3:]**2*H_err_up[-3:], H_energy[-3:]**2*H_err_dwn[-3:]), marker="_", ls="", uplims=True, color="blue") # Andy's points And_p = np.genfromtxt(gc_dir+"/spectralPoints/SgrA_Andy2016_E2flux_erg-m2.csv") plt.plot(And_p[:,0], And_p[:,1] * And_p[:,0]**(power-2.), # yerr = And_p[:,] * And_p[:,0]**(power-2.), label = "VERITAS 2016 Paper", ls="", marker="+", color="green") # plot format and save plt.title("Sgr A* Spectrum ") plt.loglog(nonposy="clip") #plt.ylim(ymin=1e-15) plt.legend(loc="best") plt.xlabel("Energy [TeV]") plt.ylabel(r"E$^2$ dN/dE [TeV cm$^{-2}$ s$^{-1}$]") print(log10(And_p[:,0])) plt.savefig(gc_dir+"/plots/spectra/SgrA_spectra_HESSoverlay_wAndy_ECPL_projected.png") # + def extract_spectral_points(logfile, power=2.): """supply a stage 6 log file and get the spectral points from it""" points = [[], [], []] bin = 0 begin = False with open(logfile) as file: for line in file: split = line.split() if len(split) > 1 and split[0] == "Bin" and split[1] == "Energy": begin = True elif "FCN=" in line: begin = False elif begin and split[0] == '+': points[0].append(float(split[2])) points[1].append(float(split[4])*float(split[2])**power) points[2].append(float(split[5])*float(split[2])**power) bin += 1 return points V5 = extract_spectral_points(gc_dir+"/log/stage6/SgrA_V5_disp5t_4tel_stage6.txt") V6 = 
extract_spectral_points(gc_dir+"/log/stage6/SgrA_V6_disp5t_4tel_stage6.txt") allOff = extract_spectral_points(gc_dir+"/log/stage6/SgrA_test_allOff_stage6.txt") mine_rl = extract_spectral_points(gc_dir+"/log/stage6/stage6_Andy_SgrA_spectrum.txt") # me running Andy's runlist msb = extract_spectral_points(gc_dir+"/log/stage6/SgrA_bin_Andy_no69365_stage6.txt") # comparison with Andy A_c_a = np.genfromtxt(gc_dir+"spectralPoints/SgrA_spectrum_Andy_runlist_comparison_E2flux_ergs-cm2.csv") A_c_m = np.genfromtxt(gc_dir+"spectralPoints/SgrA_spectrum_Matt_runlist_comparison_E2flux_ergs-cm2.csv") print(A_c_a[1]*1e4*erg2TeV) #" Bin Energy error Flux error Non Noff Nexcess RawOff Alpha Sig Low Edge High Edge":
GC/SpectrumPlotter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # State notebook purpose here # ### Imports # Import libraries and write settings here. # + # Data manipulation import pandas as pd import numpy as np # Options for pandas pd.options.display.max_columns = 50 pd.options.display.max_rows = 30 pd.options.display.float_format = '{:,.4f}'.format # Display all cell outputs from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = 'all' from IPython import get_ipython ipython = get_ipython() # autoreload extension if 'autoreload' not in ipython.extension_manager.loaded: # %load_ext autoreload # %autoreload 2 # Visualizations import seaborn as sns #import plotly.plotly as py #import plotly.graph_objs as go #from plotly.offline import iplot, init_notebook_mode #init_notebook_mode(connected=True) import cufflinks as cf cf.go_offline(connected=True) cf.set_config_file(theme='white') # - # ## Custom imports # + import sys # code_path = "/home/luca/Desktop/MLaaS4HEP/src/python/" # sys.path.append(code_path) import json from MLaaS4HEP.reader import JSONReader # - # # Analysis/Modeling # Do work here # ## Try custom import # + import json with open("rucio-opint.web.cern.ch.json", 'r') as f: raw_data = json.load(f) tot_errors = raw_data["count"] # - print("Total number of errors:", tot_errors) # + errors = pd.DataFrame(raw_data["results"]).set_index("id") errors.head() # - # ## Try simple clustering approach # + from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cluster import KMeans import numpy as np import pandas as pd vectorizer = TfidfVectorizer(stop_words='english') X = vectorizer.fit_transform(errors.message) # + true_k = 5 # Define the model with initialization model = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, 
n_init=1) # Fit the model model.fit(X) # Get the resulting centroids order_centroids = model.cluster_centers_.argsort()[:, ::-1] # Get features terms = vectorizer.get_feature_names() # - df = pd.DataFrame(order_centroids, columns=terms , index=['Group_0', 'Group_1', 'Group_2', 'Group_3', 'Group_4']) df.T.head(100) model.n_iter_ # ## Try MLaaS4HEP # + read = JSONReader("rucio-opint.web.cern.ch.json", label="first100", verbose=0) for data in read.next(): errors = data # - for data in read.next(): print((data[0][3][102])) errors[0][2] data = read.next()["results"] # # Results # Show graphs and stats here # # Conclusions and Next Steps # Summarize findings here
Tutorial import .json MLaaS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tangles # language: python # name: tangles # --- # ### Allow relative imports # %load_ext autoreload # %autoreload 2 # %matplotlib inline # + import project_path import glob import os import random from pathlib import Path from itertools import product import pandas as pd # - def merge_csv(path): path_temp = Path(f'/tmp/{random.randint(10, 10000)}.csv').resolve() is_first_file = True with open(path_temp,"wb") as output_file: for subdir, dirs, files in os.walk(path): for file in files: input_path = f'{subdir}/{file}' if is_first_file: is_first_file = False with open(input_path, "rb") as input_file: output_file.write(input_file.read()) else: with open(input_path, "rb") as input_file: next(input_file) output_file.write(input_file.read()) return path_temp path_in = Path(f'../raw_results/benchmarks/').resolve() path_out = Path(f'../results/benchmarks/').resolve() path_out.mkdir(parents=True, exist_ok=True) path_out = path_out / 'other_algorithms.csv' path_temp = merge_csv(path_in) experiment_df = pd.read_csv(path_temp).reset_index(drop=True) experiment_df experiment_df.to_csv(path_out, index=None) experiments_names = dict(pd.read_csv('../experiments.csv').astype(str).values) experiments_names path_raw_results = Path('../raw_results/') path_results = Path('../results/') for exp in os.listdir(path_raw_results): name = experiments_names.get(exp, None) print(name) if name is not None: if name not in ['SBM', 'Mindsets']: path_out = path_results /'benchmarks' / f'{name}.csv' else: path_out = path_results / f'{name}.csv' path_temp = merge_csv(path_raw_results / f'{exp}') print(path_temp) experiment_df = pd.read_csv(path_temp).reset_index(drop=True).to_csv(path_out, index=None)
notebooks/Merge results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.3 # language: julia # name: julia-1.0 # --- # # Evaluate re-coupling coefficients using JAC, SymEngine # **Note**: The Julia package `SymEngine` is needed to perform symbolic simplifications of Racah algebra expressions in JAC but, by default, is not automatically loaded. # In various research areas, the quantum mechanical description of many-particle structures and processes often requires an explicit transformation of the angular momenta (of the subsystems) due to different **coupling schemes**. Here, a quite simple example refers to the transformation (re-coupling) of three angular momenta $j_1,\, j_2,\, j_3$. For this example, we already saw that the standard Clebsch-Gordan expansions may give rapidly rise to complex and cumbersome expression, and which are very prone for making errors. In general, many of these transformations can be expressed in terms of **recoupling coefficients**, a formal generalization of the well-known *Clebsch-Gordan* coefficients. Often, these recoupling coefficients need to be evaluated over and over again. Here, we introduce and explain a notation which makes the application and evaluation of general recoupling coefficients much easier. # # Let us consider again the *recoupling coefficients* $ <(j_1,j_2)\, J_{12}, j_3; JM| j_1, (j_2,j_3)\,J_{23}; JM>$ for the re-coupling of three angular momenta. To avoid the explicit use of repeated Clebsch-Gordan expansions, we here introduce the notation of a **coupling sequence**, and which enables us to enter the coupling of each side of the coefficient separately. For this, we implemented the (data) type `RacahAlgebra.Csq` ? 
RacahAlgebra.Csq # As always, we will need some `Basic` variables: j1 = Basic(:j1); j2 = Basic(:j2); j3 = Basic(:j3); j4 = Basic(:j4); j5 = Basic(:j5) j6 = Basic(:j6); j7 = Basic(:j7); j8 = Basic(:j8); j9 = Basic(:j9); j10 = Basic(:j10) j11 = Basic(:j11); j12 = Basic(:j12); j13 = Basic(:j13); j14 = Basic(:j14); j15 = Basic(:j15) j16 = Basic(:j16); j17 = Basic(:j17); j18 = Basic(:j18); j19 = Basic(:j19); j20 = Basic(:j20) j21 = Basic(:j21); j22 = Basic(:j22); j23 = Basic(:j23); j24 = Basic(:j24); j25 = Basic(:j25) J = Basic(:J) # The `struct RacahAlgebra.Csq` then helps to express the left- and right-hand side of the recoupling coefficient above as: leftCsq = RacahAlgebra.Csq( RacahAlgebra.Csq( j1, j2, j12), j3, J) rightCsq = RacahAlgebra.Csq( j1, RacahAlgebra.Csq( j2, j3, j23), J) # and, obviously, could be easily extended towards much more complex coupling sequences. -- As before, we can evaluate this re-coupling coefficient by RacahAlgebra.evaluate(leftCsq, rightCsq) # What need to be done next to make the result more obvious ?? leftCsq = RacahAlgebra.Csq( RacahAlgebra.Csq(j1,j2,j5), RacahAlgebra.Csq(j3,j4,j6), j7 ) rightCsq = RacahAlgebra.Csq( j1, RacahAlgebra.Csq( RacahAlgebra.Csq(j2,j3,j8), j4, j9), j7 ) rex = RacahAlgebra.evaluate(leftCsq, rightCsq) # ## **The following part of this nootebook is still under construction.**
tutorials/94-evaluate-recoupling-coefficients.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="DFVYyoMKKj7i" # # Probability in Python # + [markdown] id="yQnPw68Ono8S" # # Importing packages # # You can ignore this part for now. # + id="pV7_0aGuiytb" import numpy as np # + [markdown] id="8t26cBgqYdR0" # # Import statistics module # We will use scipy.stats, which has several functions for statistics and probability distributions. # + id="cfuXiM6_YopX" import scipy.stats as st # + [markdown] id="oixdxHiLXEbG" # # Import pandas, matplotlib # + id="253oJrrPXJbG" import pandas as pd import matplotlib.pyplot as plt # + [markdown] id="qdCYJhrAoHVZ" # # Function for uniform outcome # # $n$: number of outcomes in the sample space # # Output: $m$ outcomes selected uniformly at random from 1 to $n$ # + cellView="code" id="JK-MRio0oRX7" def uniform(n, m): return np.random.randint(1, n+1, size = m) # + [markdown] id="3SKtwjTtphMd" # # Toss a coin # # Toss once, 10 times and 100 times # # 1: Heads and 2: Tails # + colab={"base_uri": "https://localhost:8080/"} id="b3nsAJiVpuvD" outputId="205b14f1-92f0-4511-ceb4-5a3d7ffa6e0d" print(uniform(2, 1)) print(uniform(2, 10)) print(uniform(2,100)) # + [markdown] id="4_IBZkw6rqxM" # # Throw a die # # Throw once, 10 times and 100 times # + colab={"base_uri": "https://localhost:8080/"} id="yVHb3PymsU_K" outputId="247eb37b-e49d-4854-bb3e-9e522231f195" print(uniform(6, 1)) print(uniform(6, 10)) print(uniform(6,100)) # + [markdown] id="Y8gHArc-vs77" # # Estimating probability by simulation - Monte Carlo # # The probability of an event $A$ can be estimated as follows. We can simulate the experiment repeatedly and independently, say $N$ times, and count the number of times the event occurred, say $N_A$. 
# # A good estimate of $P(A)$ is the following: # $$P(A) \approx \frac{N_A}{N}$$ # As $N$ grows larger and larger, the estimate becomes better and better. This method is generally termed as Monte Carlo simulation. # # We will first evaluate probability of coin toss described above using Monte Carlo simulations. There are two steps: generate a large number of tosses and count the number of heads or tails. These two steps can be written in a single loop usually. # # You should run the simulation multiple times to see what probability estimate is obtained each time. You will see that the estimate is close to 0.5. # + colab={"base_uri": "https://localhost:8080/"} id="Yn3MxagQxYxz" outputId="0c944082-fd01-4366-f5ea-bef58203be82" no_heads = 0 #variable for storing number of heads for i in range(1000): #repeat 1000 times if uniform(2, 1) == 1: #check if coin toss is heads no_heads = no_heads + 1 print(no_heads/1000) #probability estimate by Monte Carlo # + [markdown] id="fNTXoPKaytVy" # # Probability of die showing a number # # We will modify the Monte Carlo simulation above for finding the probability that a dies shows a number falling in an event $A$. You will see that the estimate is close to $P(A)$. If you change the loop iterations to 10000, the estimate will be much closer to $P(A)$ and more consistent as well. # + colab={"base_uri": "https://localhost:8080/"} id="9U8CCvkly4Tv" outputId="e7d2f1c0-ba04-4cfa-d64e-bd0db8839c0c" no = 0 #variable for storing number of event occurence for i in range(10000): #repetitions die = uniform(6,1) #experiment if die == 1 or die == 3: #Event no = no + 1 print(no/10000) #probability estimate by Monte Carlo # + [markdown] id="e6q5-pb0tZxR" # # Birthday problem # # In a group of $n$ persons, what is the chance that some two have the same birthday? Assume birthday of a person is uniformly distributed in $\{1,2,\ldots,365\}$ and is independent of all other birthdays. 
# Most people will think that you need at least 100 persons before you start seeing same birthdays. However, surprisingly perhaps, even with 23 persons there is a 50% chance of two sharing a birthday.
#
# Event $A$: some two have same birthday
#
# Event $A^c$: no two have same birthday
#
# $A^c$: (Birthday 1 on any date $B_1$) and (Birthday 2 on any date other than $B_1$) and (Birthday 3 on any date other than $B_1$, $B_2$) and ... and (Birthday $n$ on any day other than $B_1,B_2,\ldots,B_{n-1}$)
#
# $P(A^c)= 1 \cdot \left(1 - \frac{1}{365}\right)\left(1 - \frac{2}{365}\right)\cdots\left(1 - \frac{n-1}{365}\right)$
#
# If $n=10$, what is the chance? If $n=30$, what is the chance?
#
# We will do a Monte Carlo simulation to estimate the probability and compare with the calculation above.

# + colab={"base_uri": "https://localhost:8080/"} id="n8tdz2TGz20G" outputId="5f573761-3ef5-47a7-ddbb-b1492ab0ffe7"
no = 0 #variable for storing number of event occurrences
n = 60 #number of persons
print(1 - np.prod(1-np.arange(1,n)/365)) #exact probability 1 - P(A^c) from the expression above
for i in range(1000):
    # 366 slots so that birthdays 1..365 can index directly; slot 0 is unused.
    B = np.zeros(366) #array to keep track of birthdays seen
    for j in range(n): #generate birthdays for each person
        Bi = uniform(365, 1) #j-th person's birthday
        if B[Bi] == 0: #if Bi is seen for the first time
            B[Bi] = 1 #make note that Bi has been seen
        else:
            no = no + 1 #if Bi has been seen before, then two birthdays are same
            break #we can stop generating more birthdays and exit loop early
print(no/1000) #probability estimate by Monte Carlo

# + [markdown] id="DNs7eAms4KMr"
# # Monty Hall problem
#
# Here is the problem taken from the [Wiki page](https://en.wikipedia.org/wiki/Monty_Hall_problem).
#
# > Suppose you're on a game show, and you're given the choice of three doors: Behind one door is a car; behind the others, goats. You pick a door, say No. 1, and the host, who knows what's behind the doors, opens another door, say No. 3, which has a goat. He then says to you, "Do you want to pick door No. 2?"
# Is it to your advantage to switch your choice?
#
# The assumptions (also taken from [Wiki](https://en.wikipedia.org/wiki/Monty_Hall_problem)) are as follows:
# 1. Car and goats are placed at random behind the doors.
# 2. Host always picks a door not chosen by contestant.
# 3. Host always reveals a goat and not a car.
# 4. Host always offers a choice to switch from the original door to the other closed door.
#
# Under the above assumptions, here are the probabilities of winning.
#
# P(win if contestant chooses to switch) = 2/3
#
# P(win if contestant does not switch) = 1/3
#
# You can see the Wiki page for the computation. Let us simulate and find the probability of winning under switch by Monte Carlo.

# + colab={"base_uri": "https://localhost:8080/"} id="DBpwENXM6okN" outputId="f15582b6-62d5-43f7-f71c-3cdfaee89d3c"
no = 0 #number of wins for the always-switch strategy
for trial in range(1000):
    car_loc = uniform(3, 1) #door hiding the car
    contestant_orig = uniform(3, 1) #contestant's initial pick
    # The host opens a goat door the contestant did not pick, so the other
    # closed door hides the car exactly when the initial pick was a goat.
    # Hence switching wins iff the initial pick misses the car.
    if contestant_orig != car_loc:
        no = no + 1
print(no/1000) #probability estimate by Monte Carlo

# + [markdown] id="nZhp9jXvBJgD"
# # Polya's urn scheme
#
# Suppose an urn contains $r$ red and $b$ blue balls. The experiment proceeds in multiple steps, where Step $i$ is as follows:
#
# Step $i$: Draw a ball at random, note down its colour and replace it in the urn. Add $c$ more balls of the same colour to the urn.
#
# Let $R_i$ be the event that the $i$-th ball drawn is red. Let $B_i$ be the event that the $i$-th ball drawn is blue.
#
# Clearly, $P(R_1) = \frac{r}{r+b}$ and $P(B_1)=\frac{b}{r+b}$.
# It is perhaps surprising that, irrespective of $c$, we have, for all $i$,
# $$P(R_i) = \frac{r}{r+b}, P(B_i) = \frac{b}{r+b}.$$
# To prove the above, you can use induction. Assume that the above is true for $i$ and show it is true for $i+1$. Starting with $i=1$, by induction, the statement becomes true.
#
# We will setup a Monte Carlo simulation for verifying $P(R_i)$ above for a few steps.

# + colab={"base_uri": "https://localhost:8080/"} id="0ASv_By4Cm1v" outputId="f1d98feb-0ebf-4bf6-99ea-b9f471b3bccb"
no = 0 #variable for storing number of event occurrences
r = 10; b = 5 #assume 1 to r is red and r+1 to r+b is blue
print(r/(r+b)) #theoretical P(R_i), independent of c
for i in range(1000):
    r = 10; b = 5 #reset the urn at the start of every trial
    c = 3 #balls of the drawn colour added after each draw
    for j in range(5): #do 5 steps
        if uniform(r+b, 1) <= r: #a draw in 1..r is a red ball
            r = r + c
        else:
            b = b + c
    if uniform(r+b, 1) <= r: #in the 6th step, count if red ball drawn
        no = no + 1
print(no/1000) #probability estimate by Monte Carlo

# + [markdown] id="qH8Vqtw0dpgt"
# # Gambler's ruin (simple random walk)
#
# A gambler starting with $k$ units of money plays the following game at a casino:
#
# * If he has $\ge 1$ units of money, a coin is tossed. If heads, the casino pays him 1 unit. If tails, he loses 1 unit to the casino.
# * If he loses all money, he goes bankrupt and stops.
# * If he gets $N$ units of money, he wins and stops playing.
#
# If $p$ is the probability of heads and $q=1-p$, it can be shown that
# $$\text{Pr}(\text{Bankruptcy})=\begin{cases}
# 1-k/N,&\text{ if }p=q=1/2,\\
# \frac{\left(\dfrac{q}{p}\right)^k-\left(\dfrac{q}{p}\right)^N}{1-\left(\dfrac{q}{p}\right)^N}, &\text{ if }p\ne q.
# \end{cases}$$
# You can see some details of the proof of the above in the [Wiki page](https://en.wikipedia.org/wiki/Gambler%27s_ruin). Suppose $x_k$ denotes the probability of bankruptcy starting with $k$ units.
# The main idea is to condition on the first toss and derive the following recursive equation:
# $$\begin{align}
# x_k&=P(\text{Bankruptcy}\ |\ \text{first toss is head})\ p\ +\ P(\text{Bankruptcy}\ |\ \text{first toss is tail})\ q\\
# &=x_{k+1}p+x_{k-1}q
# \end{align}$$
# with boundary conditions $x_0=1$ and $x_N=0$. Solution of the recursive equation results in the above closed form expression for $x_k$.
#
# We are interested in Monte Carlo simulation of Gambler's ruin and verification of the formula for $x_k$. First, we consider the case $p=1/2$.

# + colab={"base_uri": "https://localhost:8080/"} id="9NRYRt5VgAon" outputId="78a285e7-c400-49e2-8ed4-22a6d97e0e5d"
no = 0 #variable for storing number of event occurrences (bankruptcies)
k = 5; N = 10
print(1-k/N) #theoretical bankruptcy probability for p = 1/2
for i in range(1000):
    k = 5 #reset starting capital for each trial
    while k > 0 and k < N: #play until bankrupt (k=0) or winning (k=N)
        if uniform(2, 1) == 1: #fair coin: heads wins 1 unit
            k = k + 1
        else:
            k = k - 1
    if k == 0: #trial ended in bankruptcy
        no = no + 1
print(no/1000) #probability estimate by Monte Carlo

# + [markdown] id="r_5U3VccjTjY"
# # Toss a biased coin
#
# For $p\ne q$, we require a method to toss a biased coin. This is accomplished by the following function that generates $m$ coin tosses with probability of heads equal to $p$. Note that a value of 1 represents heads and 2 represents tails as before.

# + id="5CWVmSF5jdVj"
def biased(p: float, m: int):
    """Return m tosses of a coin with P(heads) = p; 1 = heads, 2 = tails.

    `np.random.rand(m) < p` is a boolean array that is True with
    probability p, so 2 - True -> 1 (heads) and 2 - False -> 2 (tails),
    matching the 1/2 convention of `uniform`.
    """
    return 2-(np.random.rand(m) < p)

# + colab={"base_uri": "https://localhost:8080/"} id="zMoilhs-kbgG" outputId="3c1e7f3e-da7c-46ef-a052-0c51ea07ec7a"
no_heads = 0 #variable for storing number of heads
p = 0.25
print(p) #theoretical P(heads), for comparison
for i in range(1000):
    if biased(p, 1) == 1:
        no_heads = no_heads + 1
print(no_heads/1000) #probability estimate by Monte Carlo

# + [markdown] id="pWB6g3EIkySB"
# # Biased Gambler's ruin
#
# We now simulate the biased version of Gambler's ruin.
# + colab={"base_uri": "https://localhost:8080/"} id="pXljvNWKk4Cm" outputId="326631a2-a773-495a-8f32-dd2b3fb62a72" no = 0 #variable for storing number of event occurence p = 0.35 qbyp = (1-p)/p k = 5; N = 10 print((qbyp**k-qbyp**N)/(1-qbyp**N)) for i in range(1000): k = 5 while k > 0 and k < N: if biased(p, 1) == 1: k = k + 1 else: k = k - 1 if k == 0: no = no + 1 print(no/1000) #probability estimate by Monte Carlo # + [markdown] id="kd3grxAHOxuX" # # Casino die game # Throw a pair of die. A player bets $k_1$ units of money on whether the sum of the two numbers is Under 7 or Over 7, and $k_2$ units on Equal to 7. For Under 7 and Over 7, the returns are $a$:1, while, for Equal to 7, the returns are $b$:1, if the player wins the bet. If the bet is lost, the unit of money goes to the casino. # # The strategy for betting will be to independently and randomly select one of the 3 bets. The simulation will track the average return over a large number of trails. # + colab={"base_uri": "https://localhost:8080/"} id="lHd9SqWJQgbQ" outputId="484aa00f-4e57-4441-b344-f597b065cc26" a = 1.0; b = 4.0 k1 = 1; k2 = 1 print((((a-1)*5-7)*k1+((b-1)-5)*k2)/6/3) #expected gain avg_return = 0 for i in range(1000): bet = uniform(3,1) #1 - Under 7, 2 - Over 7, 3 - Equal to 7 sum = uniform(6,1) + uniform(6,1) if ((bet == 1) and (sum < 7)) or ((bet == 2) and (sum > 7)): #win for Under 7 or Over 7 bet avg_return = avg_return + k1*(a-1)/1000 if (bet == 3) and (sum == 7): #win for Equal to 7 bet avg_return = avg_return + k2*(b-1)/1000 if ((bet == 1) and (sum >= 7)) or ((bet == 2) and (sum <= 7)): #loss for Under 7 or Over 7 bet avg_return = avg_return + (-k1)/1000 if (bet == 3) and (sum != 7): #loss for Equal to 7 bet avg_return = avg_return + (-k2)/1000 print(avg_return) #simulated gain # + [markdown] id="tPUDZwyMYxor" # # Expected value of common distributions # The module has functions for generating binomial, geometric, Poisson and other distributions. 
# We will generate a large number of samples and compute the average value and compare with the expected value.

# + colab={"base_uri": "https://localhost:8080/"} id="gZJcOoqGZaTM" outputId="c0452412-680a-427a-f9bc-592dae8747c8"
#binomial(20,0.3): mean is n*p
print(20*0.3) #expected value
x = st.binom.rvs(20,0.3,size=1000)
print(np.sum(x)/1000) #average value in simulation

# + colab={"base_uri": "https://localhost:8080/"} id="iLm8Df4Ha79D" outputId="30fa3a4e-cfe6-4510-a540-14f470c157a2"
#geometric(0.3): mean is 1/p
print(1/0.3) #expected value
x = st.geom.rvs(0.3,size=1000)
print(np.sum(x)/1000) #average value in simulation

# + colab={"base_uri": "https://localhost:8080/"} id="uh-L-d-mbPeA" outputId="290e5fe1-38db-4d49-f8d0-ca66fd290902"
#Poisson(6): mean equals the rate parameter
print(6) #expected value
x = st.poisson.rvs(6,size=1000)
print(np.sum(x)/1000) #average value in simulation

# + [markdown] id="DehsyaxrFX91"
# # Balls and bins
# Suppose $m$ balls are thrown independently and uniformly at random into $n$ bins. We will compute the expected number of empty bins by simulation and compare with the theoretical value of $n(1-1/n)^m\approx ne^{-m/n}$.

# + colab={"base_uri": "https://localhost:8080/"} id="PfBJTeIxGbCo" outputId="a784c29a-9490-44cc-e607-564c8a35d6b5"
m = 10; n = 3 #m balls thrown into n bins
print(n*((1-1/n)**m)) #expected value
avg_empty_bins = 0
for i in range(1000):
    no_balls = np.zeros(n, dtype=int) #keep track of balls in bins
    for ball in range(m):
        # NOTE(review): `bin` shadows the Python builtin of the same name.
        bin = uniform(n, 1) #chosen bin, in 1..n
        no_balls[bin-1] += 1 #shift to 0-based index
    no_empty_bins = 0
    for bin in range(n): #count bins that received no ball
        if no_balls[bin] == 0:
            no_empty_bins += 1
    avg_empty_bins += no_empty_bins/1000.0 #running average over trials
print(avg_empty_bins) #average value in simulation

# + [markdown] id="TNBmUJcsaxoe"
# # Common continuous distributions and histograms
# Scipy stats module can be used to generate samples from common continuous distributions. We will generate a number of such samples and plot their histogram to confirm that the samples follow the expected density function.
# # For histograms, we will use the hist() function from the matplotlib.pyplot module imported below. # + [markdown] id="67bP65pUc63P" # ## Uniform distribution # We will begin with the uniform distribution. # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Mw3bhDmNb7n0" outputId="c19f6cb5-45fe-4c28-e858-52bafa556506" x = st.uniform.rvs(0,3,size=10000) plt.hist(x,bins=50,range=(0,3),density=True) #blue histogram plt.plot([-0.2,0,0,3,3,3.2],[0,0,1.0/3,1.0/3,0,0],lw=2) #orange line. uniform[0,3] density plt.show() # + [markdown] id="OfbPu8akdosk" # ## From histogram to density # The code above generates 10000 samples that are supposed to be independent and uniformly distributed in $[0,3]$. The histogram, created using the plt.hist command, uses 100 bins of equal width in the range $[0,3]$. So, the bins are $[0,0.03),[0.03,0.06),\ldots,[2.97,3]$. # # Suppose the number of samples that fall into the bin $[0,0.03]$ is $N_0$. Then, by Monte Carlo, we have that # $$P(0<X\le 0.03)\approx \frac{N_0}{10000},$$ # where $X$ is a random variable with the sample distribution. Assuming that the density of $X$ satisfies $f_X(x)\approx f_X(0.015)$ over the bin ($0.015$ is the midpoint of the bin), we get # $$P(0<X\le 0.03)\approx 0.03f_X(0.015)\approx\frac{N_0}{10000}.$$ # Using the above, we get # $$f_X(0.015)\approx\frac{N_0}{300}.$$ # Similarly, if $N_i$ is the number of samples in the $i$-th bin with midpoint $x_i$, we have # $$f_X(x_i)\approx\frac{N_i}{300}.$$ # The option density=True in the plt.hist command specifies that the above calculation is to be done. # # The plt.plot command plots the expected PDF as a line plot. The parameter lw specifies the linewidth and 2 pts. # # Try changing the bin size to see how the plot changes. Does 50 bins look better? Why? # + [markdown] id="eDmOvrmzlCEg" # ## Exponential distribution # We will next repeat the same for $X\sim$ Exp$(\lambda)$. 
The PDF is # $$f_X(x)=\lambda\exp(-\lambda x),$$ # where $\lambda$ is called the scale parameter. Try changing the various parameters below to see what happens. # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="5L99f2gAdreR" outputId="426c1897-6208-4c14-9664-fa11e453d456" x = st.expon.rvs(scale=1,size=10000) plt.hist(x,bins=50,range=(0,10),density=True) #blue histogram xp = np.linspace(0,10,50) plt.plot(xp,st.expon.pdf(xp,scale=1),lw=2) #orange line, exp(\lambda) density plt.show() # + [markdown] id="AsnRN0CTmB0a" # ## Normal distribution # We will repeat the same for $X\sim$ Normal$(\mu,\sigma^2)$. The PDF is # $$f_X(x)=\frac{1}{\sigma\sqrt{2\pi}}\exp(-(x-\mu)^2/2\sigma^2),$$ # where the mean $\mu$ is called the location parameter `loc' and $\sigma$ is called the scale parameter. # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Tv2Voiycmrav" outputId="47255165-7ac5-42a0-d057-35d93edd379b" x = st.norm.rvs(loc=0, scale=1, size=10000) plt.hist(x,bins=50,range=(-5,5),density=True) #blue histogram xp = np.linspace(-5,5,50) plt.plot(xp,st.norm.pdf(xp,loc=0,scale=1),lw=2) #orange line, normal pdf plt.show() # + id="HdmZPWt1yGQc" from sklearn.datasets import load_iris # + colab={"base_uri": "https://localhost:8080/"} id="Z7A13G1Jybd8" outputId="b6232197-2030-4612-cd17-3a3f01469248" iris = load_iris() print(iris.DESCR) # + [markdown] id="2hoFBjzNYvR2" # # Summarizing the data # iris.data: 2D array containing all data (size 150 x 4) # - Class 0: Row 0 to Row 49 of iris.data array # - Class 1: Row 50 to Row 99 of iris.data array # - Class 2: Row 100 to Row 149 of iris.data array # # From the module scipy.stats, we can use the 'describe' command to get summary statistics of an array of data. 
# + colab={"base_uri": "https://localhost:8080/"} id="QSND1Cl6ZZcN" outputId="5eca3ce2-b051-4bf8-bed8-1d9800150eda" # all data sum_stats = st.describe(iris.data) print(sum_stats.minmax) # + colab={"base_uri": "https://localhost:8080/"} id="YVnPzzlQaC2v" outputId="e8342daf-cf89-40b4-ce1e-cfbd992eba4f" print(sum_stats.mean) print(sum_stats.variance) # + colab={"base_uri": "https://localhost:8080/"} id="y3MsmH40aO4_" outputId="5ccb1356-a239-4c7a-e1e0-ab4bd9f3c195" #Class 0 sum_stats = st.describe(iris.data[:50,:]) print(sum_stats.minmax) print(sum_stats.mean) print(sum_stats.variance) # + [markdown] id="i1cry53TUi4Q" # # Plotting histograms to get a sense of data # For every class, we can create histogram plots. The code below generates these histograms for Class 0. # # # + colab={"base_uri": "https://localhost:8080/", "height": 302} id="NOOoXNjxyhvO" outputId="09e6bd1c-73ac-4137-e3d5-35956ddaba28" plt.subplot(221) plt.hist(iris.data[:50,0]) plt.xlim([0,6]) plt.title('SL') plt.subplot(222) plt.hist(iris.data[:50,1]) plt.xlim([0,6]) plt.title('SW') plt.subplot(223) plt.hist(iris.data[:50,2]) plt.xlim([0,6]) plt.title('PL') plt.subplot(224) plt.hist(iris.data[:50,3]) plt.xlim([0,6]) plt.title('PW') plt.suptitle('Class 0') plt.tight_layout() plt.show() # + [markdown] id="d7UY5ydJVWJH" # # 2D histograms # The following code shows how to plot 2D histograms. This is being provided as a reference for those who are interested. 
# + colab={"base_uri": "https://localhost:8080/", "height": 630} id="XQmz8tzUXeXv" outputId="3d183801-8830-49d7-be0c-a15864f878a3" fig = plt.figure(figsize=(10,5), dpi = 150) ax1 = fig.add_subplot(121, projection='3d') ax2 = fig.add_subplot(122, projection='3d') H, _x, _y = np.histogram2d(iris.data[:50,0], iris.data[:50,1]) _xx, _yy = np.meshgrid(_x[1:], _y[1:]) xx, yy, HH = _xx.ravel(), _yy.ravel(), H.ravel() ax1.bar3d(xx, yy, np.zeros_like(HH), 0.13, 0.13, HH, shade=True, color='b') ax1.set_xlabel('SL') ax1.set_ylabel('SW') H, _x, _y = np.histogram2d(iris.data[:50,2], iris.data[:50,3]) _xx, _yy = np.meshgrid(_x[1:], _y[1:]) xx, yy, HH = _xx.ravel(), _yy.ravel(), H.ravel() ax2.bar3d(xx, yy, np.zeros_like(HH), 0.04, 0.04, HH, shade=True, color='g') ax2.set_xlabel('PL') ax2.set_ylabel('PW') # + [markdown] id="_Fva8lrNdOjr" # # Fitting a distribution and estimating parameters # Interarrival times in a Gamma ray experiment are given in the book "Mathematical Statistics and Data Analysis" by <NAME>. The data is in the form of an excel file. # # We will use the pandas module in python to read the excel file. # + colab={"base_uri": "https://localhost:8080/", "height": 417} id="Q3nijj-BcZ2c" outputId="2afd30fd-4351-4c48-f1bb-7da8e29bf73e" #Make sure you upload the file gamma-arrivals.xls to the Colab runtime before running this... df = pd.read_excel(r'/content/gamma-arrivals.xls') df # + [markdown] id="OJl42U8sds9o" # We see that there are 3935 samples of data. To get a sense of the distribution, we should plot a histogram. # + colab={"base_uri": "https://localhost:8080/", "height": 588} id="No3OVW3dc4MJ" outputId="8d8ab6b9-7452-410f-f089-7bb13ebd65c1" plt.hist(df['Col1'],bins=50) # + [markdown] id="JZpmyV0eezlU" # # Fitting a Gamma distribution # From the histogram, the distribution could be modelled as Gamma$(\alpha,\beta)$. The next step is to estimate $\alpha$ and $\beta$ from the given samples. 
# # ## Method of moments # Suppose $m_1$ and $m_2$ are the first and second moments of the samples. The method of moments estimates are obtained by solving # $$m_1=\frac{\alpha}{\beta},$$ # $$m_2=\frac{\alpha^2}{\beta^2}+\frac{\alpha}{\beta^2}.$$ # The solution results in # $$\hat{\alpha}_{MM}=\frac{m_1^2}{m_2-m_1^2}=\frac{m_1^2}{s^2},\hat{\beta}_{MM}=\frac{m_1}{m_2-m_1^2}=\frac{m_1}{s^2}.$$ # We now compute the values of $m_1$ (sample mean) and $s^2=m_2-m_1^2$ (sample variance) from the data. After that, we can compute the estimates. # + colab={"base_uri": "https://localhost:8080/"} id="UBtNJU6gjB23" outputId="3ab063c0-793d-4d3a-edac-1def85206503" x = np.array(df['Col1']) m1 = np.average(x) ss = np.var(x) print(m1) print(ss) # + colab={"base_uri": "https://localhost:8080/"} id="Qdmlr7evlD9R" outputId="504f1abb-cad0-4b9e-9cdd-3bea044584a4" alphaMM = m1*m1/ss betaMM = m1/ss print(alphaMM) print(betaMM) # + [markdown] id="ZLZ853v0nr66" # We can plot the density of the Gamma on top of the density histogram to check if the estimate gives a reasonable fit. # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="wEDEdu7mn21C" outputId="faf6f6f0-17ab-410b-db78-e40c8a5b1310" fig,ax = plt.subplots(1,1) ax.hist(x,density=True,bins=50) xx = np.linspace(0,300,50) ax.plot(xx, st.gamma.pdf(xx,alphaMM,scale=1/betaMM),label='gamma fit MM') ax.legend(loc='best') plt.show() # + [markdown] id="USKGGSPkqXuH" # # Bootstrap # How do we find the bias and variance of the estimator? Theoretical derivations of the sampling distributions may be too cumbersome and difficult in most cases. Bootstrap is a Monte Carlo simulation method for computing metrics such as bias, variance and confidence intervals for estimators. # # In the above example, we have found $\hat{\alpha}_{MM}=1.0123...$ and $\hat{\beta}_{MM}=0.01266...$. 
Using these values, we simulate $n=3935$ *iid* samples from Gamma$(1.0123...,0.0126...)$ and, using the simulated samples, we compute new estimates of $\alpha$ and $\beta$ and call them $\hat{\alpha}_{MM}(1)$ and $\hat{\beta}_{MM}(1)$. Now, repeat the simulation $N$ times to get estimates $\hat{\alpha}_{MM}(i)$ and $\hat{\beta}_{MM}(i)$, $i=1,2,\ldots,N$. # # The sample variance of $\{\hat{\alpha}_{MM}(1), \hat{\alpha}_{MM}(2),\ldots,\hat{\alpha}_{MM}(N)\}$ is taken to be the bootstrap estimate for the variance of the estimator. # + id="NOezpMTR1K3U" N = 1000 n = 3935 alpha_hat = np.zeros(N) beta_hat = np.zeros(N) for i in np.arange(N): xi = st.gamma.rvs(alphaMM,scale=1/betaMM,size=n) m1i = np.average(xi); ssi = np.var(xi) alpha_hat[i] = m1i*m1i/ssi; beta_hat[i] = m1i/ssi # + [markdown] id="C8iJSHwo4xFl" # We can see the histograms of the estimates to get an idea of the spread of the values. # + colab={"base_uri": "https://localhost:8080/", "height": 384} id="LhBsFxOs5Uco" outputId="80a9b660-7cb0-4e67-9532-a32842170aee" ax1 = plt.subplot(121) ax1.hist(alpha_hat,density=True) ax2 = plt.subplot(122) ax2.hist(beta_hat,density=True) # + [markdown] id="hr_MYGs02xiP" # Notice how the histograms look roughly normal. # # The sample standard deviations of the estimates is a bootstrap estimate for the standard error of the estimator. # + colab={"base_uri": "https://localhost:8080/"} id="zePWrNkj2gYb" outputId="ea255ea2-5458-44e6-8902-bf5c781e748b" print(np.sqrt(np.var(alpha_hat))) print(np.sqrt(np.var(beta_hat))) # + [markdown] id="eMyCTvb-7I-r" # ## Confidence intervals # Suppose a parameter $\theta$ is estimated as $\hat{\theta}$, and suppose the distribution of $\hat{\theta}-\theta$ is known. 
Then, to obtain $(100(1-\alpha))$% confidence intervals (typical values are $\alpha=0.1$ for 90% confidence intervals and $\alpha=0.05$ for 95% confidence intervals), we use the CDF of $\hat{\theta}-\theta$ to obtain $\delta_1$ and $\delta_2$ such that # $$P(\hat{\theta}-\theta\le\delta_1)=1-\frac{\alpha}{2},$$ # $$P(\hat{\theta}-\theta\le\delta_2)=\frac{\alpha}{2}.$$ # Actually, the inverse of the CDF of $\hat{\theta}-\theta$ is used to find the above $\delta_1$ and $\delta_2$. From the above, we see that # $$P(\hat{\theta}-\theta \le \delta_1)-P(\hat{\theta}-\theta \le \delta_2)= P(\delta_2< \hat{\theta}-\theta \le \delta_1)=1-\frac{\alpha}{2}-\frac{\alpha}{2}=1-\alpha.$$ # The above is rewritten as # $$P(\hat{\theta}-\delta_1\le\theta<\hat{\theta}-\delta_2)=1-\alpha,$$ # and $[\hat{\theta}-\delta_1,\hat{\theta}-\delta_2]$ is interpreted as the $100(1-\alpha)$% confidence interval. # # ## Bootstrap confidence intervals # The CDF of $\hat{\theta}-\theta$ might be difficult to determine in many cases, and the bootstrap method is used often to estimate $\delta_1$ and $\delta_2$. We consider the list of numbers $\{\hat{\alpha}_{MM}(1)-1.0123...,\ldots,\hat{\alpha}_{MM}(N)-1.0123...\}$ and pick the $100(\alpha/2)$-th percentile and $100(1-\alpha/2)$-th percentile. # + colab={"base_uri": "https://localhost:8080/"} id="_ZbTUlDjggDZ" outputId="9f5cccf2-ddd2-467c-c52e-a2bc22839d7d" del1 = np.percentile(alpha_hat - alphaMM, 97.5) del2 = np.percentile(alpha_hat - alphaMM, 2.5) print([del1,del2]) # + [markdown] id="wPVrUEyxlJxO" # The 95% confidence interval for $\alpha$ using the method of moments estimator works out to $[1.0123-0.0615,1.0123-(-0.0604)]=[0.9508,1.0727]$. # + [markdown] id="UC8o_-D7x9AE" # ## Maximum likelihood # We now turn to the maximum likelihood estimator for $\alpha$ and $\beta$. 
The likelihood $L(x_1,\ldots,x_n)$ can be written as # $$L = \frac{\beta^\alpha}{\Gamma(\alpha)}x_1^{\alpha-1}e^{-\beta x_1}\,\frac{\beta^\alpha}{\Gamma(\alpha)}x_2^{\alpha-1}e^{-\beta x_2}\cdots \frac{\beta^\alpha}{\Gamma(\alpha)}x_n^{\alpha-1}e^{-\beta x_n}= \frac{\beta^{n\alpha}}{\Gamma(\alpha)^n}(x_1\cdots x_n)^{\alpha-1}e^{-\beta(x_1+\cdots+x_n)},$$ # $$\log L = n\alpha\log\beta-n\log\Gamma(\alpha)+(\alpha-1)\log(x_1\cdots x_n)-\beta(x_1+\cdots+x_n).$$ # Differentiating $\log L$ with respect to $\beta$ and equating to zero, we get # $$n\alpha\frac{1}{\beta}-(x_1+\cdots+x_n)=0,\text{or }\alpha=\beta \frac{x_1+\cdots+x_n}{n}.$$ # Differentiating $\log L$ with respect to $\alpha$ and equating to zero, we get # $$n\log\beta-n\frac{\Gamma'(\alpha)}{\Gamma(\alpha)}+\log(x_1\cdots x_n)=0.$$ # So, we get two equations in the two variables $\alpha$ and $\beta$. However, the equations do not have a closed form solution, and we need to solve them numerically or approximately. From the first equation, we have $\log\beta=\log\alpha-\log\frac{x_1+\cdots+x_n}{n}$. Using this in the second equation, we get # $$\log\alpha - \frac{\Gamma'(\alpha)}{\Gamma(\alpha)}=\log\frac{x_1+\cdots+x_n}{n}-\frac{1}{n}\log(x_1\cdots x_n).$$ # We will now solve the above equation to find the ML estimate of $\alpha$. This will be a numerical solution. # + id="GYlRKzRjvt4M" lm1 = np.average(np.log(x)) #Write the equation as a function #digamma is the function Gamma'/Gamma from scipy.special import digamma fML = lambda a: (np.log(a) - digamma(a) - np.log(m1)+lm1) # + [markdown] id="R4XZ35_byjn-" # We can plot the above function to see how it looks. 
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="0JRopCjGynEM" outputId="bb19cdc7-54bd-4807-a5cb-8422261b7f2e" fig, ax = plt.subplots(1,1) xx = np.linspace(0.1,2,50) ax.plot(xx,fML(xx)) ax.grid(True) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="-mbVnUtMyckC" outputId="4a0b61bc-3e86-447d-fcc6-211e49c53634" #For solving numerically, we will use scipy.optimize import scipy.optimize as sopt sol = sopt.root_scalar(fML, bracket=[0.1,2]) sol.root # + colab={"base_uri": "https://localhost:8080/"} id="mtEG7bLVyIE5" outputId="295b5375-5acc-4e48-8d2f-3cb56483b6a1" alphaML = sol.root betaML = alphaML/m1 print([alphaML, betaML]) # + [markdown] id="A8hWFmeSz2L3" # Let us check the fit with the histogram. # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="hKzsq6Snz4ns" outputId="c1b3a621-b9fb-4cd6-b64d-f87d3d1a2377" fig,ax = plt.subplots(1,1) ax.hist(x,density=True,bins=50) xx = np.linspace(0,300,50) ax.plot(xx, st.gamma.pdf(xx,alphaMM,scale=1/betaMM),lw='4',alpha=0.7,label='gamma fit MM') ax.plot(xx, st.gamma.pdf(xx,alphaML,scale=1/betaML),lw='1',label='gamma fit ML') ax.legend(loc='best') plt.show() # + [markdown] id="3KQj7-L_0ZJ9" # Both the curves are literally on top of each other showing very good fit. Let us use the bootstrap method to find variance and confidence intervals for the ML estimator. 
# + id="jyYVisUY0p6p"
# Parametric bootstrap for the ML estimator: simulate N data sets of size n
# from the FITTED ML model and re-estimate (alpha, beta) by ML on each.
# Note: the original code resampled from the MM fit (alphaMM, betaMM); for a
# bootstrap of the ML estimator the replicates must come from the ML fit.
N = 1000  # number of bootstrap replicates
n = 3935  # size of each simulated data set (matches the original sample)
alpha_hatML = np.zeros(N)
beta_hatML = np.zeros(N)
for i in np.arange(N):
    # Draw n iid samples from Gamma(alphaML, betaML); scipy parameterizes
    # by shape and scale = 1/rate.
    xi = st.gamma.rvs(alphaML,scale=1/betaML,size=n)
    m1i = np.average(xi); lm1i = np.average(np.log(xi))
    # Same ML score equation as before, with this replicate's moments.
    fMLi = lambda a: (np.log(a) - digamma(a) - np.log(m1i)+lm1i)
    soli = sopt.root_scalar(fMLi, bracket = [0.1,2])
    # betaML = alphaML / sample mean, from the first ML equation.
    alpha_hatML[i] = soli.root; beta_hatML[i] = soli.root / m1i

# + colab={"base_uri": "https://localhost:8080/"} id="DjhTGkNF1jhV" outputId="eb15718a-06db-489d-d12f-14bdad5824d9"
# Bootstrap estimates of the standard errors of the ML estimators.
print(np.sqrt(np.var(alpha_hatML)))
print(np.sqrt(np.var(beta_hatML)))

# + [markdown] id="zGRsJ7nzS61q"
# We see that the variance of the bootstrap ML estimator is lesser than that of bootstrap MM estimator.
Statistics/stats.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Project Euler 32: find the sum of all products c = a * b whose concatenated
# identity "a b c" uses each of the digits 1-9 exactly once (a "pandigital
# product"). Each product is counted once even if it has several
# factorisations.
#
# Rewrite notes: the original exploratory cells were Python 2 (`print i`,
# `li.next`), compared `isPrime(i) == "False"` (a string, so the branch never
# ran), referenced undefined names (`li23`, `li101`), and brute-forced every
# permutation of 123456789.  The digit count forces the only possible shapes,
# 1-digit x 4-digit = 4-digit and 2-digit x 3-digit = 4-digit, so the search
# below is tiny.

PANDIGITS = set("123456789")


def is_pandigital_product(a, b, c):
    """Return True if the digits of a, b and c are exactly 1-9, each once.

    len == 9 together with set equality to PANDIGITS guarantees there are no
    repeated digits and no zeros.
    """
    digits = f"{a}{b}{c}"
    return len(digits) == 9 and set(digits) == PANDIGITS


def pandigital_product_sum():
    """Return the sum of all distinct pandigital products.

    a*b=c needs 9 digits in total, which only the shapes
    1x4=4 and 2x3=4 digits can satisfy, so a < 99 and b stays
    within the corresponding 3- or 4-digit range.
    """
    products = set()  # a set: 5796 = 12*483 = 27*... must be counted once
    for a in range(1, 99):
        # The smallest/largest b that can keep c at 4 digits for this shape.
        start, stop = (1234, 9877) if a < 10 else (123, 988)
        for b in range(start, stop):
            c = a * b
            if c > 9876:  # product left the 4-digit range; larger b only grows it
                break
            if is_pandigital_product(a, b, c):
                products.add(c)
    return sum(products)


if __name__ == "__main__":
    # Known result for Project Euler problem 32.
    print(pandigital_product_sum())
Python_Code/num32Trial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Lab Assignment 3
#
# ## <NAME>, s2028017
#
# We consider the system $$\frac{dx}{dt}=x(y-1),\quad \frac{dy}{dt}=4-y^2-x^2.$$

# ## Task 1 (2 marks)
#
# Use `SymPy` to find the critical points of the system.

import sympy as sym
sym.init_printing()
from IPython.display import display_latex

# +
# Time symbol and the two unknown state functions of the system.
t = sym.symbols("t")
x = sym.Function("x")
y = sym.Function("y")

# Right-hand sides of x' and y' exactly as given in the problem statement.
x_prime = x(t) * (y(t) - 1)
y_prime = 4 - y(t) ** 2 - x(t) ** 2

deq_x = sym.Eq(x(t).diff(t), x_prime)
deq_y = sym.Eq(y(t).diff(t), y_prime)

# Critical points are exactly the simultaneous zeros of both right-hand sides.
crit_point_dicts = sym.solve([x_prime, y_prime])
crit_points = [(sol[x(t)], sol[y(t)]) for sol in crit_point_dicts]
crit_points
# -

# ## Task 2 (4 marks)
#
# Give your implementation of the `linearise` function from Lab 3.
#
# Use this to find linear approximations of the system around the critical points with $x \geq 0$ and $y \geq 0$. Use the output to classify these critical points (use markdown cells and proper reasoning to explain the type of each critical point).

# +
# Coordinates of the linearised system around a critical point.
u = sym.Function("u")
v = sym.Function("v")


def lin_matrix(eqs, crit_point):
    """Jacobian of (x', y') with respect to (x, y), evaluated at crit_point."""
    f_rhs, g_rhs = (eq.rhs for eq in eqs)
    jacobian = sym.Matrix([f_rhs, g_rhs]).jacobian([x(t), y(t)])
    px, py = crit_point
    return jacobian.subs({x(t): px, y(t): py})


def linearise(eqs, crit_point):
    """Equations (u', v') = J (u, v) of the system linearised at crit_point."""
    jac = lin_matrix(eqs, crit_point)
    rhs = jac * sym.Matrix([u(t), v(t)])
    return [sym.Eq(u(t).diff(t), rhs[0]),
            sym.Eq(v(t).diff(t), rhs[1])]


# Report the linearisation at every critical point in the closed first quadrant.
for point in crit_points:
    px, py = point
    if px < 0 or py < 0:
        continue  # only x >= 0, y >= 0 is required by the task

    print("critical point:")
    display_latex((px, py))

    # Matrix and eigenvalues of the linearised system at this point.
    linearised_matrix = lin_matrix([deq_x, deq_y], point)
    print("linearised matrix, eigenvalues")
    display_latex(linearised_matrix)
    display_latex(list(linearised_matrix.eigenvals().keys()))

    # A printable version of the full linear system.
    print("full linearised system:")
    display_latex(linearise([deq_x, deq_y], point))
    print()
    print()
# -

# We can see here that the point $(0, 2)$ will be unstable as the linearised system has a positive eigenvalue (namely $1$). In contrast, the eigenvalues for the linearised system at the critical point $(\sqrt{3}, 1)$ both have negative real parts so this critical point will be stable.

# ## Task 3 (4 marks)
#
# Produce a phase portrait of the system, with trajectories showing the behaviour around all the critical points. A few trajectories are enough to show this behaviour. Use properly-sized arrows to diplay the vector field (the RHS of the ODE). There are some marks allocated to the quality of your figure in this part. Try to keep it illustrative yet not too cluttered.
# + import numpy as np from matplotlib import pyplot as plt from scipy.integrate import odeint # %matplotlib inline # Get figure and axes fig, ax = plt.subplots(figsize=(12, 9)) # Define x and y derivatives (t variable for use with odeint) def vector_field(xy, t): X, Y = xy return (X*(Y - 1), 4 - Y**2 - X**2) # Get arrays for all the points with -4 < x < 4, -3 < y < 3 X, Y = np.mgrid[-4:4:24j, -3:3: 18j] # Evaluate the vector field and length of each vector at each point X_prime, Y_prime = vector_field((X, Y), None) Magnitude = np.hypot(X_prime, Y_prime) # Plot arrows which are faded if they have large magnitute ax.quiver(X, Y, X_prime, Y_prime, Magnitude, scale=200, pivot = 'mid', cmap = plt.cm.bone) # Pick some initial conditions for phase portraits ics = [[0.2, 2.2], [-0.2, -1.8], [3, 2], [-1.5, 0.5]] durations = [[0, 10], [0, 8], [0, 5], [0, 5]] vcolors = plt.cm.autumn_r(np.linspace(0.5, 1., len(ics))) # colors for each trajectory # plot trajectories for time_span, ic, color in zip(durations, ics, vcolors): t = np.linspace(*time_span, 100) sol = odeint(vector_field, ic, t) x_sol, y_sol = sol.T ax.plot(x_sol, y_sol, color=color, label=f"$(x_0, y_0)$ = {ic}") def split_coords(tuple_list): """Helper function which takes [(a, b), (c, d), (e, f) ... ] and returns [[a, c, e .. ], [b, d, f ...]]""" return np.array(tuple_list).T # Plot black and blue points for the critical points and initial conditions respectivelyl ax.scatter(*split_coords(crit_points), color = "k", label="critical points") ax.scatter(*split_coords(ics), color='b', label="initial conditions") plt.xlabel('x') plt.ylabel('y') plt.legend() plt.xlim(-4, 4) plt.ylim(-3, 3) plt.show() # -
.ipynb_checkpoints/Lab_3_Assignment-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import os import glob os.getcwd() path = 'C:\\Users\\Dell\\Downloads' extension = 'csv' os.chdir(path) result = [i for i in glob.glob('*.{}'.format(extension))] print(result)
All+CSV+Files+in+a+Folder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Read the dataset
# #### Download the dataset from https://www.kaggle.com/mlg-ulb/creditcardfraud

import numpy as np
import pandas as pd

# Load the transactions; 'Class' is the label (0 = genuine, 1 = fraud).
data = pd.read_csv('creditcard.csv')

data.info()
data.describe()
data.head()

# Check for null values.
data.isnull().sum()

# # Visualize the data

# Import plotting libraries.
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# %matplotlib inline

data.hist(figsize=(20, 20), layout=(7, 5), bins=10)

# Check for outliers.
data.plot(kind='box', subplots=True, figsize=(20, 20), layout=(7, 5))

# +
# Compare the distribution for genuine vs. fraud cases for each feature.
# NOTE(review): the loop also plots the 'Class' column against itself,
# which is harmless but carries no information.
features_list = data.columns
plt.figure(figsize=(12, 31 * 4))
gs = gridspec.GridSpec(31, 1)
for i, col in enumerate(features_list):
    ax = plt.subplot(gs[i])
    sns.distplot(data[col][data['Class'] == 0], color='b', label='Genuine Case')
    sns.distplot(data[col][data['Class'] == 1], color='r', label='Fraud Case')
    ax.legend()
plt.show()
# -

# ##### Above plots show that 'Amount','V1','V2','V5','V6','V7','V8','V13','V15','V20','V21','V22','V23','V24','V25','V26','V27','V28','Time' are not useful

# # Model

# Using a random forest classifier because of the imbalanced data.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Drop the features whose class-conditional distributions overlap heavily
# (identified from the plots above); keep the rest as predictors.
columns = ['Amount', 'V1', 'V2', 'V5', 'V6', 'V7', 'V8', 'V13', 'V15', 'V20',
           'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'Time']
X = data.drop(columns, axis=1)
y = data['Class']

random_seed = 200

# Divide the data into training and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=random_seed)

# Define and fit the model, then predict on the held-out set.
rff = RandomForestClassifier(random_state=random_seed)
rff.fit(X_train, y_train)
y_pred_rff = rff.predict(X_test)

# # Accuracy

# FIX: the original cell printed np.sqrt(mean_absolute_error(...)) under the
# label "Root Mean Squared Error" (sqrt of MAE is not RMSE) and r2_score under
# the label "Accuracy" -- both regression metrics applied to a binary
# classification task. For this highly imbalanced problem, report accuracy
# together with the confusion matrix and precision/recall, which are the
# informative metrics.
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

acc = accuracy_score(y_test, y_pred_rff)
print("Accuracy of random forest classifier model is :", acc)
print("Confusion matrix:")
print(confusion_matrix(y_test, y_pred_rff))
print(classification_report(y_test, y_pred_rff))
Projects/credit-card-fraud-detection/Credit_Card_Fraud_Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('diabetes_indian.csv') df.head() df.describe() df.info() X = df.iloc[:, 0:8] y = df.iloc[:, 8] print(X.shape) print(y.shape) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense model = Sequential() model.add(Dense(10, input_dim=8, activation='relu')) model.add(Dense(20, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile()
Sem5/MachineLearning/Tensorflow/.ipynb_checkpoints/tf-demo-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Práctica # ## Catálogo de películas con ficheros y pickle # + from io import open import pickle class Pelicula: # Constructor de clase def __init__(self, titulo, duracion, lanzamiento): self.titulo = titulo self.duracion = duracion self.lanzamiento = lanzamiento print('Se ha creado la película:',self.titulo) def __str__(self): return '{} ({})'.format(self.titulo, self.lanzamiento) class Catalogo: peliculas = [] # Constructor de clase def __init__(self): self.cargar() def agregar(self,p): self.peliculas.append(p) self.guardar() def mostrar(self): if len(self.peliculas) == 0: print("El catálogo está vacío") return for p in self.peliculas: print(p) def cargar(self): fichero = open('catalogo.pckl', 'ab+') fichero.seek(0) try: self.peliculas = pickle.load(fichero) except: print("El fichero está vacío") finally: fichero.close() del(fichero) print("Se han cargado {} películas".format( len(self.peliculas) )) def guardar(self): fichero = open('catalogo.pckl', 'wb') pickle.dump(self.peliculas, fichero) fichero.close() del(fichero) # Destructor de clase def __del__(self): self.guardar() # guardado automático print("Se ha guardado el fichero") # - # ## Creando un objeto catálogo c = Catalogo() c.mostrar() c.agregar( Pelicula("El Padrino", 175, 1972) ) c.agregar( Pelicula("El Padrino: Parte 2", 202, 1974) ) c.mostrar() del(c) # ## Recuperando el catálogo al crearlo de nuevo c = Catalogo() c.mostrar() del(c) c = Catalogo() c.agregar( Pelicula("Prueba", 100, 2005) ) c.mostrar() del(c) c = Catalogo() c.mostrar() # ## Conclusiones # - Trabajamos en memoria, no en el fichero # - Nosotros decidimos cuando escribir los datos: # 1. Al manipular un registro # 2. Al finalizar el programa
MaterialCursoPython/Fase 4 - Temas avanzados/Tema 12 - Manejo de ficheros/Apuntes/Leccion 04 (Apuntes) - Catalogo de peliculas persistente.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# SETI signal-classification training notebook: trains one fold of an
# EfficientNet binary classifier on 896x896 spectrogram images decoded from
# TFRecords on GCS, preferring a TPU when one is available.

# + tags=[]
# !pip install -q efficientnet >> /dev/null
import random, os
import pandas as pd, numpy as np
import matplotlib.pyplot as plt
from kaggle_datasets import KaggleDatasets
import tensorflow as tf, math
import tensorflow_addons as tfa
import efficientnet.tfkeras as efn
import tensorflow.keras.backend as K
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
import glob

# + tags=[]
# ---- Run configuration ----
DEVICE = "TPU" #or "GPU"
SEED = 16
train_fold = 4          # only this fold of the K-fold split is trained below
FOLDS = 5
img_size = (896, 896)   # model input resolution (height, width)
batch_size = 16         # per-replica batch size
EPOCHS = 30
EFF_NET = 3             # index into EFNS below: EfficientNet-B3
weights = 'noisy-student' #'noisy-student' 'imagenet'
add_old_one = False     # optionally append one extra legacy TFRecord to training


def seed_everything(seed = 42):
    # Seed every RNG in play (Python, NumPy, hash, TF) for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    tf.random.set_seed(seed)


seed_everything(SEED)

# Detect and initialise the TPU; otherwise fall back to the default strategy.
if DEVICE == "TPU":
    print("connecting to TPU...")
    try:
        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
        print('Running on TPU ', tpu.master())
    except ValueError:
        print("Could not connect to TPU")
        tpu = None

    if tpu:
        try:
            print("initializing TPU ...")
            tf.config.experimental_connect_to_cluster(tpu)
            tf.tpu.experimental.initialize_tpu_system(tpu)
            strategy = tf.distribute.experimental.TPUStrategy(tpu)
            print("TPU initialized")
        except _:
            # NOTE(review): '_' is not defined here -- if initialisation ever
            # raises, this handler itself fails with a NameError. It was
            # presumably meant to be 'except Exception:'.
            print("failed to initialize TPU")
    else:
        DEVICE = "GPU"

if DEVICE != "TPU":
    print("Using default strategy for CPU and single GPU")
    strategy = tf.distribute.get_strategy()

if DEVICE == "GPU":
    print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync  # global batch = batch_size * REPLICAS
print(f'REPLICAS: {REPLICAS}')

# + tags=[]
# TFRecord shards of the training data on GCS; the shard names encode their
# example counts (see count_data_items below).
GCS_PATH = KaggleDatasets().get_gcs_path('setitraintfrecords')
files_all = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/train_*.tfrec')))
files_all

# + tags=[]
def mixup(image, label):
    # input image - is a batch of images of size [n,dim,dim,3] not a single image of [dim,dim,3]
    # output - a batch of images with mixup applied
    imgs = []; labs = []
    for j in range(batch_size * REPLICAS):
        # CHOOSE RANDOM
        # NOTE(review): the mixing partner index is drawn from
        # [0, batch_size) rather than [0, batch_size * REPLICAS) -- confirm
        # the restriction to the first per-replica batch is intentional.
        k = tf.cast( tf.random.uniform([], 0, batch_size), tf.int32)
        p = tf.random.uniform([], 0.15, 0.35)  # mixing weight of the partner
        # MAKE MIXUP IMAGE
        img1 = image[j,]
        img2 = image[k,]
        lab1 = tf.cast(label[j], tf.float32)
        lab2 = tf.cast(label[k], tf.float32)
        imgs.append((1-p)*img1 + p*img2)
        labs.append((1-p)*lab1 + p*lab2)
    # RESHAPE HACK SO TPU COMPILER KNOWS SHAPE OF OUTPUT TENSOR (maybe use Python typing instead?)
    image2 = tf.reshape(tf.stack(imgs),(batch_size * REPLICAS, img_size[0], img_size[1], 3))
    label2 = tf.reshape(tf.stack(labs),(batch_size * REPLICAS, 1))
    return image2,label2


def prepare_image(img, augment=True, dim=256):
    # Decode one raw float16 signal blob into a model-ready RGB image.
    # NOTE(review): despite its int default, 'dim' is indexed as dim[0]/dim[1]
    # below and is always called with the img_size tuple -- an int would fail.
    img = tf.io.decode_raw(img, tf.float16)
    img = tf.reshape(img, [273, 256, 3])  # stored layout: 3 channels of 273x256
    img = tf.cast(img, tf.float32)
    # Clip extreme values then rescale; the /3.0 maps the clipped range
    # roughly into [-2, 2].
    img = tf.clip_by_value(img, clip_value_min=-6.0, clip_value_max=6.0)
    img = img / 3.0
    #img = tf.image.resize(img, [img_resize[0], img_resize[1]])
    # Stack the 3 channels vertically into one tall grayscale image, then
    # replicate to RGB and resize to the model's input resolution.
    img = tf.unstack(img, axis = 2)
    img = tf.concat(img, axis = 0)
    img = tf.expand_dims(img, 2)
    img = tf.image.grayscale_to_rgb(img)
    img = tf.image.resize(img, [img_size[0], img_size[1]])
    if augment:
        # Random flips only; the cutout augmentation below was tried and
        # left disabled.
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_flip_up_down(img)
        #if tf.random.uniform(shape=[]) < 0.25:
        #    r_int = tf.random.uniform(shape=(), minval=32 , maxval=64 , dtype=tf.int32)
        #    r_int = r_int - (r_int % 2)
        #    img = tfa.image.random_cutout(tf.expand_dims(img, 0), (r_int, dim[0] - 2))
        #    img = tf.squeeze(img)
        #if tf.random.uniform(shape=[]) < 0.125:
        #    r_int = tf.random.uniform(shape=(), minval=32 , maxval=64 , dtype=tf.int32)
        #    r_int = r_int - (r_int % 2)
        #    img = tfa.image.random_cutout(tf.expand_dims(img, 0), (r_int, dim[0] - 2))
        #    img = tf.squeeze(img)
    img = tf.reshape(img, [dim[0], dim[1], 3])
    return img


def count_data_items(filenames):
    # Each shard name ends in _<count>.tfrec, so the total example count can
    # be read from the file names without opening them.
    n = [int(filename.split('_')[-1].split('.')[0]) for filename in filenames]
    return np.sum(n)


def read_labeled_tfrecord(example):
    # Parse one serialized example into (raw signal bytes, integer target).
    tfrec_format = {
        'signal' : tf.io.FixedLenFeature([], tf.string),
        'id'     : tf.io.FixedLenFeature([], tf.string),
        'target' : tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, tfrec_format)
    return example['signal'], example['target']


def get_dataset(files, augment = False, shuffle = False, repeat = False,
                labeled=True, return_image_names=True, batch_size=16, dim=256):
    # Build the tf.data input pipeline: read -> (shuffle/repeat) -> parse ->
    # decode/augment -> batch -> prefetch -> (mixup).
    ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO)
    ds = ds.cache()

    if repeat:
        ds = ds.repeat()

    if shuffle:
        ds = ds.shuffle(1024*8)
        # Trade determinism for throughput while shuffling.
        opt = tf.data.Options()
        opt.experimental_deterministic = False
        ds = ds.with_options(opt)

    if labeled:
        ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO)
    else:
        # NOTE(review): read_unlabeled_tfrecord is not defined anywhere in
        # this notebook; calling get_dataset(labeled=False) would raise a
        # NameError.
        ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names),
                    num_parallel_calls=AUTO)

    ds = ds.map(lambda img, imgname_or_label: (prepare_image(img, augment=augment, dim=dim),
                                               imgname_or_label),
                num_parallel_calls=AUTO)

    ds = ds.batch(batch_size * REPLICAS)
    ds = ds.prefetch(AUTO)
    if augment:
        # mixup operates on whole batches, so it must come after batch().
        ds = ds.map(mixup, num_parallel_calls = AUTO)
    return ds


# Candidate backbones, indexed by the EFF_NET configuration value.
EFNS = [efn.EfficientNetB0, efn.EfficientNetB1, efn.EfficientNetB2,
        efn.EfficientNetB3, efn.EfficientNetB4, efn.EfficientNetB5,
        efn.EfficientNetB6]


def build_model(dim=128, ef=0):
    # EfficientNet backbone + global pooling + dropout + sigmoid head,
    # compiled for binary classification with AUC as the tracked metric.
    # NOTE(review): like prepare_image, 'dim' is indexed as a (h, w) tuple.
    inp = tf.keras.layers.Input(shape=(dim[0],dim[1],3))
    base = EFNS[ef](input_shape=(dim[0],dim[1],3), weights=weights,include_top=False)
    x = base(inp)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.25)(x)
    x = tf.keras.layers.Dense(1,activation='sigmoid')(x)
    model = tf.keras.Model(inputs=inp,outputs=x)
    opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    loss = tf.keras.losses.BinaryCrossentropy() #label_smoothing=0.05
    model.compile(optimizer=opt,loss=loss,metrics=['AUC'])
    return model


def get_lr_callback(batch_size=8):
    # Learning-rate schedule: linear warm-up for lr_ramp_ep epochs, optional
    # hold at lr_max, then exponential decay towards lr_min. lr_max scales
    # with the global batch size.
    lr_start   = 0.000005
    lr_max     = 0.0000025 * REPLICAS * batch_size * 2
    lr_min     = 0.000005
    lr_ramp_ep = 5
    lr_sus_ep  = 0
    lr_decay   = 0.8

    def lrfn(epoch):
        if epoch < lr_ramp_ep:
            lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start
        elif epoch < lr_ramp_ep + lr_sus_ep:
            lr = lr_max
        else:
            lr = (lr_max - lr_min) * lr_decay**(epoch - lr_ramp_ep - lr_sus_ep) + lr_min
        return lr

    lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
    return lr_callback

# + tags=[]
# USE VERBOSE=0 for silent, VERBOSE=1 for interactive, VERBOSE=2 for commit
VERBOSE = 1
DISPLAY_PLOT = True

auc_ = []  # best validation AUC of each trained fold
skf = KFold(n_splits=FOLDS,shuffle=True,random_state=SEED)
# The K-fold split is over TFRecord *shards*, not individual examples.
for fold,(idxT,idxV) in enumerate(skf.split(np.arange(len(files_all)))):
    if fold != train_fold:
        continue  # this run trains a single, pre-selected fold

    # DISPLAY FOLD INFO
    if DEVICE=='TPU':
        if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)
    print('#'*25); print('#### FOLD',fold+1)

    # CREATE TRAIN AND VALIDATION SUBSETS
    if add_old_one:
        files_train = tf.io.gfile.glob([files_all[x] for x in idxT] + [GCS_PATH + '/old_one_10012.tfrec'])
    else:
        files_train = tf.io.gfile.glob([files_all[x] for x in idxT])
    np.random.shuffle(files_train); print('#'*25)
    files_valid = tf.io.gfile.glob([files_all[x] for x in idxV])

    # BUILD MODEL (inside the distribution strategy's scope)
    K.clear_session()
    with strategy.scope():
        model = build_model(dim=img_size ,ef=EFF_NET)

    # SAVE BEST MODEL EACH FOLD (by validation AUC, weights only)
    sv = tf.keras.callbacks.ModelCheckpoint(
        'fold-%i.h5'%fold, monitor='val_auc', verbose=0, save_best_only=True,
        save_weights_only=True, mode='max', save_freq='epoch')

    # TRAIN
    # NOTE(review): steps_per_epoch mixes true division and floor division,
    # so it evaluates to a float -- presumably Keras coerces it; verify.
    history = model.fit(
        get_dataset(files_train, augment=True, shuffle=True, repeat=True,
                    dim=img_size, batch_size = batch_size),
        epochs=EPOCHS, callbacks = [sv,get_lr_callback(batch_size)],
        steps_per_epoch=count_data_items(files_train)/batch_size//REPLICAS,
        validation_data=get_dataset(files_valid,augment=False,shuffle=False,
                                    repeat=False,dim=img_size),
        verbose=VERBOSE)
    #model.load_weights('fold-%i.h5'%fold)

    print()
    print('--------------- MAX AUC :- ', np.max(history.history['val_auc']))
    print()
    auc_.append(np.max(history.history['val_auc']))

    # PLOT TRAINING: AUC curves on the left axis, losses on a twin axis.
    if DISPLAY_PLOT:
        plt.figure(figsize=(15,5))
        plt.plot(np.arange(EPOCHS),history.history['auc'],'-o',label='Train AUC',color='#ff7f0e')
        plt.plot(np.arange(EPOCHS),history.history['val_auc'],'-o',label='Val AUC',color='#1f77b4')
        x = np.argmax( history.history['val_auc'] ); y = np.max( history.history['val_auc'] )
        xdist = plt.xlim()[1] - plt.xlim()[0]; ydist = plt.ylim()[1] - plt.ylim()[0]
        plt.scatter(x,y,s=300,color='#1f77b4')
        plt.ylabel('AUC',size=14); plt.xlabel('Epoch',size=14)
        plt.legend(loc=2)
        plt2 = plt.gca().twinx()
        plt2.plot(np.arange(EPOCHS),history.history['loss'],'-o',label='Train Loss',color='#2ca02c')
        plt2.plot(np.arange(EPOCHS),history.history['val_loss'],'-o',label='Val Loss',color='#d62728')
        x = np.argmin( history.history['val_loss'] ); y = np.min( history.history['val_loss'] )
        ydist = plt.ylim()[1] - plt.ylim()[0]
        plt.ylabel('Loss',size=14)
        plt.legend(loc=3)
        plt.show()

# + tags=[]

# + tags=[]

# + tags=[]
Code/train_tf-b3ns-seti-896.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="CG77DrrB2CrU" # ## 範例 # 參考 `train.py` 實現的訓練模型程式碼範例 # # + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" executionInfo={"elapsed": 5490, "status": "ok", "timestamp": 1576077262614, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": "03195633593178984431"}, "user_tz": -480} id="NCEP-DG0VxlV" outputId="b04620ee-b628-45d7-e1aa-2df1dfed29bc" # %tensorflow_version 1.x # 確保 colob 中使用的 tensorflow 是 1.x 版本而不是 tensorflow 2 import tensorflow as tf print(tf.__version__) # + colab={"base_uri": "https://localhost:8080/", "height": 281} colab_type="code" executionInfo={"elapsed": 13417, "status": "ok", "timestamp": 1576077270558, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": "03195633593178984431"}, "user_tz": -480} id="eXT7SQe0KQxv" outputId="6c9e3a54-b626-49ca-a15a-fca187daf6e6" pip install keras==2.2.4 # 需要安裝 keras 2.2.4 的版本 # + colab={"base_uri": "https://localhost:8080/", "height": 160} colab_type="code" executionInfo={"elapsed": 44774, "status": "ok", "timestamp": 1576077316014, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": "03195633593178984431"}, "user_tz": -480} id="vELO-PTVxAtm" outputId="fd66eb35-de5c-486c-82e2-3844480c735d" from google.colab import drive drive.mount('/content/gdrive') # 將 google drive 掛載在 colob, # 下載基於 keras 的 yolov3 程式碼 # %cd 'gdrive/My Drive' # # !git clone https://github.com/qqwweee/keras-yolo3 # 如果之前已經下載過就可以註解掉 # %cd keras-yolo3 # + 
colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 10242, "status": "ok", "timestamp": 1576077318093, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": "03195633593178984431"}, "user_tz": -480} id="Avxgh7T7yp2g" outputId="5dfe9523-6b7b-4118-d283-11b97e980de1" import os if not os.path.exists("model_data/yolo.h5"): # 下載 yolov3 的網路權重,並且把權重轉換為 keras 能夠讀取的格式 print("Model doesn't exist, downloading...") os.system("wget https://pjreddie.com/media/files/yolov3.weights") print("Converting yolov3.weights to yolo.h5...") os.system("python convert.py yolov3.cfg yolov3.weights model_data/yolo.h5") else: print("Model exist") # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1607, "status": "ok", "timestamp": 1576077322025, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": "03195633593178984431"}, "user_tz": -480} id="p0Ae5_Q5QD2d" outputId="3a449174-245e-4070-e6ca-2debc2afb605" # 直接下載 VOC2007 的資料集作為範例 if not os.path.exists("VOCdevkit"): os.system("wget http://pjreddie.com/media/files/VOCtrainval_06-Nov-2007.tar") # 下載 VOC 資料集 os.system("tar xvf VOCtrainval_06-Nov-2007.tar") # 解壓縮資料集,會花幾分鐘 else: print("data exists") # + [markdown] colab_type="text" id="jgbDAK7NiHW3" # 以下的程式碼負責把下載下來的 Pascal VOC 資料集轉換為訓練模型時需要的格式,直接引用 https://github.com/qqwweee/keras-yolo3/blob/master/voc_annotation.py ,調整了一部分程式碼,讓我們只 sample 前 100 張圖片來做示範,在 colab 中會花一些時間跑完 # + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" executionInfo={"elapsed": 165564, "status": "ok", "timestamp": 1576077489564, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": 
"03195633593178984431"}, "user_tz": -480} id="vd7puN2WLjMA" outputId="d64a5f67-3dcb-4c34-971d-07f9f078327a" if not os.path.exists("2007_train.txt"): # 範例中訓練模型時所使用的,已經做好轉換的 annotation 檔名,增加這個檢查避免每次重新跑這段轉換的程式碼 import xml.etree.ElementTree as ET # 載入能夠 Parser xml 文件的 library from os import getcwd sets=[('2007', 'train'), ('2007', 'val')] # Pascal VOC 的資料類別 classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] # 把 annotation 轉換訓練時需要的資料形態 def convert_annotation(year, image_id, list_file): in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id)) tree=ET.parse(in_file) root = tree.getroot() for obj in root.iter('object'): difficult = obj.find('difficult').text cls = obj.find('name').text if cls not in classes or int(difficult)==1: continue cls_id = classes.index(cls) xmlbox = obj.find('bndbox') b = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text), int(xmlbox.find('xmax').text), int(xmlbox.find('ymax').text)) list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id)) wd = "." 
for year, image_set in sets: image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt'%(year, image_set)).read().strip().split() annotation_path = '%s_%s.txt'%(year, image_set) list_file = open(annotation_path, 'w') print("save annotation at %s" % annotation_path) for image_id in image_ids[:100]: # 只處理 100 張圖片來做範例 list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg'%(wd, year, image_id)) convert_annotation(year, image_id, list_file) list_file.write('\n') list_file.close() # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3782, "status": "ok", "timestamp": 1576078562308, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": "03195633593178984431"}, "user_tz": -480} id="-vhfpWt92WZS" outputId="8d125a51-4135-4e94-a1a7-998a0e5f4534" # 將 train.py 所需要的套件載入 import numpy as np import keras.backend as K from keras.layers import Input, Lambda from keras.models import Model from keras.optimizers import Adam from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss from yolo3.utils import get_random_data # + colab={} colab_type="code" id="gkydbfQ2GUts" from train import get_classes, get_anchors, create_model, create_tiny_model, data_generator, data_generator_wrapper # + [markdown] colab_type="text" id="VKDXIfXIllJS" # 把 YOLO weights 轉換為能夠提供給 keras 作為訓練新模型的初始權重,注意這部分多了一個 `-w` 的參數,可以參考 https://github.com/qqwweee/keras-yolo3/blob/master/convert.py#L242 以及 https://stackoverflow.com/questions/42621864/difference-between-keras-model-save-and-model-save-weights 理解其中差別 # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1582, "status": "ok", "timestamp": 1576078555069, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AAuE7mCXX8JZZ0DBZWt2P1EC5LvipxZDIWxKz2uCJ7bzpQ=s64", "userId": "03195633593178984431"}, "user_tz": -480} id="bxTnBxKISVSc" outputId="1a2c32bf-b332-4442-cb58-85da65bb12ac" if not os.path.exists("model_data/yolo_weights.h5"): print("Converting pretrained YOLOv3 weights for training") os.system("python convert.py -w yolov3.cfg yolov3.weights model_data/yolo_weights.h5") else: print("Pretrained weights exists") # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 795498, "status": "ok", "timestamp": 1575386579385, "user": {"displayName": "\u675c\u9756\u6137", "photoUrl": "<KEY>", "userId": "03195633593178984431"}, "user_tz": -480} id="jTQQyxP9Gnem" outputId="d7c8ed80-3b44-464f-810f-28df603d7bd8" annotation_path = '2007_train.txt' # 轉換好格式的標註檔案 log_dir = 'logs/000/' # 訓練好的模型儲存的路徑 classes_path = 'model_data/voc_classes.txt' anchors_path = 'model_data/yolo_anchors.txt' class_names = get_classes(classes_path) num_classes = len(class_names) anchors = get_anchors(anchors_path) input_shape = (416,416) # multiple of 32, hw is_tiny_version = len(anchors)==6 # default setting if is_tiny_version: model = create_tiny_model(input_shape, anchors, num_classes, freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5') else: model = create_model(input_shape, anchors, num_classes, freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze logging = TensorBoard(log_dir=log_dir) checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor='val_loss', save_weights_only=True, save_best_only=True, period=3) reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1) early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1) # 分為 training 以及 validation val_split = 0.1 with open(annotation_path) as f: lines = f.readlines() np.random.seed(10101) 
np.random.shuffle(lines) np.random.seed(None) num_val = int(len(lines)*val_split) num_train = len(lines) - num_val # Train with frozen layers first, to get a stable loss. # Adjust num epochs to your dataset. This step is enough to obtain a not bad model. # 一開始先 freeze YOLO 除了 output layer 以外的 darknet53 backbone 來 train if True: model.compile(optimizer=Adam(lr=1e-3), loss={ # use custom yolo_loss Lambda layer. 'yolo_loss': lambda y_true, y_pred: y_pred}) batch_size = 16 print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) # 模型利用 generator 產生的資料做訓練,強烈建議大家去閱讀及理解 data_generator_wrapper 在 train.py 中的實現 model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), epochs=50, initial_epoch=0, callbacks=[logging, checkpoint]) model.save_weights(log_dir + 'trained_weights_stage_1.h5') # Unfreeze and continue training, to fine-tune. # Train longer if the result is not good. 
# Stage 2: unfreeze everything and fine-tune the whole network at a lower learning rate.
if True:
    # Make every layer trainable.
    for i in range(len(model.layers)):
        model.layers[i].trainable = True
    model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
    print('Unfreeze all of the layers.')

    batch_size = 16 # note that more GPU memory is required after unfreezing the body
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
        steps_per_epoch=max(1, num_train//batch_size),
        validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
        validation_steps=max(1, num_val//batch_size),
        epochs=100,
        initial_epoch=50,  # continue counting from the 50 stage-1 epochs
        callbacks=[logging, checkpoint, reduce_lr, early_stopping])
    model.save_weights(log_dir + 'trained_weights_final.h5')

# +
# Smoke-test the trained detector on a single image.
from PIL import Image
image = Image.open('dog.jpg')

# +
from yolo import YOLO
yolo_model = YOLO(model_path=log_dir + 'trained_weights_final.h5', classes_path=classes_path)
r_image = yolo_model.detect_image(image)

# +
r_image

# +
# Peek at the generated annotation file.
with open("2007_train.txt", "r") as f:
    d = f.readlines()
d
Day41/Day41_train_yolov3_Sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise Solutions # --- # ## Pythonic Exercises # # # 1. Create a list of your favourite superheros and another list of their secret identities. # 1. Convert your two lists into a dictionary. (Can you do it in one line?) # 1. Remove one of your heroes and add a villain to your dictionary. # 1. Add a character that has multiple identities to your dictionary. (What kind of object should this be?) # 1. Demonstrate that you can look up one of your character's identities. # # # | Superhero | Identity | # |:-----------:|:---------------------------:| # | Iron Man | <NAME> | # | The Thing | <NAME> | # | Storm | <NAME> | # | Spider-Man | <NAME>, <NAME> | # # # **Example Solution** # # - Create a dictionary from two lists using *e.g.* `zip`. # # ```python # heroes = dict(zip(['Iron Man', 'The Thing', 'Storm'], ['<NAME>', '<NAME>', '<NAME>'])) # ``` # # - Remove one key-value pair from the dictionary *e.g.* using `pop`. # # ```python # heroes.pop('The Thing') # ``` # # - Add new key-value pair to the dictionary *e.g.* using assignment. # # ```python # heroes['Dr. Doom'] = '<NAME>' # ``` # # - Add new key-value pair to the dictionary, for which the value is another dictionary *e.g.* using `update`. # # ```python # heroes.update({'Spider-Man': {'original': '<NAME>', 'new': '<NAME>'}}) # ``` # # - Look up a value in a nested dictionary. # # ```python # print(heroes['Spider-Man']['original']) # ``` # > `<NAME>` # 2. Use Hubble's law ($v=H_{0}\,D$) to calculate the distance (in Mpc) of the galaxies in the following table. # # |Galaxy|Velocity (km/s)| # |------|---------------| # |NGC 123|1320| # |NGC 2342|5690| # |NGC 4442|8200| # # Remember that $H_0 \approx 70$ km/s/Mpc. # # **Example Solution** # # - Define a function to calculate the Hubble distance. 
#
# ```python
# def dis(vel):
#     return round(vel / 70., 2)
# ```
#
# - Create a list comprehension object that uses the function.
#
# ```python
# res = [dis(vel) for vel in (1320, 5690, 8200)]
# ```
#
# - Check the results.
#
# ```python
# print('Distances =', res)
# ```
#
# > `Distances = [18.86, 81.29, 117.14]`

# 3. Flatten the following list using list comprehension.
#
# ```python
# mylist = [[[1, 2], [3, 4, 5]], [[6], [7, 8]]]
# ```
#
# **Example Solution**
#
# - Flatten list using list comprehension.
#
# ```python
# print([value for sublist in mylist for subsublist in sublist for value in subsublist])
# ```
# > `[1, 2, 3, 4, 5, 6, 7, 8]`

# 4. Write a generator function that can be used to calculate the Fibonacci sequence.
#
# **Example Solution**
#
# - Write a generator function that uses the `yield` statement.
#
# ```python
# def fib():
#
#     prev_value = 0
#     new_value = 1
#
#     while True:
#
#         yield prev_value
#
#         prev_value, new_value = new_value, new_value + prev_value
# ```
#
# - Create a generator object.
#
# ```python
# f = fib()
# ```
#
# - Check that the generator returns the correct sequence.
#
# ```python
# print(next(f))
# print(next(f))
# print(next(f))
# print(next(f))
# print(next(f))
# print(next(f))
# ```
#
# >`0`
# >`1`
# >`1`
# >`2`
# >`3`
# >`5`

# ## Class Exercises (Part I)
#
# 1. Write a class to calculate the distance between two massive objects using Newton's law of universal gravitation.
#     1. Use it to determine the distance between the Earth and the Moon in kilometres.
#     1. Then use it to determine the distance between the Earth and the Sun in kilometres.
# # $$F=\frac{Gm_1m_2}{r^2}$$ # # # # > You may find the following values useful: # * Gravitational Constant: $G = 6.674\times 10^{-11}~\textrm{m}^3 \textrm{kg}^{-1} \textrm{s}^{-2}$ # * Mass of the Earth: $M_\oplus = 5.972\times 10^{24}~\textrm{kg}$ # * Mass of the Moon: $m = 7.348\times 10^{22}~\textrm{kg}$ # * Mass of the Sun: $M_\odot = 1.989\times 10^{30}~\textrm{kg}$ # * Force of attraction between the Earth and the Moon: $F = 1.986\times 10^{20}~\textrm{N}$ # * Force of attraction between the Earth and the Sun: $F = 3.6\times 10^{22}~\textrm{N}$ # # **Example Solution 1** # # - Write a class with an `__init__` method. # # ```python # class Grav: # # def __init__(self, mass1, mass2, force): # self.G = 6.67408e-11 # self.m1 = mass1 # self.m2 = mass2 # self.f = force # # def radius(self): # return sqrt(self.G * self.m1 * self.m2 / self.f) / 1000 # ``` # # - Check the results. # # ```python # print('{:.2E} km'.format(Grav(5.972e24, 7.34767309e22, 1.986e20).radius())) # print('{:.2E} km'.format(Grav(5.972e24, 1.989e30, 3.6e22).radius())) # ``` # >`3.84E+05 km` # >`1.48E+08 km` # # **Example Solution 2** # # - Write a class with a `classmethod`. # # ```python # class Grav: # G = 6.67408e-11 # # @classmethod # def radius(cls, m1, m2, f): # return sqrt(cls.G * m1 * m2 / f) / 1000 # ``` # # - Check the results. # # ```python # print('{:.2E} km'.format(Grav.radius(5.972e24, 7.34767309e22, 1.986e20))) # print('{:.2E} km'.format(Grav.radius(5.972e24, 1.989e30, 3.6e22))) # ``` # >`3.84E+05 km` # >`1.48E+08 km` # 2. Write a class that generates a numpy array of whole numbers from 0 to a given limit and has a method that returns a given metric on this array. # 1. Use it to return the median of an array of 20 values. # 1. Then use it to return the standard deviation of an array of 50 values. # # **Example Solution** # # - Import the numpy package. 
# # ```python # import numpy as np # ``` # # - Write a class that instantiates with a range limit and that contains a method that can take a numpy metric as an argument. # # ```python # class ArrMet: # # def __init__(self, limit): # # self._arr = np.arange(limit) # # def get_metric(self, metric): # # return metric(self._arr) # ``` # # - Check the results. # # ```python # print('The median is:', ArrMet(20).get_metric(np.median)) # print('The standard deviation is:', ArrMet(50).get_metric(np.std)) # ``` # >`The median is: 9.5` # >`The standard deviation is: 14.430869689661812` # 3. Write a class that can be used to store galaxy properties, in particular right ascension, declination and redshift. # 1. Make sure you only permit appropriate values for these parameters. # 1. Make it such that your class instances can be added and subtracted to create a new instance for which the all attributes are updated. # 1. Include an appropriate representation for your class instances. # 1. Finally, create instances to demonstrate that your class works. # # **Example Solution** # # - Write a class using `property` decorators then overload the `__add__`, `__sub__` and `__repr__` methods. # # ```python # class Galaxy: # # def __init__(self, ra, dec, z): # # self.ra = ra # self.dec = dec # self.z = z # # @property # def ra(self): # # return self._ra # # @ra.setter # def ra(self, value): # # if not isinstance(value, float) or value < 0. or value > 360.: # raise ValueError('Invalid RA value') # # self._ra = value # # @property # def dec(self): # # return self._dec # # @dec.setter # def dec(self, value): # # if not isinstance(value, float) or value < -90. 
or value > 90.: # raise ValueError('Invalid Dec value') # # self._dec = value # # @property # def z(self): # # return self._z # # @z.setter # def z(self, value): # # if not isinstance(value, float) or value < 0.: # raise ValueError('Invalid z value') # # self._z = value # # def __add__(self, inst): # # return Galaxy(self.ra + inst.ra, self.dec + inst.dec, self.z + inst.z) # # def __sub__(self, inst): # # return Galaxy(self.ra - inst.ra, self.dec - inst.dec, self.z - inst.z) # # def __repr__(self): # # return 'Galaxy({:.1f}, {:.1f}, {:.1f})'.format(self.ra, self.dec, self.z) # ``` # # - Test the addition of two instances. # # ```python # g1 = Galaxy(30., 40., 0.4) # g2 = Galaxy(10., 20., 0.2) # # g3 = g1 + g2 # g4 = g1 - g2 # print(g3) # print(g4) # ``` # >`Galaxy(40.0, 60.0, 0.6)` # >`Galaxy(20.0, 20.0, 0.2)` # # - Test the exception handling. # # ```python # Galaxy(30., 40., 0.4) - Galaxy(40., 40., 0.4) # ``` # > `ValueError: Invalid RA value` # ## Class Exercises (Part II) # # 1. Create a parent class and a child class that can identify its progenitor. # 1. Your parent class should have the class attribute `parent_name` with the value of your choice. # 1. Your child class should have the attribute `name` with the value of your choice. # 1. Printing an instance of your child class should contain its name and its parent's name. *e.g.* # # ```python # print(Child('Thor')) # <NAME> # ``` # # **Example Solution** # # - Write a parent class. # # ```python # class Odin: # # parent_name = 'Odin' # # def __str__(self): # # return '{} {}son'.format(self.name, self.parent_name) # ``` # # - Write a child class. # # ```python # class Child(Odin): # # def __init__(self, name): # # self.name = name # super().__init__() # ``` # # - Test the class. # # ```python # print(Child('Thor')) # print(Child('Loky')) # ``` # >`<NAME>` # >`<NAME>` # # 2. Define a class that can be initialised with composer classes that have been constrained by an abstract class. # 1. 
Define an abstract class called `EarthAttr` that has the abstract method `whatami`, which should return a string of your choice.
#     1. Define at least two composer classes (*e.g.* `Moon` and `Core`) that satisfy the requirements of `EarthAttr`.
#     1. Define a class called `Earth` that composes these classes to get the `whatami` attribute.
#     1. Printing an instance of your `Earth` class should include the value of `whatami`. *e.g.*
#
# ```python
# print(Earth(Moon))
# The Earth has a moon!
# ```
#
# 5. Finally, define a final composer class (*e.g.* `Lake`) and demonstrate that it will not instantiate if not correctly constrained by `EarthAttr`. You should get the following error:
#
# ```bash
# 'Can't instantiate abstract class Lake with abstract methods whatami'
# ```
#
# **Example Solution**
#
# - Write an abstract parent class.
#
# ```python
# class EarthAttr(ABC):
#
#     @abstractmethod
#     def whatami():
#         pass
# ```
#
# - Write child classes that inherit from the parent.
#
# ```python
# class Moon(EarthAttr):
#
#     @staticmethod
#     def whatami():
#         return 'moon'
#
# class Core(EarthAttr):
#
#     @staticmethod
#     def whatami():
#         return 'core'
# ```
#
# - Write a class that uses the child classes as composers.
#
# ```python
# class Earth:
#
#     def __init__(self, comp):
#         self.attr = comp.whatami()
#
#     def __str__(self):
#
#         return 'The Earth has a {}!'.format(self.attr)
# ```
#
# - Test the class.
#
# ```python
# print(Earth(Moon))
# print(Earth(Core))
# ```
# >`The Earth has a moon!`
# >`The Earth has a core!`
#
# - Test the exception handling.
#
# ```python
# class Lake(EarthAttr):
#     pass
#
# Lake()
# ```
# >`TypeError: Can't instantiate abstract class Lake with abstract methods whatami`
Exercise-Solutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ML2CPP

# ## Preparing the dataset

# +
from sklearn import datasets
import numpy as np
import pandas as pd

boston = datasets.load_boston()

def populate_table(tablename, feature_names):
    """Dump the Boston housing data to CSV with columns idx, <features>, TGT.

    idx and the TGT target are cast to int; floats are written with 14
    significant digits so the generated C++ scorer reads the same values.
    """
    X = boston.data
    y = boston.target
    N = X.shape[0]
    y = y.reshape(N,1)
    k = np.arange(N).reshape(N, 1)  # explicit row-index column
    k_X_y = np.concatenate((k, X, y) , axis=1)
    lTable=pd.DataFrame(k_X_y)
    # print(lTable.head())
    lTable.columns = ['idx'] + feature_names + ['TGT'];
    lTable['TGT'] = lTable['TGT'].apply(int)
    lTable['idx'] = lTable['idx'].apply(int)
    lTable.to_csv(tablename , float_format='%.14g')
# -

# NOTE(review): "table": "iris" looks like copy-paste from an iris example -- this is Boston housing data.
metadata = {"primary_key" : "KEY",
            "features" : list(boston.feature_names),
            "targets" : ["TGT"],
            "table" : "iris"}

populate_table("/tmp/boston.csv" , metadata["features"])

df = pd.read_csv("/tmp/boston.csv")
df.sample(12, random_state=1960)

# ## Training a Model

# +
# train any scikit model on the dataset
from sklearn.svm import SVR

clf = SVR()
# NOTE(review): y is passed as an (N, 1) column; sklearn expects 1-D and will warn -- consider .ravel().
clf.fit(df[metadata['features']].values, df[metadata['targets']].values)
# -

# ## Deploying the Model

# +
def generate_cpp_for_model(model):
    """POST the pickled model to the sklearn2sql web service; return the generated C++ source."""
    import pickle, json, requests, base64
    b64_data = base64.b64encode(pickle.dumps(model)).decode('utf-8')
    # send the model to the web service
    json_data={"Name":"model_cpp_sample",
               "PickleData":b64_data ,
               "SQLDialect":"CPP",
               "FeatureNames" : metadata['features']}
    r = requests.post("https://sklearn2sql.herokuapp.com/model", json=json_data)
    content = r.json()
    # "SQLGenrationResult" (sic) is the key the service actually returns.
    lCPP = content["model"]["SQLGenrationResult"][0]["SQL"]
    # print(lCPP);
    return lCPP

lCPPCode = generate_cpp_for_model(clf);
# -

print(lCPPCode)

def write_text_to_file(iCPPCode, oCPPFile):
    """Write the given text to oCPPFile, overwriting any previous content."""
    with open(oCPPFile, "w") as text_file:
        text_file.write(iCPPCode)

def add_cpp_main_function(iCPPCode, iCSVFile):
    """Wrap the generated C++ with the generic include and a main() that scores iCSVFile."""
    lCPPCode = "#include \"Generic.i\"\n\n"
    lCPPCode = lCPPCode + iCPPCode
    lCPPCode = lCPPCode + "\tint main() {\n"
    lCPPCode = lCPPCode + "\t\tscore_csv_file(\"" + iCSVFile +"\");\n"
    lCPPCode = lCPPCode + "\treturn 0;\n}\n"
    return lCPPCode

def compile_cpp_code_as_executable(iName):
    """Compile <iName>.cpp into <iName>.exe with g++ (raises CalledProcessError on failure)."""
    import subprocess
    lCommand = ["g++", "-Wall", "-Wno-unused-function", "-std=c++17" , "-g" ,
                "-o", iName + ".exe", iName + ".cpp"]
    print("EXECUTING" , "'" + " ".join(lCommand) + "'")
    result = subprocess.check_output(lCommand)
    # print(result)

def execute_cpp_model(iName, iCSVFile):
    """Run the compiled scorer on iCSVFile and return its stdout (a CSV of predictions)."""
    import subprocess
    result2 = subprocess.check_output([iName + ".exe", iCSVFile])
    result2 = result2.decode()
    print(result2[:100])
    print(result2[-100:])
    return result2

def execute_cpp_code(iCPPCode, iCSVFile):
    """Write, compile and run the generated C++; return the path of the captured output CSV."""
    lName = "/tmp/sklearn2sql_cpp_" + str(id(clf));
    lCPPCode = add_cpp_main_function(iCPPCode, iCSVFile)
    write_text_to_file(lCPPCode, lName + ".cpp")
    compile_cpp_code_as_executable(lName)
    result = execute_cpp_model(lName, iCSVFile)
    write_text_to_file(str(result), lName + ".out")
    return lName + ".out"

# The C++ scorer expects generic Feature_i column names rather than the Boston ones.
populate_table("/tmp/boston2.csv" , ["Feature_" + str(i) for i,x in enumerate(metadata["features"])])

lCPPOutput = execute_cpp_code(lCPPCode , "/tmp/boston2.csv")

cpp_output = pd.read_csv(lCPPOutput)
cpp_output.sample(12, random_state=1960)

# +
# Score the same rows with the in-memory sklearn model for comparison.
skl_outputs = pd.DataFrame()
X = df[metadata['features']].values

skl_output_key = pd.DataFrame(list(range(X.shape[0])), columns=['idx']);
skl_output_score = pd.DataFrame(clf.predict(X), columns=['Estimator']);
skl_output = pd.concat([skl_output_key, skl_output_score] , axis=1)
skl_output.sample(12, random_state=1960)
# -

# Join sklearn and C++ predictions on idx and look at the per-row difference.
cpp_skl_join = skl_output.join(cpp_output , how='left', on='idx', lsuffix='_skl', rsuffix='_cpp')
cpp_skl_join.sample(12, random_state=1960)

lDiff = cpp_skl_join['Estimator_skl'] - cpp_skl_join['Estimator_cpp']
lDiff.describe()
doc/SVM/ml2cpp_svm_regressor_boston.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (Data Science)
#     language: python
#     name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:eu-west-1:470317259841:image/datascience-1.0
# ---

# # Bias Detection with SageMaker Clarify

# + language="sh"
# pip -q install sagemaker --upgrade
# pip -q install scikit-learn imbalanced-learn
# -

# ## 1 - Load dataset

# +
import boto3, io
import pandas as pd

# Pre-processed Adult-income dataset; 'Label' is 1 for income > $50K.
# NOTE(review): column semantics inferred from later cells -- confirm against the data-prep notebook.
dataset = pd.read_csv('dataset.csv')
# -

dataset.shape

dataset.head()

# +
from sklearn.model_selection import train_test_split

# 90/10 train/test split.
train_data, test_data = train_test_split(dataset, test_size=0.1)
# -

print(train_data.shape)
print(test_data.shape)

# Headerless CSVs, as the built-in XGBoost algorithm expects.
train_data.to_csv('train_data.csv', index=False, header=False)

test_data.to_csv('test_data.csv', index=False, header=False)

# +
# 100 unlabeled test rows -- used later as the SHAP baseline.
test_data_no_labels = test_data.drop(['Label'], axis=1)
test_data_no_labels = test_data_no_labels[:100]
test_data_no_labels.to_csv('test_data_no_labels.csv', index=False, header=False)
# -

# ## 3 - Train a classification model with XGBoost on Amazon SageMaker

# +
import sagemaker
from sagemaker import get_execution_role
from sagemaker import Session

print(sagemaker.__version__)

session = Session()
bucket = session.default_bucket()
prefix = 'bias-detection-adult-dataset'
region = session.boto_region_name
role = get_execution_role()

# +
from sagemaker.s3 import S3Uploader
from sagemaker.inputs import TrainingInput

# Upload the three CSVs to S3 and wrap the labeled ones as training channels.
train_uri = S3Uploader.upload('train_data.csv', 's3://{}/{}'.format(bucket, prefix))
train_input = TrainingInput(train_uri, content_type='csv')
test_uri = S3Uploader.upload('test_data.csv', 's3://{}/{}'.format(bucket, prefix))
test_input = TrainingInput(test_uri, content_type='csv')
test_no_labels_uri = S3Uploader.upload('test_data_no_labels.csv', 's3://{}/{}'.format(bucket, prefix))

# +
from sagemaker.image_uris import retrieve
from sagemaker.estimator import Estimator

container = retrieve('xgboost', region, version='latest')

xgb = Estimator(container,
                role,
                instance_count=1,
                instance_type='ml.m5.large',
                disable_profiler=True)

xgb.set_hyperparameters(objective='binary:logistic',
                        eval_metric='auc',
                        num_round=100,
                        early_stopping_rounds=20)
# -

xgb.fit({'train': train_input, 'validation': test_input})

xgb_predictor = xgb.deploy(
    initial_instance_count=1,
    instance_type='ml.t2.medium')

# NOTE(review): the endpoint is deleted here, yet model_config below still references
# xgb_predictor.endpoint_name. Clarify creates its own shadow endpoint from the model
# of that name, so this only works if such a model exists -- confirm.
xgb.delete_endpoint()

# ## 4 - Analyze bias with Amazon SageMaker Clarify

# ### Define a SageMaker Processing processor

# +
from sagemaker import clarify

clarify_processor = clarify.SageMakerClarifyProcessor(
    role=role,
    instance_count=1,
    instance_type='ml.m5.large',
    sagemaker_session=session)
# -

# ### Configuring bias detection

# +
bias_report_output_path = 's3://{}/{}/clarify-bias'.format(bucket, prefix)

data_config = clarify.DataConfig(
    s3_data_input_path=train_uri,
    s3_output_path=bias_report_output_path,
    label='Label',
    headers=train_data.columns.to_list(),
    dataset_type='text/csv')
# -

model_config = clarify.ModelConfig(
    model_name=xgb_predictor.endpoint_name,
    instance_type='ml.t2.medium',
    instance_count=1,
    accept_type='text/csv')

bias_config = clarify.BiasConfig(
    label_values_or_threshold=[1], # Label for positive outcome
    facet_name='Sex_',
    facet_values_or_threshold=[1]) # Male: Sex_=0, Female: Sex_=1

# ### Compute pre-training and post-training bias metrics

clarify_processor.run_bias(
    data_config=data_config,
    model_config=model_config,
    bias_config=bias_config)

bias_report_output_path

# + magic_args="-s $bias_report_output_path" language="sh"
# aws s3 cp --recursive $1/ .
# -

# ## 5 - Run explainability analysis

# +
shap_config = clarify.SHAPConfig(
    baseline=test_no_labels_uri,  # the 100 unlabeled rows uploaded earlier
    num_samples=10,
    agg_method='mean_abs',
    save_local_shap_values=True,
)

explainability_output_path = "s3://{}/{}/clarify-explainability".format(bucket, prefix)

explainability_data_config = clarify.DataConfig(
    s3_data_input_path=train_uri,
    s3_output_path=explainability_output_path,
    label='Label',
    headers=train_data.columns.to_list(),
    dataset_type="text/csv",
)

# +
# Larger instance for the SHAP job, which is much more compute-intensive.
clarify_processor = clarify.SageMakerClarifyProcessor(
    role=role,
    instance_count=1,
    instance_type='ml.c5.4xlarge',
    sagemaker_session=session)

clarify_processor.run_explainability(
    data_config=explainability_data_config,
    model_config=model_config,
    explainability_config=shap_config,
)
# -

# ## 6 - Inspect data

# +
# Count female (Sex_=1) and male (Sex_=0) instances.
# (Original comment had the mapping reversed; Sex_=1 is female per bias_config
# above and the SMOTE section below.)
female_male_count = train_data['Sex_'].value_counts()
print(female_male_count)

# +
# Plot them
female_male_count.sort_values().plot(kind='bar', title='Counts of Sex', rot=0, figsize=(6, 3))

# +
# Count male and female not50k (Target=0) and 50k instances (Target=1)
female_male_not_50k_count = train_data['Sex_'].where(train_data['Label']==0).value_counts()
female_male_50k_count = train_data['Sex_'].where(train_data['Label']==1).value_counts()
print(female_male_not_50k_count)
print(female_male_50k_count)

# +
# Plot male and females making more than 50k
female_male_50k_count.sort_values().plot(kind='bar', title='Counts of Sex earning >$50K', rot=0, figsize=(6, 3))

# +
# Compute male and female 50k/not 50k ratios
ratios = female_male_50k_count/female_male_not_50k_count
print(ratios)
# -

# ## 7 - Rebalance the data set

# We'll do this in two steps:
# 1. Use SMOTE to generate new female 50k instances, in order to get the same 50k/not50k ratio as males.
# 2. Use under-sampling to have the same number of male and female instances.
# +
import imblearn
from collections import Counter

print(imblearn.__version__)
# -

# ### Generate new female 50k instances

# +
# Keep female instances only (Sex_=1 is female, per the bias_config above).
male_instances = train_data[train_data['Sex_']==0]
female_instances = train_data[train_data['Sex_']==1]

female_X = female_instances.drop(['Label'], axis=1)
female_Y = female_instances['Label']
Counter(female_Y)

# +
from imblearn.over_sampling import SMOTE

# Rebalance female instances with the same (50k/not50k) ratio as male instances
# NOTE(review): ratios[0] assumes the male ratio is at index 0 of `ratios` -- confirm ordering.
oversample = SMOTE(sampling_strategy=ratios[0])
balanced_female_X, balanced_female_Y = oversample.fit_resample(female_X, female_Y)
# -

Counter(balanced_female_Y)

balanced_female=pd.concat([balanced_female_X, balanced_female_Y], axis=1)
balanced_female

# ### Rebuild dataset with original male instances plus balanced female instance

balanced_train_data=pd.concat([male_instances, balanced_female], axis=0)

balanced_train_data['Sex_'].value_counts().sort_values().plot(kind='bar', title='Counts of Sex', rot=0, figsize=(6, 3))

balanced_train_data['Sex_'].where(balanced_train_data['Label']==1).value_counts().sort_values().plot(kind='bar', title='Counts of Sex earning >$50K', rot=0, figsize=(6, 3))

# ### Undersample males to balance male and female instances

# +
from imblearn.under_sampling import RandomUnderSampler

# Treat Sex_ as the "class" to undersample: shrink the majority sex down to the minority count.
X = balanced_train_data.drop(['Sex_'], axis=1)
Y = balanced_train_data['Sex_']

undersample = RandomUnderSampler(sampling_strategy='not minority')
X,Y = undersample.fit_resample(X, Y)
# -

Counter(Y)

# Note: concat puts Sex_ back as the LAST column of the rebuilt frame.
balanced_train_data=pd.concat([X, Y], axis=1)

balanced_train_data['Sex_'].value_counts().sort_values().plot(kind='bar', title='Counts of Sex', rot=0, figsize=(6, 3))

balanced_train_data['Sex_'].where(balanced_train_data['Label']==1).value_counts().sort_values().plot(kind='bar', title='Counts of Sex earning >$50K', rot=0, figsize=(6, 3))

female_male_count = balanced_train_data['Sex_'].value_counts()
print(female_male_count)

female_male_50k_count = balanced_train_data['Sex_'].where(balanced_train_data['Label']==1).value_counts()
print(female_male_50k_count)

ratios = female_male_50k_count/female_male_count
print(ratios)

# Now we have the same number of male and female instances, and both classes have the same 50k/not50k ratio.

# ## 8 - Train again on the balanced dataset

balanced_train_data.to_csv('balanced_train_data.csv', index=False, header=False)

balanced_train_uri = S3Uploader.upload('balanced_train_data.csv', 's3://{}/{}'.format(bucket, prefix))
balanced_train_input = TrainingInput(balanced_train_uri, content_type='csv')

xgb.fit({'train': balanced_train_input, 'validation': test_input})

xgb_predictor = xgb.deploy(
    initial_instance_count=1,
    instance_type='ml.t2.medium')

# NOTE(review): same concern as above -- endpoint deleted while run_bias below
# still references xgb_predictor.endpoint_name as the model name.
xgb_predictor.delete_endpoint()

# ## 8 - Run SageMaker Clarify again

data_config = clarify.DataConfig(
    s3_data_input_path=balanced_train_uri,
    s3_output_path=bias_report_output_path,
    label='Label',
    headers=balanced_train_data.columns.to_list(),
    dataset_type='text/csv')

model_config = clarify.ModelConfig(
    model_name=xgb_predictor.endpoint_name,
    instance_type='ml.t2.medium',
    instance_count=1,
    accept_type='text/csv')

bias_config = clarify.BiasConfig(
    label_values_or_threshold=[1],
    facet_name='Sex_',
    facet_values_or_threshold=[1])

clarify_processor.run_bias(
    data_config=data_config,
    model_config=model_config,
    bias_config=bias_config)
Chapter 10/bias_detection/Bias Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Question: Does Clauset's method independently verify $M_z$? # # We've established in another notebook that two metrics, the chi-square and SSI both pick $M_z$ from the space of possibilities as the best choice of sample size to arrive at the 2, 6, 12, 20, ... inverse pattern for n-legomena. These tests pick the _same_ $M_z$ as optimally fitting the TTR curve, but they both feel rather "ad hoc". After all, the 2-6-12-20-etc pattern is a secondary prediction of a Perfect Zipf Distribution, so testing for that doesn't exactly rule out confounding factors. What we'd like to do here is assert, using [Clauset's method](https://arxiv.org/pdf/0706.1062.pdf), that of the space of possible sample sizes $[0, M]$ that either: # # - (a) $M_z$ is the sample size most closely power-law-distributed with parameter $\alpha = 1$ OR # - (b) $M_z$ is the sample size most closely power-law-distributed, period. # # Should this be the case, should we find a "spike" or a "well" in the signal at this spot, then combined with the other two signals, this provides some pretty strong evidence that something magical happens at this point. # + # bloody dependencies import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = 16,8 from nltk.corpus import gutenberg import numpy as np import pandas as pd from scipy.special import zeta, zetac from scipy.optimize import fsolve # custom classes from legomena import Corpus, LogModel # - # ### Choosing a Book # # Select a book for use as our fitting corpus. 
# + # <NAME> words = gutenberg.words("melville-moby_dick.txt") corpus = Corpus(words) TTR = corpus.TTR m_tokens, n_types = TTR.m_tokens, TTR.n_types model = LogModel().fit(m_tokens, n_types) print("Optimum =", model.params) # plot TTR curve plt.plot(m_tokens, n_types) plt.title("Type-Token Relation") plt.xlabel("tokens") plt.ylabel("types") plt.legend() plt.show() # plot WFD xmax = 50 df = corpus.fdist.copy().head(xmax) plt.bar(df.index, df["freq"]) plt.xlabel("rank") plt.ylabel("frequency") plt.show() # - # ## What does an optimum sample look like? # + # take an optimum sample corpus.seed = None optimum = corpus.sample(model.M_z) optimum.M, optimum.N # plot WFD xmax = 50 H = np.sum(1/np.arange(1,xmax)) df = optimum.fdist.copy().reset_index().head(xmax) df["freq"] = df["freq"]/df["freq"].sum() df["pred"] = 1/H/(df["rank"]) plt.bar(df["rank"], df["freq"]) plt.plot(df["rank"], df["pred"], color="red") plt.title("Word Frequency Distribution, alpha = %0.4f" % optimum.alpha) plt.xlabel("rank") plt.ylabel("frequency") plt.show() # plot legomena k = optimum.k[:xmax] n = np.arange(1, len(k)) plt.bar(n, k[n]/sum(k)) plt.plot(n, 1/n/(n+1), color="red") plt.title("n-Legomena Distribution, gamma = %0.4f" % optimum.gamma) plt.xlabel("n") plt.ylabel("k_n") plt.show() # - # ## Fake Zipfian Data # # Here we take a brief vacation from our words data and instead use `numpy.random.zipf()` to generate some fake data instead. Indeed, it exhibits exactly the same pattern, bottoming out at a value somewhere around $\alpha = 1.75$. 
# + from scipy.stats import chisquare def ssi_test(f_obs, f_exp): assert all(f_obs >= 0) assert all(f_exp >= 0) nonzero = (f_obs > 0) & (f_exp > 0) f_obs = f_obs[nonzero] f_exp = f_exp[nonzero] f_obs = f_obs / f_obs.sum() f_exp = f_exp / f_exp.sum() return np.sum((f_obs - f_exp) * np.log(f_obs / f_exp)) M, n = 9999, 40 f_exp = np.array([1 / i / (i+1) for i in range(1, n) ]) print("Expected n-legomena proportions:", f_exp) f_exp = M * f_exp df = [] for _ in range(99): alpha = np.random.uniform(1.2, 2.5) x = np.random.zipf(a = alpha, size = M) f_obs = np.array([ sum(x == i) for i in range(1, n)]) sse = np.sum((f_exp - f_obs)**2) ssi = ssi_test(f_obs, f_exp) df.append((alpha, sse, ssi)) df = pd.DataFrame(df, columns = ["alpha", "sse", "ssi"]) df.head() # - # ## Enter Number Theory # # This stability measure appears to minimize for a random Zipf variable $X \sim Z[\rho]$ for $\zeta(\rho) = 2, \rho = 1.7286472389981835$ [Kalmar's Constant](https://en.wikipedia.org/?title=Talk:L%C3%A1szl%C3%B3_Kalm%C3%A1r&oldid=514021658). # # Why? # # Zipf's original rank-frequency distribution suggests an exponent near 1. Why would an exponent much closer to 2 appear to give better results? I think I'm doing something wrong here. But anyway, as it stands, the hypothesis is: Given a random Zipfian variable with parameter $\alpha$, $X \sim Z[\alpha]$, the optimum choice for $\alpha$ to fit $Pr(X=n) = \frac{1}{n(n+1)}$ is $\alpha = \rho = 1.7286472389981835$, [Kalmar's Constant](https://en.wikipedia.org/?title=Talk:L%C3%A1szl%C3%B3_Kalm%C3%A1r&oldid=514021658). # + # calculate rho func = lambda x : zeta(x) - 2. 
x0 = 1.75 rho = fsolve(func, x0)[0] print("rho = ", rho) print("zeta(rho) =", zeta(rho)) # - # visualization plt.scatter(df.alpha, df.ssi) plt.axvline(x = rho, color = 'r') plt.show() # ## n-Legomena from fake data # # In fact, we can count hapaxes and higher $n$-legomena straight from this fake distribution, just by answering the question, what is $Pr(X = n)$ given Zipf's density function $p(x) = \frac{x^{-\alpha}}{\zeta(\alpha)}$? # + # count hapaxes from 3rd party zipf distribution M, n = 9999, 9 df = [] for _ in range(999): alpha = np.random.uniform(1.2, 2.5) x = np.random.zipf(a = alpha, size = M) f_obs = np.array([ sum(x == i) for i in range(n)]) / M df.append((alpha, f_obs[1], f_obs[2], f_obs[3], f_obs[4], f_obs[5])) df = pd.DataFrame(df, columns = ["alpha", "hapax", "dis", "tris", "tetra", "penta"]) df = df.sort_values("alpha") df.head() plt.scatter(df.alpha, df.hapax) plt.scatter(df.alpha, df.dis) plt.scatter(df.alpha, df.tris) plt.scatter(df.alpha, df.tetra) plt.scatter(df.alpha, df.penta) plt.axvline(x = rho, color = 'r') plt.scatter([rho, rho, rho, rho, rho], 1./np.array([2,6,12,20,30]), color = 'r') plt.show() df[df.alpha > rho].head() # - # ## Clauset's Method # # Let's first tie down Clauset's method by producing power-law data and seeing if it can indeed recover the exponent. 
# + import powerlaw # fake data y = np.random.zipf(rho, 9999) _, y = np.unique(y, return_counts = True) # group dist = powerlaw.Fit(y).power_law a_obs = dist.alpha a_opt = rho y = y / sum(y) # normalize x = np.arange(1, len(y)+1) y1 = 1/x/(x+1) # model 1: k_n ~ 1/n/(n+1) y2 = x**-a_obs / zeta(a_obs) # clauset: a ~ 2.09 y3 = x**-a_opt / zeta(a_opt) # theory: a = rho ~ 1.72 plt.scatter(x, y, color = 'r') plt.plot(x, y1, label = "poly") plt.plot(x, y2, label = f"clauset (a = {a_obs:0.4f})") plt.plot(x, y3, label = f"theory (a = {a_opt:0.4f})") plt.loglog() plt.title(f"Moby Dick n-Legomena Frequencies") plt.legend() plt.show() # + import powerlaw data = optimum.fdist.freq.values dist = powerlaw.Fit(data).power_law a_obs = dist.alpha a_opt = rho y = optimum.k[1:500] x = np.arange(1, len(y)+1) y = y / sum(y) # normalize y1 = 1/x/(x+1) # model 1: k_n ~ 1/n/(n+1) y2 = x**-a_obs / zeta(a_obs) # clauset: a ~ 2.09 y3 = x**-a_opt / zeta(a_opt) # theory: a = rho ~ 1.72 plt.scatter(x, y, color = 'r') plt.plot(x, y1, label = "poly") plt.plot(x, y2, label = f"clauset (a = {a_obs:0.4f})") plt.plot(x, y3, label = f"theory (a = {a_opt:0.4f})") plt.loglog() plt.title(f"Moby Dick n-Legomena Frequencies") plt.legend() plt.show() # - # ## Conclusion # # None, really. Clauset's method doesn't appear to choose a better exponent than the other two approximations, and the data are too noisy to obtain a robust result.
notebooks/clauset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Anomaly detection # # This notebook propose metric for anomaly detection algorithms and compare different models import datetime import calendar import time import json import numpy as np import pandas as pd from sklearn import tree from sklearn import metrics import matplotlib.pyplot as plt from matplotlib import rcParams rcParams['figure.figsize'] = 12, 4 # # Load project # + PROJECT_FOLDER = '../../datasets/thorium-large/' with open(PROJECT_FOLDER + 'project.json', 'r') as file: project = json.load(file) print(json.dumps(project, indent=4)) def load_series(fname, name): path = PROJECT_FOLDER + fname + '.csv' xs = pd.read_csv(path, parse_dates=['time']) xs = xs.set_index('time')[name].fillna(0) xs = xs.resample('5T').pad() xs = xs.rename(fname) return xs flow1_raw = load_series('flow1', 'flow') flow1_edited = load_series('flow1_edited', 'flow_edited') flow2_raw = load_series('flow2', 'flow') flow2_edited = load_series('flow2_edited', 'flow_edited') flow3_raw = load_series('flow3', 'flow') flow3_edited = load_series('flow3_edited', 'flow_edited') flow4_raw = load_series('flow4', 'flow') flow4_edited = load_series('flow4_edited', 'flow_edited') flow5_raw = load_series('flow5', 'flow') flow5_edited = load_series('flow5_edited', 'flow_edited') rainfall = load_series('rainfall1', 'rainfall') df = pd.concat( [flow1_raw, flow1_edited, flow2_raw, flow2_edited, flow3_raw, flow3_edited, flow4_raw, flow4_edited, flow5_raw, flow5_edited, rainfall ], axis=1) data_frame = df['2016-01-01':] data_frame.head() # + data_frame.flow1.plot(color='r') data_frame.flow1_edited.plot(color='b') plt.show() plt.plot(flow2_raw, color='r') plt.plot(flow2_edited, color='b') plt.show() plt.plot(flow3_raw, color='r') plt.plot(flow3_edited, color='b') plt.show() 
plt.plot(flow4_raw, color='r') plt.plot(flow4_edited, color='b') plt.show() plt.plot(flow5_raw, color='r') plt.plot(flow5_edited, color='b') plt.show() plt.plot(rainfall) plt.show() # + pd.options.mode.chained_assignment = None data_frame['flow1_anomalies'] = np.abs(data_frame.flow1 - data_frame.flow1_edited) > 0.001 data_frame['flow2_anomalies'] = np.abs(data_frame.flow2 - data_frame.flow2_edited) > 0.001 data_frame['flow3_anomalies'] = np.abs(data_frame.flow3 - data_frame.flow3_edited) > 0.001 data_frame['flow4_anomalies'] = np.abs(data_frame.flow4 - data_frame.flow4_edited) > 0.001 data_frame['flow5_anomalies'] = np.abs(data_frame.flow5 - data_frame.flow5_edited) > 0.001 data_frame.head() # - # # Count anomalies in 2017 # # Check how many anomalies were corrected in the whole 2017 year df2017 = data_frame['2017-01-01': '2018-01-01'] print('Flow1 anomalies: {}'.format(df2017.flow1_anomalies.sum())) print('Flow2 anomalies: {}'.format(df2017.flow2_anomalies.sum())) print('Flow3 anomalies: {}'.format(df2017.flow3_anomalies.sum())) print('Flow4 anomalies: {}'.format(df2017.flow4_anomalies.sum())) print('Flow5 anomalies: {}'.format(df2017.flow5_anomalies.sum())) # # Metric # # We use F1 score: # http://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score # + def score(y_true, y_pred): return metrics.f1_score(y_true, y_pred, np.unique(y_pred)) print(score(df2017.flow1_anomalies, df2017.flow1_anomalies)) print(score(df2017.flow1_anomalies, df2017.flow2_anomalies)) # - # # Test base model # # Base model mark point as anomaly when its distance to the mean is more then 3 standard deviations. 
# +
def evaluate_std_model(prev_flow, flow, anomalies):
    """Score the baseline extreme-value anomaly detector on one flow series.

    Parameters
    ----------
    prev_flow : training series used to estimate the normal operating range
    flow : series to test for anomalies
    anomalies : boolean ground-truth labels aligned with ``flow``

    A point is predicted anomalous when the flow is near zero (< 1) or
    falls more than 3 standard deviations below the training mean.
    Returns the F1 score of that prediction via the ``score`` helper
    defined earlier in this notebook.
    """
    mu = prev_flow.mean()
    std = prev_flow.std()
    # NOTE(review): only *low* outliers are flagged; values far above the
    # mean pass silently — confirm this one-sided test is intended.
    pred = (flow < 1) | (flow < mu - 3 * std)
    return score(anomalies, pred)

# Model based on extreme values
df = data_frame[: '2017-01-01']
# Same train (pre-2017) / test (2017) evaluation for every flow sensor;
# output is identical to the five hand-written print statements it replaces.
for i in range(1, 6):
    name = 'flow{}'.format(i)
    f1 = evaluate_std_model(df[name], df2017[name], df2017[name + '_anomalies'])
    print('Flow{} score: {:.3f}'.format(i, f1))
notebooks/thorium-large/[A] eda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import ipywebrtc as webrtc # + from pythreejs import * import ipywidgets ball = Mesh(geometry=SphereGeometry(), material=MeshLambertMaterial(color='red')) key_light = DirectionalLight(color='white', position=[3, 5, 1], intensity=0.5) c = PerspectiveCamera(position=[0, 5, 5], up=[0, 1, 0], children=[key_light]) scene = Scene(children=[ball, c, AmbientLight(color='#777777')], background=None) renderer = Renderer(camera=c, scene=scene, alpha=True, clearOpacity=0, controls=[OrbitControls(controlling=c)]) renderer # - stream = webrtc.WidgetStream(widget=renderer) stream # the recorder can only record it seems after the scene has changed (so drag the above ball around it nothing happens) recorder = webrtc.MediaImageRecorder(stream=stream) recorder import bqplot.pyplot as plt import numpy as np fig = plt.figure() x = np.linspace(0, 2, 10) y = x**2 s = plt.scatter(x, y) plt.show() s.selected = [1,4,6] s.unselected_style = {'fill': 'orange', 'stroke': 'none'} # bqplot doesn't work yet, we should make use of its png saving feature stream_bqplot = webrtc.WidgetStream(widget=fig) stream_bqplot import ipyleaflet as ll m = ll.Map(zoom=1) m # bqplot doesn't work yet, we should make use of its png saving feature stream_ll = webrtc.WidgetStream(widget=m) stream_ll # you could use these streams in webrtc chatting webrtc.chat(stream=stream_ll, room='test')
docs/source/widget-stream.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="P9bJCDjdlgG6" colab_type="text" # # **Tame Your Python** # # # + id="aQwc0re5mFld" colab_type="code" outputId="69a04492-5b5f-4f52-cb44-9983a5f464af" colab={"base_uri": "https://localhost:8080/", "height": 35} import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf print(tf.__version__) # + id="6CkmP_T7NuLU" colab_type="code" colab={} import matplotlib.pyplot as plt from sklearn.datasets import fetch_lfw_people # + id="HktWQUiENwPD" colab_type="code" outputId="d9dab12c-f0d1-43de-cfbc-2a9b933ed478" colab={"base_uri": "https://localhost:8080/", "height": 87} # Load data dataset = fetch_lfw_people(min_faces_per_person=100) N, H, W = dataset.images.shape X = dataset.data y = dataset.target target_names = dataset.target_names # + id="XzPUK9XOQKDj" colab_type="code" outputId="db4f80a8-292b-4304-f3ef-7aa7543e3892" colab={"base_uri": "https://localhost:8080/", "height": 52} print(target_names) # + id="kbDSjkI5Oemq" colab_type="code" outputId="995e88a0-be5a-41bd-e773-1d67d8ea7f87" colab={"base_uri": "https://localhost:8080/", "height": 69} print(dataset.images.shape) print(dataset.data.shape) print(dataset.target.shape) # + id="I_XJ7C71OtoK" colab_type="code" outputId="80a9094e-3cc0-432e-af6f-2052ef6cc571" colab={"base_uri": "https://localhost:8080/", "height": 35} print(H*W) # + id="ekR2xwrfOZRM" colab_type="code" colab={} from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1) # + id="3fohxN_0OL67" colab_type="code" colab={} from sklearn.random_projection import SparseRandomProjection n_components = 80 decomposer = SparseRandomProjection(n_components=n_components).fit(X_train) X_train_d = decomposer.transform(X_train) X_test_d = 
decomposer.transform(X_test) # + id="Dw1hKsanOURT" colab_type="code" outputId="dd202792-ae88-480f-939b-546191119892" colab={"base_uri": "https://localhost:8080/", "height": 1000} from sklearn.neural_network import MLPClassifier model = MLPClassifier (hidden_layer_sizes=(1024,), batch_size=256, verbose=True, early_stopping=True) model.fit(X_train_d, y_train) # + id="5VGk76GvPHaS" colab_type="code" colab={} y_pred = model.predict(X_test_d) # + id="o8uBVnDdPPFS" colab_type="code" outputId="052d0432-3854-45b3-97f5-85197204caa4" colab={"base_uri": "https://localhost:8080/", "height": 225} from sklearn.metrics import classification_report print(classification_report(y_test, y_pred, target_names=target_names)) # + id="9qBRSJI5Proa" colab_type="code" outputId="35a1b1f2-894c-4370-956b-c1ce84e88306" colab={"base_uri": "https://localhost:8080/", "height": 281} idx = np.random.randint(0,len(y_pred)) plt.figure() plt.imshow(X_test[idx].reshape((H,W)), cmap = 'gray') plt.title("Real = " + str(target_names[y_test[idx]]) + " Predicted = " + str(target_names[y_pred[idx]])) plt.show() # + id="JVdvbWCiDNkB" colab_type="code" colab={}
MachineLearning_DataScience/Demo115_RandomProjection_LFWPeople.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Python 3.6 script that reads CSV input file for Colorado water rights data and loads in into SQL Server Database (WaDE)
# <NAME>, Feb 2019

# import the needed Python libraries
import pandas as pd
import pyodbc
from sqlalchemy import create_engine
import urllib.parse  # was "import urllib": the parse submodule must be imported explicitly
import numpy as np
print ('Done importing libraries')

# +
# connect to the WaDE SQL Server database
server = ''
database = ''
username = ''
password = ''
conn_str = 'DRIVER={ODBC Driver 13 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+password
conn = pyodbc.connect(conn_str)

# SQLAlchemy engine for the pandas .to_sql() uploads below.
# (Fix: ``engine`` was referenced later but never created, even though
# create_engine and urllib were imported for exactly this purpose.)
params = urllib.parse.quote_plus(conn_str)
engine = create_engine('mssql+pyodbc:///?odbc_connect={}'.format(params))

data_frame = pd.read_sql('''SELECT * FROM [CVs].[ReportYearCV]''', conn)
print (data_frame)
print ('connected to the SQL Server database')
# -

# # test the upload to the db

# NOTE(review): in linear script order this runs before Sites_dim is read
# below — it only works after the read cell has been executed in a notebook.
Sites_dim.to_sql('Sites_dim', engine,if_exists='append',index=False)

# # Read the CSV files into panads dataframes for each table in WaDE 2.0
#

# +
Sites_dim = pd.read_csv("Allocations/Colorado/CSV_python_ingester/Sites_dim.csv")
Methods_dim = pd.read_csv("Allocations/Colorado/CSV_python_ingester/Methods_dim.csv")
WaterSources_dim = pd.read_csv("Allocations/Colorado/CSV_python_ingester/WaterSources_dim.csv")
Date_dim = pd.read_csv("Allocations/Colorado/CSV_python_ingester/Date_dim.csv")
Variables_dim = pd.read_csv("Allocations/Colorado/CSV_python_ingester/Variables_dim.csv")
Organizations_dim = pd.read_csv("Allocations/Colorado/CSV_python_ingester/Organizations_dim.csv")
Allocations_dim = pd.read_csv("Allocations/Colorado/CSV_python_ingester/Allocations_dim.csv")
# NOTE(review): this path lacks the "Colorado/" segment the others have — confirm.
BeneficialUses_dim = pd.read_csv("Allocations/CSV_python_ingester/BeneficialUses_dim.csv")
print ('Done reading')

# +
# load the tables into the WaDE database
# (Fix: this cell referenced undefined names — Sites, Methods, WaterSources,
# Time_dim, Variables, Allocations — instead of the *_dim dataframes that
# were actually read above. Target table names are kept as originally written.)
Sites_dim.to_sql('Sites', engine,if_exists='append',index=False)
Methods_dim.to_sql('Methods', engine,if_exists='append',index=False)
WaterSources_dim.to_sql('WaterSources', engine,if_exists='append',index=False)
# NOTE(review): Date_dim is the frame read above; confirm 'Time_dim' is the intended target table.
Date_dim.to_sql('Time_dim', engine,if_exists='append',index=False)
Variables_dim.to_sql('Variables', engine,if_exists='append',index=False)
Allocations_dim.to_sql('Allocations', engine,if_exists='append',index=False)
# NOTE(review): AmountMetadata and AllocationAmounts were never read from CSV
# anywhere above, so these uploads could not have run; restore once their
# source dataframes exist.
# AmountMetadata.to_sql('AmountMetadata', engine,if_exists='append',index=False)
# AllocationAmounts.to_sql('AllocationAmounts', engine,if_exists='append',index=False)
print ('Done loading')
# -
Design_docs/SampleInputData/.ipynb_checkpoints/Load_WaterRights_CO_SQLServer-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![imagenes](imagenes/pythonista.png)](https://pythonista.mx) # # Expresiones regulares. # Las expresiones regulares son parte de los lenguajes formales y corresponden a una secuencia de caracteres que definen un patrón. # # Mediante el uso de expresiones regulares, es posible buscar patrones dentro de un flujo de texto. # ## Metacaracteres. # # Python reconoce a los siguientes caracteres como componentes sintácticos de una expresión regular. # # ```. ^ $ * + ? { } [ ] \ | ( )``` # # El uso de los metacaracteres se estudiará más adelante. # ## El módulo ```re```. # # Es el módulo que contiene funciones y clases relativas a búsqueda de patrones mediante expresiones regulares. # # La referencia del módulo ```re``` puede ser consultada en: # # https://docs.python.org/3/library/re.html import re # ## Parámetros de búsqueda. # # Es posible definir algunos parámetros que modifican la forma en la que la expresión regular es aplicada. # # * ```re.A``` ó ```ASCII```, que realiza la búsqueda de la expresión regular usando la codificación ASCII. # * ```re.I``` ó ```IGNORECASE```, que realiza la búsqueda sin diferenciar mayúsculas y minúsculas. # * ```re.L``` ó ```LOCALE``` que realiza la búsqueda de la expresión regular usando la configuración de idioma del sistema. # * ```re.M``` ó ```MULTILINE``` indica que debe reconocer los retornos de línea. # * ```re.S``` ó ```DOTALL``` indica que el metacaracter ```.``` tome en cuenta los retornos de línea. # * ```re.X``` ó ```VERBOSE``` permite añadir algunos elementos que mejoran la legibilidad de una expresión regular. # * ```re.U``` ó ```UNICODE```, que realiza la búsqueda de la expresión regular usando la codificación Unicode. # ## Funciones del módulo ```re```. # # ### La función ```re.search()```. 
# # Busca un patrón desde el principio de la cadena de caracteres. Al encontrar la primera conicidencia, regresa un objeto de la clase ```sre.SRE_Match``` que incluye los índices correspondientes a la coicidencia y el texto que coincide con el patrón. # # ``` # re.search(<patrón>, <cadena de caracteres>, <parámetros>) # ``` # **Ejemplos:** texto = "Azucar azu azucarado zucarita, asucar azurca azucar" patron = "azu" re.search(patron, texto) busqueda = re.search(patron, texto) busqueda.string busqueda.span() busqueda.start() busqueda.end() re.search(patron, texto, re.I) # ### La función ```re.findall()```. # Busca todas las coincidencias de un patrón desde el principio de la cadena de caracteres. Regresa una lista con todas las coincidencias. # # ``` # re.findall(<patrón>, <cadena de caracteres>, <parámetros>) # ``` # **Ejemplos:** texto = "Azucar azu azucarado zucarita, asucar azurca azucar" patron = "azu" re.findall(patron, texto) re.findall(patron, texto, re.I) # ### La función ```re.finditer()```. # # Busca todas las coincidencias de un patrón desde el principio de la cadena de caracteres. Regresa un iterador que regresa uno objeto ```sre.SRE_Match``` correspondiente a cada coincidencia. # # ``` # re.finditer(<patrón>, <cadena de caracteres>, <parámetros>) # ``` # **Ejemplos:** patron = 'azucar' texto = "Azucar azu azucarado zucarita, asucar azurca azucar" re.finditer(patron, texto, re.I) for elemento in re.finditer(patron, texto, re.I): print(elemento) # ### La función ```re.split()```. # # Busca todas las coincidencias de un patrón desde el principio de la cadena de caracteres y separa los elementos utilizando al patrón como separador. # # Regresa una lista de cadenas de caracteres con los textos separados. En caso de no encontrar coincidencias, regresa un objeto de tipo _list_ con el texto original. 
# # # ``` # re.split(<patrón>, <cadena de caracteres>, <parámetros>) # ``` # **Ejemplos:** patron = ('azu') texto = "Azucar azu azucarado zucarita, asucar azurca azucar" re.split(patron, texto) patron = ('sal') texto = "Azucar azu azucarado zucarita, asucar azurca azucar" re.split(patron, texto) # ### La función ```re.sub()```. # # Busca todas las coincidencias de un patrón desde el principio de la cadena de caracteres y dichas coincidencias serán sustituida con un nuevo texto. Regresa una cadena de caracteres con el texto modificado. # # # ``` # re.sub(<patrón>, <texto a sustituir>, <texto>, <parámetros>) # ``` # **Ejemplos:** patron = 'azucar' texto = "Azucar azu azucarado zucarita, asucar azurca azucar" re.sub(patron, 'dulce', texto) patron = 'sal' re.sub(patron, 'dulce', texto) # ### La función ```re.subn()```. # # Busca todas las coincidencias de un patrón desde el principio de la cadena de caracteres y dichas coincidencias serán sustituida con un nuevo texto. Regresa un objeto de tipo ```tuple``` que incluye la cadena de caracteres con el texto modificado y el número de coincidencias. # # # ``` # re.subn(<patrón>, <texto a sustituir>, <texto>, <parámetros>) # ``` # **Ejemplos:** patron = 'azucar' texto = "Azucar azu azucarado zucarita, asucar azurca azucar" re.subn(patron, 'dulce', texto) patron = 'sal' re.subn(patron, 'dulce', texto) # ### La función ```re.match()```. # Evalúa si el patron ingresado coincide con el inicio de una cadena de caracteres. En caso de que se encuentre el patrón, regresará un objeto con la información de la coincidencia. # # ``` # re.match(<patrón>, <texto>, <parámetros>) # ``` # **Ejemplos:** patron = 'azucar' texto = "Azucar azu azucarado zucarita, asucar azurca azucar" print(re.match(texto, patron)) re.match(patron, texto, re.I) # ### La función _re.compile()_. 
# Crea un objeto que incluye los siguientes métodos, los cuales se comportan de forma idéntica a las funciones del módulo _re_ utilizando el patron que se ingresa como parámetro: # * _search()_. # * _findall()_. # * _finditer()_. # * _split()_. # * _sub()_. # * _subn()_. # * _match()_. # # En cada módulo se ingresa un texto como parámetro para que el patrón sea aplicado en éste. # # **Sintaxis:** # ``` # re.compile(<patrón>, <parámetros>) # ``` # # **Ejemplos:** texto = "Azucar azu azucarado zucarita, asucar azurca azucar" re.compile('azucar') busca = re.compile('azucar') busca.match(texto) busca = re.compile('azucar', re.IGNORECASE) busca.match(texto) busca.search(texto) busca.subn("dulce", texto) # ## Algunos patrones con expresiones regulares. # Es posible construir patrones que no necesariamente corresponden a una cadena de caracteres única mediante las expresiones regulares. # ### El metacaracter ".". # # El metacaracter '.' indica una posición que debe ser sutituida por cualquier caracter. # # Es posible indicar más de un '.' y cada uno de ellos corresponde a un caracter. # **Ejemplos:** texto = "Carro carcar carreta caar cuarc Carranza ceerc " patron = "c.ar" re.findall(patron, texto, re.I) patron = "c..r" re.findall(patron, texto, re.I) # En este caso, buscará *'aaar'*. patron = "c...ar" re.findall(patron, texto, re.I) # ### El metacaracter "^". # Indica que el principio de la cadena de caracteres debe de coincidir con la expresión a la derecha del metacaracter. # **Ejemplos:** texto = "Carro carcar carreta caar cuarc Carranza ceerc " patron = "^car" re.findall(patron, texto, re.I) texto = "Cuenta cuentos cuenta cuentas cuantas cuentas cuenta" patron = "^.ue" re.findall(patron, texto, re.I) # ### El metacaracter "$". # Indica que el final de la cadena de caracteres debe de coincidir con la expresión a la izquierda del metacaracter. 
# **Ejemplos:** texto = "Carro carcar carrreta caar craacar Carranza" patron = "rranza$" re.findall(patron, texto, re.I) texto = "Carro carcar carrreta caar craacar Carranza" patron = "rra...$" re.findall(patron, texto, re.I) # # El metacaracter "*". # # Indica que puede haber 0 o más ocurrencias del caracter a la izquierda del metacaracter. # **Ejemplos:** texto = "Carro carcar carrreta caar craacar Carranza" patron = "ca*r" re.findall(patron, texto, re.I) texto = "Carro carcar carrreta caar craacar Carranza" patron = "ca*" re.findall(patron, texto, re.I) # ## El metacaracter "+". # Indica que puede haber 1 o más caracteres que coincidan con el caracter a la izquierda del metacaracter. # **Ejemplo:** texto = "Carro carcar carrreta caar craacar Carranza caaar" patron = "ca+" re.findall(patron, texto, re.I) # ## El metacarcter "*?*". # # Indica que pueden haber cero o una coincidencia con el caracter de la izquierda. # **Ejemplo:** texto = "Carro carcar carrreta caer craacar Carranza, caaar" patron = "ca?r" re.findall(patron, texto, re.I) # ## El uso de corchetes "*[_..._]*" # # Indica que el caracter que se busca está dentro de las opciones dentro de los corchetes. # **Ejemplos:** texto = "Azucar azu azecarado zucarita, asucar azurca azicar" patron = "az[aeiou]car" re.findall(patron, texto, re.I) texto = "Azucar azu azecarado zucarita, asucar azurca azicar" patron = "az[aeio]car" re.findall(patron, texto, re.I) # ## El uso de llaves "*{m, n}*" # # indica un rango indicando el número que se repite el caracter de la izquierda. texto = "1211111 111 11111 1111 11111 11 11 13" patron = "1{2,4}" re.findall(patron, texto, re.I) help(re) # ## Siguiente pasos. # # Python ofrece un tutorial de expresiones regulares en https://docs.python.org/3/howto/regex.html # # El siguiente es un sitio para puebas de expresiones regulares. 
https://regex101.com # <p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p> # <p style="text-align: center">&copy; <NAME>. 2020.</p>
06_expresiones_regulares.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Exercícios Capítulo 05 # + # Exercício 1 - Crie um objeto a partir da classe abaixo, chamado roc1, passando 2 parâmetros e depois faça uma chamada # aos atributos e métodos from math import sqrt class Rocket(): def __init__(self, x=0, y=0): self.x = x self.y = y def move_rocket(self, x_increment=0, y_increment=1): self.x += x_increment self.y += y_increment def print_rocket(self): print(self.x, self.y) # - roc1 = Rocket(1,3) hasattr(roc1, "y") hasattr(roc1, "x") roc1.move_rocket(1,1) roc1.print_rocket() # + # Exercício 2 - Crie uma classe chamada Pessoa() com os atributos: nome, cidade, telefone e e-mail. Use pelo menos 2 # métodos especiais na sua classe. Crie um objeto da sua classe e faça uma chamada a pelo menos um dos seus métodos # especiais. class Pessoa(): def __init__(self, nome, cidade, telefone, email): self.nome = nome self.cidade = cidade self.telefone = telefone self.email = email print("Pessoa criada com sucesso!") def __str__(self): return "Impressão especial da pessoa: %s, da cidade de %s, com telefone %s e e-mail %s" \ %(self.nome, self.cidade, self.telefone, self.email) def printPessoa(self): print("Dados da pessoa cadastrada:") print("Nome: %s" %self.nome) print("Cidade: %s" %self.cidade) print("Telefone: %s" %self.telefone) print("email: %s" %self.email) # - pessoa1 = Pessoa("Aline", "Anapolis", 32326965, "<EMAIL>") str(pessoa1) pessoa1.printPessoa() pessoa1.nome # + # Exercício 3 - Crie a classe Smartphone com 2 atributos, tamanho e interface e crie a classe MP3Player com os # atributos capacidade. A classe MP3player deve herdar os atributos da classe Smartphone. 
class Smartphone():
    """Exercise-3 base class: a phone with a size and an interface type."""

    def __init__(self, tamanho, interface):
        self.tamanho = tamanho      # physical size
        self.interface = interface  # UI / screen type
        print("Smartphone criado com sucesso!")

    def imprimir(self):
        # Parent-class print method (kept for the inheritance demo).
        print("Impressão classe pai.")


class MP3Player(Smartphone):
    """MP3 player that inherits ``tamanho`` and ``interface`` from Smartphone."""

    def __init__(self, capacidade, tamanho="pequeno", interface="led"):
        self.capacidade = capacidade  # storage capacity, e.g. "32 Gb"
        # Fix: resolve the parent initializer through the MRO with super()
        # instead of hard-coding Smartphone.__init__(self, ...).
        super().__init__(tamanho, interface)
        print("MP3 criado com sucesso")

    def imprimirMp3(self):
        # Child-class print method (kept for the inheritance demo).
        print("Impressão classe filha")
# -

novo = MP3Player("32 Gb")

novo.imprimir()

novo.imprimirMp3()

# ### FIM

# ### Obrigado - Data Science Academy - <a href=http://facebook.com/dsacademy>facebook.com/dsacademybr</a>
05-Orientacao_Objetos/Aline/06-Exercicios - Resolvidos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import baselines.common.plot_util as pu import scipy # # Choose folders to load # + folders_to_load = { 'dqn':['./save/2019-06-01.23:57:30/progress.csv', './save/2019-06-02.00:03:53/progress.csv', './save/2019-06-02.00:06:59/progress.csv', ], 'dueling':['./save/2019-06-02.00:22:31/progress.csv', './save/2019-06-02.00:25:26/progress.csv', './save/2019-06-02.00:28:26/progress.csv', ] } folders_to_load # - cmap = ['green','darkorange','red','blue','purple','yellow'] assert len(cmap) >= len(folders_to_load) # # Set params max_timestep = 20000 n_steps_to_resample = 500 steps_resampled = np.linspace(0, max_timestep, n_steps_to_resample, endpoint=True) # # Plot raw data # # + # Without average # + c_idx=-1 plt.figure( figsize=(10,10) ) for group, path_to_logs in folders_to_load.items(): print('== ',group,' ==') c_idx+=1 for idx,path_to_log in enumerate(path_to_logs): # Get raw curves results=pd.read_csv(path_to_log, sep=',',header=0) rewards = np.reshape(results['mean 100 episode reward'].values, [-1,1]) episodes = np.reshape(results['episodes'].values, [-1,1]) steps = results['steps'].values episode_reward = results['episode_reward'].values # Resample curves steps_resampled = np.linspace(0, max_timestep, n_steps_to_resample, endpoint=True) episode_reward_resampled = np.interp(steps_resampled, steps, episode_reward) # Reshape steps = np.reshape(steps,[-1,1]) steps_resampled = np.reshape(steps_resampled,[-1,1]) episode_reward = np.reshape(episode_reward,[-1,1]) episode_reward_resampled = np.reshape(episode_reward_resampled,[-1,1]) # Plot resampled curves if idx==0: plt.plot(steps_resampled, episode_reward_resampled,c=cmap[c_idx],label=group) else: plt.plot(steps_resampled, 
episode_reward_resampled,c=cmap[c_idx]) plt.xlim((0,max_timestep)) plt.legend() plt.show() # + # With average # + c_idx=-1 plt.figure( figsize=(10,10) ) for group, path_to_logs in folders_to_load.items(): c_idx+=1 episode_reward_resampled_global = None for path_to_log in path_to_logs: # Get raw curves results=pd.read_csv(path_to_log, sep=',',header=0) rewards = np.reshape(results['mean 100 episode reward'].values, [-1,1]) episodes = np.reshape(results['episodes'].values, [-1,1]) steps = results['steps'].values episode_reward = results['episode_reward'].values # Resample curves steps_resampled = np.linspace(0, max_timestep, n_steps_to_resample, endpoint=True) episode_reward_resampled = np.interp(steps_resampled, steps, episode_reward) # Reshape steps = np.reshape(steps,[-1,1]) steps_resampled = np.reshape(steps_resampled,[-1,1]) episode_reward = np.reshape(episode_reward,[-1,1]) episode_reward_resampled = np.reshape(episode_reward_resampled,[-1,1]) episode_reward_resampled_global = episode_reward_resampled if episode_reward_resampled_global is None else np.concatenate([episode_reward_resampled_global,episode_reward_resampled],axis=1) # Get mean and std of resampled curves r_mean = np.mean(episode_reward_resampled_global,axis=1) r_std = np.std(episode_reward_resampled_global,axis=1) # Plot mean resampled curves plt.legend(group) plt.plot(steps_resampled, r_mean,c=cmap[c_idx],label=group) # Plot std deviation resampled curves plt.fill_between(steps_resampled.flatten(), r_mean-r_std, r_mean+r_std, alpha=0.5, edgecolor=cmap[c_idx], facecolor=cmap[c_idx]) plt.xlim((0,max_timestep)) plt.legend() plt.show() # - # # Plot smoothed data # radius=10 # + # Without average # + c_idx=-1 plt.figure( figsize=(10,10) ) for group, path_to_logs in folders_to_load.items(): c_idx+=1 for idx,path_to_log in enumerate(path_to_logs): # Get raw curves results=pd.read_csv(path_to_log, sep=',',header=0) rewards = np.reshape(results['mean 100 episode reward'].values, [-1,1]) episodes = 
np.reshape(results['episodes'].values, [-1,1]) steps = results['steps'].values episode_reward = results['episode_reward'].values # Resample curves steps_resampled = np.linspace(0, max_timestep, n_steps_to_resample, endpoint=True) episode_reward_resampled = np.interp(steps_resampled, steps, episode_reward) # Reshape steps = np.reshape(steps,[-1,1]) steps_resampled = np.reshape(steps_resampled,[-1,1]) episode_reward = np.reshape(episode_reward,[-1,1]) # Smooth resampled curves episode_reward_resampled_smoothed = pu.smooth(episode_reward_resampled,radius=radius) episode_reward_resampled_smoothed = np.reshape(episode_reward_resampled_smoothed,[-1,1]) # Plot resampled curves if idx==0: plt.plot(steps_resampled, episode_reward_resampled_smoothed,c=cmap[c_idx],label=group) else: plt.plot(steps_resampled, episode_reward_resampled_smoothed,c=cmap[c_idx]) plt.xlim((0,max_timestep)) plt.legend() plt.show() # + # With average # + c_idx=-1 plt.figure( figsize=(10,10) ) for group, path_to_logs in folders_to_load.items(): c_idx+=1 episode_reward_resampled_global = None for path_to_log in path_to_logs: # Get raw curves results=pd.read_csv(path_to_log, sep=',',header=0) rewards = np.reshape(results['mean 100 episode reward'].values, [-1,1]) episodes = np.reshape(results['episodes'].values, [-1,1]) steps = results['steps'].values episode_reward = results['episode_reward'].values # Resample curves steps_resampled = np.linspace(0, max_timestep, n_steps_to_resample, endpoint=True) episode_reward_resampled = np.interp(steps_resampled, steps, episode_reward) # Reshape steps = np.reshape(steps,[-1,1]) steps_resampled = np.reshape(steps_resampled,[-1,1]) episode_reward = np.reshape(episode_reward,[-1,1]) # Smooth resampled curves episode_reward_resampled_smoothed = pu.smooth(episode_reward_resampled,radius=radius) episode_reward_resampled_smoothed = np.reshape(episode_reward_resampled_smoothed,[-1,1]) episode_reward_resampled_global = episode_reward_resampled_smoothed if 
episode_reward_resampled_global is None else np.concatenate([episode_reward_resampled_global,episode_reward_resampled_smoothed],axis=1) # Get mean and std of resampled curves print('episode_reward_resampled_global',episode_reward_resampled_global.shape) r_mean = np.mean(episode_reward_resampled_global,axis=1) r_std = np.std(episode_reward_resampled_global,axis=1) print('r_mean',r_mean.shape) print('r_std',r_std.shape) # Plot mean resampled curves plt.plot(steps_resampled, r_mean,c=cmap[c_idx],label=group) # Plot std deviation resampled curves plt.fill_between(steps_resampled.flatten(), r_mean-r_std, r_mean+r_std, alpha=0.5, edgecolor=cmap[c_idx], facecolor=cmap[c_idx]) plt.xlim((0,max_timestep)) plt.legend() plt.show() # - # # TODO: filter out episodes whose length is too long...
visualize_results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Neural Networks

# ## Import and Prepare the Data

# +
import sklearn
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
# %matplotlib inline
import pandas
import numpy as np
import mglearn
from collections import Counter
from sklearn.metrics import cohen_kappa_score
from sklearn import preprocessing

df = pandas.read_excel('house_price_label.xlsx')

# Combine multiple columns into a 2D array, converting the integer data to float.
X = np.column_stack((df.built_in.astype(float), df.price.astype(float)))
X = preprocessing.scale(X)  # scale the data before training the model
y = df.house_type

# For classification, make sure a stratified splitting method is selected so
# class proportions are preserved in both splits.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, stratify=y, random_state=0)

# Use mglearn to visualize the data.
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
# BUG FIX: the original called plt.legend(y, ...) with the full label Series,
# which pairs the scatter handles with one (duplicated) label per sample.
# Pass the unique class labels instead, matching the per-class scatter groups.
plt.legend(np.unique(y), loc='best')
plt.xlabel('build_in')
plt.ylabel('house price')
plt.show()
# -

# ## Multilayer Perceptions

# +
from sklearn.neural_network import MLPClassifier

mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=(10,),
                    random_state=0).fit(X_train, y_train)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], mlp.predict(X_train))
plt.legend(np.unique(y), loc='best')  # unique class labels (see fix above)
plt.xlabel('build_in')
plt.ylabel('house price')
plt.show()
print("Training set accuracy: {:.2f}".format(mlp.score(X_train, y_train)))
print("Training Kappa: {:.3f}".format(cohen_kappa_score(y_train, mlp.predict(X_train))))
print("Test set accuracy: {:.2f}".format(mlp.score(X_test, y_test)))
print("Test Kappa: {:.3f}".format(cohen_kappa_score(y_test, mlp.predict(X_test))))

# +
from sklearn.neural_network import MLPClassifier

mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=(20, 20, 20),
                    random_state=0).fit(X_train, y_train)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], mlp.predict(X_train))
plt.legend(np.unique(y), loc='best')  # unique class labels (see fix above)
plt.xlabel('build_in')
plt.ylabel('house price')
plt.show()
print("Training set accuracy: {:.2f}".format(mlp.score(X_train, y_train)))
print("Training Kappa: {:.3f}".format(cohen_kappa_score(y_train, mlp.predict(X_train))))
print("Test set accuracy: {:.2f}".format(mlp.score(X_test, y_test)))
print("Test Kappa: {:.3f}".format(cohen_kappa_score(y_test, mlp.predict(X_test))))
# -

# ## Tuning the Parameters

# +
# Grid over network width (rows) and L2 regularization strength alpha (columns).
fig, axes = plt.subplots(2, 4, figsize=(20, 8))
for axx, n_hidden_nodes in zip(axes, [10, 20]):
    for ax, alpha in zip(axx, [0.0001, 0.01, 0.1, 1]):
        mlp = MLPClassifier(solver='lbfgs', random_state=0,
                            hidden_layer_sizes=[n_hidden_nodes, n_hidden_nodes],
                            alpha=alpha)
        mlp.fit(X_train, y_train)
        mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1],
                                 mlp.predict(X_train), ax=ax)
        # TYPO FIX: "kapa" -> "kappa" in the displayed subplot title.
        ax.set_title("n_hidden=[{}, {}]\nalpha={:.4f}\nkappa={:.4f}".format(
            n_hidden_nodes, n_hidden_nodes, alpha,
            cohen_kappa_score(y_train, mlp.predict(X_train))))
plt.subplots_adjust(hspace=0.5)
# -

# ## Inspect the Model

# +
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=(20, 20),
                    random_state=0).fit(X_train, y_train)
fig, axes = plt.subplots(1, 3, figsize=(20, 8))
# One heat map per weight matrix: input->hidden1, hidden1->hidden2, hidden2->output.
for i, ax in zip(range(3), axes):
    img = ax.imshow(mlp.coefs_[i], interpolation='none', cmap='viridis')
    ax.set_title(" No.{} layer".format(i))
    ax.set_xlabel("Columns in weight matrix")
    ax.set_ylabel("Input feature")
    fig.colorbar(img, ax=ax)
# -
Lab8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Countries

# **[Work in progress]**
#
# This notebook creates a .csv file with country information for ingestion into the Knowledge Graph.
#
# Data source: [GeoNames.org](https://download.geonames.org/export/dump/)
#
# Author: <NAME> (<EMAIL>)

import os
from pathlib import Path
import pandas as pd

pd.options.display.max_rows = None     # display all rows
pd.options.display.max_columns = None  # display all columns

# Output directory for Neo4j ingestion, taken from the environment.
NEO4J_IMPORT = Path(os.getenv('NEO4J_IMPORT'))
print(NEO4J_IMPORT)

# ### Create countries

country_url = 'https://download.geonames.org/export/dump/countryInfo.txt'
names = ['ISO', 'ISO3', 'ISO-Numeric', 'fips', 'Country', 'Capital', 'Area(in sq km)', 'Population',
         'Continent', 'tld', 'CurrencyCode', 'CurrencyName', 'Phone', 'Postal Code Format',
         'Postal Code Regex', 'Languages', 'geonameid', 'neighbours', 'EquivalentFipsCode'
         ]
countries = pd.read_csv(country_url, sep='\t', comment='#', dtype='str', names=names)

# ### Add missing data

# Add missing ISO code for Namibia: read_csv treats the literal string "NA"
# as missing by default, so Namibia's ISO code is lost on load.
# BUG FIX: the original did `index = countries.query("ISO3 == 'NAM'").index`
# and then `countries.at[index, 'ISO'] = 'NA'` — but `.at` requires a scalar
# label, not a whole Index object, and raises on modern pandas. A boolean-mask
# `.loc` assignment is the robust equivalent.
countries.loc[countries['ISO3'] == 'NAM', 'ISO'] = 'NA'

countries.head()

# ### Standardize column names for Knowlege Graph
# * id: unique identifier for country
# * name: name of node
# * parentId: unique identifier for continent
# * properties: camelCase

# +
# https://www.iso.org/obp/ui/#iso:code:3166:BQ
# -

countries['id'] = countries['ISO']  # standard id column to link nodes
# Consolidate the original chain of single-column rename calls into one call.
countries.rename(columns={'ISO': 'iso',
                          'ISO3': 'iso3',
                          'ISO-Numeric': 'isoNumeric',
                          'Country': 'name',
                          'Population': 'population',
                          'Area(in sq km)': 'areaSqKm',
                          'geonameid': 'geonameId'}, inplace=True)

# ### Export a minimum subset for now

countries = countries[['id', 'name', 'iso', 'iso3', 'isoNumeric', 'areaSqKm', 'geonameId']].copy()
countries.fillna('', inplace=True)
countries.head(300)
countries.to_csv(NEO4J_IMPORT / "00e-GeoNamesCountry.csv", index=False)
notebooks/dataprep/00e-GeoNamesCountry.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="qI25LKhSNg02" """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. 5. Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect """ # If you're using Google Colab and not running locally, run this cell. import os # Install dependencies # !pip install wget # !apt-get install sox libsndfile1 ffmpeg # !pip install unidecode # !pip install matplotlib>=3.3.2 ## Install NeMo BRANCH = 'r1.3.0' # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all] # + id="Ac55MjAM5cls" # In a conda environment, you would use the following command # Update Numba to > 0.53 # conda install -c numba numba # or # conda update -c numba numba # For pip based environments, # Update Numba to > 0.53 # !pip install --upgrade numba # + [markdown] id="dqbpRwpnQ-1D" # # Intro to Transducers # # By following the earlier tutorials for Automatic Speech Recognition in NeMo, one would have probably noticed that we always end up using [Connectionist Temporal Classification (CTC) loss](https://distill.pub/2017/ctc/) in order to train the model. Speech Recognition can be formulated in many different ways, and CTC is a more popular approach because it is a monotonic loss - an acoustic feature at timestep $t_1$ and $t_2$ will correspond to a target token at timestep $u_1$ and only then $u_2$. 
This monotonic property significantly simplifies the training of ASR models and speeds up convergence. However, it has certain drawbacks that we will discuss below. # # In general, ASR can be described as a sequence-to-sequence prediction task - the original sequence is an audio sequence (often transformed into mel spectrograms). The target sequence is a sequence of characters (or subword tokens). Attention models are capable of the same sequence-to-sequence prediction tasks. They can even perform better than CTC due to their autoregressive decoding. However, they lack certain inductive biases that can be leveraged to stabilize and speed up training (such as the monotonicity exhibited by the CTC loss). Furthermore, by design, attention models require the entire sequence to be available to align the sequence to the output, thereby preventing their use for streaming inference. # # Then comes the [Transducer Loss](https://arxiv.org/abs/1211.3711). Proposed by <NAME>, it aimed to resolve the issues in CTC loss while resolving the transcription accuracy issues by performing autoregressive decoding. # # # + [markdown] id="OaPS4_xSRGNv" # ## Drawbacks of Connectionist Temporal Classification (CTC) # # CTC is an excellent loss to train ASR models in a stable manner but comes with certain limitations on model design. If we presume speech recognition to be a sequence-to-sequence problem, let $T$ be the sequence length of the acoustic model's output, and let $U$ be the sequence length of the target text transcript (post tokenization, either as characters or subwords). # # ------- # # 1) CTC imposes the limitation : $T \ge U$. Normally, this assumption is naturally valid because $T$ is generally a lot longer than the final text transcription. However, there are many cases where this assumption fails. # # - Acoustic model performs downsampling to such a degree that $T < U$. Why would we want to perform so much downsampling? 
For convolutions, longer sequences take more stride steps and more memory. For Attention-based models (say Conformer), there's a quadratic memory cost of computing the attention step in proportion to $T$. So more downsampling significantly helps relieve the memory requirements. There are ways to bypass this limitation, as discussed in the `ASR_with_Subword_Tokenization` notebook, but even that has limits. # # - The target sequence is generally very long. Think of languages such as German, which have very long translations for short English words. In the task of ASR, if there is more than 2x downsampling and character tokenization is used, the model will often fail to learn due to this CTC limitation. # # 2) Tokens predicted by models which are trained with just CTC loss are assumed to be *conditionally independent*. This means that, unlike language models where *h*-*e*-*l*-*l* as input would probably predict *o* to complete *hello*, for CTC trained models - any character from the English alphabet has equal likelihood for prediction. So CTC trained models often have misspellings or missing tokens when transcribing the audio segment to text. # # - Since we often use the Word Error Rate (WER) metric when evaluating models, even a single misspelling contributes significantly to the "word" being incorrect. # # - To alleviate this issue, we have to resort to Beam Search via an external language model. While this often works and significantly improves transcription accuracy, it is a slow process and involves large N-gram or Neural language models. 
# + [markdown] id="5EVBcBDNf658"
# --------
#
# Let's see CTC loss's limitation (1) in action:

# + id="4whMzIjYf4w8"
import torch
import torch.nn as nn

# + id="aGdKAFe7gGY4"
T = 10  # acoustic sequence length
U = 16  # target sequence length
V = 28  # vocabulary size


def get_sample(T, U, V, require_grad=True):
    """Build one deterministic demo sample for CTC.

    Returns a 4-tuple: acoustic logits of shape (1, T, V + 1), the acoustic
    length tensor, target tokens of shape (1, U), and the target length
    tensor. The RNG is re-seeded on every call, so repeated calls with the
    same arguments yield identical tensors.
    """
    torch.manual_seed(0)
    logits = torch.randn(1, T, V + 1, requires_grad=require_grad)
    # No padding is applied here, so the recorded lengths equal T and U exactly.
    logits_len = torch.tensor([T], dtype=torch.int32)
    labels = torch.randint(low=0, high=V, size=(1, U))
    labels_len = torch.tensor([U], dtype=torch.int32)
    return logits, logits_len, labels, labels_len


# + id="DTYIb-7ngo_L"
# First, we use CTC loss in the general sense.
loss = torch.nn.CTCLoss(blank=V, zero_infinity=False)

acoustic_seq, acoustic_seq_len, target_seq, target_seq_len = get_sample(T, U, V)

# CTC loss expects acoustic sequence to be in shape (T, B, V)
log_probs = acoustic_seq.transpose(1, 0)
val = loss(log_probs, target_seq, acoustic_seq_len, target_seq_len)
print("CTC Loss :", val)

val.backward()
print("Grad of Acoustic model (over V):", acoustic_seq.grad[0, 0, :])

# + id="lBDvC2RykFC4"
# Next, we use CTC loss with `zero_infinity` flag set.
loss = torch.nn.CTCLoss(blank=V, zero_infinity=True)

acoustic_seq, acoustic_seq_len, target_seq, target_seq_len = get_sample(T, U, V)

# CTC loss expects acoustic sequence to be in shape (T, B, V)
log_probs = acoustic_seq.transpose(1, 0)
val = loss(log_probs, target_seq, acoustic_seq_len, target_seq_len)
print("CTC Loss :", val)

val.backward()
print("Grad of Acoustic model (over V):", acoustic_seq.grad[0, 0, :])

# + [markdown] id="SQe6WYnWkSAZ"
# -------
#
# As we saw, CTC loss in general case will not be able to compute the loss or the gradient when $T < U$. In the PyTorch specific implementation of CTC Loss, we can specify a flag `zero_infinity`, which explicitly checks for such cases, zeroes out the loss and the gradient if such a case occurs.
The flag allows us to train a batch of samples where some samples may accidentally violate this limitation, but training will not halt, and gradients will not become NAN. # + [markdown] id="EGnc5HqnZ-GZ" # ## What is the Transducer Loss ? # + [markdown] id="0W12xF_CqcVF" # ![](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/images/transducer.png?raw=true) # + [markdown] id="RoOQJtIkqxbA" # A model that seeks to use the Transducer loss is composed of three models that interact with each other. They are: # # ------- # # 1) **Acoustic model** : This is nearly the same acoustic model used for CTC models. The output shape of these models is generally $(Batch, \, T, \, AM-Hidden)$. You will note that unlike for CTC, the output of the acoustic model is no longer passed through a decoder layer which would have the shape $(Batch, \, T, \, Vocabulary + 1)$. # # 2) **Prediction / Decoder model** : The prediction model accepts a sequence of target tokens (in the case of ASR, text tokens) and is usually a causal auto-regressive model that is tasked with prediction some hidden feature dimension of shape $(Batch, \, U, \, Pred-Hidden)$. # # 3) **Joint model** : This model accepts the outputs of the Acoustic model and the Prediction model and joins them to compute a joint probability distribution over the vocabulary space to compute the alignments from Acoustic sequence to Target sequence. The output of this model is of the shape $(Batch, \, T, \, U, \, Vocabulary + 1)$. # # -------- # # During training, the transducer loss is computed on the output of the joint model, which computes the joint probability distribution of a target vocabulary token $v_{t, u}$ (for all $v \in V$) being predicted given the acoustic feature at timestep $t \le T$ and the prediction network features at timestep $u \le U$. 
# # -------- # # During inference, we perform a single forward pass over the Acoustic Network to obtain the features of shape $(Batch, \, T, \, AM-Hidden)$, and autoregressively perform the forward passes of the Prediction Network and the Joint Network to decode several $u \le U$ target tokens per acoustic timestep $t \le T$. We will discuss decoding in the following sections. # # + [markdown] id="yBxtxt2Ztuoo" # --------- # # **Note**: For an excellent in-depth explanation of how Transducer loss works, how it computes the alignment, and how the gradient of this alignment is calculated, we highly encourage you to read this post about [Sequence-to-sequence learning with Transducers by Loren Lugosch](https://lorenlugosch.github.io/posts/2020/11/transducer/). # # --------- # + [markdown] id="VgdYFkeyRGP-" # ## Benefits of Transducer Loss # # Now that we understand what a Transducer model is comprised of and how it is trained, the next question that comes to mind is - What is the benefit of the Transducer loss? # # ------ # # 1) It is a monotonic loss (similar to CTC). Monotonicity speeds up convergence and does not require auxiliary losses to stabilize training (which is required when using only attention-based loss for sequence-to-sequence training). # # 2) Autoregressive decoding enables the model to implicitly have a dependency between predicted tokens (the conditional independence assumption of CTC trained models is corrected). As such, missing characters or incorrect spellings are less frequent (but still exist since no model is perfect). # # 3) It no longer has the $T \ge U$ limitation that CTC imposed. This is because the total joint probability distribution is calculated now - mapping every acoustic timestep $t \le T$ to one or more target timestep $u \le U$. This means that for each timestep $t$, the model has at most $U$ tokens that it can predict, and therefore in the extreme case, it can predict a total of $T \times U$ tokens! 
# + [markdown] id="wisUfV8aRGSY" # ## Drawbacks of Transducer Loss # # All of these benefits come with certain costs. As is (almost) always the case in machine learning, there is no free lunch. # # ------- # # 1) During training, the Joint model is required to compute a joint matrix of shape $(Batch, \, T, \, U, \, Vocabulary + 1)$. If you consider the value of these constants for a general dataset like Librispeech, $T \sim 1600$, $U \sim 450$ (with character encoding) and vocabulary $V \sim 28+1$. Considering a batch size of 32, that total memory cost comes out to roughly **2.7 GB** at float precision. The model would also need another **2.7 GB** for the gradients. Of course, the model needs more memory still for the actual Acoustic model + Prediction model + their gradients. Note, however - this issue can be *partially* resolved with some simple tricks, which are discussed in the next tutorial. Also, this memory cost is no longer an issue during inference! # # 2) Autoregressive decoding is slow. Much slower than CTC models, which require just a simple argmax of the output tensor. So while we do get superior transcription quality, we sacrifice decoding speed. 
# + [markdown] id="RuASpPlD2con"
# --------
#
# Let's check that RNNT loss no longer shows the limitations of CTC loss -

# + id="XXodnve02c8h"
T = 10  # acoustic sequence length
U = 16  # target sequence length
V = 28  # vocabulary size


def get_rnnt_sample(T, U, V, require_grad=True):
    """Build one deterministic demo sample for the RNNT (transducer) loss.

    Returns a 4-tuple: a joint tensor of shape (1, T, U + 1, V + 1), the
    acoustic length tensor, target tokens of shape (1, U), and the target
    length tensor. Re-seeds the RNG, so results are reproducible per call.
    """
    torch.manual_seed(0)
    joint = torch.randn(1, T, U + 1, V + 1, requires_grad=require_grad)
    # No padding is applied here, so the recorded lengths equal T and U exactly.
    joint_len = torch.tensor([T], dtype=torch.int32)
    labels = torch.randint(low=0, high=V, size=(1, U))
    labels_len = torch.tensor([U], dtype=torch.int32)
    return joint, joint_len, labels, labels_len


# + id="w-9Qx01G21oK"
import nemo.collections.asr as nemo_asr

# + id="6hb7q81f21qj"
joint_tensor, acoustic_seq_len, target_seq, target_seq_len = get_rnnt_sample(T, U, V)

# RNNT loss expects joint tensor to be in shape (B, T, U, V)
loss = nemo_asr.losses.rnnt.RNNTLoss(num_classes=V)

# Uncomment to check out the keyword arguments required to call the RNNT loss
print("Transducer loss input types :", loss.input_types)
print()

val = loss(log_probs=joint_tensor, targets=target_seq,
           input_lengths=acoustic_seq_len, target_lengths=target_seq_len)
print("Transducer Loss :", val)

val.backward()
print("Grad of Acoustic model (over V):", joint_tensor.grad[0, 0, 0, :])

# + [markdown] id="5pfrpy7wRGUc"
# # Configure a Transducer Model
#
# We now understand a bit more about the transducer loss. Next, we will take a deep dive into how to set up the config for a transducer model.
#
# Transducer configs contain a fair bit more detail as compared to CTC configs. However, the vast majority of the defaults can be copied and pasted into your configs to have a perfectly functioning transducer model!
#
# ------
#
# Let us download one of the transducer configs already available in NeMo to analyze the components.
# + id="cgJQXfwy7LO_" import os if not os.path.exists("contextnet_rnnt.yaml"): # !wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/asr/conf/contextnet_rnnt/contextnet_rnnt.yaml # + id="CJ2-ORS17XbF" from omegaconf import OmegaConf, open_dict cfg = OmegaConf.load('contextnet_rnnt.yaml') # + [markdown] id="h5TsAJQk6o4N" # ## Model Defaults # # Since the transducer model is comprised of three seperate models working in unison, it is practical to have some shared section of the config. That shared section is called `model.model_defaults`. # + id="N8tWZ9eb75Gx" print(OmegaConf.to_yaml(cfg.model.model_defaults)) # + [markdown] id="m8IxgJFj7_gc" # ------- # # Of the many components shared here, the last three values are the primary components that a transducer model **must** possess. They are : # # 1) `enc_hidden`: The hidden dimension of the final layer of the Encoder network. # # 2) `pred_hidden`: The hidden dimension of the final layer of the Prediction network. # # 3) `joint_hidden`: The hidden dimension of the intermediate layer of the Joint network. # # -------- # # One can access these values inside the config by using OmegaConf interpolation as follows : # # ```yaml # model: # ... # decoder: # ... # prednet: # pred_hidden: ${model.model_defaults.pred_hidden} # ``` # + [markdown] id="uRckIz_eRGWr" # ## Acoustic Model # # As we discussed before, the transducer model is comprised of three models combined. One of these models is the Acoustic (encoder) model. We should be able to drop in any CTC Acoustic model config into this section of the transducer config. # # The only condition that needs to be met is that **the final layer of the acoustic model must have the dimension defined in `model_defaults.enc_hidden`**. # + [markdown] id="o505IGX4RGYy" # ## Decoder / Prediction Model # # The Prediction model is generally an autoregressive, causal model that consumes text tokens and returns embeddings that will be used by the Joint model. 
# # **This config can be dropped into any custom transducer model with no modification.** # + id="A2a9Y5CCArLs" print(OmegaConf.to_yaml(cfg.model.decoder)) # + [markdown] id="ry5a_Z-zAvll" # ------ # # This config will build an LSTM based Transducer Decoder model. Let us discuss some of the important arguments: # # 1) `blank_as_pad`: In ordinary transducer models, the embedding matrix does not acknowledge the `Transducer Blank` token (similar to CTC Blank). However, this causes the autoregressive loop to be more complicated and less efficient. Instead, this flag which is set by default, will add the `Transducer Blank` token to the embedding matrix - and use it as a pad value (zeros tensor). This enables more efficient inference without harming training. # # 2) `prednet.pred_hidden`: The hidden dimension of the LSTM and the output dimension of the Prediction network. # # + [markdown] id="FtdYE25cW1j_" # ## Joint Model # # The Joint model is a simple feed-forward Multi-Layer Perceptron network. This MLP accepts the output of the Acoustic and Prediction models and computes a joint probability distribution over the entire vocabulary space. # # **This config can be dropped into any custom transducer model with no modification.** # + id="pP8fL1bED3Dv" print(OmegaConf.to_yaml(cfg.model.joint)) # + [markdown] id="rA11eez_FYUl" # ------ # # The Joint model config has several essential components which we discuss below : # # 1) `log_softmax`: Due to the cost of computing softmax on such large tensors, the Numba CUDA implementation of RNNT loss will implicitly compute the log softmax when called (so its inputs should be logits). The CPU version of the loss doesnt face such memory issues so it requires log-probabilities instead. Since the behaviour is different for CPU-GPU, the `None` value will automatically switch behaviour dependent on whether the input tensor is on a CPU or GPU device. 
# # 2) `preserve_memory`: This flag will call `torch.cuda.empty_cache()` at certain critical sections when computing the Joint tensor. While this operation might allow us to preserve some memory, the empty_cache() operation is tremendously slow and will slow down training by an order of magnitude or more. It is available to use but not recommended. # # 3) `experimental_fuse_loss_wer`: This flag performs "batch splitting" and then "fused loss + metric" calculation. It will be discussed in detail in the next tutorial that will train a Transducer model. # # 4) `fused_batch_size`: When the above flag is set to True, the model will have two distinct "batch sizes". The batch size provided in the three data loader configs (`model.*_ds.batch_size`) will now be the `Acoustic model` batch size, whereas the `fused_batch_size` will be the batch size of the `Prediction model`, the `Joint model`, the `transducer loss` module and the `decoding` module. # # 5) `jointnet.joint_hidden`: The hidden intermediate dimension of the joint network. # + [markdown] id="cmIwDscCW1mP" # ## Transducer Decoding # # Models which have been trained with CTC can transcribe text simply by performing a regular argmax over the output of their decoder. # # For transducer-based models, the three networks must operate in a synchronized manner in order to transcribe the acoustic features. # # The following section of the config describes how to change the decoding logic of the transducer model. # # # **This config can be dropped into any custom transducer model with no modification.** # + id="LQjfXJsrIqFJ" print(OmegaConf.to_yaml(cfg.model.decoding)) # + [markdown] id="6FXtn41wIvu8" # ------- # # The most important component at the top level is the `strategy`. It can take one of many values: # # 1) `greedy`: This is sample-level greedy decoding. It is generally exceptionally slow as each sample in the batch will be decoded independently. 
For publications, this should be used alongside batch size of 1 for exact results. # # 2) `greedy_batch`: This is the general default and should nearly match the `greedy` decoding scores (if the acoustic features are not affected by feature mixing in batch mode). Even for small batch sizes, this strategy is significantly faster than `greedy`. # # 3) `beam`: Runs beam search with the implicit language model of the Prediction model. It will generally be quite slow, and might need some tuning of the beam size to get better transcriptions. # # 4) `tsd`: Time synchronous decoding. Please refer to the paper: [Alignment-Length Synchronous Decoding for RNN Transducer](https://ieeexplore.ieee.org/document/9053040) for details on the algorithm implemented. Time synchronous decoding (TSD) execution time grows by the factor T * max_symmetric_expansions. For longer sequences, T is greater and can therefore take a long time for beams to obtain good results. TSD also requires more memory to execute. # # 5) `alsd`: Alignment-length synchronous decoding. Please refer to the paper: [Alignment-Length Synchronous Decoding for RNN Transducer](https://ieeexplore.ieee.org/document/9053040) for details on the algorithm implemented. Alignment-length synchronous decoding (ALSD) execution time is faster than TSD, with a growth factor of T + U_max, where U_max is the maximum target length expected during execution. Generally, T + U_max < T * max_symmetric_expansions. However, ALSD beams are non-unique. Therefore it is required to use larger beam sizes to achieve the same (or close to the same) decoding accuracy as TSD. For a given decoding accuracy, it is possible to attain faster decoding via ALSD than TSD. # # ------- # # Below, we discuss the various decoding strategies. # + [markdown] id="PXzY7laMW1oo" # ### Greedy Decoding # # When `strategy` is one of `greedy` or `greedy_batch`, an additional subconfig of `decoding.greedy` can be used to set an important decoding value. 
# + id="778R5oy6Ipha" print(OmegaConf.to_yaml(cfg.model.decoding.greedy)) # + [markdown] id="vItXzTbZKwyB" # ------- # # This argument `max_symbols` is the maximum number of `target token` decoding steps $u \le U$ per acoustic timestep $t \le T$. Note that during training, this was implicitly constrained by the shape of the joint matrix (max_symbols = $U$). However, there is no such $U$ upper bound during inference (we dont have the ground truth $U$). # # So we explicitly set a heuristic upper bound on how many decoding steps can be performed per acoustic timestep. Generally a value of 5 and above is suffcient. # + [markdown] id="ebFogfLvW1q9" # ### Beam Decoding # # Next, we discuss the subconfig when `strategy` is one of `beam`, `tsd` or `alsd`. # + id="w073zT8ILtki" print(OmegaConf.to_yaml(cfg.model.decoding.beam)) # + [markdown] id="AvOeRhsULtrx" # ------ # # There are several important arguments in this section : # # 1) `beam_size`: This determines the beam size for all types of beam decoding strategy. Since this is implemented in PyTorch, large beam sizes will take exorbitant amounts of time. # # 2) `score_norm`: Whether to normalize scores prior to pruning the beam. # # 3) `return_best_hypothesis`: If beam search is being performed, we can choose to return just the best hypothesis or all the hypotheses. # # 4) `tsd_max_sym_exp`: The maximum symmetric expansions allowed per timestep during beam search. Larger values should be used to attempt decoding of longer sequences, but this in turn increases execution time and memory usage. # # 5) `alsd_max_target_len`: The maximum expected target sequence length during beam search. Larger values allow decoding of longer sequences at the expense of execution time and memory. # # + [markdown] id="MkoHp0dQW1tP" # ## Transducer Loss # # Finally, we reach the Transducer loss config itself. This section configures the type of Transducer loss itself, along with possible sub-sections. 
# # **This config can be dropped into any custom transducer model with no modification.** # + id="l3Uk11uHOa4O" print(OmegaConf.to_yaml(cfg.model.loss)) # + [markdown] id="1z_mYV9UOk_7" # --------- # # The loss config is based on a resolver pattern and can be used as follows: # # 1) `loss_name`: `default` is generally a good option. Will select one of the available resolved losses and match the kwargs from a sub-configs passed via explicit `{loss_name}_kwargs` sub-config. # # 2) `{loss_name}_kwargs`: This sub-config is passed to the resolved loss above and can be used to configure the resolved loss. # + [markdown] id="7w3Z3-IaRGaz" # ### WarpRNNT Numba Loss # # The default transducer loss implemented in NeMo is a Numba port of the excellent CUDA implementation of Transducer Loss found in https://github.com/HawkAaron/warp-transducer. # # It should suffice for most use cases (CPU / GPU) transducer training. # + [markdown] id="u9bPDu2-XYPB" # ### FastEmit Regularization # # Recently proposed regularization approach - [FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148) allows us near-direct control over the latency of transducer models. # # Refer to the above paper for results and recommendations of `fastemit_lambda`. # + [markdown] id="iGG9UKTZXlme" # # Next Steps # # After that deep dive into how to configure Transducer models, the next tutorial will use one such config to build a transducer model and train it on a small dataset. We will then move on to exploring various decoding strategies and how to evaluate the model.
tutorials/asr/Intro_to_Transducers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.6.10 64-bit (''env'': conda)'
#     name: python3
# ---

# + [markdown] pycharm={"is_executing": true}
# # Unsupervised Anomaly Detection based on Forecasts
# -

# Anomaly detection detects data points in data that does not fit well with the rest of data. In this notebook we demonstrate how to do anomaly detection using Chronos's built-in model MTNet

# For demonstration, we use the publicly available cluster trace data cluster-trace-v2018 of Alibaba Open Cluster Trace Program. You can find the dataset introduction <a href="https://github.com/alibaba/clusterdata/blob/master/cluster-trace-v2018/trace_2018.md" target="_blank">here</a>. In particular, we use machine usage data to demonstrate anomaly detection, you can download the separate data file directly with <a href="http://clusterdata2018pubcn.oss-cn-beijing.aliyuncs.com/machine_usage.tar.gz" target="_blank">machine_usage</a>.

# ## Helper functions

# This section defines some helper functions to be used in the following procedures. You can refer to it later when they're used.

def get_result_df(y_true_unscale, y_pred_unscale, ano_index, look_back, target_col='cpu_usage'):
    """
    Add prediction and anomaly value to dataframe.

    Builds a frame with ``y_true``/``y_pred`` columns and a boolean
    ``anomalies`` column that is True at the positions listed in
    ``ano_index``. (``look_back`` and ``target_col`` are accepted for
    interface compatibility but are not used by this helper.)
    """
    frame = pd.DataFrame({
        "y_true": y_true_unscale.squeeze(),
        "y_pred": y_pred_unscale.squeeze(),
    })
    # Mark the anomalous rows by position; everything else stays False.
    flags = pd.Series(False, index=frame.index)
    flags.iloc[ano_index] = True
    frame['anomalies'] = flags
    return frame


# + pycharm={"name": "#%%\n"}
def plot_anomalies_value(date, y_true, y_pred, anomalies):
    """
    plot the anomalies value
    """
    fig, ax = plt.subplots(figsize=(16, 6))
    ax.plot(date, y_true, color='blue', label='y_true')
    ax.plot(date, y_pred, color='orange', label='y_pred')
    # Highlight only the flagged points on top of the truth curve.
    ax.scatter(date[anomalies].tolist(), y_true[anomalies],
               color='red', label='anomalies value')
    ax.set_title('the anomalies value')
    plt.xlabel('datetime')
    plt.legend(loc='upper left')
    plt.show()


# + [markdown] pycharm={"name": "#%% md\n"}
# ## Download raw dataset and load into dataframe

# + [markdown] pycharm={"name": "#%% md\n"}
# Now we download the dataset and load it into a pandas dataframe. Steps are as below:
# * First, download the raw data <a href="http://clusterdata2018pubcn.oss-cn-beijing.aliyuncs.com/machine_usage.tar.gz" target="_blank">machine_usage</a>. Or run the script `get_data.sh` to download the raw data. It will download the resource usage of each machine from m_1932 to m_2085.
# * Second, run `grep m_1932 machine_usage.csv > m_1932.csv` to extract records of machine 1932. Or run `extract_data.sh`. We use machine 1932 as an example in this notebook. You can choose any machines in the similar way.
# * Finally, use pandas to load `m_1932.csv` into a dataframe as shown below.
# + pycharm={"name": "#%%\n"} import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # + pycharm={"name": "#%%\n"} df_1932 = pd.read_csv("m_1932.csv", header=None, usecols=[1,2,3], names=["time_step", "cpu_usage","mem_usage"]) # + [markdown] pycharm={"name": "#%% md\n"} # Below are some example records of the data # - df_1932.head() # + pycharm={"name": "#%%\n"} df_1932.sort_values(by="time_step", inplace=True) df_1932.reset_index(inplace=True) df_1932.sort_values(by="time_step").plot(y="cpu_usage", x="time_step", figsize=(16,6),title="cpu_usage of machine 1932") # + [markdown] pycharm={"name": "#%% md\n"} # ## Data pre-processing # + [markdown] pycharm={"name": "#%% md\n"} # Now we need to do data cleaning and preprocessing on the raw data. Note that this part could vary for different dataset. # # For the machine_usage data, the pre-processing convert the time step in seconds to timestamp starting from 2018-01-01. # + pycharm={"name": "#%%\n"} df_1932.reset_index(inplace=True) df_1932["time_step"] = pd.to_datetime(df_1932["time_step"], unit='s', origin=pd.Timestamp('2018-01-01')) # - # ## Feature Engineering & Data Preperation # For feature engineering, we use hour as feature in addition to the target cpu usage. # # For data preperation, we resample the average of cpu_usage in minutes, impute the data to handle missing data and scale the data. At last we generate the sample in numpy ndarray for Forecaster to use. # # We generate a built-in TSDataset to complete the whole processing. # + from bigdl.chronos.data import TSDataset from sklearn.preprocessing import StandardScaler # we look back one hour data which is of the frequency of 1min. 
look_back = 60  # one hour of 1-minute samples fed to the model
horizon = 1     # predict a single step ahead

# Split the frame chronologically into train/val/test TSDatasets (80/10/10).
tsdata_train, tsdata_val, tsdata_test = TSDataset.from_pandas(df_1932, dt_col="time_step", target_col="cpu_usage",
                                                              with_split=True, val_ratio=0.1, test_ratio=0.1)

standard_scaler = StandardScaler()
for tsdata in [tsdata_train, tsdata_val, tsdata_test]:
    # Fit the scaler on the training split only and reuse it for val/test.
    # BUG FIX: the original chain ended with a stray trailing backslash,
    # which spliced the next statement onto this expression and made the
    # script a syntax error — the backslash has been removed.
    tsdata.resample(interval='1min', merge_mode="mean")\
          .impute(mode="last")\
          .gen_dt_feature()\
          .scale(standard_scaler, fit=(tsdata is tsdata_train))\
          .roll(lookback=look_back, horizon=horizon, feature_col=["HOUR"])

x_train, y_train = tsdata_train.to_numpy()
x_val, y_val = tsdata_val.to_numpy()
x_test, y_test = tsdata_test.to_numpy()
# Drop the horizon axis since horizon == 1.
y_train, y_val, y_test = y_train[:, 0, :], y_val[:, 0, :], y_test[:, 0, :]
x_train.shape, y_train.shape, x_val.shape, y_val.shape, x_test.shape, y_test.shape

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Time series forecasting

# + pycharm={"name": "#%%\n"}
from bigdl.chronos.forecaster.mtnet_forecaster import MTNetForecaster

# + [markdown] pycharm={"name": "#%% md\n"}
# First, we initialize a mtnet_forecaster according to input data shape. Specifically, look_back should equal `(long_series_num+1)*series_length`. Details refer to chronos docs <a href="https://bigdl.readthedocs.io/en/latest/doc/Chronos/Overview/chronos.html" target="_blank">here</a>.

# + pycharm={"name": "#%%\n"}
# look_back (60) == (long_series_num + 1) * series_length == (3 + 1) * 15.
mtnet_forecaster = MTNetForecaster(target_dim=horizon,
                                   feature_dim=x_train.shape[-1],
                                   long_series_num=3,
                                   series_length=15
                                   )

# + [markdown] pycharm={"name": "#%% md\n"}
# MTNet needs to preprocess the X into another format, so we call `MTNetForecaster.preprocess_input` on train_x and test_x.

# + pycharm={"name": "#%%\n"}
# mtnet requires reshape of input x before feeding into model.
x_train_mtnet = mtnet_forecaster.preprocess_input(x_train)
x_val_mtnet = mtnet_forecaster.preprocess_input(x_val)
x_test_mtnet = mtnet_forecaster.preprocess_input(x_test)

# + [markdown] pycharm={"name": "#%% md\n"}
# Now we train the model and wait till it finished.
# + pycharm={"name": "#%%\n"}
# %%time
hist = mtnet_forecaster.fit(x = x_train_mtnet, y = y_train, batch_size=128, epochs=20)

# + [markdown] pycharm={"name": "#%% md\n"}
# Use the model for prediction and inverse the scaling of the prediction results.

# + pycharm={"name": "#%%\n"}
y_pred_val = mtnet_forecaster.predict(x_val_mtnet)
y_pred_test = mtnet_forecaster.predict(x_test_mtnet)
# -

# unscale_numpy expects the horizon axis, so temporarily add it back with expand_dims.
y_pred_val_unscale = tsdata_val.unscale_numpy(np.expand_dims(y_pred_val, axis=1))[:, 0, :]
y_pred_test_unscale = tsdata_test.unscale_numpy(np.expand_dims(y_pred_test, axis=1))[:, 0, :]
y_val_unscale = tsdata_val.unscale_numpy(np.expand_dims(y_val, axis=1))[:, 0, :]
y_test_unscale = tsdata_test.unscale_numpy(np.expand_dims(y_test, axis=1))[:, 0, :]

# + [markdown] pycharm={"name": "#%% md\n"}
# Calculate the symmetric mean absolute percentage error.

# + pycharm={"name": "#%%\n"}
# evaluate with sMAPE
from bigdl.orca.automl.metrics import Evaluator
smape = Evaluator.evaluate("smape", y_test_unscale, y_pred_test_unscale)
print(f"sMAPE is {'%.2f' % smape}")

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Anomaly detection

# + pycharm={"name": "#%%\n"}
from bigdl.chronos.detector.anomaly import ThresholdDetector

# ratio=0.01 — presumably the fraction of points to flag as anomalous;
# confirm against the Chronos ThresholdDetector docs.
ratio=0.01
thd=ThresholdDetector()
thd.set_params(ratio=ratio)
# Fit the detector on the validation forecast errors.
thd.fit(y_val_unscale,y_pred_val_unscale)
print("The threshold of validation dataset is:",thd.th)

# + pycharm={"name": "#%%\n"}
anomaly_scores_val = thd.score()
val_res_ano_idx = np.where(anomaly_scores_val > 0)[0]
print("The index of anomalies in validation dataset is:",val_res_ano_idx)
# -

anomaly_scores_test = thd.score(y_test_unscale,y_pred_test_unscale)
test_res_ano_idx = np.where(anomaly_scores_test > 0)[0]
# BUG FIX: the printed message had a typo ("anoalies" -> "anomalies").
print("The index of anomalies in test dataset is:",test_res_ano_idx)

# + [markdown] pycharm={"name": "#%% md\n"}
# Get a new dataframe which contains `y_true`,`y_pred`,`anomalies` value.
# + pycharm={"name": "#%%\n"} val_result_df = get_result_df(y_val_unscale, y_pred_val_unscale, val_res_ano_idx, look_back) test_result_df = get_result_df(y_test_unscale, y_pred_test_unscale, test_res_ano_idx, look_back) # + [markdown] pycharm={"name": "#%% md\n"} # Draw anomalies in line chart. # + pycharm={"name": "#%%\n"} plot_anomalies_value(val_result_df.index, val_result_df.y_true, val_result_df.y_pred, val_result_df.anomalies) # + pycharm={"name": "#%%\n"} plot_anomalies_value(test_result_df.index, test_result_df.y_true, test_result_df.y_pred, test_result_df.anomalies)
python/chronos/use-case/AIOps/AIOps_anomaly_detect_unsupervised_forecast_based.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 import kwat # ## check_sorted for nu_ in [ [1, 2, 3], [3, 2, 1], [-1, 0, 1], [0, 1, -1], [0, 0], [True, False], [False, True], [True, False, True], ]: print(kwat.vector.check_sorted(nu_))
nb/vector.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6
#     language: python
#     name: python3
# ---

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width="400" align="center"></a>
#
# <h1 align="center"><font size="5">Classification with Python</font></h1>

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In this notebook we try to practice all the classification algorithms that we learned in this course.
#
# We load a dataset using Pandas library, and apply the following algorithms, and find the best one for this specific dataset by accuracy evaluation methods.
#
# Let's first load the required libraries:

# + button=false new_sheet=false run_control={"read_only": false}
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
# BUG FIX: a duplicate `import numpy as np` appeared here; removed.
import matplotlib.ticker as ticker
from sklearn import preprocessing
# %matplotlib inline

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### About dataset

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# This dataset is about past loans. The __Loan_train.csv__ data set includes details of 346 customers whose loan are already paid off or defaulted. It includes the following fields:
#
# | Field          | Description                                                                           |
# |----------------|---------------------------------------------------------------------------------------|
# | Loan_status    | Whether a loan is paid off or in collection                                           |
# | Principal      | Basic principal loan amount at origination                                            |
# | Terms          | Origination terms which can be weekly (7 days), biweekly, and monthly payoff schedule |
# | Effective_date | When the loan got originated and took effects                                         |
# | Due_date       | Since it's one-time payoff schedule, each loan has one single due date                |
# | Age            | Age of applicant                                                                      |
# | Education      | Education of applicant                                                                |
# | Gender         | The gender of applicant                                                               |

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let's download the dataset

# + button=false new_sheet=false run_control={"read_only": false}
# !wget -O loan_train.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_train.csv

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Load Data From CSV File

# + button=false new_sheet=false run_control={"read_only": false}
df = pd.read_csv('loan_train.csv')
df.head()
# -

df.shape

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Convert to date time object

# + button=false new_sheet=false run_control={"read_only": false}
# Parse the two date columns so day-of-week features can be derived later.
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Data visualization and pre-processing
#
#

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let's see how many of each class is in our data set

# + button=false new_sheet=false run_control={"read_only": false}
df['loan_status'].value_counts()

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# 260 people have paid off the loan on time while 86 have gone into collection
#
# -
# Lets plot some columns to underestand data better: # notice: installing seaborn might takes a few minutes # !conda install -c anaconda seaborn -y # + import seaborn as sns bins = np.linspace(df.Principal.min(), df.Principal.max(), 10) g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2) g.map(plt.hist, 'Principal', bins=bins, ec="k") g.axes[-1].legend() plt.show() # + button=false new_sheet=false run_control={"read_only": false} bins = np.linspace(df.age.min(), df.age.max(), 10) g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2) g.map(plt.hist, 'age', bins=bins, ec="k") g.axes[-1].legend() plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Pre-processing: Feature selection/extraction # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Lets look at the day of the week people get the loan # + button=false new_sheet=false run_control={"read_only": false} df['dayofweek'] = df['effective_date'].dt.dayofweek bins = np.linspace(df.dayofweek.min(), df.dayofweek.max(), 10) g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2) g.map(plt.hist, 'dayofweek', bins=bins, ec="k") g.axes[-1].legend() plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # We see that people who get the loan at the end of the week dont pay it off, so lets use Feature binarization to set a threshold values less then day 4 # + button=false new_sheet=false run_control={"read_only": false} df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0) df.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Convert Categorical features to numerical values # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets look at gender: # + button=false new_sheet=false run_control={"read_only": false} 
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # 86 % of female pay there loans while only 73 % of males pay there loan # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets convert male to 0 and female to 1: # # + button=false new_sheet=false run_control={"read_only": false} df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True) df.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## One Hot Encoding # #### How about education? # + button=false new_sheet=false run_control={"read_only": false} df.groupby(['education'])['loan_status'].value_counts(normalize=True) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Feature befor One Hot Encoding # + button=false new_sheet=false run_control={"read_only": false} df[['Principal','terms','age','Gender','education']].head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Use one hot encoding technique to conver categorical varables to binary variables and append them to the feature Data Frame # + button=false new_sheet=false run_control={"read_only": false} Feature = df[['Principal','terms','age','Gender','weekend']] Feature = pd.concat([Feature,pd.get_dummies(df['education'])], axis=1) Feature.drop(['Master or Above'], axis = 1,inplace=True) Feature.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Feature selection # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets defind feature sets, X: # + button=false new_sheet=false run_control={"read_only": false} X = Feature X[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # What are our lables? 
# + button=false new_sheet=false run_control={"read_only": false} y = df['loan_status'].values y[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Normalize Data # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Data Standardization give data zero mean and unit variance (technically should be done after train test split ) # + button=false new_sheet=false run_control={"read_only": false} X= preprocessing.StandardScaler().fit(X).transform(X) X[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Classification # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Now, it is your turn, use the training set to build an accurate model. Then use the test set to report the accuracy of the model # You should use the following algorithm: # - K Nearest Neighbor(KNN) # - Decision Tree # - Support Vector Machine # - Logistic Regression # # # # __ Notice:__ # - You can go above and change the pre-processing, feature selection, feature-extraction, and so on, to make a better model. # - You should use either scikit-learn, Scipy or Numpy libraries for developing the classification algorithms. # - You should include the code of the algorithm in the following cells. # - # # K Nearest Neighbor(KNN) # Notice: You should find the best k to build the model with the best accuracy. # **warning:** You should not use the __loan_test.csv__ for finding the best k, however, you can split your train_loan.csv into train and test to find the best __k__. 
from sklearn.model_selection import train_test_split

# Let's split the data to find the best parameters for each algorithm
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)

# Let's begin with KNN
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics

# Scan k = 1..49 and record the hold-out accuracy of each model.
accuracy = []
for k in range(1,50):
    knn = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train.ravel())
    accuracy.append(metrics.accuracy_score(y_test, knn.predict(X_test)))
sns.scatterplot(x=range(1,50),y=accuracy)
plt.show()
print("Maximum accuracy is " + str(max(accuracy)) + " and is obtained for k=" + str(range(1,50)[accuracy.index(max(accuracy))]))

# So, let's train the model for k=7, and this time with all the dataset:
knn = KNeighborsClassifier(n_neighbors = 7).fit(X,y.ravel())

# # Decision Tree

# Now, let's use a decision tree. We will play with two parameters: criterion, which can be either entropy or gini, and max_depth of the tree
from sklearn.tree import DecisionTreeClassifier
accuracy_gini = []
accuracy_entropy = []
for k in range(2,25):
    tree_gini = DecisionTreeClassifier(max_depth=k,criterion='gini').fit(X_train,y_train.ravel())
    tree_entropy = DecisionTreeClassifier(max_depth=k,criterion='entropy').fit(X_train,y_train.ravel())
    accuracy_gini.append(metrics.accuracy_score(y_test, tree_gini.predict(X_test)))
    accuracy_entropy.append(metrics.accuracy_score(y_test, tree_entropy.predict(X_test)))
sns.scatterplot(x=range(2,25),y=accuracy_gini,color='red')
sns.scatterplot(x=range(2,25),y=accuracy_entropy,color='blue')
plt.show()
print("Maximum accuracy for gini is " + str(max(accuracy_gini)) + " and is obtained for max_depth=" + str(range(2,25)[accuracy_gini.index(max(accuracy_gini))]))
print("Maximum accuracy for entropy is " + str(max(accuracy_entropy)) + " and is obtained for max_depth=" + str(range(2,25)[accuracy_entropy.index(max(accuracy_entropy))]))

# We choose the gini criterion with max_depth=2 and train it on the whole dataset
tree = DecisionTreeClassifier(max_depth=2,criterion='gini').fit(X,y.ravel())

# # Support Vector Machine

# Now let's use SVM for C ranging from 0.1 to 2.9
from sklearn.svm import SVC
accuracy = []
for C in [float(i)/10 for i in range(1,30)]:
    svm_model = SVC(C=C,gamma='auto').fit(X_train,y_train.ravel())
    accuracy.append(metrics.accuracy_score(y_test, svm_model.predict(X_test)))
sns.scatterplot(x=[float(i)/10 for i in range(1,30)],y=accuracy)
plt.show()
print("Maximum accuracy for SVM is " + str(max(accuracy)) + " and is obtained for C=" + str([float(i)/10 for i in range(1,30)][accuracy.index(max(accuracy))]))

# So we are going to use C=0.1. Let's see if changing the kernel method can increase the performances:
accuracy=[]
for kernel in ['rbf','linear','poly','sigmoid']:
    svm_model = SVC(C=0.1,gamma='auto',kernel=kernel).fit(X_train,y_train.ravel())
    accuracy.append(metrics.accuracy_score(y_test, svm_model.predict(X_test)))
print("Maximum accuracy for SVM is " + str(max(accuracy)) + " and is obtained with kernel=" + str(['rbf','linear','poly','sigmoid'][accuracy.index(max(accuracy))]))

# Now let's train our model on the whole dataset with C=0.1 and rbf kernel, which is the default
svm_model = SVC(C=0.1,gamma='auto',kernel='rbf').fit(X,y.ravel())

# # Logistic Regression

# Now let's use logistic regression for C ranging from 0.01 to 0.99
from sklearn.linear_model import LogisticRegression
accuracy = []
for C in [float(i)/100 for i in range(1,100)]:
    lr = LogisticRegression(C=C,solver='lbfgs').fit(X_train,y_train.ravel())
    accuracy.append(metrics.accuracy_score(y_test, lr.predict(X_test)))
sns.scatterplot(x=[float(i)/100 for i in range(1,100)],y=accuracy)
plt.show()
# BUG FIX: the message said "linear regression"; this section tunes logistic regression.
print("Maximum accuracy for logistic regression is " + str(max(accuracy)) + " and is obtained for C=" + str([float(i)/100 for i in range(1,100)][accuracy.index(max(accuracy))]))

# So we are going to use C=0.01. Let's see if another solver can have better performances:
# BUG FIX: the accuracy list was not reset before this loop, so the argmax
# below indexed the previous (C-sweep) results and could point outside the
# 5-element solver list.
accuracy = []
for solver in ['lbfgs','liblinear','sag','saga','newton-cg']:
    lr = LogisticRegression(C=0.01,solver=solver).fit(X_train,y_train.ravel())
    accuracy.append(metrics.accuracy_score(y_test, lr.predict(X_test)))
# BUG FIX: the label said "kernel="; logistic regression has solvers, not kernels.
print("Maximum accuracy for logistic regression is " + str(max(accuracy)) + " and is obtained for solver=" + str(['lbfgs','liblinear','sag','saga','newton-cg'][accuracy.index(max(accuracy))]))

# So let's train our logistic regression model on the whole dataset with C=0.01 and solver=lbfgs
lr = LogisticRegression(C=0.01,solver='lbfgs').fit(X,y.ravel())

# # Model Evaluation using Test set

# NOTE: jaccard_similarity_score was removed in newer scikit-learn releases
# (replaced by jaccard_score); kept here because this notebook targets the
# older sklearn API it was written against.
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss

# First, download and load the test set:

# !wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Load Test set for evaluation

# + button=false new_sheet=false run_control={"read_only": false}
test_df = pd.read_csv('loan_test.csv')
test_df.head()
# -

# We have to apply all the transformations previously made on the training set to the test set:
test_df['due_date'] = pd.to_datetime(test_df['due_date'])
test_df['effective_date'] = pd.to_datetime(test_df['effective_date'])
test_df['dayofweek'] = test_df['effective_date'].dt.dayofweek
test_df['weekend'] = test_df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
test_df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
test_Feature = test_df[['Principal','terms','age','Gender','weekend']]
test_Feature = pd.concat([test_Feature,pd.get_dummies(test_df['education'])], axis=1)
test_Feature.drop(['Master or Above'], axis = 1,inplace=True)
X_testSet = test_Feature
y_testSet = test_df['loan_status'].values
# NOTE(review): the scaler is re-fit on the test set here instead of reusing
# the scaler fitted on the training features — strictly this leaks test
# statistics; kept as-is to preserve the notebook's reported numbers.
X_testSet= preprocessing.StandardScaler().fit(X_testSet).transform(X_testSet)
# Let's generate the predictions according to our models:
y_predict_knn = knn.predict(X_testSet)
y_predict_tree = tree.predict(X_testSet)
y_predict_svm = svm_model.predict(X_testSet)
y_predict_lr = lr.predict(X_testSet)

# Later we will need the probabilities calculated by the logistic regression in order to calculate logLoss
y_predict_lr_proba = lr.predict_proba(X_testSet)


def _round3(value):
    """Round a metric to 3 decimals for the report table.

    Replaces the original float(str(value)[0:5]) hack, which truncated the
    decimal string instead of rounding and would misbehave on values
    rendered in scientific notation.
    """
    return round(float(value), 3)


report = pd.DataFrame(columns=['Algorithm','Jaccard','F1-score','LogLoss'])
report['Algorithm'] = ['KNN','Decision Tree','SVM','Logistic Regression']
report['Jaccard'] = [
    _round3(jaccard_similarity_score(y_true=y_testSet,y_pred=y_predict_knn)),
    _round3(jaccard_similarity_score(y_true=y_testSet,y_pred=y_predict_tree)),
    _round3(jaccard_similarity_score(y_true=y_testSet,y_pred=y_predict_svm)),
    _round3(jaccard_similarity_score(y_true=y_testSet,y_pred=y_predict_lr))]
report['F1-score'] = [
    _round3(f1_score(y_true=y_testSet,y_pred=y_predict_knn,pos_label='PAIDOFF')),
    _round3(f1_score(y_true=y_testSet,y_pred=y_predict_tree,pos_label='PAIDOFF')),
    _round3(f1_score(y_true=y_testSet,y_pred=y_predict_svm,pos_label='PAIDOFF')),
    _round3(f1_score(y_true=y_testSet,y_pred=y_predict_lr,pos_label='PAIDOFF'))]
# Log loss only applies to the probabilistic (logistic regression) model.
report['LogLoss'] = [ 'NA','NA','NA', _round3(log_loss(y_true=y_testSet,y_pred=y_predict_lr_proba))]
report.head(10)

# # Report
# You should be able to report the accuracy of the built model using different evaluation metrics:

# | Algorithm          | Jaccard | F1-score | LogLoss |
# |--------------------|---------|----------|---------|
# | KNN                | ?       | ?        | NA      |
# | Decision Tree      | ?       | ?        | NA      |
# | SVM                | ?       | ?        | NA      |
# | LogisticRegression | ?       | ?        | ?       |

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <h2>Want to learn more?</h2>
#
# IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a>
#
# Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a>
#
# <h3>Thanks for completing this lesson!</h3>
#
# <h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a></h4>
# <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>
#
# <hr>
#
# <p>Copyright &copy; 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
IBM_DataScience/8_MachineLearning/labs/FinalAssignmentSolution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Baysean Methods for the Physical Sciences # ## Example 6.3.1 # + from collections import namedtuple from pprint import pprint import matplotlib import numpy as np import spacepy.plot as spp import pymc as mc # %matplotlib inline matplotlib.rcParams['savefig.dpi']=100 # - Data = namedtuple('Data', ['obstot', 'obsbkg', 'nbox', 'C']) obs = {2:Data(35, 269, 22, 41.49), 6:Data(156, 325, 52, 41.77), 9:Data(415, 3827, 10, 39.67)} pprint(obs) # + nclus = mc.Uniform('nclus', 0, 1e7) nbkg = mc.Uniform('nbkg', 1, 1e7) @mc.deterministic(plot=False) def log_nbkg(nbkg=nbkg): return np.log10(nbkg) obsbkg = mc.Poisson('obsbkg',nbkg) nbkgind = mc.Lognormal('nbkgind',mu=log_nbkg, tau=0.2**(-2)) obstot = mc.Poisson('obstot',nclus + nbkgind/obs[2].nbox, observed=True, value=obs[2].obstot) @mc.deterministic(plot=True) def lgLx(val=nclus, c=obs[2].C): return np.log10(val)/2.30258 + c # - model = mc.MCMC((nclus, nbkg, log_nbkg, obsbkg, nbkgind, obstot, lgLx)) model.sample(1000000, burn=1000, burn_till_tuned=True, thin=40) mc.Matplot.plot(lgLx)
Learning/BMPS_Example_6_3_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np #numerical computation import pandas as pd #data wrangling import matplotlib.pyplot as plt #plotting package #Next line helps with rendering plots # %matplotlib inline import matplotlib as mpl #add'l plotting functionality mpl.rcParams['figure.dpi'] = 400 #high res figures # import graphviz #to visualize decision trees # # Exercise 22: Cleaning the Dataset df_orig = pd.read_excel('../Data/default_of_credit_card_clients__courseware_version_1_21_19.xls') df_zero_mask = df_orig == 0 feature_zero_mask = df_zero_mask.iloc[:,1:].all(axis=1) sum(feature_zero_mask) # Remove all the rows with all zero features and response, confirm this that gets rid of the duplicate IDs. df_clean = df_orig.loc[~feature_zero_mask,:].copy() df_clean.shape df_clean['ID'].nunique() # Clean up the `EDUCATION` and `MARRIAGE` features as in Chapter 1 df_clean['EDUCATION'].value_counts() # "Education (1 = graduate school; 2 = university; 3 = high school; 4 = others)" # Assign unknown categories to other. df_clean['EDUCATION'].replace(to_replace=[0, 5, 6], value=4, inplace=True) df_clean['EDUCATION'].value_counts() # Examine and clean marriage feature as well: df_clean['MARRIAGE'].value_counts() #Should only be (1 = married; 2 = single; 3 = others). df_clean['MARRIAGE'].replace(to_replace=0, value=3, inplace=True) df_clean['MARRIAGE'].value_counts() # Now instead of removing rows with `PAY_1` = 'Not available', as done in Chapter 1, here select these out for addition to training and testing splits. 
df_clean['PAY_1'].value_counts()

# Separate out the rows whose PAY_1 value is missing; they will be imputed later.
missing_pay_1_mask = df_clean['PAY_1'] == 'Not available'

sum(missing_pay_1_mask)

df_missing_pay_1 = df_clean.loc[missing_pay_1_mask,:].copy()

df_missing_pay_1.shape

df_missing_pay_1['PAY_1'].head(3)

df_missing_pay_1['PAY_1'].value_counts()

df_missing_pay_1.columns

# Load cleaned data
df = pd.read_csv('../Data/Chapter_1_cleaned_data.csv')

df.columns

# Build the model feature list by dropping IDs, redundant PAY_* columns and
# the already-encoded education columns.
features_response = df.columns.tolist()

items_to_remove = ['ID', 'SEX', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6',
                   'EDUCATION_CAT', 'graduate school', 'high school', 'none',
                   'others', 'university']

features_response = [item for item in features_response if item not in items_to_remove]
features_response
from sklearn.model_selection import KFold k_folds = KFold(n_splits=4, shuffle=True, random_state=1) # Don't need to do a grid search, so we can use `cross_validate` from sklearn.model_selection import cross_validate # For the estimator, set the optimal hyperparameters determined in previous chapter. from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier\ (n_estimators=200, criterion='gini', max_depth=9, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=4, verbose=1, warm_start=False, class_weight=None) for counter in range(len(fill_values)): #Copy the data frame with missing PAY_1 and assign imputed values df_fill_pay_1_filled = df_missing_pay_1.copy() df_fill_pay_1_filled['PAY_1'] = fill_values[counter] #Split imputed data in to training and testing, using the same #80/20 split we have used for the data with non-missing PAY_1 X_fill_pay_1_train, X_fill_pay_1_test, y_fill_pay_1_train, y_fill_pay_1_test = \ train_test_split( df_fill_pay_1_filled[features_response[:-1]].values, df_fill_pay_1_filled['default payment next month'].values, test_size=0.2, random_state=24) #Concatenate the imputed data with the array of non-missing data X_train_all = np.concatenate((X_train, X_fill_pay_1_train), axis=0) y_train_all = np.concatenate((y_train, y_fill_pay_1_train), axis=0) #Use the KFolds splitter and the random forest model to get #4-fold cross-validation scores for both imputation methods imputation_compare_cv = cross_validate(rf, X_train_all, y_train_all, scoring='roc_auc', cv=k_folds, n_jobs=-1, verbose=1, return_train_score=True, return_estimator=True, error_score='raise-deprecating') test_score = imputation_compare_cv['test_score'] print(fill_strategy[counter] + ' imputation: ' + 'mean testing score ' + str(np.mean(test_score)) + ', std ' + str(np.std(test_score))) # # A 
Predictive Model for `PAY_1` pay_1_df = df.copy() features_for_imputation = pay_1_df.columns.tolist() items_to_remove_2 = ['ID', 'SEX', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'EDUCATION_CAT', 'graduate school', 'high school', 'none', 'others', 'university', 'default payment next month', 'PAY_1'] features_for_imputation = [item for item in features_for_imputation if item not in items_to_remove_2] features_for_imputation # # Exercise 24: Building a Multiclass Classification Model for Imputation X_impute_train, X_impute_test, y_impute_train, y_impute_test = \ train_test_split( pay_1_df[features_for_imputation].values, pay_1_df['PAY_1'].values, test_size=0.2, random_state=24) rf_impute_params = {'max_depth':[3, 6, 9, 12], 'n_estimators':[10, 50, 100, 200]} from sklearn.model_selection import GridSearchCV # Need to use accuracy here as ROC AUC is not supported for multiclass. Need to use multiclass and not regression because need to limit to integer values of `PAY_1`. cv_rf_impute = GridSearchCV(rf, param_grid=rf_impute_params, scoring='accuracy', fit_params=None, n_jobs=-1, iid=False, refit=True, cv=4, verbose=2, error_score=np.nan, return_train_score=True) cv_rf_impute.fit(X_impute_train, y_impute_train) impute_df = pd.DataFrame(cv_rf_impute.cv_results_) impute_df cv_rf_impute.best_params_ cv_rf_impute.best_score_ pay_1_value_counts = pay_1_df['PAY_1'].value_counts().sort_index() pay_1_value_counts pay_1_value_counts/pay_1_value_counts.sum() y_impute_predict = cv_rf_impute.predict(X_impute_test) from sklearn import metrics metrics.accuracy_score(y_impute_test, y_impute_predict) fig, axs = plt.subplots(1,2, figsize=(8,3)) axs[0].hist(y_impute_test, bins=bin_edges, align='left') axs[0].set_xticks(bin_edges) axs[0].set_title('Non-missing values of PAY_1') axs[1].hist(y_impute_predict, bins=bin_edges, align='left') axs[1].set_xticks(bin_edges) axs[1].set_title('Model-based imputation') plt.tight_layout() X_impute_all = pay_1_df[features_for_imputation].values 
y_impute_all = pay_1_df['PAY_1'].values rf_impute = RandomForestClassifier(n_estimators=100, max_depth=12) rf_impute rf_impute.fit(X_impute_all, y_impute_all) # # Using the Imputation Model and Comparing it to Other Methods df_fill_pay_1_model = df_missing_pay_1.copy() df_fill_pay_1_model['PAY_1'].head() df_fill_pay_1_model['PAY_1'] = rf_impute.predict(df_fill_pay_1_model[features_for_imputation].values) df_fill_pay_1_model['PAY_1'].head() df_fill_pay_1_model['PAY_1'].value_counts().sort_index() X_fill_pay_1_train, X_fill_pay_1_test, y_fill_pay_1_train, y_fill_pay_1_test = \ train_test_split( df_fill_pay_1_model[features_response[:-1]].values, df_fill_pay_1_model['default payment next month'].values, test_size=0.2, random_state=24) print(X_fill_pay_1_train.shape) print(X_fill_pay_1_test.shape) print(y_fill_pay_1_train.shape) print(y_fill_pay_1_test.shape) X_train_all = np.concatenate((X_train, X_fill_pay_1_train), axis=0) y_train_all = np.concatenate((y_train, y_fill_pay_1_train), axis=0) print(X_train_all.shape) print(y_train_all.shape) rf imputation_compare_cv = cross_validate(rf, X_train_all, y_train_all, scoring='roc_auc', cv=k_folds, n_jobs=-1, verbose=1, return_train_score=True, return_estimator=True, error_score='raise-deprecating') imputation_compare_cv['test_score'] np.mean(imputation_compare_cv['test_score']) np.std(imputation_compare_cv['test_score']) # Reassign values using mode imputation df_fill_pay_1_model['PAY_1'] = np.zeros_like(df_fill_pay_1_model['PAY_1'].values) df_fill_pay_1_model['PAY_1'].unique() X_fill_pay_1_train, X_fill_pay_1_test, y_fill_pay_1_train, y_fill_pay_1_test = \ train_test_split( df_fill_pay_1_model[features_response[:-1]].values, df_fill_pay_1_model['default payment next month'].values, test_size=0.2, random_state=24) X_train_all = np.concatenate((X_train, X_fill_pay_1_train), axis=0) X_test_all = np.concatenate((X_test, X_fill_pay_1_test), axis=0) y_train_all = np.concatenate((y_train, y_fill_pay_1_train), axis=0) y_test_all = 
np.concatenate((y_test, y_fill_pay_1_test), axis=0) print(X_train_all.shape) print(X_test_all.shape) print(y_train_all.shape) print(y_test_all.shape) imputation_compare_cv = cross_validate(rf, X_train_all, y_train_all, scoring='roc_auc', cv=k_folds, n_jobs=-1, verbose=1, return_train_score=True, return_estimator=True, error_score='raise-deprecating') np.mean(imputation_compare_cv['test_score']) # # Confirming Model Performance on the Unseen Test Set rf.fit(X_train_all, y_train_all) y_test_all_predict_proba = rf.predict_proba(X_test_all) from sklearn.metrics import roc_auc_score roc_auc_score(y_test_all, y_test_all_predict_proba[:,1]) # 0.7696243835824927 # # Exercise 25: Characterizing Costs and Savings thresholds = np.linspace(0, 1, 101) # Use mean bill amount to estimate savings per prevented default df[features_response[:-1]].columns[5] savings_per_default = np.mean(X_test_all[:, 5]) savings_per_default cost_per_counseling = 7500 effectiveness = 0.70 n_pos_pred = np.empty_like(thresholds) cost_of_all_counselings = np.empty_like(thresholds) n_true_pos = np.empty_like(thresholds) savings_of_all_counselings = np.empty_like(thresholds) counter = 0 for threshold in thresholds: pos_pred = y_test_all_predict_proba[:,1]>threshold n_pos_pred[counter] = sum(pos_pred) cost_of_all_counselings[counter] = n_pos_pred[counter] * cost_per_counseling true_pos = pos_pred & y_test_all.astype(bool) n_true_pos[counter] = sum(true_pos) savings_of_all_counselings[counter] = n_true_pos[counter] * savings_per_default * effectiveness counter += 1 net_savings = savings_of_all_counselings - cost_of_all_counselings # + # plt.plot(thresholds, cost_of_all_counselings) # + # plt.plot(thresholds, savings_of_all_counselings) # - mpl.rcParams['figure.dpi'] = 400 plt.plot(thresholds, net_savings) plt.xlabel('Threshold') plt.ylabel('Net savings (NT$)') plt.xticks(np.linspace(0,1,11)) plt.grid(True) max_savings_ix = np.argmax(net_savings) # What is the threshold at which maximum savings is achieved? 
thresholds[max_savings_ix] # 0.2 # What is the maximum possible savings? net_savings[max_savings_ix] # 15446325.35991916 # # Activity 6: Deriving Financial Insights # What would be the cost of defaults if there were no counseling program? cost_of_defaults = sum(y_test_all) * savings_per_default cost_of_defaults # 66308240.202088244 # By what % can we decrease the cost of defaults with a program? net_savings[max_savings_ix]/cost_of_defaults # 0.2329472975431598 # Savings per all account at optimal threshold. net_savings[max_savings_ix]/len(y_test_all) # 2601.2673223171373 # Net savings per initial cost of counseling. Shows how much $ needs to be budgeted to realize a given amount of savings. Maybe a practical concern for the client's department. plt.plot(cost_of_all_counselings/len(y_test_all), net_savings/len(y_test_all)) plt.xlabel('Upfront investment: cost of counselings per account (NT$)') plt.ylabel('Net savings per account (NT$)') # Numbers of positive IDs and true positives by threshold. Maybe plot as rates (i.e. this would be the flag rate). plt.plot(thresholds, n_pos_pred/len(y_test_all)) plt.ylabel('Flag rate') plt.xlabel('Threshold') # Precision-Recall curve plt.plot(n_true_pos/sum(y_test_all), np.divide(n_true_pos, n_pos_pred)) plt.xlabel('Recall') plt.ylabel('Precision') # Precision and recall separately versus threshold. plt.plot(thresholds, np.divide(n_true_pos, n_pos_pred), label='Precision') plt.plot(thresholds, n_true_pos/sum(y_test_all), label='Recall') plt.xlabel('Threshold') plt.legend() # # Final Thoughts on Delivering the Predictive Model to the Client plt.hist(y_test_all_predict_proba[:,1], bins=30) plt.xlabel('Predicted probability of default') plt.ylabel('Number of accounts')
Lesson06/Lesson06.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: venv-datascience
#     language: python
#     name: venv-datascience
# ---

# # Time Methods

import numpy as np
import pandas as pd

# # Python Datetime Review

from datetime import datetime

# Individual date/time components used to build datetime objects below.
myyear = 2015
mymonth = 12
myday = 1
myhour = 2
mymin = 30
mysec = 15

# A date-only datetime (time defaults to midnight).
mydate = datetime(myyear, mymonth, myday)
mydate

# A full datetime with hour/minute/second.
mydatetime = datetime(myyear, mymonth, myday, myhour, mymin, mysec)
mydatetime

# Components are available as attributes.
mydatetime.year

# -------
# # Pandas

# # Converting to datetime
# - pd.to_datetime()

# Mixed string formats plus a missing value (None).
myser = pd.Series(['Nov 3, 1990', '2000-12-01', None])
myser
myser

# # pd.to_datetime()

# Parses each entry to a Timestamp; None becomes NaT.
timeser = pd.to_datetime(myser)
timeser
timeser[0].year

# Unambiguous because 31 can only be a day.
obvi_euro_date = '31-12-2020'
pd.to_datetime(obvi_euro_date)

euro_date = '10-12-2020'
# actually 10th Dec, 2020
pd.to_datetime(euro_date, dayfirst = True)
# need to specify dayfirst parameter

# -------
# # Custom Time String Formatting

style_date = '12--Dec--2020'
# maybe these values are coming from old websites
pd.to_datetime(style_date, format = '%d--%b--%Y')

# pandas can often infer even unusual formats without an explicit format string.
strange_date = '12th of Dec 2000'
pd.to_datetime(strange_date)

# --------
# # Data

# # Retail Sales: Beer, Wine, and Liquor Stores

# # Units: Millions of Dollars, Not Seasonally Adjusted

# # Frequency: Monthly

sales = pd.read_csv('Data/RetailSales_BeerWineLiquor.csv')
sales.head()
sales['DATE']

# convert to datetime object
# now it becomes datetime object
sales['DATE'] = pd.to_datetime(sales['DATE'])
sales['DATE']
sales['DATE'][0].year

# --------
# # Attempt to Parse Dates Automatically

sales.head()
# ask pandas to automatically parse the column as a date while reading from csv
sales = pd.read_csv('Data/RetailSales_BeerWineLiquor.csv', parse_dates = [0])
sales.head()
# pandas read in as Date time
sales['DATE']

# -----------
# # Resample
# - resample is like groupby and used in combination with aggregation methods.
# [[reference](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html)]
#
# <table style="display: inline-block">
# <caption style="text-align: center"><strong>TIME SERIES OFFSET ALIASES</strong></caption>
# <tr><th>ALIAS</th><th>DESCRIPTION</th></tr>
# <tr><td>B</td><td>business day frequency</td></tr>
# <tr><td>C</td><td>custom business day frequency (experimental)</td></tr>
# <tr><td>D</td><td>calendar day frequency</td></tr>
# <tr><td>W</td><td>weekly frequency</td></tr>
# <tr><td>M</td><td>month end frequency</td></tr>
# <tr><td>SM</td><td>semi-month end frequency (15th and end of month)</td></tr>
# <tr><td>BM</td><td>business month end frequency</td></tr>
# <tr><td>CBM</td><td>custom business month end frequency</td></tr>
# <tr><td>MS</td><td>month start frequency</td></tr>
# <tr><td>SMS</td><td>semi-month start frequency (1st and 15th)</td></tr>
# <tr><td>BMS</td><td>business month start frequency</td></tr>
# <tr><td>CBMS</td><td>custom business month start frequency</td></tr>
# <tr><td>Q</td><td>quarter end frequency</td></tr>
# <tr><td></td><td><font color=white>intentionally left blank</font></td></tr></table>
#
# <table style="display: inline-block; margin-left: 40px">
# <caption style="text-align: center"></caption>
# <tr><th>ALIAS</th><th>DESCRIPTION</th></tr>
# <tr><td>BQ</td><td>business quarter endfrequency</td></tr>
# <tr><td>QS</td><td>quarter start frequency</td></tr>
# <tr><td>BQS</td><td>business quarter start frequency</td></tr>
# <tr><td>A</td><td>year end frequency</td></tr>
# <tr><td>BA</td><td>business year end frequency</td></tr>
# <tr><td>AS</td><td>year start frequency</td></tr>
# <tr><td>BAS</td><td>business year start frequency</td></tr>
# <tr><td>BH</td><td>business hour frequency</td></tr>
# <tr><td>H</td><td>hourly frequency</td></tr>
# <tr><td>T, min</td><td>minutely frequency</td></tr>
# <tr><td>S</td><td>secondly frequency</td></tr>
# <tr><td>L, ms</td><td>milliseconds</td></tr>
# <tr><td>U, us</td><td>microseconds</td></tr>
# <tr><td>N</td><td>nanoseconds</td></tr></table>

# resample works on a DatetimeIndex, so index by the date column first.
sales = sales.set_index('DATE')
sales.head()

# groupby year and get the average
# 'A' is the year-end frequency alias from the table above.
# NOTE(review): newer pandas releases rename this alias to 'YE' — confirm
# the installed pandas version before upgrading.
sales.resample(rule='A').mean()

# --------
# # .dt Method Calls
# - like string is .str, for datetime it is .dt
# - by using .dt and we can use additional attributes like .dt.year, .dt.month etc

sales = pd.read_csv('Data/RetailSales_BeerWineLiquor.csv', parse_dates = [0])
sales.head()
sales.info()
# .dt exposes datetime components on a datetime-typed column.
sales['DATE'].dt.year
Machine Learning & Data Science Masterclass - JP/03-Pandas/08-Time-Methods.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introducing the Permutational Invariant Quantum Solver (PIQS) # # The *Permutational Invariant Quantum Solver (PIQS)* is an open-source Python solver to study the exact Lindbladian dynamics of open quantum systems consisting of identical qubits. It is integrated in QuTiP and can be imported as as a model. # # Using this library, the Liouvillian of an ensemble of $N$ qubits, or two-level systems (TLSs), $\mathcal{D}_{TLS}(\rho)$, can be built using only polynomial – instead of exponential – resources. This has many applications for the study of realistic quantum optics models of many TLSs and in general as a tool in cavity QED [1]. # # Consider a system evolving according to the equation # # \begin{eqnarray} # \dot{\rho} = \mathcal{D}_\text{TLS}(\rho) &=& # -\frac{i}{\hbar}\lbrack H,\rho \rbrack # +\frac{\gamma_\text{CE}}{2}\mathcal{L}_{J_{-}}[\rho] # +\frac{\gamma_\text{CD}}{2}\mathcal{L}_{J_{z}}[\rho] # +\frac{\gamma_\text{CP}}{2}\mathcal{L}_{J_{+}}[\rho]\nonumber\\ # &&+\sum_{n=1}^{N}\left( # \frac{\gamma_\text{E}}{2}\mathcal{L}_{J_{-,n}}[\rho] # +\frac{\gamma_\text{D}}{2}\mathcal{L}_{J_{z,n}}[\rho] # +\frac{\gamma_\text{P}}{2}\mathcal{L}_{J_{+,n}}[\rho]\right) # \end{eqnarray} # # where $J_{\alpha,n}=\frac{1}{2}\sigma_{\alpha,n}$ are SU(2) Pauli spin operators, with ${\alpha=x,y,z}$ and $J_{\pm,n}=\sigma_{\pm,n}$. The collective spin operators are $J_{\alpha} = \sum_{n}J_{\alpha,n}$. The Lindblad super-operators are $\mathcal{L}_{A} = 2A\rho A^\dagger - A^\dagger A \rho - \rho A^\dagger A$. # # The inclusion of local processes in the dynamics lead to using a Liouvillian space of dimension $4^N$. 
By exploiting the permutational invariance of identical particles [2-8], the Liouvillian $\mathcal{D}_\text{TLS}(\rho)$ can be built as a block-diagonal matrix in the basis of Dicke states $|j, m \rangle$. # # The system under study is defined by creating an object of the $\texttt{Piqs}$ class, e.g. simply named $\texttt{system}$, whose first attribute is # # - $\texttt{system.N}$, the number of TLSs of the system $N$. # # The rates for collective and local processes are simply defined as # # - $\texttt{collective}\_ \texttt{emission}$ defines $\gamma_\text{CE}$, collective (superradiant) emission # # # - $\texttt{collective}\_ \texttt{dephasing}$ defines $\gamma_\text{CD}$, collective dephasing # # # - $\texttt{collective}\_ \texttt{pumping}$ defines $\gamma_\text{CP}$, collective pumping. # # # - $\texttt{emission}$ defines $\gamma_\text{E}$, incoherent emission (losses) # # # - $\texttt{dephasing}$ defines $\gamma_\text{D}$, local dephasing # # # - $\texttt{pumping}$ defines $\gamma_\text{P}$, incoherent pumping. # # Then the $\texttt{system.lindbladian()}$ creates the total TLS Linbladian superoperator matrix. # # Similarly, $\texttt{system.hamiltonian}$ defines the TLS hamiltonian of the system $H_\text{TLS}$. # # The system's Liouvillian can be built using $\texttt{system.liouvillian()}$. The properties of a Piqs object can be visualized by simply calling $\texttt{system}$. # # We give two basic examples on the use of *PIQS*. In the first example the incoherent emission of $N$ driven TLSs is considered. # + import matplotlib.pyplot as plt import matplotlib as mpl from matplotlib import cm from qutip import * from piqs import * # - # ## $1$. 
$N$ Qubits Dynamics # We study a driven ensemble of $N$ TLSs emitting incoherently, # # \begin{eqnarray} # H_\text{TLS}&=&\hbar\omega_{0} J_{z}+\hbar\omega_{x} J_{x} # \end{eqnarray} # # \begin{eqnarray} # \dot{\rho} &=& \mathcal{D}_\text{TLS}(\rho)= -\frac{i}{\hbar}\lbrack H_\text{TLS},\rho \rbrack+\sum_{n=1}^{N}\frac{\gamma_\text{E}}{2}\mathcal{L}_{J_{-,n}}[\rho] # \end{eqnarray} N = 20 system = Dicke(N = N) [jx, jy, jz, jp, jm] = jspin(N) w0 = 1. wx = 0.1 system.hamiltonian = w0 * jz + wx * jx system.emission = 0.5 D_tls = system.liouvillian() # Calculating the TLS Steady state and steady expectation values is straightforward with QuTiP's $\texttt{steadystate}()$ and $\texttt{expect}()$ [9]. steady_tls = steadystate(D_tls) jz_ss = expect(jz, steady_tls) jpjm_ss = expect(jp*jm, steady_tls) # Calculating the TLS time evolution can be done with QuTiP's $\texttt{mesolve}()$ rho0_tls = dicke(N, N/2, -N/2) t = np.linspace(0, 20, 1000) result = mesolve(D_tls, rho0_tls, t, [], e_ops = [jz]) rhot_tls = result.states jzt = result.expect[0] # The properties of a given object can be updated dynamically, such that local dephasing could be added to the object $\texttt{'system'}$ symply with system.dephasing = 1 # ### Visualization # + j_max = (N/2) label_size = 20 fig1 = plt.figure(1) plt.rc('text', usetex = True) plt.rc('xtick', labelsize=label_size) plt.rc('ytick', labelsize=label_size) plt.plot(t, jzt/j_max, 'k-', label=r'$\langle J_{z}\rangle(t)$') plt.plot(t, t * 0 + jz_ss/j_max, 'g--', label = R'Steady-state $\langle J_{z}\rangle_\mathrm{ss}$') plt.title(r'Total inversion', fontsize = label_size) plt.xlabel(r'$t$', fontsize = label_size) plt.ylabel(r'$\langle J_{z}\rangle$', fontsize = label_size) plt.legend( fontsize = 0.8 * label_size) plt.yticks([-1, -0.99]) plt.show() plt.close() # - # ## $2$. 
Dynamics of $N$ Qubits in a Bosonic Cavity # # Now we consider an ensemble of spins in a driven, leaky cavity # # \begin{eqnarray} # \dot{\rho} &=& \mathcal{D}_\text{TLS}(\rho) +\mathcal{D}_\text{phot}(\rho) -\frac{i}{\hbar}\lbrack H_\text{int}, \rho\rbrack\nonumber\\ # &=& -i\lbrack \omega_{0} J_{z} + \omega_{c} a^\dagger a + g\left(a^\dagger+a\right)J_{x},\rho \rbrack+\frac{w}{2}\mathcal{L}_{a^\dagger}[\rho]+\frac{\kappa}{2}\mathcal{L}_{a}[\rho]+\sum_{n=1}^{N}\frac{\gamma_\text{E}}{2}\mathcal{L}_{J_{-,n}}[\rho] # \end{eqnarray} # # where now the full system density matrix is defined on a tensor Hilbert space $\rho \in \mathcal{H}_\text{TLS}\otimes\mathcal{H}_\text{phot}$, where the dymension of $\mathcal{H}_\text{TLS}$ is reduced from $2^N$ using the approach of an uncoupled basis to $O(N^2)$ using $PIQS$. # # Thanks to QuTiP's $\texttt{super}\_\texttt{tensor}()$ function, we can add the two independently built Liouvillians, being careful only to place the light-matter interaction of the Hamiltonian in the total Hilbert space and creating the corresponding "left" and "right" superoperators with $\texttt{spre}()$ and $\texttt{spost}()$. # + # TLS parameters n_tls = 5 N = n_tls system = Dicke(N = n_tls) [jx, jy, jz, jp, jm] = jspin(n_tls) w0 = 1. wx = 0.1 system.hamiltonian = w0 * jz + wx * jx system.emission = 0.5 D_tls = system.liouvillian() # Light-matter coupling parameters wc = 1. 
g = 0.9 kappa = 1 pump = 0.1 nphot = 16 a = destroy(nphot) h_int = g * tensor(a + a.dag(), jx) # <NAME> c_ops_phot = [np.sqrt(kappa) * a, np.sqrt(pump) * a.dag()] D_phot = liouvillian(wc * a.dag()*a , c_ops_phot) # Identity super-operators nds = num_dicke_states(n_tls) id_tls = to_super(qeye(nds)) id_phot = to_super(qeye(nphot)) # Define the total Liouvillian D_int = -1j* spre(h_int) + 1j* spost(h_int) D_tot = D_int + super_tensor(D_phot, id_tls) + super_tensor(id_phot, D_tls) # Define operator in the total space nphot_tot = tensor(a.dag()*a, qeye(nds)) # - # ### Wigner function and steady state $\rho_\text{ss}$ rho_ss = steadystate(D_tot) nphot_ss = expect(nphot_tot, rho_ss) psi = rho_ss.ptrace(0) xvec = np.linspace(-6, 6, 100) W = wigner(psi, xvec, xvec) # ### Visualization # + jmax = (0.5 * N) j2max = (0.5 * N + 1) * (0.5 * N) plt.rc('text', usetex = True) label_size = 20 plt.rc('xtick', labelsize=label_size) plt.rc('ytick', labelsize=label_size) wmap = wigner_cmap(W) # Generate Wigner colormap nrm = mpl.colors.Normalize(0, W.max()) max_cb =np.max(W) min_cb =np.min(W) fig2 = plt.figure(2) plotw = plt.contourf(xvec, xvec, W, 100, cmap=wmap, norm=nrm) plt.title(r"Wigner Function", fontsize=label_size); plt.xlabel(r'$x$', fontsize = label_size) plt.ylabel(r'$p$', fontsize = label_size) cb = plt.colorbar() cb.set_ticks( [min_cb, max_cb]) cb.set_ticklabels([r'$0$',r'max']) plt.show() plt.close() # - # ### Time evolution of $\rho(t)$ excited_state = excited(N) ground_phot = ket2dm(basis(nphot,0)) rho0 = tensor(ground_phot, excited_state) result2 = mesolve(D_tot, rho0, t, [], e_ops = [nphot_tot]) rhot_tot = result2.states nphot_t = result2.expect[0] # ### Visualization # + fig3 = plt.figure(3) plt.plot(t, nphot_t, 'k-', label='time evolution') plt.plot(t, t*0 + nphot_ss, 'g--', label = 'steady state') plt.title(r'Cavity photon population', fontsize = label_size) plt.xlabel(r'$t$', fontsize = label_size) plt.ylabel(r'$\langle a^\dagger a\rangle(t)$', fontsize = 
label_size) plt.legend(fontsize = label_size) plt.show() plt.close() # - # ### Steady-state correlations: $g^{(2)}(\tau)$ for $\rho_\text{ss}$ # We define the $g^{(2)}(\tau)$ of the system as the two-time correlation function of the intracavity photons, # \begin{eqnarray} # g^{(2)}(\tau) &=& \frac{\langle: a^\dagger(\tau) a^\dagger(0) a(\tau) a(0) :\rangle}{|\langle: a^\dagger(0) a(0) :\rangle|^2}\nonumber. # \end{eqnarray} B = nphot_tot rhoA = B * rho_ss result3 = mesolve(D_tot, rhoA, t, [], e_ops = B) g2_t = result3.expect[0] # ### Visualization # + fig4 = plt.figure(4) plt.plot(t, np.real(g2_t)/nphot_ss**2, '-') plt.plot(t, 0*t + 1, '--') plt.title(r'Intra-cavity photon correlation function', fontsize = label_size) plt.xlabel(r'$\tau$', fontsize = label_size) plt.ylabel(r'$g^{(2)}(\tau)$', fontsize = label_size) plt.show() plt.close() # - # ## $3$. Initial States # $PIQS$ allows the user to quickly define initial states as density matrices in the Dicke basis of dimension $O(N^2)$ (by default) or in the uncoupled TLS basis $2^N$ (by setting the basis specification as $\texttt{basis='uncoupled'}$). Below we give an overview of # # - Dicke states with "$\texttt{dicke}()$", # # # - Greenberger–Horne–Zeilinger (GHZ), called by "$\texttt{ghz}()$", # # # - Coherent Spin States (CSS) called by "$\texttt{css}()$", # # hereafter all expressed in the compact Dicke basis. 
# + N = 6 #Dicke Basis dicke_basis = np.real(block_matrix(N)) #Dicke states excited_state = dicke(N, N/2, N/2) superradiant_state = dicke(N, N/2, j_min(N)) subradiant_state = dicke(N, j_min(N), -j_min(N)) ground_state = dicke(N, N/2, -N/2) N = 7 #GHZ state ghz_state = ghz(N) #CSS states a = 1/np.sqrt(2) b = 1/np.sqrt(2) css_symmetric = css(N, a, b) css_antisymmetric = css(N, a, -b) # - # ### Visualization # + label_size = 15 c_map = 'bwr' # Convert to real-valued dense matrices rho1 = np.real(css_antisymmetric.full()) rho3b = np.real(ghz_state.full()) rho4b = np.real(css_symmetric.full()) rho5 = np.real(excited_state.full()) rho6 = np.real(superradiant_state.full()) rho7 = np.real(ground_state.full()) rho8 = np.real(subradiant_state.full()) rho9 = np.real(dicke_basis.todense()) # + # Dicke basis plt.rc('text', usetex = True) label_size = 25 plt.rc('xtick', labelsize=label_size) plt.rc('ytick', labelsize=label_size) fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 12)) fig1 = axes[0,0].imshow(rho9, cmap = c_map) axes[0,0].set_title(r"$\rho=\sum_{j,m,m'}|j,m\rangle\langle j,m'|$", fontsize = label_size) plt.setp(axes, xticks=[], yticks=[]) #Excited fig2 = axes[0,1].imshow(rho9+rho5, cmap = c_map) axes[0,1].set_title(r"Fully excited, $|\frac{N}{2},\frac{N}{2}\rangle\langle \frac{N}{2},\frac{N}{2}|$", fontsize = label_size) #Ground fig3 = axes[0,2].imshow(rho9+rho7, cmap = c_map) axes[0,2].set_title(r"Ground state, $|\frac{N}{2},-\frac{N}{2}\rangle\langle \frac{N}{2},-\frac{N}{2}|$", fontsize = label_size) #Classical Mixture fig4 = axes[1,0].imshow(rho9+(rho8+rho5), cmap = c_map) axes[1,0].set_title(r"Mixture, $|0,0\rangle\langle 0,0|+|\frac{N}{2},\frac{N}{2}\rangle\langle \frac{N}{2},\frac{N}{2}|$", fontsize = label_size) #Superradiant fig5 = axes[1,1].imshow(rho9+rho6, cmap = c_map) axes[1,1].set_title(r"Superradiant state, $|\frac{N}{2},0\rangle\langle \frac{N}{2},0|$", fontsize = label_size) #Subradiant fig6 = axes[1,2].imshow(rho9+rho8, cmap = c_map) 
axes[1,2].set_title(r"Subradiant state, $|0,0\rangle\langle 0,0|$", fontsize = label_size) plt.show() plt.close() # + # GHZ state fig3 = plt.imshow(rho3b, cmap = c_map) plt.rc('xtick', labelsize=label_size) plt.rc('ytick', labelsize=label_size) plt.title(r'$\rho=|\mathrm{GHZ}\rangle\langle\mathrm{GHZ}|$', fontsize = label_size) plt.xticks([]) plt.yticks([]) plt.xlabel(r'$d_N^j$', fontsize = label_size) plt.ylabel(r'Block $j$', fontsize = label_size) cb = plt.colorbar() plt.clim([0,np.max(rho3b)]) cb.set_ticks( [np.min(rho3b), np.max(rho3b)]) cb.set_ticklabels([r'$0$',r'max']) plt.show() plt.close() #Symmetric CSS state fig1 = plt.imshow(rho1, cmap = c_map) plt.rc('text', usetex = True) plt.rc('xtick', labelsize=label_size) plt.rc('ytick', labelsize=label_size) plt.title(r'$\rho=|\frac{1}{\sqrt{2}},-\frac{1}{\sqrt{2}}\rangle\langle\frac{1}{\sqrt{2}},-\frac{1}{\sqrt{2}}|_\mathrm{CSS}$', fontsize = label_size) plt.xticks([]) plt.yticks([]) plt.xlabel(r'$d_N^j$', fontsize = label_size) plt.ylabel(r'Block $j$', fontsize = label_size) cb = plt.colorbar() plt.clim([np.min(rho1),np.max(rho1)]) cb.set_ticks([np.min(rho1),0, np.max(rho1)]) cb.set_ticklabels([r'min',r'$0$',r'max']) plt.show() plt.close() #Antisymmetric CSS state fig5 = plt.imshow(rho4b, cmap = c_map) cb = plt.colorbar() plt.clim([0,np.max(rho4b)]) cb.set_ticks([0, np.max(rho4b)]) cb.set_ticklabels([r'$0$',r'max']) plt.rc('xtick', labelsize=label_size) plt.rc('ytick', labelsize=label_size) plt.title(r'$\rho=|\frac{1}{\sqrt{2}},\frac{1}{\sqrt{2}}\rangle\langle\frac{1}{\sqrt{2}},\frac{1}{\sqrt{2}}|_\mathrm{CSS}$', fontsize = label_size) plt.xticks([]) plt.yticks([]) plt.xlabel(r'$d_N^j$', fontsize = label_size) plt.ylabel(r'Block $j$', fontsize = label_size) plt.show() plt.close() # - # ## References # # [1] B.A. Chase and <NAME>, *Phys Rev. A* **78**, 052101 (2008) # # [2] <NAME>, <NAME>, and <NAME>, *Phys Rev. A* **87**, 062101 (2013) # # [3] <NAME>, *Quantum Inf. 
Comput.* **16**, 1333 (2016) # # [4] <NAME>, <NAME>, and <NAME>, *Phys. Rev. A* **94**, 033838 (2016) # # [5] <NAME> and <NAME>, , *Phys. Rev. Lett.* **118**, 123602 (2017) https://github.com/peterkirton/permutations # # [6] <NAME>, <NAME>, <NAME>, and <NAME>, *Phys Rev. A* **96**, 023863 (2017) # # [7] <NAME> and <NAME>, *Sci. Rep.* **7**, 16304 (2017) https://github.com/modmido/psiquasp # # [8] <NAME>, <NAME>, and <NAME>, *Comp. Phys. Comm.* **183**, 1760 (2012). http://qutip.org # qutip.about()
doc/notebooks/piqs_introduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Programming_Assingment16

# ### Question1.
# Write a function that stutters a word as if someone is struggling to read it. The
# first two letters are repeated twice with an ellipsis ... and space after each, and then the
# word is pronounced with a question mark ?.
# Examples
# stutter('incredible') ➞ 'in... in... incredible?'
# stutter('enthusiastic') ➞ 'en... en... enthusiastic?'
# stutter('outstanding') ➞ 'ou... ou... outstanding?'
#
# Hint :- Assume all input is in lower case and at least two characters long.

import math  # used by radians_to_degrees and area_of_hexagon below


def stutter(word):
    """Return *word* stuttered: the first two letters twice with '... ',
    then the whole word followed by '?'.

    Assumes lower-case input at least two characters long.
    """
    return 2 * (word[:2] + '... ') + word + '?'


word = input('Enter word : ')
print(stutter(word))

# ### Question 2.
# Create a function that takes an angle in radians and returns the corresponding
# angle in degrees rounded to one decimal place.
# Examples
# radians_to_degrees(1) ➞ 57.3
# radians_to_degrees(20) ➞ 1145.9
# radians_to_degrees(50) ➞ 2864.8


# Function for conversion
def radians_to_degrees(radian):
    """Convert an angle in radians to degrees, rounded to one decimal place.

    Fixes two defects in the original: it used the truncated constant
    3.14159 instead of math.pi, and it never rounded even though the
    exercise requires one decimal place (radians_to_degrees(1) -> 57.3).
    """
    return round(radian * (180 / math.pi), 1)


radian = float(input('Enter the Radian : '))
print("degree =", radians_to_degrees(radian))

# ### Question 3.
# In this challenge, establish if a given integer num is a Curzon number. If 1 plus
# 2 elevated to num is exactly divisible by 1 plus 2 multiplied by num, then num is a Curzon
# number.
# Given a non-negative integer num, implement a function that returns True if num is a Curzon
# number, or False otherwise.
# Examples
# is_curzon(5) ➞ True    (2**5 + 1 = 33, 2*5 + 1 = 11, 33 is a multiple of 11)
# is_curzon(10) ➞ False  (2**10 + 1 = 1025, 2*10 + 1 = 21, not a multiple)
# is_curzon(14) ➞ True   (2**14 + 1 = 16385, 2*14 + 1 = 29, a multiple)


# +
def is_curzon(num):
    """Return True when (2**num + 1) is exactly divisible by (2*num + 1)."""
    return (pow(2, num) + 1) % (2 * num + 1) == 0


def checkIfCurzonNumber(n):
    """Print whether *n* is a Curzon number (delegates to is_curzon)."""
    if is_curzon(n):
        print(n, "is Curzon Number")
    else:
        print(n, "is not a Curzon Number")


n = int(input('Enter a number : '))
checkIfCurzonNumber(n)
# -

# ### Question 4.
# Given the side length x find the area of a hexagon.
# Examples
# area_of_hexagon(1) ➞ 2.6
# area_of_hexagon(2) ➞ 10.4
# area_of_hexagon(3) ➞ 23.4


# +
# Area of a regular hexagon with side s: (3 * sqrt(3) * s**2) / 2
def area_of_hexagon(s):
    """Return the area of a regular hexagon with side length *s*.

    Bug fix: the original ignored its parameter and read the global
    ``sideLength`` instead, so any call with a different argument
    returned the wrong area.
    """
    return (3 * math.sqrt(3) * (s * s)) / 2


# length of a side.
sideLength = float(input('Enter the length : '))
print("Area:", "{0:.4f}".format(area_of_hexagon(sideLength)))
# -

# ### Question 5.
# Create a function that returns a base-2 (binary) representation of a base-10
# (decimal) string number.
# Examples
# binary(1) ➞ '1'
# binary(5) ➞ '101'
# binary(10) ➞ '1010'

# +
# Function to convert a decimal number to its binary representation.
def decimalToBinary(n):
    """Return the base-2 string for the non-negative integer *n*."""
    return bin(n).replace("0b", "")


for i in range(0, 50):
    print(decimalToBinary(i))
Programming_Assingment16.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Supplemental Data Cleaning: Using Stemming

# ### Test out Porter stemmer

# +
import nltk

ps = nltk.PorterStemmer()
# -

# ### Read in raw text

# +
import pandas as pd
import re
import string

pd.set_option('display.max_colwidth', 100)

stopwords = nltk.corpus.stopwords.words('english')

data = pd.read_csv("SMSSpamCollection.tsv", sep='\t')
data.columns = ['label', 'body_text']
data.head()
# -

# ### Clean up text

# +
def clean_text(text):
    """Clean one message for downstream stemming.

    Removes punctuation characters, splits on runs of non-word characters
    and drops English stopwords.  Returns the surviving tokens as a list.
    Relies on the module-level ``stopwords`` list loaded above.
    """
    text = "".join([word for word in text if word not in string.punctuation])
    # Raw string avoids the invalid-escape-sequence warning for '\W'.
    tokens = re.split(r'\W+', text)
    text = [word for word in tokens if word not in stopwords]
    return text

data['body_text_nostop'] = data['body_text'].apply(lambda x: clean_text(x.lower()))
# -

# ### Stem text
nlp/Ex_Files_NLP_Python_ML_EssT/Exercise Files/Ch02/02_02/Start/02_02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # neural network(神经网络) import matplotlib.pyplot as plt import numpy as np import scipy.io as sio import matplotlib import scipy.optimize as opt from sklearn.metrics import classification_report#这个包是评价报告 def load_data(path, transpose=True): data = sio.loadmat(path) y = data.get('y') # (5000,1) y = y.reshape(y.shape[0]) # make it back to column vector X = data.get('X') # (5000,400) if transpose: # for this dataset, you need a transpose to get the orientation right X = np.array([im.reshape((20, 20)).T for im in X]) # and I flat the image again to preserve the vector presentation X = np.array([im.reshape(400) for im in X]) return X, y # + X, y = load_data('ex3data1.mat') print(X.shape) print(y.shape) # - def plot_an_image(image): # """ # image : (400,) # """ fig, ax = plt.subplots(figsize=(1, 1)) ax.matshow(image.reshape((20, 20)), cmap=matplotlib.cm.binary) plt.xticks(np.array([])) # just get rid of ticks plt.yticks(np.array([])) #绘图函数 pick_one = np.random.randint(0, 5000) plot_an_image(X[pick_one, :]) plt.show() print('this should be {}'.format(y[pick_one])) def plot_100_image(X): """ sample 100 image and show them assume the image is square X : (5000, 400) """ size = int(np.sqrt(X.shape[1])) # 随机选100个样本 sample_idx = np.random.choice(np.arange(X.shape[0]), 100) # 100*400 sample_images = X[sample_idx, :] fig, ax_array = plt.subplots(nrows=10, ncols=10, sharey=True, sharex=True, figsize=(8, 8)) for r in range(10): for c in range(10): ax_array[r, c].matshow(sample_images[10 * r + c].reshape((size, size)), cmap=matplotlib.cm.binary) plt.xticks(np.array([])) plt.yticks(np.array([])) #绘图函数,画100张图片 plot_100_image(X) plt.show() raw_X, raw_y = load_data('ex3data1.mat') print(raw_X.shape) print(raw_y.shape) # # 准备数据 # add intercept=1 for x0 X = 
np.insert(raw_X, 0, values=np.ones(raw_X.shape[0]), axis=1)#插入了第一列(全部为1) X.shape # + # y have 10 categories here. 1..10, they represent digit 0 as category 10 because matlab index start at 1 # I'll ditit 0, index 0 again y_matrix = [] for k in range(1, 11): y_matrix.append((raw_y == k).astype(int)) # 最后一列k=10,表示为0,把最后一列放到第一列 y_matrix = [y_matrix[-1]] + y_matrix[:-1] y = np.array(y_matrix) y.shape # (10, 50) # 扩展 5000*1 到 5000*10 # 比如 y=10 -> [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]: ndarray # """ # - y # # train 1 model(训练一维模型) def cost(theta, X, y): ''' cost fn is -l(theta) for you to minimize''' return np.mean(-y * np.log(sigmoid(X @ theta)) - (1 - y) * np.log(1 - sigmoid(X @ theta))) def regularized_cost(theta, X, y, l=1): '''you don't penalize theta_0''' theta_j1_to_n = theta[1:] regularized_term = (l / (2 * len(X))) * np.power(theta_j1_to_n, 2).sum() return cost(theta, X, y) + regularized_term def regularized_gradient(theta, X, y, l=1): '''still, leave theta_0 alone''' theta_j1_to_n = theta[1:] regularized_theta = (l / len(X)) * theta_j1_to_n # by doing this, no offset is on theta_0 regularized_term = np.concatenate([np.array([0]), regularized_theta]) return gradient(theta, X, y) + regularized_term def sigmoid(z): return 1 / (1 + np.exp(-z)) def gradient(theta, X, y): '''just 1 batch gradient''' return (1 / len(X)) * X.T @ (sigmoid(X @ theta) - y) def logistic_regression(X, y, l=1): """generalized logistic regression args: X: feature matrix, (m, n+1) # with incercept x0=1 y: target vector, (m, ) l: lambda constant for regularization return: trained parameters """ # init theta theta = np.zeros(X.shape[1]) # train it res = opt.minimize(fun=regularized_cost, x0=theta, args=(X, y, l), method='TNC', jac=regularized_gradient, options={'disp': True}) # get trained parameters final_theta = res.x return final_theta def predict(x, theta): prob = sigmoid(x @ theta) return (prob >= 0.5).astype(int) t0 = logistic_regression(X, y[0]) print(t0.shape) y_pred = predict(X, t0) 
print('Accuracy={}'.format(np.mean(y[0] == y_pred))) # # train k model(训练k维模型) k_theta = np.array([logistic_regression(X, y[k]) for k in range(10)]) print(k_theta.shape) # # 进行预测 # * think about the shape of k_theta, now you are making $X\times\theta^T$ # > $(5000, 401) \times (10, 401).T = (5000, 10)$ # * after that, you run sigmoid to get probabilities and for each row, you find the highest prob as the answer prob_matrix = sigmoid(X @ k_theta.T) np.set_printoptions(suppress=True) prob_matrix y_pred = np.argmax(prob_matrix, axis=1)#返回沿轴axis最大值的索引,axis=1代表行 y_pred y_answer = raw_y.copy() y_answer[y_answer==10] = 0 print(classification_report(y_answer, y_pred)) # # 神经网络模型图示 # <img style="float: left;" src="../img/nn_model.png"> def load_weight(path): data = sio.loadmat(path) return data['Theta1'], data['Theta2'] # + theta1, theta2 = load_weight('ex3weights.mat') theta1.shape, theta2.shape # - # 因此在数据加载函数中,原始数据做了转置,然而,转置的数据与给定的参数不兼容,因为这些参数是由原始数据训练的。 所以为了应用给定的参数,我需要使用原始数据(不转置) # + X, y = load_data('ex3data1.mat',transpose=False) X = np.insert(X, 0, values=np.ones(X.shape[0]), axis=1) # intercept X.shape, y.shape # - # # feed forward prediction(前馈预测) a1 = X z2 = a1 @ theta1.T # (5000, 401) @ (25,401).T = (5000, 25) z2.shape z2 = np.insert(z2, 0, values=np.ones(z2.shape[0]), axis=1) a2 = sigmoid(z2) a2.shape z3 = a2 @ theta2.T z3.shape a3 = sigmoid(z3) a3 y_pred = np.argmax(a3, axis=1) + 1 # numpy is 0 base index, +1 for matlab convention,返回沿轴axis最大值的索引,axis=1代表行 y_pred.shape # # 准确率 # # 虽然人工神经网络是非常强大的模型,但训练数据的准确性并不能完美预测实际数据,在这里很容易过拟合。 print(classification_report(y, y_pred))
ex3-neural network/other1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 4, "hidden": false, "row": 0, "width": 4}, "report_default": {}}}}
# # PFC 2019 simulations
#
# Builds simulated pulsar-timing datasets (.tim files) from per-pulsar RMS
# vs. time tables, then reads them back as enterprise Pulsar objects and
# pickles the result.

# +
from __future__ import division

# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# %load_ext autoreload
# %autoreload 2

import numpy as np
import glob, os, pickle

import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi'] = 2.5 * matplotlib.rcParams['figure.dpi']

import astropy
from astropy.time import Time

import enterprise
from enterprise.pulsar import Pulsar

import enterprise_extensions
from enterprise_extensions import models, model_utils

import libstempo as T2, libstempo.toasim as LT, libstempo.plot as LP

from ephem import Ecliptic, Equatorial
# -

# +
def figsize(scale):
    """Return a [width, height] figure size in inches for a given fraction
    of the LaTeX text width, using the golden ratio for the aspect."""
    fig_width_pt = 513.17  #469.755 # Get this from LaTeX using \the\textwidth
    inches_per_pt = 1.0/72.27  # Convert pt to inch
    golden_mean = (np.sqrt(5.0)-1.0)/2.0  # Aesthetic ratio (you could change this)
    fig_width = fig_width_pt*inches_per_pt*scale  # width in inches
    fig_height = fig_width*golden_mean  # height in inches
    fig_size = [fig_width,fig_height]
    return fig_size

#plt.rcParams.update(plt.rcParamsDefault)
# Matplotlib/LaTeX styling for publication-quality figures.
params = {'backend': 'pdf',
          'axes.labelsize': 10,
          'lines.markersize': 4,
          'font.size': 10,
          'xtick.major.size':6,
          'xtick.minor.size':3,
          'ytick.major.size':6,
          'ytick.minor.size':3,
          'xtick.major.width':0.5,
          'ytick.major.width':0.5,
          'xtick.minor.width':0.5,
          'ytick.minor.width':0.5,
          'lines.markeredgewidth':1,
          'axes.linewidth':1.2,
          'legend.fontsize': 7,
          'xtick.labelsize': 10,
          'ytick.labelsize': 10,
          'savefig.dpi':200,
          'path.simplify':True,
          'font.family': 'serif',
          'font.serif':'Times',
          'text.latex.preamble': [r'\usepackage{amsmath}',r'\usepackage{amsbsy}',
                                  r'\DeclareMathAlphabet{\mathcal}{OMS}{cmsy}{m}{n}'],
          'text.usetex':True,
          'figure.figsize': figsize(0.5)}
plt.rcParams.update(params)
# -

# ## Useful functions

import pandas as pd

# +
def year2mjd(year):
    """Convert a decimal year to MJD (rounds to the nearest whole year first)."""
    # rounds to nearest year
    return float(Time("{}-01-01T00:00:00".format(str(int(np.rint(year)))), format='isot').mjd)

def mjd2year(mjd):
    """Convert an MJD to a decimal year."""
    return float(Time(mjd, format='mjd').decimalyear)
# -

# ## Process data

data = pd.read_csv('../data/pfc2019/RMSonlyvsTime2018-GBTAO-BurningDumpsterNoOpenSkiesLoseGBT-SimulationIVe.csv',header=0,skip_blank_lines=True,)
# data = pd.read_csv('../data/pfc2019/RMSonlyvsTime2018-DSA2000-CWStatusQuo-SimulationII.csv',header=0,skip_blank_lines=True,)

for ii,name in data.iterrows():
    print(name)

data

### Make fake dataset
# Check that a par file exists for every pulsar in the table; prefer the
# population-synthesis par files and fall back to the real ones.
for jj,name in data.iterrows():
    psrname = name.PSR
    try:
        file = open('../data/pfc2019/popsynth_par_files/' + psrname + '.par', mode='r')
        file.close()
        par = '../data/pfc2019/popsynth_par_files/' + psrname + '.par'
    except:
        try:
            # J1713+0747 uses a tempo2-format par file
            if psrname == 'J1713+0747':
                ext = '.working.t2.par'
            else:
                ext = '.working.par'
            file = open('../data/pfc2019/real_par_files/' + psrname + ext, mode='r')
            file.close()
            par = '../data/pfc2019/real_par_files/' + psrname + ext
        except:
            print('need',psrname)
            #os.system('cp /Users/taylosr8/Research/repos/MSIP2020/data/include/PAR/{}.par ../data/pfc2019/popsynth_par_files/'.format(psrname))

# ## Creating tim files

# , 'cw_status_quo_simII', 'cw_optimized_simIII', 'burning_dumpster_simIV'
sim_type = 'test2_simIVe'#'cw_optimized_simIII'

timcheck=glob.glob('../data/pfc2019/tim_{}/*.tim'.format(sim_type))
Ltim=len(timcheck)
print(Ltim)

# NOTE(review): this ``idxs`` is overwritten further down before being used;
# it appears to be dead code.
idxs = [ii for ii in range(Ltim)]

# +
if not os.path.exists('../data/pfc2019/tim_{}/'.format(sim_type)):
    os.makedirs('../data/pfc2019/tim_{}/'.format(sim_type))

sims = []
start_data = []
for jj,name in data.iterrows():
    psrname = name.PSR
    print('\r'+psrname+' ',end='',flush=True)
    # Skip pulsars whose simulated .tim file already exists.
    if os.path.exists('../data/pfc2019/tim_{}/{}.tim'.format(sim_type,psrname)):
        sims.append(None)
    # elif psrname in ['J2017+0603']:#'J1022+1001''J0621+2514','J0709+0458']:
    #     sims.append(None)
    else:
        # Per-epoch RMS values; 'gap' entries mark stretches with no data.
        rms = np.array([name.RMS1, name.RMS2, name.RMS3, name.RMS4, name.RMS5])
        rms[rms=='gap'] = np.inf
        rms = np.array(rms,dtype=float)
        epoch = np.array([name.Epoch1, name.Epoch2, name.Epoch3, name.Epoch4, name.Epoch5])

        ### Start and End year
        start_yr = epoch[np.where(~np.isnan(rms))[0][0]]
        start_yr_mjd = year2mjd(start_yr)
        #
        end_yr = 2045.0
        end_yr_mjd = year2mjd(end_yr)

        ### Spacing and obstimes
        spacing = 365.25 / 20.0 # days between observations
        #
        obstimes = np.arange(start_yr_mjd, end_yr_mjd, spacing)
        # jitter observation epochs by up to +/- 5 days
        obstimes += np.random.uniform(low=-5,high=5,size = obstimes.size)

        # removing data gaps
        for kk,rmss in enumerate(rms):
            if np.isinf(rmss):
                if kk == 4:
                    # gap in the final segment: remove through the end
                    mask = np.logical_and(obstimes >= year2mjd(epoch[kk]), obstimes <= end_yr_mjd)
                    obstimes = obstimes[~mask]
                elif np.isnan(rms[kk+1]):
                    # gap with no following segment: remove through the end
                    mask = np.logical_and(obstimes >= year2mjd(epoch[kk]), obstimes <= end_yr_mjd)
                    obstimes = obstimes[~mask]
                else:
                    # interior gap: remove up to the next epoch
                    mask = np.logical_and(obstimes >= year2mjd(epoch[kk]), obstimes <= year2mjd(epoch[kk+1]))
                    obstimes = obstimes[~mask]

        ### Segmenting obstimes based on hardware/telescope switches
        stops = list(epoch[np.where(~np.isnan(rms))[0]]) + [end_yr]
        stops = [year2mjd(yr) for yr in stops]
        errors = list(rms[np.where(~np.isnan(rms))[0]])

        ### Masking sections of data based on these stops
        masks = []
        for kk,stop in enumerate(stops):
            if kk < len(stops)-1:
                masks.append(np.logical_and(obstimes >= stops[kk], obstimes <= stops[kk+1]))

        ### Applying RMS errors
        toa_errs = np.ones_like(obstimes)
        for kk,mask in enumerate(masks):
            toa_errs[mask] *= float(errors[kk])

        ### Make fake dataset
        try:
            # prefer the population-synthesis par file
            file = open('../data/pfc2019/popsynth_par_files/' + psrname + '.par', mode='r')
            file.close()
            par = '../data/pfc2019/popsynth_par_files/' + psrname + '.par'

            if name.Observatory == 'DSA2000':
                observatory_tag = 'vla'
            else:
                observatory_tag = name.Observatory.lower()

            sims.append(LT.fakepulsar(parfile=par,
                                      obstimes=obstimes,
                                      toaerr=toa_errs,
                                      observatory=observatory_tag))

            # white noise
            LT.add_efac(sims[jj])

            # save .tim
            sims[jj].savetim('../data/pfc2019/tim_{}/'.format(sim_type) + sims[jj].name + '.tim')

            ###
            start_data.append([psrname, start_yr, start_yr_mjd])
            print(psrname, par, start_yr_mjd, end_yr_mjd, len(stops), len(masks), len(errors))
        except:
            # fall back to the real par files
            if psrname == 'J1713+0747':
                ext = '.working.t2.par'
            else:
                ext = '.working.par'

            # try:
            file = open('../data/pfc2019/real_par_files/' + psrname + ext, mode='r')
            file.close()
            par = '../data/pfc2019/real_par_files/' + psrname + ext

            if name.Observatory == 'DSA2000':
                observatory_tag = 'vla'
            else:
                observatory_tag = name.Observatory.lower()

            sims.append(LT.fakepulsar(parfile=par,
                                      obstimes=obstimes,
                                      toaerr=toa_errs,
                                      observatory=observatory_tag))

            # white noise
            LT.add_efac(sims[jj])

            # save .tim
            sims[jj].savetim('../data/pfc2019/tim_{}/'.format(sim_type) + sims[jj].name + '.tim')

            ###
            start_data.append([psrname, start_yr, start_yr_mjd])
            print(psrname, par, start_yr_mjd, end_yr_mjd, len(stops), len(masks), len(errors))
            # except:
            #     print('no par file for {}...skipping'.format(psrname))
# -

# +
# NOTE(review): the commented block below uses Python-2 ``print >>`` syntax;
# it must be ported if revived.
#start_data = np.array(start_data)
#start_data[start_data[:,1].argsort()]
#fil = open('sims_psr_startdata_{}.txt'.format(sim_type),'w')
#for line in start_data[start_data[:,1].argsort()]:
#    print >>fil, line[0], line[1], line[2]
#fil.close()
# -

# # Read In And Check Pulsars

# +
import enterprise
from enterprise.pulsar import Pulsar
from enterprise.signals import parameter
from enterprise.signals import white_signals
from enterprise.signals import gp_signals
from enterprise.signals import signal_base

import enterprise_extensions
from enterprise_extensions import models, model_utils

import glob
# -

psr_test = Pulsar('../data/pfc2019/real_par_files/J1713+0747.working.t2.par',
                  '../data/pfc2019/tim_{}/J1713+0747.tim'.format(sim_type),
                  ephem='DE436')

# +
# Sanity-check plot: residuals vs. year for one simulated pulsar.
plt.errorbar([mjd2year(p) for p in psr_test.toas/86400.0],
             psr_test.residuals/1e-6,
             psr_test.toaerrs/1e-6,
             alpha=0.3, fmt='.')
plt.xlabel(r'Year')
plt.ylabel(r'Residuals [$\mu$s]')
plt.title(psr_test.name)
plt.show()
# -

#Pull paths in with glob
timpaths = sorted(glob.glob('../data/pfc2019/tim_{}/*.tim'.format(sim_type)))
parpaths = glob.glob('../data/pfc2019/popsynth_par_files/*.par')
parpaths.extend(glob.glob('../data/pfc2019/real_par_files/*.par'))

psr_names = sorted([t.split('/')[-1].split('.')[0] for t in timpaths])
parpaths = sorted([p for p in parpaths if p.split('/')[-1].split('.')[0] in psr_names])
parpsr_order = [p.split('/')[-1].split('.')[0] for p in parpaths]

#Check to see that numbers match
len(timpaths), len(parpaths)

# NOTE(review): hard-coded 'tim_bg_status_quo_simI' path does not track
# ``sim_type`` and will raise ValueError for other simulations — confirm.
timpaths.index('../data/pfc2019/tim_bg_status_quo_simI/J1909-3744.tim')

parpaths[68]

# Reorder par paths to match the sorted tim paths.
idxs = [parpsr_order.index(p.split('/')[-1].split('.')[0]) for p in psr_names]
sorted_parpaths = np.array(parpaths)[idxs]
sorted_parpaths[68]

timpaths[0]

# +
# If they don't match you can use this to see which one don't match
# [p for p in parpaths if p.split('/')[-1].split('.')[0]
#  not in psr_names]
# -

psrs=[]
for t,p in zip(timpaths,sorted_parpaths):
    if t.split('/')[-1].split('.')[0] != p.split('/')[-1].split('.')[0]:
        raise ValueError('par and tim do not match !!!')
    psrs.append(Pulsar(t,p,ephem='DE436'))

# NOTE(review): output directory is fixed to 'tim_status_quo' even though the
# pulsars were built for ``sim_type`` — confirm this is intended.
with open('../data/pfc2019/tim_status_quo/pfc2019_{0}_ePsrs.pkl'.format(sim_type), 'wb') as fin:
    pickle.dump(psrs,fin)
code/nano_pfc2019_sims.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # First things first
#
# Let's import all the fun stuff that lets us do the really fun stuff.

# +
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from pylab import rcParams

# Kaggle paths; DBNAME is currently unused in this notebook.
INDIR = "/kaggle/input"
OUTDIR = "/kaggle/working"
DBNAME = "words.db"

rcParams['figure.figsize'] = 16,9
pd.options.plotting.backend = "matplotlib"
sns.set(style="darkgrid")

# Figures collected here are saved to disk at the end of the notebook.
figs = {}
# -

# + [markdown]
# # Prepping the data
#
# We're going to load our words from the word lists that we copied from the
# source code. There are two lists, one which seems to be a list of played
# words and ones that have yet to be released.

# +
playable_words = pd.read_json("../input/wordle-word-list/played_words.json")
other_words = pd.read_json("../input/wordle-word-list/unplayed_words.json")

# Tag each word with whether it has been played.
playable_words[1] = True
other_words[1] = False

words = pd.concat([playable_words, other_words])
words.columns = ["name", "playable"]
# NOTE(review): the result of reset_index() is discarded — for the reset to
# take effect this should be ``words = words.reset_index(drop=True)``.
words.reset_index()
words.describe()
# -

# + [markdown]
# # First Heuristic: Letter Rank
#
# It's pretty simple: we're going to find out how often each letter is used
# in the entire wordle word list and rank them using a barplot. Once that is
# done, we'll check to see if there are any words spelled using the top 5
# letters in the word list.

# +
from itertools import chain

game1 = {}

# Flatten every word into a Series of single letters and count them.
letters = pd.Series(chain.from_iterable(words["name"]))
letter_counts = letters.value_counts()

ax = sns.barplot(
    x=letter_counts.index,
    y=letter_counts,
    color='#69b3a2'
)
ax.set_xlabel("Letters")
ax.set_ylabel("Frequency")
ax.set_title("Letter Ranking")
figs["letter_ranking.png"] = ax.figure
# -

# + [markdown]
# From the above, we can see that the top 5 letters are: s, e, a, o and r.
# Given that information, let's try and find out if there are any words
# spelled using all five letters.

# +
def contains_all(letters):
    """Return a predicate that is True for words using every letter in *letters*."""
    return lambda word: set(letters) <= set(word)

all_top5_letters = words["name"].apply(contains_all("seaor"))
words[all_top5_letters]
# -

# + [markdown]
# The result is the following three words:
# 1. arose
# 2. aeros
# 3. soare

# + [markdown]
# ## First turn

# +
def contains_any(letters):
    """Return a predicate that is True for words sharing any letter with *letters*."""
    return lambda word: len(set(letters) & set(word)) > 0

any_top5_letter = words["name"].apply(contains_any("seaor"))
words_turn1 = words[any_top5_letter]
words_turn1.describe()
# -

# + [markdown]
# So what does that mean for us? Well, let's look at the worst possible
# scenario: that we get no yellow or green tiles. In this case, we've still
# managed to do away with a great deal of the problem space, out of a total
# 12,972 words we've eliminated 12,395. Using only 1/6th of the time given to
# us, we've eliminated 95% of the problem space. Let's filter for any possible
# words using the next 5 most common letters.

# + [markdown]
# ## Second Turn
#
# The next five most frequent letters according to our chart are i, l, t, n
# and u. Same as above, we'll check the remaining words in the list and see
# if all five letters can be used to play a word.

# +
remaining_words = words[~any_top5_letter]
all_next5_letters = remaining_words["name"].apply(contains_all("intlu"))
remaining_words[all_next5_letters]
# -

# + [markdown]
# So we have two words that use the letters ranking 6 through 10. Again, our
# worst case scenario is that none of these letters our in the secret word.
# How many possibilities have we eliminated?

# +
# "until" is the same letter set as "intlu" above.
any_next5_letters = remaining_words["name"].apply(contains_any("until"))
words_turn2 = remaining_words[any_next5_letters]
words_turn2.describe()
# -

# + [markdown]
# Two turns done, and we have eliminated 12,969 (12,395 + 574) words. By turn
# 3, this means that we have the following possiblities left. Even if we
# ignore the clues dropped by the game, in three more turns, we'll have the
# answer (assuming we actually know these words).

# +
remaining_words = remaining_words[~any_next5_letters]
remaining_words
# -

# +
# Summary of how many candidate words each turn covered.
first_game = pd.DataFrame.from_dict({
    "Turn 1": words_turn1["name"].count(),
    "Turn 2": words_turn2["name"].count(),
    "Remaining": remaining_words["name"].count()
}, orient="index", columns=["Words"])

ax = sns.barplot(
    x=first_game.index,
    y=first_game.Words,
    color='#69b3a2'
)
#ax.set_xlabel("Letters")
ax.set_ylabel("Words")
ax.set_title("Game 1 results")
figs["first_game_result.png"] = ax.figure
first_game
# -

# + [markdown]
# # Second Heuristic: Et tu Brute-force-us
#
# Our first heuristic used the most common letters to find words that could
# match against. The thing is, a (good) heuristic gives us an answer that is
# good enough. So did our heuristic give us the word that eliminates the most

# +
def count_matches(words):
    """Return a scorer: for a word, how many words in *words* share a letter with it."""
    return lambda word: words.apply(
        contains_any(word)
    ).value_counts()[True]

# NOTE(review): the comment below is stale — the word itself is NOT removed
# from its own match count here.
# remove the word itself from number of matches
words["starter_score"] = words["name"].apply(count_matches(words["name"]))
words[words["starter_score"] == words["starter_score"].max()]
# -

# + [markdown]
# As it turns out, there are two words in the word list that just barely
# outperform our starter words consisting of the top 5 letters (and I have no
# idea what they mean):
# 1. Stoae
# 2. Toeas

# + [markdown]
# ## Turn 1
# Since we already know how many words we rule out by playing either of the
# words above, we can go directly to figuring out what our options are for
# turn 2 in case we don't match any letters at all.

# +
any_top5_letter = words["name"].apply(contains_any("stoae"))
remaining_words = words[~any_top5_letter].copy()
remaining_words["starter_score"] = remaining_words["name"].apply(count_matches(remaining_words["name"]))
remaining_words[remaining_words["starter_score"] == remaining_words["starter_score"].max()]
# -

# + [markdown]
# We get three words that match the most remaining words. In fact, they match
# all the remaining words except one.

# + [markdown]
# ## Turn 2
# We can play one of the three words above to find out what the last word is:
# grrrl.
#
# I was not expecting that.

# +
any_next5_letters = remaining_words["name"].apply(contains_any("unify"))
remaining_words = remaining_words[~any_next5_letters]
remaining_words
# -

# + [markdown]
# It would seem that the second method outperforms the first. We're guaranteed
# to figure the wordle out even if the first two turns don't reveal any green
# or yellow tiles.

# +
second_game = pd.DataFrame.from_dict({
    "Turn 1": 12417,
    "Turn 2": 554,
    "Remaining": 1
}, orient="index", columns=["Words"])

ax = sns.barplot(
    x=second_game.index,
    y=second_game.Words,
    color='#69b3a2'
)
#ax.set_xlabel("Letters")
ax.set_ylabel("Words")
ax.set_title("Game 2 results")
figs["second_game_result.png"] = ax.figure
second_game
# -

# + [markdown]
# # When you assume, you make ...
# Having done all the analysis above, can we say that the second heuristic is
# better than the first?
# 1. Is the worst case scenario really the worst case scenario? How can we find out?
# 2. How does discovering a yellow or green tile change the likelihood of other letters appearing in the wordle?
# 3. How do you even compare two heuristics or techniques?
# 4. Is there a great starter word?
# 5. We're assuming that the player knows ALL the words in the wordle word list.

# + [markdown]
# # Final Thoughts (Work in progress)

# +
# Words containing all of 's', 't', 'a' but none of 'o', 'e'.
matches = words[words['name'].apply(contains_all('sta'))]
matches = matches[~matches['name'].apply(contains_any('oe'))]
# -

# + [markdown]
# # Don't mind me
# Just saving the plots

# +
for name, figure in figs.items():
    figure.savefig(name)
# -

# + [markdown]
#
wordle-analysis-p1.ipynb