code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Use Case: Connecting EPA AOP-DB Data to Extant Databases with RDF and SPARQL <a name="top"></a> # <div class="alert alert-success"> # The following Python code executes <strong>SPARQL</strong> queries on <strong>AOP-DB</strong> and related databases. The first example queries just AOP-DB. The second example is a <strong>federated</strong> SPARQL query. Federated SPARQL queries are SPARQL queries which retrieve linked RDF data from multiple databases. # <br><br> # This document will showcase how SPARQL can be used to access AOP-DB RDF data, and link it with extant datasets. For more detailed information on RDF, SPARQL, and other semantic technologies, please visit the <a href="https://www.w3.org/standards/semanticweb/#w3c_overview">official semantic web documentation</a>. # </div> # <div class="alert alert-info"> # The <strong>SPARQLWrapper</strong> package allows SPARQL query execution in a Python environment. The <strong>pandas</strong> package is used for data manipulation and display. # </div> from SPARQLWrapper import SPARQLWrapper, JSON import pandas as pd # <div class="alert alert-info"> # The functions defined below convert JSON, which is the data format returned by SPARQL queries, into pandas dataframes. # </div> # This function takes the JSON data retrieved by SPARQL queries and converts it to a useful format def convertjson(jdata): jdata.setReturnFormat(JSON) return jdata.query().convert() # This function unpacks the queried data into a pandas dataframe def sparql_to_df(results): head = [] for header in results["head"]["vars"]: head.append(str(header)) dbdict = {} for i in range(len(head)): dbdict[head[i]] = [] for result in results["results"]["bindings"]: for item in head: dbdict[item].append(result[item]["value"]) return pd.DataFrame.from_dict(dbdict) # <div class="alert alert-info"> # Now we will use SPARQLWrapper to connect to <a href="http://192.168.3.11:8892/sparql/">AOP-DB's SPARQL endpoint</a>. # </div> aopdb = SPARQLWrapper("http://192.168.3.11:8892/sparql/") # <div class="alert alert-info"> # The SPARQL query in <strong><em>example one</em></strong> -- the query itself is the red text in triple quotes -- retrieves AOP-DB genes and related EPA ToxCast assay information. The variables returned in a SPARQL query are denoted by a leading question mark. In this case, these variables are <strong>?gene</strong> and <strong>?ToxCast_assay</strong>. # </div> # ### Example 1: Simple SPARQL Query of AOP-DB # Retrieving Genes and ToxCast Assays aopdb.setQuery(""" PREFIX mmo: <http://purl.obolibrary.org/obo/MMO_> PREFIX edam: <http://edamontology.org/> SELECT DISTINCT ?gene ?ToxCast_assay { ?gene_id mmo:0000441 ?assay_id; edam:data_1027 ?gene. ?assay_id dc:title ?ToxCast_assay. }LIMIT 50 """) gene_assay_Query = sparql_to_df(convertjson(aopdb)) # <div class="alert alert-info"> # Below is a sample of the National Center for Biotechnology Information (NCBI) gene identification numbers, # and their asociated assays, that the SPARQL query of AOP-DB in example one returned. # </div> gene_assay_Query.sort_values('gene').head(5) # <div class="alert alert-info"> # Example one is a simple demonstration of how SPARQL queries retrieve information from RDF data. <strong><em>Example two</em></strong>, a federated SPARQL query, will retrieve linked data from four databases- <strong> AOP-DB</strong>, <strong>AOP-Wiki</strong>,<strong> Protein Ontology</strong>, and <strong> Wikipathways</strong>. # <br><br> # This second example begins much like the first, retrieving gene and ToxCast assay information from AOP-DB. However, it then extends into the <a href="https://aopwiki.rdf.bigcat-bioinformatics.org/sparql">AOP-Wiki SPARQL endpoint</a> using the <strong>SERVICE</strong> call. Because AOP-DB and AOP-Wiki have RDF data describing different attributes of the same genes, the query, which has already retrieved gene and ToxCast assay information from AOP-DB, can retrieve from AOP-Wiki those gene's protein and key event information. Then the query makes another SERVICE call to the <a href="https://lod.proconsortium.org/yasgui.html">Protein Ontology SPARQL endpoint</a> to obtain detailed descriptions of each protein. Finally, the query calls the <a href="http://sparql.wikipathways.org/sparql">Wikipathways</a> endpoint and retrieves biological pathways that the genes are part of, and descriptions of those pathways. # </div> # ### Example 2: Federated SPARQL Query of AOP-DB, AOP-Wiki, Protein Ontology, and Wikipathways # Retrieving AOP, Key Event, Gene, Protein, and Biological Pathway Data aopdb.setQuery(""" PREFIX pato: <http://purl.obolibrary.org/obo/PATO_> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX aopo: <http://aopkb.org/aop_ontology#> PREFIX edam: <http://edamontology.org/> PREFIX skos: <http://www.w3.org/2004/02/skos/core#> PREFIX obo: <http://purl.obolibrary.org/obo/> PREFIX dc: <http://purl.org/dc/elements/1.1/> PREFIX wp: <http://vocabularies.wikipathways.org/wp#> PREFIX dcterms: <http://purl.org/dc/terms/> SELECT DISTINCT ?aop_id ?ke_id ?key_event ?gene_NCBI ?protein ?description ?wp_pathname ?wp_description { ?gene mmo:0000441 ?assay. ?assay dc:title ?assayId. SERVICE <https://aopwiki.rdf.bigcat-bioinformatics.org/sparql> { ?gene a edam:data_1027 ; dc:identifier ?gene_NCBI . ?object dc:title ?protein; skos:exactMatch ?gene. ?ke pato:0001241 ?object; dc:title ?key_event; rdfs:label ?ke_id. ?aop a aopo:AdverseOutcomePathway ; rdfs:label ?aop_id; aopo:has_key_event ?ke. } SERVICE <https://sparql.proconsortium.org/virtuoso/sparql> { ?object rdfs:label ?_PRO_label ; obo:IAO_0000115 ?description . BIND(STR(?_PRO_label) AS ?PRO_label) . } SERVICE <http://sparql.wikipathways.org/sparql> { ?wp_gene wp:bdbEntrezGene ?gene; dcterms:isPartOf ?wpPath . ?wpPath dcterms:identifier ?pathway ; dcterms:description ?wp_description ; dc:title ?wp_pathname . BIND(STR(?pathway) AS ?pathway) . } }LIMIT 50 """) fedQuery = sparql_to_df(convertjson(aopdb)) # <div class="alert alert-info"> # Below is a sample of the adverse outcome pathway, key event, assay, gene, protein, and biological pathway information cobined using the SPARQL query in example two. # </div> fedQuery.head(5) # <div class="alert alert-success" role='alert'> # In approximately 20 lines of Python and about 35 lines of SPARQL, this query retrieves data on eight variables from four databases. There are many more variables in AOP-DB, AOP-Wiki, Protein Ontology, and Wikipathways that can be combined in a SPARQL query, and there are also many more databases with relevant information that could be included. Having AOP-DB data available as RDF presents a broad range of opportunities for research and data access. # </div> # <hr></hr> # <table style="border:2px solid #add8e6;margin-left:0;margin-right:auto;text-align:left"> # <tr> # <td style='text-align:left'>Author:</td><td> </td><td><NAME></td> # </tr> # <tr> # <td style='text-align:left'>Date:</td><td> </td><td>5/28/2020</td> # </tr> # <tr> # <td style='text-align:left'>ORISE Fellow</td><td> </td><td>US EPA ORD CPHEA</td> # </tr> # # </table> # # <br> # <br>
sparql_aopdb_figure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <center> Configuración Global del NOTEBOOK</center> from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # # <center> Función que retorna un dictionario con los carácteres de código morse </center> def morse_dictionary() -> dict: MORSE_CODE_DICTIONARY = {'A':'.-', 'B':'-...', 'C':'-.-.', 'D':'-..', 'E':'.', 'F':'..-.', 'G':'--.', 'H':'....', 'I':'..', 'J':'.---', 'K':'-.-', 'L':'.-..', 'M':'--', 'N':'-.', 'Ñ':'--.--', 'O':'---', 'P':'.--.', 'Q':'--.-', 'R':'.-.', 'S':'...', 'T':'-', 'U':'..-', 'V':'...-', 'W':'.--', 'X':'-..-', 'Y':'-.--', 'Z':'--..', '1':'.----', '2':'..---', '3':'...--', '4':'....-', '5':'.....', '6':'-....', '7':'--...', '8':'---..', '9':'----.', '0':'-----', ', ':'--..--', '.':'.-.-.-', '?':'..--..', ';':'−·−·−·','"':'·−··−·', '/':'-..-.', '-':'-....-','+':'·−·−·','*':'_.._', '=':'−···−','(':'-.--.', ')':'-.--.-', ' ': '/'} return MORSE_CODE_DICTIONARY # # <center> Función que obtiene el contenido de una key</center> def get_value_from_key(key: str) -> str: MORSE_CODE_DICTIONARY = morse_dictionary() return MORSE_CODE_DICTIONARY[key.upper()] if(key.upper() in MORSE_CODE_DICTIONARY) else key + " -> Value doesn't found" # # <center> Función que obtiene una key usando el valor de la key</center> def get_key_from_value(value: str) -> str: MORSE_CODE_DICTIONARY = morse_dictionary() key_list = list(MORSE_CODE_DICTIONARY.keys()) val_list = list(MORSE_CODE_DICTIONARY.values()) if(value in val_list): # obtengo el indice del valor en la lista para obtener la key return key_list[val_list.index(value)] else: return "key doesn't found" # # <center> Función que convierte texto normal en Código Morse</center> def encode_text_to_morse_code(text: str) -> str: morse_text = '' for i in list(text): morse_text += get_value_from_key(i) + ' ' # elimina el ultimo caracter en blanco del texto return morse_text[:-1] # # <center> Función que convierte Código Morse en texto normal</center> def decode_morse_code_to_text(text: str) -> str: normal_text = '' for i in text.split(' '): normal_text += get_key_from_value(i) # elimina el ultimo caracter en blanco del texto return normal_text # # <center> Función que Códifica Morse o Decódifica Morse</center> def proccess_text(text: str, encrypt: bool) -> str: if(encrypt): return encode_text_to_morse_code(text) else: return decode_morse_code_to_text(text) # # <center> Códificando Texto a Morse</center> texto_en_morse = proccess_text('el mono *3 esta en el bananal con el ñandu tururu', True) texto_en_morse # # <center> Decódificando Morse a Texto</center> proccess_text(texto_en_morse, False)
Python/Notebooks/Code and Decode Morse Code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + active="" # <script> # function code_toggle() { # if (code_shown){ # $('div.input').hide('500'); # $('#toggleButton').val('Show Code') # } else { # $('div.input').show('500'); # $('#toggleButton').val('Hide Code') # } # code_shown = !code_shown # } # # $( document ).ready(function(){ # code_shown=false; # $('div.input').hide() # }); # </script> # <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form> # - # ## Modules # + from keras.layers import Input, Dense, Dropout from keras.models import Model from keras.datasets import mnist from keras.models import Sequential, load_model from keras.optimizers import RMSprop from keras.callbacks import TensorBoard from __future__ import print_function from IPython.display import SVG, Image from keras import regularizers from matplotlib import rc import keras import matplotlib.pyplot as plt import numpy as np # import graphviz # + # %matplotlib inline font = {'family' : 'monospace', 'weight' : 'bold', 'size' : 20} rc('font', **font) # - # ## Global Variables # The number of classes, the input dimension and the batch size are constant. num_classes = 10 input_dim = 784 batch_size = 256 # ## Data import and preprocessing # All data is normalized and serialized into a vector. (x_train, y_train), (x_val, y_val) = mnist.load_data() y_train # + (x_train, y_train), (x_val, y_val) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_val = x_val.astype('float32') / 255. x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_val = x_val.reshape((len(x_val), np.prod(x_val.shape[1:]))) print(x_train.shape) # - y_train = keras.utils.to_categorical(y_train, num_classes) y_val = keras.utils.to_categorical(y_val, num_classes) # # Train Neural Net to recognize MNIST digits # constants hidden1_dim = 512 hidden2_dim = 512 # `Dropout` consists in randomly setting a fraction rate of input units to 0 at each update during training time, which helps prevent overfitting. # + input_data = Input(shape=(input_dim,), dtype='float32', name='main_input') x = Dense(hidden1_dim, activation='relu', kernel_initializer='normal')(input_data) x = Dropout(0.2)(x) x = Dense(hidden2_dim, activation='relu', kernel_initializer='normal')(x) x = Dropout(0.2)(x) output_layer = Dense(num_classes, activation='softmax', kernel_initializer='normal')(x) model = Model(input_data, output_layer) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) # - Image("images/mnist_nn1.png") model.fit(x_train, y_train, batch_size=batch_size, epochs=20, verbose=0, validation_split=0.1) score = model.evaluate(x_val, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) fig = plt.figure(figsize=(20,10)) plt.plot(model.history.history['val_acc']) plt.plot(model.history.history['acc']) plt.axhline(y=score[1], c="red") plt.text(0, score[1], "test: " + str(round(score[1], 4)), fontdict=font) plt.title('model accuracy for neural net with 2 hidden layers') plt.ylabel('accuracy') plt.xlabel('epochs') plt.legend(['valid', 'train'], loc='lower right') plt.show() # # Single autoencoder # ## Model Definitions # Using keras module with compression to 32 floats. encoding_dim = 32 # + # input placeholder input_img = Input(shape=(input_dim,)) encoded = Dense(encoding_dim, activation='relu')(input_img) decoded = Dense(input_dim, activation='sigmoid')(encoded) single_autoencoder = Model(input_img, decoded) # - # Encoder Model: # this model maps an input to its encoded representation single_encoder = Model(input_img, encoded) # Decoder Model: encoded_input = Input(shape=(encoding_dim,)) # retrieve the last layer of the autoencoder model decoder_layer = single_autoencoder.layers[-1] # create the decoder model single_decoder = Model(encoded_input, decoder_layer(encoded_input)) # First, we'll configure our model to use a per-pixel binary crossentropy loss, and the RMSprop optimizer: # Binary Cross Entropy = Binomial Cross Entropy = Special Case of Multinomial Cross Entropy single_autoencoder.compile(optimizer=RMSprop(), loss='binary_crossentropy') # ### Train/load model # single_autoencoder = keras.models.load_model('models/single_autoencoder.h5') single_autoencoder.fit(x_train, x_train, epochs=50, batch_size=batch_size, shuffle=False, verbose=0, validation_split=0.1, callbacks=[TensorBoard(log_dir='/tmp/autoencoder')]) # + # single_autoencoder.save('models/single_autoencoder.h5') # - score = single_autoencoder.evaluate(x_val, x_val, verbose=0) print(score) # + # plot_model(single_autoencoder, to_file='images/single_autoencoder.png', show_shapes=True, show_layer_names=True, rankdir='LR') # - Image("images/single_autoencoder.png") # After 50 epochs, the autoencoder seems to reach a stable train/test loss value of about {{score}}. We can try to visualize the reconstructed inputs and the encoded representations. We will use Matplotlib. encoded_imgs = single_encoder.predict(x_val) # decoded_imgs = single_decoder.predict(encoded_imgs) decoded_imgs = single_autoencoder.predict(x_val) n = 10 plt.figure(figsize=(20, 6)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_val[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # # Stacked Autoencoder ######## constants for stacked autoencoder ############ encoding_dim1 = 128 encoding_dim2 = 64 encoding_dim3 = 32 decoding_dim1 = 64 decoding_dim2 = 128 decoding_dim3 = input_dim epochs = 100 batch_size = 256 # + input_img = Input(shape=(input_dim,)) encoded = Dense(encoding_dim1, activation='relu')(input_img) encoded = Dense(encoding_dim2, activation='relu')(encoded) encoded = Dense(encoding_dim3, activation='relu')(encoded) decoded = Dense(decoding_dim1, activation='relu')(encoded) decoded = Dense(decoding_dim2, activation='relu')(decoded) decoded = Dense(decoding_dim3, activation='sigmoid')(decoded) # - stacked_autoencoder = Model(input_img, decoded) stacked_autoencoder.compile(optimizer=RMSprop(), loss='binary_crossentropy') stacked_autoencoder = keras.models.load_model('models/stacked_autoencoder.h5') #stacked_autoencoder.fit(x_train, x_train, # epochs=epochs, # batch_size=batch_size, # shuffle=False, # validation_split=0.1) # Save the model # + # stacked_autoencoder.save('models/stacked_autoencoder.h5') # - score = stacked_autoencoder.evaluate(x_val, x_val, verbose=0) print(score) # The result is slightly better than for single autoencoder. # + # plot_model(stacked_autoencoder, to_file='images/stacked_autoencoderTD.png', show_shapes=True, show_layer_names=True, rankdir='LR') # - Image("images/stacked_autoencoderTD.png") decoded_imgs = stacked_autoencoder.predict(x_val) n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_val[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # # Denoising Data noise_factor = 0.5 # + x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) x_val_noisy = x_val + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_val.shape) # re-normalization by clipping to the intervall (0,1) x_train_noisy = np.clip(x_train_noisy, 0., 1.) x_val_noisy = np.clip(x_val_noisy, 0., 1.) # - n = 10 plt.figure(figsize=(20, 2)) for i in range(n): ax = plt.subplot(1, n, i + 1) plt.imshow(x_val_noisy[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # Stick with the stack denoising_autoencoder = Model(input_img, decoded) denoising_autoencoder.compile(optimizer=RMSprop(), loss='binary_crossentropy') # ### Train or load a stacked denoising autoencoder # denoising_autoencoder = keras.models.load_model('models/denoising_autoencoder.h5') denoising_autoencoder.fit(x_train_noisy, x_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_split=0.1, verbose=0, callbacks=[TensorBoard(log_dir='/tmp/autoencoder')]) fig = plt.figure(figsize=(20,10)) plt.plot(denoising_autoencoder.history.history['val_loss']) plt.plot(denoising_autoencoder.history.history['loss']) plt.title('model loss for stacked denoising autoencoder') plt.ylabel('loss') plt.xlabel('epochs') plt.legend(['valid', 'train'], loc='lower right') plt.show() decoded_imgs = denoising_autoencoder.predict(x_val_noisy) n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_val[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # # Compare results # ## Classification of noisy data score = model.evaluate(x_val_noisy, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # ## Classification of denoised data score = model.evaluate(decoded_imgs, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # The difference in accuracy is huge. The autoencoder architecture is highly recommendable for denoising signals. # # Train with sparsity constraint on hidden layer (not for presentation) # So what does all this mean for sparsity? How does the rectifier activation function allow for sparsity in the hidden units? If the hidden units are exposed to a range of input values it makes sense that the rectifier activation function should lead to more ‘real zeros’ as we sweep across possible inputs. In other words, less neurons in our network would activate because of the limitations imposed by the rectifier activation function. https://www.quora.com/What-does-it-mean-that-activation-functions-like-ReLUs-in-NNs-induce-sparsity-in-the-hidden-units # + input_img = Input(shape=(input_dim,)) # add a Dense layer with a L1 activity regularizer encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(input_img) decoded = Dense(input_dim, activation='sigmoid')(encoded) sparse_autoencoder = Model(input_img, decoded) sparse_encoder = Model(input_img, encoded) # - encoded_input = Input(shape=(encoding_dim,)) # retrieve the last layer of the autoencoder model decoder_layer = sparse_autoencoder.layers[-1] # create the decoder model sparse_decoder = Model(encoded_input, decoder_layer(encoded_input)) # Sparse Autoencoder sparse_autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') sparse_autoencoder = keras.models.load_model('models/sparse_autoencoder.h5') sparse_autoencoder.fit(x_train, x_train, epochs=20, batch_size=batch_size, validation_data=(x_val, x_val), callbacks=[TensorBoard(log_dir='/tmp/autoencoder')]) # + # sparse_autoencoder.save('models/sparse_autoencoder.h5') # - score = sparse_autoencoder.evaluate(x_val, x_val, verbose=0) print(score) encoded_imgs = sparse_encoder.predict(x_val) # decoded_imgs = single_decoder.predict(encoded_imgs) decoded_imgs = sparse_decoder.predict(encoded_imgs) n = 10 plt.figure(figsize=(20, 6)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_val[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() encoded_imgs.mean() # ## Test the Vanilla Autoencoder with a simple Neural Net (not for presentation) single_ae_test_model = Sequential() # single_ae_test_model.add(Dense(16, activation='relu', input_shape=(36,))) # single_ae_test_model.add(Dropout(0.2)) single_ae_test_model.add(Dense(num_classes, activation='softmax', input_shape=(encoding_dim,))) single_ae_test_model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) single_ae_test_model = keras.models.load_model('models/single_ae_test_model.h5') # single_ae_test_model.fit(single_encoder.predict(x_train), y_train, # batch_size=batch_size, # epochs=40, # verbose=1, # validation_data=(single_encoder.predict(x_val), y_val)) single_ae_test_model.save('models/single_ae_test_model.h5') score = single_ae_test_model.evaluate(single_encoder.predict(x_val), y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # ## Test the Sparse Autoencoder with a simple Neural Net (not for presentation) sparse_ae_test_model = Sequential() # single_ae_test_model.add(Dense(16, activation='relu', input_shape=(36,))) # single_ae_test_model.add(Dropout(0.2)) sparse_ae_test_model.add(Dense(num_classes, activation='softmax', input_shape=(encoding_dim,))) sparse_ae_test_model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) # sparse_ae_test_model = keras.models.load_model('models/sparse_ae_test_model.h5') sparse_ae_test_model.fit(sparse_encoder.predict(x_train), y_train, batch_size=batch_size, epochs=40, verbose=1, validation_data=(sparse_encoder.predict(x_val), y_val)) sparse_ae_test_model.save('models/sparse_ae_test_model.h5') score = sparse_ae_test_model.evaluate(sparse_encoder.predict(x_val), y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # # Additional Information and Footage # ## Cross Entropy # ![Cross Entropy](images\2017-12-03 10_42_19-Machine Learning_ Should I use a categorical cross entropy or binary cross entro.png) # ## Batch Size # Batch size defines number of samples that going to be propagated through the network. # # For instance, let's say you have 1050 training samples and you want to set up batch_size equal to 100. Algorithm takes first 100 samples (from 1st to 100th) from the training dataset and trains network. Next it takes second 100 samples (from 101st to 200th) and train network again. We can keep doing this procedure until we will propagate through the networks all samples. The problem usually happens with the last set of samples. In our example we've used 1050 which is not divisible by 100 without remainder. The simplest solution is just to get final 50 samples and train the network. # # Advantages: # # * It requires less memory. Since you train network using less number of samples the overall training procedure requires less memory. It's especially important in case if you are not able to fit dataset in memory. # * Typically networks trains faster with mini-batches. That's because we update weights after each propagation. In our example we've propagated 11 batches (10 of them had 100 samples and 1 had 50 samples) and after each of them we've updated network's parameters. If we used all samples during propagation we would make only 1 update for the network's parameter. # # Disadvantages: # # * The smaller the batch the less accurate estimate of the gradient. In the figure below you can see that mini-batch (green color) gradient's direction fluctuates compare to the full batch (blue color). # ![https://stats.stackexchange.com/questions/153531/what-is-batch-size-in-neural-network](images\lU3sx.png) # to be done: # - check various encoders with different numbers of hidden layers # - feature extraction # + [markdown] tags=["dropout"] # # Dropout # - # Deep neural nets with a large number of parameters are very powerful machine learning # systems. However, overfitting is a serious problem in such networks. Large networks are also # slow to use, making it difficult to deal with overfitting by combining the predictions of many # different large neural nets at test time. Dropout is a technique for addressing this problem. # The key idea is to randomly drop units (along with their connections) from the neural # network during training. This prevents units from co-adapting too much. During training, # dropout samples from an exponential number of different “thinned” networks. At test time, # it is easy to approximate the effect of averaging the predictions of all these thinned networks # by simply using a single unthinned network that has smaller weights. This significantly # reduces overfitting and gives major improvements over other regularization methods. We # show that dropout improves the performance of neural networks on supervised learning # tasks in vision, speech recognition, document classification and computational biology, # obtaining state-of-the-art results on many benchmark data sets. # For more information on dropout visit http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf
misc/MNIST_Autoencoder_experimental.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Ejercicio 1 # # Crea una función llamada ejercicio1, que recibe la ruta donde se encuentra un dataset y devuelve una DataFrame con los datos que hay en el dataset. Para comprobar esta función utiliza el dataset `titanic.csv` que se incluye en esta actividad. # + import pandas as pd def ejercicio1(csv_path): return pd.read_csv(csv_path) df_titanic = ejercicio1('titanic.csv') df_titanic # - # ## Ejercicio 2 # # Crea otra función llamada ejercicio2. Esta función recibe un único argumento que es un dataframe. En concreto debe recibir el dataframe que se ha obtenido de leer el dataset `titanic.csv`. Esta función devolverá otro dataset que incluya únicamente a los pasajeros menores de 35 años y que viajaban en 3ª clase. def ejercicio2(df): filtro = lambda item: item['Age'] < 35 and item['Pclass'] == 3 return df[df.apply(filtro, axis=1)] df_titanic_filtrado = ejercicio2(df_titanic) df_titanic_filtrado.describe() # ## Ejercicio 3 # # Crea una función llamada ejercicio3, que recibiendo como argumento el dataframe de salida del ejercicio 2, calcule el porcentaje de pasajeros menores de 35 años de 3ª clase que sobrevivieron. def ejercicio3(df_original): df = ejercicio2(df_original) filtro = lambda item: item['Survived'] == 1 df_filtrado = df[df.apply(filtro, axis=1)] return (df_filtrado['Survived'].count() / df['Survived'].count()) * 100 # Percentage ejercicio3(df_titanic) # ## Ejercicio 4 # # Implementa una función llamada ejercicio4 que recibiendo el dataframe con los datos del Titanic, devuelva en una tupla el porcentaje de hombres y mujeres que viajaban en el Titanic, redondeados al segundo decimal. def ejercicio4(df): filtro_mujeres = lambda item: item['Sex'] == 'female' filtro_hombres = lambda item: item['Sex'] == 'male' df_female = df[df.apply(filtro_mujeres, axis=1)] percentage_female = (df_female['Sex'].count() / df['Sex'].count()) * 100 df_male = df[df.apply(filtro_hombres, axis=1)] percentage_male = (df_male['Sex'].count() / df['Sex'].count()) * 100 return round(percentage_male, 2), round(percentage_female, 2) ejercicio4(df_titanic) # ## Ejercicio 5 # # # Implementa una función llamada ejercicio5 que recibiendo el dataframe con los datos del Titanic, devuelva en una lista el número de pasajeros que viajaban en 1ª, 2ª y 3ª clase. def ejercicio5(df): filtro_1 = lambda item: item['Pclass'] == 1 filtro_2 = lambda item: item['Pclass'] == 2 filtro_3 = lambda item: item['Pclass'] == 3 df1 = df[df.apply(filtro_1, axis=1)] num1 = df1['Pclass'].count() df2 = df[df.apply(filtro_2, axis=1)] num2 = df2['Pclass'].count() df3 = df[df.apply(filtro_3, axis=1)] num3 = df3['Pclass'].count() return [num1, num2, num3] ejercicio5(df_titanic)
cursoPython/activadades/4/Actividad_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import numpy as np import statsmodels.api as sm from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split df = pd.read_csv("NBA_19_records.csv") df.head() df = df[['salary_2019to2020','age', 'gp','min','pts','fg_prc','three_pnt_prc','ft_prc','reb','ast', 'tov','plusminus']] df.head() df = df.rename(columns={"salary_2019to2020":"y","age":"X1", "gp":"X2","min":"X3","pts":"X4","fg_prc":"X5","three_pnt_prc":"X6","ft_prc":"X7","reb":"X8","ast":"X9", "tov":"X10","plusminus":"X11"}) df.head() new_df = df[['y', 'X1', 'X4', 'X8', 'X9', 'X11']] new_df print(new_df.max()) print(new_df.min()) # + # Create the bins in which Data will be held # Bins = [90K-5M, 6M-10M, 11M-15M, 16M-20M, 21M-25M, 26M-30M, 31M-35M, 36M-40M, 41M-45M] bins = [90000, 5000000, 10000000, 15000000, 20000000,25000000,30000000,35000000, 40000000, 45999999] # Create the names for the four bins group_names = ["90K-5M", "6M-10M", "11M-15M", "16M-20M", "21M-25M", "26M-30M", "31M-35M", "36M-40M", "40M-45M"] print(len (bins)) print(len(group_names)) # - new_df["Salary_Range"] = pd.cut(new_df["y"], bins, labels=group_names) new_df X = new_df.drop(columns=['y', 'Salary_Range']) y = new_df['y'] print(X.shape, y.shape) X.shape X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) model = LinearRegression(normalize=False) # + model.fit(X_train, y_train) training_score = model.score(X_train, y_train) testing_score = model.score(X_test, y_test) print(f"Training Score: {training_score}") print(f"Testing Score: {testing_score}") # - model.coef_ plt.scatter(model.predict(X_train), model.predict(X_train) - y_train, c="blue", label="Training Data") plt.scatter(model.predict(X_test), model.predict(X_test) - y_test, c="orange", label="Testing Data") plt.legend() plt.hlines(y=0, xmin=y.min(), xmax=y.max()) plt.title("Residual Plot") model.predict(np.array([34,10.8,6.5,1.2,-1.2]).reshape(1, -1))
.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 import sys sys.path.append("/home/evangelos/workspace/Synthesis/FlowMonitoringAndAnalysis/") import matplotlib.pyplot as plt import pytorch_lightning as pl from src.models import simple_model from src.utils import data_generators gen = data_generators.PRBS_GEN(2048,3) signal = gen.random_signal() plt.plot(signal[:,2]) prbs = gen.prbs() plt.plot(prbs[:,2]) import torch.nn as nn class Encoder(nn.Module): def __init__(self, length, channels, hidden_dim): super(Encoder, self).__init__() self.conv1 = nn.Conv1d(channels, 32, 3) self.conv2 = nn.Conv1d(32 , 64, 3) self.pool = nn.AdaptiveMaxPool1d(32) self.lin1 = nn.Linear(32, 16) self.lin2 = nn.Linear(16, hidden_dim) def forward(self,x): print(x.shape) x = self.conv1(x) x = self.conv2(x) x = self.pool(x) print(x.shape) x = self.lin1(x) x = self.lin2(x) return x class Decoder(nn.Module): def __init__(self, length, channels, hidden_dim): super(Decoder, self).__init__() self.lin1 = nn.Linear(4, 32) self.lin2 = nn.Linear(32, 16) self.pool = nn.MaxUnpool1d(3) self.conv1 = nn.ConvTranspose1d(64 , 32, 3) self.conv2 = nn.ConvTranspose1d(32, channels, 3) def forward(self,x): x = self.lin1(x) x = self.lin2(x) x = self.pool(x,0) print(x.shape) x = self.conv1(x) x = self.conv2(x) print(x.shape) return x encoder = Encoder(2048, 3, 4) decoder = Decoder(2048, 3, 4) latent = encoder(torch.Tensor(prbs).transpose_(0,1).unsqueeze(0)) latent.shape decoder(latent) # + import pytorch_lightning as pl class AE(pl.LightningModule): # model def __init__(self, length, channels, hidden_dim): self.conv1 = nn.Conv1d(channels, 32, 3) self.conv2 = nn.Conv1d(32 , 64, 3) self.pool = nn.AdaptiveMaxPool1d(128) self.lin1 = nn.Linear(128, 64) self.lin2 = nn.Linear(64, hidden_dim) # computations def forward(self, x): # training loop def training_step(self, batch, batch_idx): # validation loop def validation_step(self, batch, batch_idx): def validation_end(self, outputs): # test loop def test_step(self, batch, batch_idx): def test_epoch_end(self, outputs): # optimizer def configure_optimizers(self): # data def prepare_data(self): def train_dataloader(self): def val_dataloader(self): def test_dataloader(self): # -
notebooks/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A study of Object Detection models by [dividiti](http://dividiti.com) # ## Table of Contents # 1. [Overview](#overview) # 1. [Platform](#platform) # 1. [Settings](#settings) # 1. [Get experimental data](#get_data) # 1. [Access experimental data](#access_data) # 1. [Plot experimental data](#plot_data) # 1. [Plot accuracy](#plot_accuracy) # 1. [Plot performance](#plot_performance) # 1. [Plot exploration](#plot_exploration) # <a id="overview"></a> # ## Overview # This Jupyter Notebook studies performance (execution time per image, images per seconds) vs accuracy (mAP, Recall) of several Object Detection models on different size objects (large, medium and small). The experiments are performed via TensorFlow with several execution options on the CPU and the GPU. # | Model | Unique CK Tags (`<tags>`) | Is Custom? | mAP in % | # | --- | --- | --- | --- | # | [`faster_rcnn_nas_lowproposals_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `rcnn,nas,lowproposals,vcoco` | 0 | 44.340195 | # | [`faster_rcnn_resnet50_lowproposals_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `rcnn,resnet50,lowproposals` | 0 | 24.241037 | # | [`faster_rcnn_resnet101_lowproposals_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `rcnn,resnet101,lowproposals` | 0 | 32.594327 | # | [`faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `rcnn,inception-resnet-v2,lowproposals` | 0 | 36.520117 | # | [`faster_rcnn_inception_v2_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `rcnn,inception-v2` | 0 | 28.309626 | # | [`ssd_inception_v2_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `ssd,inception-v2` | 0 | 27.765988 | # | [`ssd_mobilenet_v1_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `ssd,mobilenet-v1,non-quantized,mlperf,tf` | 0 | 23.111170 | # | [`ssd_mobilenet_v1_quantized_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md)| `ssd,mobilenet-v1,quantized,mlperf,tf` | 0 | 23.591693 | # | [`ssd_mobilenet_v1_fpn_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `ssd,mobilenet-v1,fpn` | 0 | 35.353170 | # | [`ssd_resnet_50_fpn_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `ssd,resnet50,fpn` | 0 | 38.341120 | # | [`ssdlite_mobilenet_v2_coco`](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) | `ssdlite,mobilenet-v2,vcoco` | 0 | 24.281540 | # | [`yolo_v3_coco`](https://github.com/YunYang1994/tensorflow-yolov3) | `yolo-v3` | 1 | 28.532508 | # <a id="platform"></a> # ## Platform # See our [Docker container](https://github.com/ctuning/ck-object-detection/blob/master/docker/object-detection-tf-py.tensorrt.ubuntu-18.04/README.md) for more information on the software configuration. # <a id="CPU info"></a> # ### CPU # - Model: # - Intel Xeon E5-2650 v3 # - Frequency: # - 2.3 GHz # - Number of physical cores: # - 10 # - Number of virtual cores (hyperthreading): # - 20 # - RAM: # - 32 GB # - OS: # - Ubuntu 16.04 LTS Linux # <a id="GPU info"></a> # ### GPU # - Model: # - NVIDIA GeForce GTX 1080 # - Frequency: # - 1.6 GHz # - RAM: # - 8 GB # - CUDA version: # - 10.2 # - Driver version: # - 430.14 # <a id="settings"></a> # ## Settings # **NB:** Please ignore this section if you are not interested in re-running or modifying this notebook. # ### Includes # #### Standard import os import sys import json import re # #### Scientific # If some of the scientific packages are missing, please install them using: # ```bash # $ python -m pip install jupyter pandas numpy matplotlib --user # ``` import IPython as ip import pandas as pd import numpy as np import matplotlib as mp import seaborn as sb print ('IPython version: %s' % ip.__version__) print ('Pandas version: %s' % pd.__version__) print ('NumPy version: %s' % np.__version__) print ('Matplotlib version: %s' % mp.__version__) print ('Seaborn version: %s' % sb.__version__) from IPython.display import Image, display def display_in_full(df): pd.options.display.max_columns = len(df.columns) pd.options.display.max_rows = len(df.index) display(df) import matplotlib.pyplot as plt from matplotlib import cm # %matplotlib inline default_colormap = cm.autumn default_fontsize = 16 default_barwidth = 0.8 default_figwidth = 24 default_figheight = 3 default_figdpi = 200 default_figsize = [default_figwidth, default_figheight] if mp.__version__[0]=='2': mp.style.use('classic') mp.rcParams['figure.max_open_warning'] = 200 mp.rcParams['figure.dpi'] = default_figdpi mp.rcParams['font.size'] = default_fontsize mp.rcParams['legend.fontsize'] = 'medium' save_fig_ext = 'png' save_fig_dir = os.path.join(os.path.expanduser("~"), 'omnibench') if not os.path.exists(save_fig_dir): os.mkdir(save_fig_dir) from pprint import pprint # #### Collective Knowledge # If CK is not installed, please install it using: # ```bash # $ python -m pip install ck --user # ``` import ck.kernel as ck print ('CK version: %s' % ck.__version__) # <a id="get_data"></a> # ## Get the experimental data # Download experimental data and add CK repositories as follows: # ```bash # $ wget https://www.dropbox.com/s/0mxkvlstico349n/ckr-medium-object-detection-accuracy.zip # $ ck add repo --zip=ckr-medium-object-detection-accuracy.zip # # $ wget https://www.dropbox.com/s/zy68dsmp1yzv703/ckr-medium-object-detection-performance-docker.zip # $ ck add repo --zip=ckr-medium-object-detection-performance-docker.zip # # $ wget https://www.dropbox.com/s/7829e4zmmbgkqyu/ckr-medium-object-detection-performance-native.zip # $ ck add repo --zip=ckr-medium-object-detection-performance-native.zip # ``` # + repo_uoa = 'medium-object-detection-accuracy' print ("*"*80) print (repo_uoa) print ("*"*80) # !ck list $repo_uoa:experiment:* | sort print ("") perf_docker_repo_uoa = 'medium-object-detection-performance-docker' print ("*"*80) print (perf_docker_repo_uoa) print ("*"*80) # !ck list $perf_docker_repo_uoa:experiment:* | sort print ("") perf_native_repo_uoa = 'medium-object-detection-performance-native' print ("*"*80) print (perf_native_repo_uoa) print ("*"*80) # !ck list $perf_native_repo_uoa:experiment:* | sort # - # <a id="access_data"></a> # ## Access the experimental data def get_experimental_results(repo_uoa, module_uoa='experiment', tags='', accuracy=True): r = ck.access({'action':'search', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'tags':tags}) #pprint (r) if r['return']>0: print('Error: %s' % r['error']) exit(1) experiments = r['lst'] dfs = [] for experiment in experiments: data_uoa = experiment['data_uoa'] r = ck.access({'action':'list_points', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'data_uoa':data_uoa}) pipeline_file_path = os.path.join(r['path'], 'pipeline.json') with open(pipeline_file_path) as pipeline_file: pipeline_data_raw = json.load(pipeline_file) weights_env = pipeline_data_raw['dependencies']['weights']['dict']['env'] image_width = np.int64(weights_env.get('CK_ENV_TENSORFLOW_MODEL_DEFAULT_WIDTH',-1)) image_height = np.int64(weights_env.get('CK_ENV_TENSORFLOW_MODEL_DEFAULT_HEIGHT',-1)) tags = r['dict']['tags'] #print (tags) for point in r['points']: point_file_path = os.path.join(r['path'], 'ckp-%s.0001.json' % point) with open(point_file_path) as point_file: point_data_raw = json.load(point_file) #pprint (point_data_raw['choices']['env']) characteristics_list = point_data_raw['characteristics_list'] num_repetitions = len(characteristics_list) #platform = point_data_raw['features']['platform']['platform']['model'] if np.int64(point_data_raw['choices']['env'].get('CK_ENABLE_BATCH',-1))==1: batch_enabled = True batch_size = np.int64(point_data_raw['choices']['env'].get('CK_BATCH_SIZE',-1)) batch_count = np.int64(point_data_raw['choices']['env'].get('CK_BATCH_COUNT',-1)) else: batch_enabled = False batch_size = 1 batch_count = np.int64(point_data_raw['choices']['env'].get('CK_BATCH_COUNT',-1)) * \ np.int64(point_data_raw['choices']['env'].get('CK_BATCH_SIZE',-1)) characteristics = characteristics_list[0] if accuracy: data = [ { 'model': tags[0], 'backend':'cuda', 'batch_size': batch_size, 'batch_count': batch_count, 'batch_enabled': batch_enabled, 'image_height': image_height, 'image_width': image_width, 'num_reps':1, # runtime characteristics 'Recall': characteristics['run'].get('recall',0)*100, 'mAP': characteristics['run'].get('mAP',0)*100, 'mAP_large': characteristics['run']['metrics'].get('DetectionBoxes_Recall/AR@100 (large)', 0)*100, 'mAP_medium': characteristics['run']['metrics'].get('DetectionBoxes_Recall/AR@100 (medium)', 0)*100, 'mAP_small': characteristics['run']['metrics'].get('DetectionBoxes_Recall/AR@100 (small)', 0)*100, } ] # print(data[0]['model']) else: # performance ####### this conversion is still needed because some of the result have the old naming convention backend = 'default' trt = point_data_raw['choices']['env'].get('CK_ENABLE_TENSORRT',0) trt_dyn = point_data_raw['choices']['env'].get('CK_TENSORRT_DYNAMIC',0) if trt_dyn == '1': backend = 'tensorrt-dynamic' elif trt == '1': backend = 'tensorrt' elif tags[0] == 'tensorrt' or tags[0] =='source-cuda': backend = 'cuda' elif tags[0] == 'tf-src-cpu' or tags[0] =='source-cpu': backend = 'cpu' elif tags[0] == 'tf-prebuild-cpu' or tags[0] == 'prebuilt-cpu': backend = 'cpu-prebuilt' else: backend = tags[0] model = tags[1] data = [ { 'model': model, 'backend': backend, 'batch_size': batch_size, 'batch_count': batch_count, 'batch_enabled': batch_enabled, 'image_height': image_height, 'image_width': image_width, 'num_reps' : num_repetitions, # statistical repetition 'repetition_id': repetition_id, # runtime characteristics 'avg_fps': characteristics['run'].get('avg_fps', 'n/a')*batch_size, 'avg_time_ms': characteristics['run']['avg_time_ms']/batch_size, 'graph_load_time_ms': characteristics['run']['graph_load_time_s']*1e+3, 'images_load_time_avg_ms': characteristics['run']['images_load_time_avg_s']*1e+3, } for (repetition_id, characteristics) in zip(range(num_repetitions), characteristics_list) ] index = [ 'model', 'backend', 'batch_size', 'batch_count', 'batch_enabled', 'image_height', 'image_width', 'num_reps' ] # Construct a DataFrame. df = pd.DataFrame(data) df = df.set_index(index) # Append to the list of similarly constructed DataFrames. dfs.append(df) if dfs: # Concatenate all thus constructed DataFrames (i.e. stack on top of each other). result = pd.concat(dfs) result.sort_index(ascending=True, inplace=True) else: # Construct a dummy DataFrame the success status of which can be safely checked. result = pd.DataFrame(columns=['success?']) return result # #!ck recache repo dfs = get_experimental_results(repo_uoa, accuracy=True) dfs_perf = get_experimental_results(perf_docker_repo_uoa, accuracy=False) dfs_perf_native = get_experimental_results(perf_native_repo_uoa, accuracy=False) display_in_full(dfs) display_in_full(dfs_perf) # <a id="plot_data"></a> # ## Plot the experimental data # <a id="plot_accuracy"></a> # ### Plot accuracy # + def plot_accuracy(df, groupby_level='batch_enabled', accuracy_metric=['mAP','mAP_large','mAP_medium','mAP_small'], save_fig=False, save_fig_name='accuracy.resizing.', resize_type=['internal','external'], title='', figsize=[default_figwidth, 8], rot=90, colormap=cm.autumn): # Bars. df_bar = pd.DataFrame( data=df[accuracy_metric].values, columns=accuracy_metric, index=pd.MultiIndex.from_tuples( tuples=[ (m,be,nr) for (m,b,bs,bc,be,ih,iw,nr) in df.index.values ], names=[ 'model','batch_enabled','num_reps' ] ) ) # Plot. mean = df_bar.groupby(level=df_bar.index.names[:-1]).mean() std = pd.Series() axes = mean \ .groupby(level=groupby_level) \ .plot(yerr=std, kind='bar', grid=True, rot=rot, figsize=figsize, width=default_barwidth, fontsize=default_fontsize, colormap=colormap) xlabel = 'Model' xtics = df_bar.index.get_level_values('model').drop_duplicates() ylabel = 'mAP %' for count, ax in enumerate(axes): # Title. ax.set_title('Accuracy with %s resizing' % resize_type[count]) # X label. ax.set_xlabel(xlabel) # X ticks. ax.set_xticklabels(xtics) # Y axis. ax.set_ylabel(ylabel) if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name+resize_type[count], save_fig_ext)) ax.figure.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') plot_accuracy(dfs, save_fig=True) # - # #### Resizing layer in the network # There are two types of resizing: __fixed__ and __keep_aspect_ratio__. # # The first one takes as input the image and returns an image of fixed dimensions, changing from network to network. # Using this layer are the following networks: # # - 'ssd_mobilenet_v1_fpn_coco', 640*640 # - 'faster_rcnn_nas_lowproposals_coco', 1200*1200 # - 'faster_rcnn_nas', 1200*1200 # - 'ssd_inception_v2_coco', 300*300 # - 'ssd_mobilenet_v1_coco', 300*300 # - 'ssd_mobilenet_v1_quantized_coco', 300*300 # - 'ssd_resnet_50_fpn_coco', 640*640 # - 'ssdlite_mobilenet_v2_coco', 300*300 # - 'yolo v3', 416*416 # # The second one behaviour is actually not completely clear, and have a minimum and maximum dimension of the output images. network using this layer are: # # - 'faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco', min: 600 max: 1024 # - 'faster_rcnn_inception_resnet_v2_atrous_coco', min: 600 max: 1024 # - 'faster_rcnn_resnet101_lowproposal_coco', min: 600 max: 1024 # - 'faster_rcnn_resnet50_lowproposals_coco', min: 600 max: 1024 # # From the analysis on the performance-accuracy benchmarks, it seems that the models using fixed resizing performs better than the one with the keep aspect ratio. # # YOLO-v3 is actually in the middle ground: its preprocessing is doing a fixed resize to 416 * 416, however the resize is done keeping the aspect ratio and padding, outside the graph. The graph doesn't perform any resizing, but takes 416 * 416 images as input. # + def plot_accuracy_compare_resizing(df_raw, resize_type=['internal','external'], unstack_level='batch_enabled', groupby_level='batch_size', accuracy_metric=['mAP','mAP_large','mAP_medium','mAP_small'], save_fig=False, save_fig_name='accuracy.resizing.internal_vs_external', title='', figsize=[default_figwidth, 8], rot=90): # Bars. df_bar = pd.DataFrame( data=df_raw[accuracy_metric].values, columns=accuracy_metric, index=pd.MultiIndex.from_tuples( tuples=[ (m,be,bs,nr) for (m,b,bs,bc,be,ih,iw,nr) in df_raw.index.values ], names=[ 'model','batch_enabled','batch_size','num_reps' ] ) ) for index, row in df_bar.iterrows(): (model, batch_enabled, batch_size, num_reps) = index if model == 'yolo-v3': # batch_size and num_reps are both 1 for accuracy experiments. df_bar.loc[(model, True, 1, 1)] = df_bar.loc[(model, False, 1, 1)][accuracy_metric] # Colormap. colornames = [ 'lightcoral','cyan','indianred','darkturquoise','brown','cadetblue','darkred','steelblue' ] colormap = mp.colors.ListedColormap(colornames, name='from_list', N=len(colornames)) # Plot. mean = df_bar.groupby(level=df_bar.index.names[:-1]).mean().unstack(unstack_level) std = df_bar.groupby(level=df_bar.index.names[:-1]).std().unstack(unstack_level) axes = mean \ .groupby(level=groupby_level) \ .plot(yerr=std, kind='bar', grid=True, rot=rot, figsize=figsize, width=default_barwidth, fontsize=default_fontsize, colormap=colormap) xlabel = 'Model' ylabel = 'mAP %' xtics = df_bar.index.get_level_values('model').drop_duplicates() for count, ax in enumerate(axes): # Title. ax.set_title('Compare resizing: %s vs %s' % (resize_type[0], resize_type[1])) # X label. ax.set_xlabel(xlabel) # X ticks. ax.set_xticklabels(xtics) # Y axis. ax.set_ylabel(ylabel) patches, labels = ax.get_legend_handles_labels() # Legend. labels = [x.strip('(') for x in labels] labels = [x.strip(')') for x in labels] ax.legend(patches, labels, loc='best', title='Metric, External resizing?') if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name, save_fig_ext)) ax.figure.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') plot_accuracy_compare_resizing(dfs, save_fig=True) # - # <a id="plot_performance"></a> # ### Plot performance # + def plot_performance_per_backend(df_raw, groupby_level='backend', unstack_level=['batch_size'], performance_metric=['avg_fps','avg_time_ms','graph_load_time_ms','images_load_time_avg_ms'], save_fig=False, save_fig_name='performance.backend.', title=None, figsize=[default_figwidth, 8], rot=90, colormap=cm.autumn): # Bars. df_bar = pd.DataFrame( data=df_raw[performance_metric].values, columns=performance_metric, index=pd.MultiIndex.from_tuples( tuples=[ (m,b,bs,be,nr) for (m,b,bs,bc,be,ih,iw,nr) in df_raw.index.values ], names=[ 'model','backend','batch_size','batch_enabled','num_reps' ] ) ) # Plot. mean = df_bar.groupby(level=df_bar.index.names[:-1]).mean().unstack(unstack_level) std = df_bar.groupby(level=df_bar.index.names[:-1]).std().unstack(unstack_level) axes = mean \ .groupby(level=groupby_level) \ .plot(yerr=std, kind='bar', grid=True, rot=rot, legend=False, figsize=figsize, width=default_barwidth, fontsize=default_fontsize, colormap=colormap) xlabel = 'Model' xtics = df_bar.index.get_level_values('model').drop_duplicates() ylabel = 'Images Per Second' for num, ax in enumerate(axes): # Title. ax.set_title('TensorFlow with the \'{}\' Backend'.format(axes.keys().to_numpy().item(num))) # X label. ax.set_xlabel(xlabel) # X ticks. ax.set_xticklabels(xtics) # Y axis. ax.set_ylabel(ylabel) # Legend. patches, labels = ax.get_legend_handles_labels() labels = [label[1] for label in [x.split(',') for x in labels]] labels = [x.strip(')') for x in labels] ax.legend(patches, labels, loc='best', title='Batch Size') # Save figure. if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name+axes.keys().to_numpy().item(num), save_fig_ext)) ax.figure.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') plot_performance_per_backend(dfs_perf, performance_metric=['avg_fps'], save_fig=True) # + def plot_performance_per_model(df_raw, groupby_level='model', unstack_level=['batch_size'], performance_metric=['avg_fps','avg_time_ms','graph_load_time_ms','images_load_time_avg_ms'], save_fig=False, save_fig_name='performance.model.', title=None, figsize=[default_figwidth, 8], rot=0, colormap=cm.autumn): # Bars. df_bar = pd.DataFrame( data=df_raw[performance_metric].values, columns=performance_metric, index=pd.MultiIndex.from_tuples( tuples=[ (m,b,bs,be,nr) for (m,b,bs,bc,be,ih,iw,nr) in df_raw.index.values ], names=[ 'model','backend','batch_size','batch_enabled','num_reps' ] ) ) # Plot. mean = df_bar.groupby(level=df_bar.index.names[:-1]).mean().unstack(unstack_level) std = df_bar.groupby(level=df_bar.index.names[:-1]).std().unstack(unstack_level) axes = mean \ .groupby(level=groupby_level) \ .plot(yerr=std, kind='bar', grid=True, rot=rot, figsize=figsize, width=default_barwidth, fontsize=default_fontsize, legend=False, colormap=colormap) xlabel = 'Backend' xtics = df_bar.index.get_level_values('backend').drop_duplicates() ylabel = 'Images Per Second' for num, ax in enumerate(axes): # Title. ax.set_title(axes.keys().to_numpy().item(num)) # X label. ax.set_xlabel(xlabel) # X ticks. ax.set_xticklabels(xtics) # Y axis. ax.set_ylabel(ylabel) # Legend. patches, labels = ax.get_legend_handles_labels() labels = [label[1] for label in [x.split(',') for x in labels]] labels = [x.strip(')') for x in labels] ax.legend(patches, labels, loc='best', title='Batch Size') # Save figure. if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name+axes.keys().to_numpy().item(num), save_fig_ext)) ax.figure.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') plot_performance_per_model(dfs_perf, performance_metric=['avg_fps'], save_fig=True) # + def plot_performance_compare_backends(df_raw, backends=['cuda','tensorrt-dynamic'], groupby_level='batch_enabled', unstack_level = ['backend','batch_size'], performance_metric=['avg_fps','avg_time_ms','graph_load_time_ms','images_load_time_avg_ms'], save_fig=False, save_fig_name='performance.backend.cuda_vs_tensorrt-dynamic', title='', figsize=[default_figwidth, 8], rot=90): # Bars. df_bar = pd.DataFrame( data=df_raw[performance_metric].values, columns=performance_metric, index=pd.MultiIndex.from_tuples( tuples=[ (m,b,bs,be,nr) for (m,b,bs,bc,be,ih,iw,nr) in df_raw.index.values ], names=[ 'model','backend','batch_size','batch_enabled','num_reps' ] ) ) df_bar = df_bar.query('backend in @backends') # Colormap. colormap = mp.colors.ListedColormap(['darkred','steelblue'], name='from_list', N=12) # Plot. mean = df_bar.groupby(level=df_bar.index.names[:-1]).mean().unstack(unstack_level[1]).unstack(unstack_level[0]) std = df_bar.groupby(level=df_bar.index.names[:-1]).std().unstack(unstack_level[1]).unstack(unstack_level[0]) axes = mean \ .groupby(level=groupby_level) \ .plot(yerr=std, kind='bar', grid=True, rot=rot, legend=False, figsize=figsize, width=default_barwidth, fontsize=default_fontsize, colormap=colormap) xlabel = 'Model' xtics = df_bar.index.get_level_values('model').drop_duplicates() ylabel = 'Images Per Second' for num, ax in enumerate(axes): # Title. ax.set_title('Compare Backends: \'%s\' vs \'%s\'' % (backends[0], backends[1])) # X label. ax.set_xlabel(xlabel) # X ticks. ax.set_xticklabels(xtics) # Y axis. ax.set_ylabel(ylabel) # Legend. patches, labels = ax.get_legend_handles_labels() labels = [label[2] for label in [x.split(',') for x in labels]] labels = [x.strip(')') for x in labels] ax.legend(patches[:2], labels[:2], loc='left', title='Backend') # Save figure. if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name+str(num), save_fig_ext)) ax.figure.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') plot_performance_compare_backends(dfs_perf, performance_metric=['avg_fps'], save_fig=True) plot_performance_compare_backends(dfs_perf, performance_metric=['avg_fps'], save_fig=True, backends=['cpu-prebuilt','cpu'], save_fig_name='performance.backend.cpu-prebuilt_vs_cpu') # + def plot_performance_compare_docker_vs_native(df_docker, df_native, groupby_level='batch_enabled', unstack_level=['batch_size','backend'], save_fig=False, save_fig_name='performance.docker_vs_native', title='', figsize=[default_figwidth, 8], rot=90): # Bars. df_docker = df_docker[df_docker.index.get_level_values('batch_enabled').isin([True])] df_bar = pd.merge( df_docker, df_native, how='inner', suffixes=('_docker', '_native'), on=[ 'model', 'backend', 'batch_size', 'batch_enabled', 'num_reps' ]) df_bar = df_bar[['avg_fps_docker', 'avg_fps_native']] df_bar['avg_fps_norm'] = df_bar['avg_fps_docker'] / df_bar['avg_fps_native'] df_bar = df_bar[['avg_fps_norm']] # Colormap. reds = ['red', 'indianred', 'brown', 'firebrick', 'maroon', 'darkred'] yellows = ['cornsilk', 'lemonchiffon', 'palegoldenrod', 'khaki', 'yellow', 'gold'] greens = ['palegreen', 'lightgreen', 'limegreen', 'green', 'forestgreen', 'darkgreen'] blues = ['cornflowerblue', 'royalblue', 'mediumblue', 'blue', 'navy', 'midnightblue'] purples = ['orchid', 'fuchsia', 'mediumorchid', 'darkviolet', 'purple', 'indigo'] colornames = reds + yellows + greens + blues + purples colormap = mp.colors.ListedColormap(colornames, name='from_list', N=len(colornames)) # Plot. mean = df_bar.groupby(level=df_bar.index.names[:-1]).mean().unstack(unstack_level[1]).unstack(unstack_level[0]) std = df_bar.groupby(level=df_bar.index.names[:-1]).std().unstack(unstack_level[1]).unstack(unstack_level[0]) axes = mean \ .groupby(level=groupby_level) \ .plot(yerr=std, kind='bar', grid=True, rot=rot, ylim=[0.0,1.201], figsize=figsize, width=default_barwidth, fontsize=default_fontsize, colormap=colormap) title = 'Compare Docker vs Native Performance (Normalized to Native) on 5 Models from the Pareto Frontier' xtics = df_bar.groupby(level=df_bar.index.names[:-1]).median().index.get_level_values('model').drop_duplicates() xlabel = 'Model' ylabel = '' for num, ax in enumerate(axes): # Title. ax.set_title(title) # X label. ax.set_xlabel(xlabel) # X ticks. ax.set_xticklabels(xtics) # Y axis. ax.set_ylabel(ylabel) # Shrink current axis by 20% box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) patches, labels = ax.get_legend_handles_labels() # Legend. labels = [x.strip('(avg_fps_norm,') for x in labels] labels = [x.strip(')') for x in labels] ax.legend(patches, labels, title='Backend, Batch Size', loc='center left', bbox_to_anchor=(1, 0.5),fontsize=default_fontsize) # Put a legend to the right of the current axis. if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name, save_fig_ext)) ax.figure.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') plot_performance_compare_docker_vs_native(dfs_perf, dfs_perf_native, rot=45, save_fig=True) # - # <a id="plot_exploration"></a> # ### Plot exploration # + # Plotting settings. model_to_color = { 'rcnn-inception-resnet-v2-lowproposals' : 'gold', 'rcnn-inception-v2' : 'goldenrod', 'rcnn-nas-lowproposals' : 'red', 'rcnn-resnet101-lowproposals' : 'brown', 'rcnn-resnet50-lowproposals' : 'orangered', 'ssd-mobilenet-v1-fpn' : 'cyan', 'ssd-inception-v2' : 'cadetblue', 'ssd-mobilenet-v1-non-quantized-mlperf' : 'deepskyblue', 'ssd-mobilenet-v1-quantized-mlperf' : 'royalblue', 'ssd-resnet50-fpn' : 'navy', 'ssdlite-mobilenet-v2' : 'violet', 'yolo-v3' : 'gray' } backend_to_marker = { 'tensorrt' : '1', 'tensorrt-dynamic' : '2', 'cuda' : '3', 'cpu' : '4', 'cpu-prebuilt' : '+' } resize_to_marker = { 'no-resize' : 'x', 'model-resize' : '*' } bs_to_size = { 1 : 1.0, 2 : 1.5, 4 : 2.0, 8 : 2.5, 16 : 3.0, 32 : 3.5 } ### not used anymore, left in case should change the policy # model_to_real_name = { # 'ssd-mobilenet-v1-fpn' : 'ssd_mobilenet_v1_fpn_coco', # 'rcnn-inception-resnet-v2-lowproposals' : 'faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco', # 'rcnn-nas-lowproposals' : 'faster_rcnn_nas_lowproposals_coco', # 'rcnn-resnet101-lowproposals' : 'faster_rcnn_resnet101_lowproposals_coco', # 'rcnn-inception-v2' : 'faster_rcnn_inception_resnet_v2_atrous_coco', # 'rcnn-nas-non-lowproposal' : 'faster_rcnn_nas', # 'ssd-inception-v2' : 'ssd_inception_v2_coco', # 'ssd-mobilenet-v1-non-quantized-mlperf' : 'ssd_mobilenet_v1_coco', # 'ssd-mobilenet-v1-quantized-mlperf' : 'ssd_mobilenet_v1_quantized_coco', # 'ssd-resnet50-fpn' : 'ssd_resnet_50_fpn_coco', # 'ssdlite-mobilenet-v2' : 'ssdlite_mobilenet_v2_coco', # 'rcnn-resnet50-lowproposals' : 'faster_rcnn_resnet50_lowproposals_coco', # 'yolo-v3' : 'yolo v3' # } import matplotlib.lines as mlines mark1 = mlines.Line2D([], [], color='black', marker='1', linestyle='None', markersize=5, label='tensorrt') mark2 = mlines.Line2D([], [], color='black', marker='2', linestyle='None', markersize=5, label='tensorrt-dynamic') mark3 = mlines.Line2D([], [], color='black', marker='3', linestyle='None', markersize=5, label='cuda') mark4 = mlines.Line2D([], [], color='black', marker='4', linestyle='None', markersize=5, label=' cpu') mark5 = mlines.Line2D([], [], color='black', marker='+', linestyle='None', markersize=5, label=' cpu-prebuilt') handles2 = [ mark1, mark2, mark3, mark4, mark5 ] #mark1 = mlines.Line2D([], [], color='black', marker='x', linestyle='None', # markersize=5, label='no resize') #mark2 = mlines.Line2D([], [], color='black', marker='*', linestyle='None', # markersize=5, label='model resize') #handles2 = [ mark1,mark2 ] # - def merge_performance_accuracy(df_performance, df_accuracy, performance_metric='avg_fps', accuracy_metric='mAP', ideal=False): df = df_performance[[performance_metric]] for metric in accuracy_metric: accuracy_list = [] for index, row in df.iterrows(): (model, backend, batch_size, batch_count, batch_enabled, image_height, image_width, num_reps) = index if ideal: accuracy = df_accuracy.loc[(model, 'cuda', 1, 5000, False, image_height, image_width, 1)][metric] else: resize = 'no-resize' if batch_size == 1 else 'model-resize' if resize == 'no-resize' or model == 'yolo-v3': accuracy = df_accuracy.loc[(model, 'cuda', 1, 5000, False, image_height, image_width, 1)][metric] else: accuracy = df_accuracy.loc[(model, 'cuda', 1, 5000, True, image_height, image_width, 1)][metric] accuracy_list.append(accuracy) # Assign to the value of accuracy_metric. kwargs = { metric : accuracy_list } df = df.assign(**kwargs) return df def finalize_plot(ax, xmin, xmax, xstep, ymin, ymax, ystep, save_fig, save_fig_name, accuracy_metric): # X axis. xlabel = 'Images Per Second' ax.set_xlabel(xlabel) ax.set_xlim(xmin, xmax) ax.set_xticks(np.arange(xmin, xmax, xstep)) for xtick in ax.xaxis.get_major_ticks(): xtick.label.set_fontsize(5) # Y axis. ylabel = accuracy_metric + ' %' ax.set_ylabel(ylabel) ax.set_ylim(ymin, ymax) ax.set_yticks(np.arange(ymin, ymax, ystep)) for ytick in ax.yaxis.get_major_ticks(): ytick.label.set_fontsize(5) # Legend. handles = [ mp.patches.Patch(color=color, label=model) for (model, color) in sorted(model_to_color.items()) ] handles += handles2 plt.legend(title='', handles=handles[::-1], loc='best', prop={'size': 5}) # Show with grid on. plt.grid(True) fig1 = plt.gcf() plt.show() plt.draw() # Save figure. if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name, save_fig_ext)) fig1.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') # + def plot_dse(ideal=False, performance_metric='avg_fps', accuracy_metric=['mAP'], xmin=0.00, xmax=85.01, xstep=5, ymin=18.00, ymax=46.01, ystep=4, save_fig=False, save_fig_name='dse.full'): fig = plt.figure(figsize=(8,4), dpi=default_figdpi) ax = fig.gca() if ideal: save_fig_name += '.ideal' ax.set_title('Full Space Exploration with Ideal Accuracy') else: ax.set_title('Full Space Exploration with Measured Accuracy') df_performance_accuracy = merge_performance_accuracy( dfs_perf, dfs, performance_metric=performance_metric, accuracy_metric=accuracy_metric, ideal=ideal) df = df_performance_accuracy df = df.groupby(level=df.index.names[:-1]).mean() #display_in_full(df) accuracy_metric=accuracy_metric[0] for index, row in df.iterrows(): (model, backend, batch_size, batch_count, batch_enabled, image_height, image_width) = index performance = row[performance_metric] accuracy = row[accuracy_metric] # Mark Pareto-optimal points. is_on_pareto = True for index1, row1 in df.iterrows(): is_no_slower = row1[performance_metric] >= row[performance_metric] is_no_less_accurate = row1[accuracy_metric] >= row[accuracy_metric] is_faster = row1[performance_metric] > row[performance_metric] is_more_accurate = row1[accuracy_metric] > row[accuracy_metric] if ((is_faster and is_no_less_accurate) or (is_more_accurate and is_no_slower)): is_on_pareto = False break # Select marker, color and size. marker = backend_to_marker[backend] color = model_to_color[model] size = 2 + 4*bs_to_size[batch_size] # Plot. ax.plot(performance, accuracy, marker, markerfacecolor=color, markeredgecolor=color, markersize=size) # Mark Pareto-optimal points with scaled black pluses. if is_on_pareto: ax.plot(performance, accuracy, 'o', markersize=4,markerfacecolor='black', markeredgecolor='black') finalize_plot(ax, xmin, xmax, xstep, ymin, ymax, ystep, save_fig, save_fig_name, accuracy_metric) # NB: The accuracy metric has to be an array of 1 element for this function. plot_dse (ideal=False, ystep=2, save_fig=True, save_fig_name='dse.full') plot_dse (ideal=True, ystep=2, save_fig=True, save_fig_name='dse.full') plot_dse (ideal=True, accuracy_metric=['mAP_small'], ymin=0, ymax=22.01, ystep=2, save_fig=True, save_fig_name='dse.full.small') plot_dse (ideal=True, accuracy_metric=['mAP_large'], ymin=30, ymax=78.01, ystep=4, save_fig=True, save_fig_name='dse.full.large') # + def plot_fastest(ideal=False, performance_metric='avg_fps', accuracy_metric=['mAP','mAP_large','mAP_medium','mAP_small'], xmin=0.0, xmax=85.01, xstep=5, ymin=[22,42,16,0], ymax=[46.01,78.01,56.01,22.01], ystep=[2,4,4,2], labelsize=5, ticksize=4, save_fig=False, save_fig_name='dse.fastest'): fig,ax = plt.subplots(2,2,sharex='col') if ideal: save_fig_name += '.ideal' fig.suptitle('Fastest Configuration with Ideal Accuracy') else: fig.suptitle('Fastest Configuration with Measured Accuracy') df_performance_accuracy = merge_performance_accuracy( dfs_perf, dfs, performance_metric=performance_metric, accuracy_metric=accuracy_metric, ideal=ideal) df_full = df_performance_accuracy for num,metric in enumerate (accuracy_metric): pos = (int(num/2),num%2) df = df_full[[metric,performance_metric]] df = df.groupby(level=df.index.names[:-1]).mean() df = df.groupby(level=df.index.names[:-4]).max() #display_in_full(df) points_to_plot=[] for index, row in df.iterrows(): (model,backend, batch_size) = index performance = row[performance_metric] accuracy = row[metric] plot = True # Analyze points of the same model. for index1, row1 in df.iterrows(): if index == index1: continue if index1[0] != model: continue is_faster = row1[performance_metric] > row[performance_metric] if is_faster: plot = False continue if plot: # no faster points have been found with the same model. # Select marker, color and size. marker = backend_to_marker[backend] color = model_to_color[model] size = 2 + 4*bs_to_size[batch_size] # Plot. ax[pos[0],pos[1]].plot(performance, accuracy, marker, markerfacecolor=color, markeredgecolor=color, markersize=size) ax[pos[0],pos[1]].grid(True) # X axis. xlabel = 'Images Per Second' ax[1,0].set_xlabel(xlabel, fontsize=labelsize) ax[1,0].set_xlim(xmin, xmax) ax[1,0].set_xticks(np.arange(xmin, xmax, xstep)) for xtick in ax[1,0].xaxis.get_major_ticks(): xtick.label.set_fontsize(ticksize) ax[1,1].set_xlabel(xlabel, fontsize=labelsize) ax[1,1].set_xlim(xmin, xmax) ax[1,1].set_xticks(np.arange(xmin, xmax, xstep)) for xtick in ax[1,1].xaxis.get_major_ticks(): xtick.label.set_fontsize(ticksize) # Y axis. ylabel = accuracy_metric[0]+' %' ax[0,0].set_ylabel(ylabel, fontsize=labelsize) ax[0,0].set_ylim(ymin[0], ymax[0]) ax[0,0].set_yticks(np.arange(ymin[0], ymax[0], ystep[0])) for ytick in ax[0,0].yaxis.get_major_ticks(): ytick.label.set_fontsize(ticksize) ylabel = accuracy_metric[1]+' %' ax[0,1].set_ylabel(ylabel, fontsize=labelsize) ax[0,1].set_ylim(ymin[1], ymax[1]) ax[0,1].set_yticks(np.arange(ymin[1], ymax[1], ystep[1])) for ytick in ax[0,1].yaxis.get_major_ticks(): ytick.label.set_fontsize(ticksize) ylabel = accuracy_metric[2]+' %' ax[1,0].set_ylabel(ylabel, fontsize=labelsize) ax[1,0].set_ylim(ymin[2], ymax[2]) ax[1,0].set_yticks(np.arange(ymin[2], ymax[2], ystep[2])) for ytick in ax[1,0].yaxis.get_major_ticks(): ytick.label.set_fontsize(ticksize) ylabel = accuracy_metric[3]+' %' ax[1,1].set_ylabel(ylabel, fontsize=labelsize) ax[1,1].set_ylim(ymin[3], ymax[3]) ax[1,1].set_yticks(np.arange(ymin[3], ymax[3], ystep[3])) for ytick in ax[1,1].yaxis.get_major_ticks(): ytick.label.set_fontsize(ticksize) # Legend. handles = [ mp.patches.Patch(color=color, label=model) for (model, color) in sorted(model_to_color.items()) ] handles += handles2 fig.legend(title='', handles=handles[::-1], loc='center right', borderaxespad=0.1, fontsize=4) plt.subplots_adjust(right=0.78) # Show with grid on. plt.grid(True) fig1 = plt.gcf() plt.show() plt.draw() # Save figure. if save_fig: save_fig_path = os.path.join(save_fig_dir, '%s.%s' % (save_fig_name, save_fig_ext)) fig1.savefig(save_fig_path, dpi=default_figdpi, bbox_inches='tight') plot_fastest(ideal=True, save_fig=True)
jnotebook/object-detection/jnotebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ##Import and define symbols import sympy as sp import numpy as np Ti = sp.Symbol('T_i'); k = sp.Symbol('k'); To = sp.Symbol('T_o'); Ti0 = sp.Symbol('T_i_0'); To0 = sp.Symbol('T_o_0'); ho = sp.Symbol('h_o'); hi = sp.Symbol('h_i'); r = sp.Symbol('r'); ro = sp.Symbol('r_o'); ri = sp.Symbol('r_i'); Cp = sp.Symbol('C_p'); rho = sp.Symbol('rho'); T = sp.Function('T')(r) U = sp.Function('U')(r) C1 = sp.Symbol('C1'); C2 = sp.Symbol('C2') # - # # Problem 2.1.1 in the problems manual # ![cylinder.png](attachment:cylinder.png) # ## Nomenclature table # | Nomenclature | Variable | Expression | # |--------------------------------------|----------|-------------------------| # | Temperature | T | | # | Radius | r | | # | Convective heat transfer coefficient | h | | # | Conductive heat transfer coefficient | k | | # | Biot number | Bi | $\frac{hR}{k}$ | # | Temperature fraction | $\phi$ | $\frac{T-T_o}{T_i-T_o}$ | # | Quantity "X" of internal fluid | $X_i$ | | # | Quantity "X" of external fluid | $X_o$ | | # ## Simplifying assumptions: # 1. Steady state; $\frac{dT}{dt} = 0$ # 2. Infinite symetric cylinder; $\frac{dT}{dz} = \frac{dT}{d\theta} = 0$; $T(r)$ # 3. No heat generation within the clinder; $q''' = 0$ # ## Differential conservation equation solution # The consititutive equation for cylindrical coordinates: # $$\rho c \frac{dT}{dt}= \frac{1}{r}\frac{d}{dr}(r\cdot k\frac{dT}{dr})+\frac{1}{r}\frac{d}{d\theta}(k\frac{dT}{d\theta})+\frac{d}{dz}(k\frac{dT}{dz})+q'''$$ # # When assumptions are applied: # # $$0 =\frac{d^2T}{dr^2}+\frac{1}{r}\frac{dT}{dr}$$ # The boundary conditions for convective heat transfer at the walls: # # $$\frac{dT}{dr}(r = r_o) = \frac{h_o}{k}[T_o - T(r = r_o)]$$ # # $$\frac{dT}{dr}(r = r_i) = \frac{-h_i}{k}[T_i - T(r = r_i)]$$ # Substituting the derivative of temperature $\frac{dT}{dr} = U(r)$ into the constitutive equation: # # $$0 = \frac{dU(r)}{dr} + \frac{1}{r}\cdot U(r)$$ # Seperating and integrating: # # $$U(r) = \frac{dT}{dr} = \frac{c_1}{r}$$ # # And again: # # $$T(r) = c_1\ln{r} + c_2$$ # Substituting in the temperature equations into the boundary conditions yields a system of two equations and unkowns $c_1, c_2$: # # $$\frac{c_1}{r_o} = \frac{h_o}{k}[T_o - (c_1\ln{r_o} + c_2)]$$ # # $$\frac{c_1}{r_i} = \frac{-h_i}{k}[T_i - (c_1\ln{r_i} + c_2)]$$ # + ## Solve DE #Define equation with U eqn = (sp.Derivative(U,r)+1/r*U) print('System differential equation with substitution for derivative of temperature:') display(eqn) #Solve DE for derivative of temperature (U) Diff_U = sp.dsolve(eqn, U) print('Expression for differential in temperature with respect to r:') display(Diff_U) #Redefine Temperature Diff_T = Diff_U.subs(U, sp.Derivative(T,r)) print('Differential equation for temperature:') display(Diff_T) #Solve for temperature Temp = sp.dsolve(Diff_T, T) print('Solved expression for temperature with integration constants:') display(Temp) # + #Define the two boundary conditions eqn1= ho/k*(To-(Temp.rhs.subs(r, ro)))-Diff_U.rhs.subs(r, ro) eqn2= -hi/k*(Ti-(Temp.rhs.subs(r, ri)))-Diff_U.rhs.subs(r, ri) print('First Equation') display(eqn1) print('Second Equation') display(eqn2) #Solve for c1 and c2 C1_ = sp.solve(eqn1,C1)[0] C2_ = sp.solve(eqn2,C2)[0] C1eq = C1_.subs(C2,C2_)-C1 C1_ = sp.simplify(sp.solve(C1eq,C1)[0]) C2_ = sp.simplify(C2_.subs(C1,C1_)) #Define biot numbers Bi_i = sp.Symbol('Bi_i') Bi_o = sp.Symbol('Bi_o') #substitute biot numbers into the equation C1_ = sp.simplify((C1_.subs(hi*ri, Bi_i*k)).subs(ho*ro, Bi_o*k)) C2_ = sp.simplify((C2_.subs(hi*ri, Bi_i*k)).subs(ho*ro, Bi_o*k)) print('C1 solved') display(C1_) print('C2 solved') display(C2_) # - # With $Bi = \frac{hR}{k}$ # Defining dimensionless parameter $\phi (r) = \frac{T(r)-T_o}{T_i-T_o}$ and solving for $\phi$ # # $$\phi(r) = \frac{c_1\ln{r}+c_2-T_o}{T_i-T_o}$$ # ## Investigating this behavior: # + ##Set some constants for r for a few cases #Thick wall vs thin wall import numpy as np import matplotlib.pyplot as plt r_i = 1 r_o = np.array([10, 5, 1.1]) #Investigate outside biot for constant inside biot Bi_i = 1 Bi_o = np.array([0.01, 1, 10]) T_i = 100 T_o = 200 for j, R_o in enumerate(r_o): rs = np.linspace(r_i, R_o, 100) phis = np.zeros((len(Bi_o), len(rs))) for k, Bi_out in enumerate(Bi_o): c1 = Bi_i*Bi_out*(T_i-T_o)/(Bi_i*Bi_out*np.log(r_i)-Bi_i*Bi_out*np.log(R_o)-Bi_i-Bi_out) c2 = (Bi_i*Bi_out*T_i*np.log(R_o)-Bi_i*Bi_out*T_o*np.log(r_i)+Bi_i*T_i+Bi_out*T_o)/(-Bi_i*Bi_out*np.log(r_i)+Bi_i*Bi_out*np.log(R_o)+Bi_i+Bi_out) #phis[k][:] = (c1*np.log(rs)+c2 - T_o)/(T_i-T_o) phis[k][:] = (np.log(rs/R_o))/(np.log(r_i/R_o)+1/Bi_out + 1/Bi_i) plt.figure(j) plt.plot(rs, phis[k][:],label = 'Bi_o/Bi_i ='+str(Bi_out)) plt.legend() plt.xlabel('r') plt.ylabel('phi') plt.title('R = '+str(R_o)) # - # In interpereting the graphs, it us useful to remember that $\phi = 1$ corresponds to the temperature being equal to the internal air temperature, and $\phi = 0$ corresponds to temoerature being equal to the external air temperature. # ## Points to note: # 1. For a thin wall (Thickness << cylinder diameter), the internal temperature is nearly constant and determined by the convective coefficients. If convective transfer is much more prominent on the external surface than the internal surface, then the cylinder temperature is equal to the external temperature and vice versa. For comparible convective forces, the cylinder temperature is somewhere in between the two air temperatures. # 2. For thin walls, the slight temperature distribution that is exhibited is nearly linear, approcimating this case to a slab wall instead of a cylinder wall. # 3. For thick walls (Thickness ~ cylinder diameter), a distribution of temperatures is much more prominent, and the curviture of the cylinder is noted as it is non linear. This is intuitive, as for a cyliner the area of flux increases as radius increases, so temperature change should slow down as radius increases, which we do see. # 4. What we note is that the greater a Biot number is compared to the other side of the cylinder, the closer the wall temperature on that side comes to the air temperature. Alternatively, if the Biot numbers are of similar magnitude, the wall temperature on both sides of the cylinder walls do not approach the air temperatures but are instead in between the two.
presentations/10_16_19_Evan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Iris (Python 2) # language: python # name: iris_python2 # --- # + import iris iris.FUTURE.netcdf_promote = True url = "http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_GOM3_FORECAST.nc" cubes = iris.load_raw(url) print(cubes) # - cube = cubes.extract_strict("sea_water_potential_temperature") # + import pyugrid import matplotlib.tri as tri def get_mesh(cube, url): ug = pyugrid.UGrid.from_ncfile(url) cube.mesh = ug cube.mesh_dimension = 1 return cube def get_triang(cube): lon = cube.mesh.nodes[:, 0] lat = cube.mesh.nodes[:, 1] nv = cube.mesh.faces return tri.Triangulation(lon, lat, triangles=nv) # + # %matplotlib inline import numpy as np import numpy.ma as ma import cartopy.crs as ccrs import matplotlib.pyplot as plt def plot_model(cube, time_idx=-1, depth_idx=None): lon = cube.mesh.nodes[:, 0] lat = cube.mesh.nodes[:, 1] triang = get_triang(cube) data = cube[time_idx, ...].data if depth_idx is not None: data = data[depth_idx, ...] print(data.shape) data = ma.masked_invalid(data) fig, ax = plt.subplots(figsize=(7, 7), subplot_kw=dict(projection=ccrs.PlateCarree())) ax.set_extent([lon.min(), lon.max(), lat.min(), lat.max()]) ax.coastlines() levs = np.arange(-1, 5, 0.2) cs = ax.tricontourf(triang, data, levels=levs) fig.colorbar(cs) ax.tricontour(triang, data, colors='k',levels=levs) gl = ax.gridlines(draw_labels=True) gl.xlabels_top = gl.ylabels_right = False return fig, ax # + cube = get_mesh(cube, url) fig, ax = plot_model(cube, time_idx=-1, depth_idx=0) # + from iris.analysis import trajectory ax.plot([-69, -63], [42, 44]) waypoints = [{'longitude': -69, 'latitude': 42}, {'longitude': -36, 'latitude': 44}] traj = trajectory.Trajectory(waypoints) lons = [d['longitude'] for d in traj.sampled_points] lats = [d['latitude'] for d in traj.sampled_points] sampled_points = [('longitude', lon), ('latitude', lat)] # + c = cube[-1, ...] for aux in c.aux_factories: c.remove_aux_factory(aux) # - glider = iris.analysis.trajectory.interpolate(c, sampled_points, method="nearest")
notebooks/glider/ugrid_glider.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>Game Time</h1> # # <h3>This notebook uses the microphone and LEDs to make a game</h3> # # <i>Click the play button next to the cell you want to run</i> # <h2>Setup</h2> from pgb_game import play, clear # You've probably played <b>Rock - Paper - Scissors</b> before, but have you ever played # <h1>Porcupine - Grapefruit - Bumblebee !!</h1> # # This game has the following rules: # # 1. Porcupine eats Grapefruit # 2. Grapefruit squashes Bumblebee # 3. Bumblebee stings Porcupine # # There are two players, <span style="color:red;">Red Player</span> and <span style="color:blue;">Blue Player</span>. # # <span style="color:red;">Red Player</span> goes first, they use the keyboard to pick either Bumblebee, Grapefruit, or Porcupine. (Don't tell the other person what you picked!) # # <span style="color:blue;">Blue Player</span> goes second, they speak their choice. The winner will be shown on the screen and the LEDs will light up with the winner's colors (or <span style="color:purple;">Purple</span> if it's a draw). play() # <h2>Hope you had fun, turn off the lights before you leave!</h2> clear()
02_ReSpeaker_Device/04_Game_Time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="eb49344adfa785e154e7f5cd60643298c51ca819" # ### Metastatic Model # + [markdown] _uuid="c1e92489350fd6543d60b41cbde9c69f332eb23e" # Welcome back. In this section we will create the Metastatic_model. This model will predict whether or not Metastatic cancer tissue is present in histopathologic image patches. # # **Dataset** # # We will use Kaggle's version of the PCam (PatchCamelyon) dataset. It's part of the [Histopathologic Cancer Detection competition](https://www.kaggle.com/c/histopathologic-cancer-detection) where the challenge is to identify metastatic tissue in histopathologic scans of lymph node sections. # # The dataset consists of 220,025 image patches of size 96x96 (130,908 Metastatic negative and 89,117 Metastatic positive). # # The images are in tiff format. Many web browsers, including Chrome, don't support the tiff format. Thus the web app wil not be able to accept tiff images. Before training, we will convert these images to png format. This will ensure that the model will be trained on images of similar quality to what we expect a user to submit. # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" from numpy.random import seed seed(101) from tensorflow import set_random_seed set_random_seed(101) import pandas as pd import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation from tensorflow.keras.models import Sequential from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from tensorflow.keras.optimizers import Adam import os import cv2 from sklearn.utils import shuffle from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split import itertools import shutil import matplotlib.pyplot as plt # %matplotlib inline # + _uuid="b7bbdd52c81188b8e9c528b88d9fd0da176bf4bc" IMAGE_SIZE = 96 IMAGE_CHANNELS = 3 SAMPLE_SIZE = 80000 # the number of images we use from each of the two classes # + [markdown] _uuid="25802a26e4afba8f6dc42754f7f61da4c8cfea5c" # ### What files are available? # + _uuid="699bb899bde433ba20fb0d086fa0f33a0f61a250" os.listdir('../input/histopathologic-cancer-detection') # + [markdown] _uuid="c24d1662f8bd5fc4d8c57c29449990a3520cd96b" # ### Labels as per csv file # # 0 = no met tissue<br> # 1 = has met tissue. <br> # # + [markdown] _uuid="5a285343c286be191aa827ff9b836b67821176a5" # ### How many training images are in each folder? # + _uuid="54461212efed65ac377369a468c80e7d708010f4" print(len(os.listdir('../input/histopathologic-cancer-detection/train'))) # + [markdown] _uuid="b90854e07d495d9f945a0e40189fd32a0c32bff5" # ### Create a Dataframe containing all images # + _uuid="e9c9f40ffab35044641b0dc7d9b18609af1aa25e" df_data = pd.read_csv('../input/histopathologic-cancer-detection/train_labels.csv') # removing this image because it caused a training error previously df_data = df_data[df_data['id'] != 'dd6dfed324f9fcb6f93f46f32fc800f2ec196be2'] # removing this image because it's black df_data = df_data[df_data['id'] != '9369c7278ec8bcc6c880d99194de09fc2bd4efbe'] print(df_data.shape) # + [markdown] _uuid="cfbd53b7f8ea1929952ffed6221b380012618e32" # ### Check the class distribution # + _uuid="e18560bf69d3dfc0c4772e7c79bb119fd2eb634b" df_data['label'].value_counts() # + [markdown] _uuid="6efe2e5de99c4bf92079b1a7d0b892d30fc9d518" # ### Display a random sample of train images by class # + _kg_hide-input=true _uuid="1c5143f227da4262eafce8cf0210a02c8072fb8e" # source: https://www.kaggle.com/gpreda/honey-bee-subspecies-classification def draw_category_images(col_name,figure_cols, df, IMAGE_PATH): """ Give a column in a dataframe, this function takes a sample of each class and displays that sample on one row. The sample size is the same as figure_cols which is the number of columns in the figure. Because this function takes a random sample, each time the function is run it displays different images. """ categories = (df.groupby([col_name])[col_name].nunique()).index f, ax = plt.subplots(nrows=len(categories),ncols=figure_cols, figsize=(4*figure_cols,4*len(categories))) # adjust size here # draw a number of images for each location for i, cat in enumerate(categories): sample = df[df[col_name]==cat].sample(figure_cols) # figure_cols is also the sample size for j in range(0,figure_cols): file=IMAGE_PATH + sample.iloc[j]['id'] + '.tif' im=cv2.imread(file) ax[i, j].imshow(im, resample=True, cmap='gray') ax[i, j].set_title(cat, fontsize=16) plt.tight_layout() plt.show() # + _uuid="bd38bcfb5839975e4fee9e70b93d42c29c1b5d2e" IMAGE_PATH = '../input/histopathologic-cancer-detection/train/' draw_category_images('label',4, df_data, IMAGE_PATH) # + [markdown] _uuid="1da4226777aefe65b1bb3430208ea91ea7ca7d9a" # ### Create the Train and Val Sets # + _uuid="547f9f571ee82e20b7647e16ed36de7550046032" df_data.head() # + [markdown] _uuid="c1150500d2772b7f36cdaa5aa5fd7f0fb4a72628" # #### Balance the target distribution # We will reduce the number of samples in class 0. # + _uuid="270fc18640b552ecc3cb0e1dd3036441db7a4a2b" # take a random sample of class 0 with size equal to num samples in class 1 df_0 = df_data[df_data['label'] == 0].sample(SAMPLE_SIZE, random_state = 101) # filter out class 1 df_1 = df_data[df_data['label'] == 1].sample(SAMPLE_SIZE, random_state = 101) # concat the dataframes df_data = pd.concat([df_0, df_1], axis=0).reset_index(drop=True) # shuffle df_data = shuffle(df_data) df_data['label'].value_counts() # + _uuid="a166dec3ef84c66ad9cd815b63fc1a753df2eb76" df_data.head() # + _uuid="15ba9792e6a370b7560330af15b3cfe21185c1cb" # train_test_split # stratify=y creates a balanced validation set. y = df_data['label'] df_train, df_val = train_test_split(df_data, test_size=0.10, random_state=101, stratify=y) print(df_train.shape) print(df_val.shape) # + _uuid="7de70d915a5f1d2599725e00bdb3b9103d947883" df_train['label'].value_counts() # + _uuid="392c0eea00be8e43a6e55438d1458650e842030b" df_val['label'].value_counts() # + [markdown] _uuid="ba17dd34b75367fc61df6634d51dac94c3ab4951" # ### Create a Directory Structure # + _kg_hide-input=true _uuid="ff8acc2e92a1b1b5002d6e1bf9a1180c3256f19d" # Create a new directory base_dir = 'base_dir' os.mkdir(base_dir) #[CREATE FOLDERS INSIDE THE BASE DIRECTORY] # now we create 2 folders inside 'base_dir': # train_dir # a_no_met_tissue # b_has_met_tissue # val_dir # a_no_met_tissue # b_has_met_tissue # create a path to 'base_dir' to which we will join the names of the new folders # train_dir train_dir = os.path.join(base_dir, 'train_dir') os.mkdir(train_dir) # val_dir val_dir = os.path.join(base_dir, 'val_dir') os.mkdir(val_dir) # [CREATE FOLDERS INSIDE THE TRAIN AND VALIDATION FOLDERS] # Inside each folder we create seperate folders for each class # create new folders inside train_dir no_met_tissue = os.path.join(train_dir, 'a_no_met_tissue') os.mkdir(no_met_tissue) has_met_tissue = os.path.join(train_dir, 'b_has_met_tissue') os.mkdir(has_met_tissue) # create new folders inside val_dir no_met_tissue = os.path.join(val_dir, 'a_no_met_tissue') os.mkdir(no_met_tissue) has_met_tissue = os.path.join(val_dir, 'b_has_met_tissue') os.mkdir(has_met_tissue) # + _uuid="03ca5d4b8b027c2712d7096314d3a79ef829b23c" # check that the folders have been created os.listdir('base_dir/train_dir') # + [markdown] _uuid="6a2e56340ba18f3b63c1b129fd995fecfadaa21d" # ### Transfer the images into the folders # + _uuid="e84c8a9642b030094b1888af3299063f883112a6" # Set the id as the index in df_data df_data.set_index('id', inplace=True) # + _kg_hide-input=true _uuid="afb8969a9ee75c13bddc808a4bcc326611baaaaf" # Get a list of train and val images train_list = list(df_train['id']) val_list = list(df_val['id']) # Transfer the train images for image in train_list: # the id in the csv file does not have the .tif extension therefore we add it here fname_tif = image + '.tif' # get the label for a certain image target = df_data.loc[image,'label'] # these must match the folder names if target == 0: label = 'a_no_met_tissue' if target == 1: label = 'b_has_met_tissue' # source path to image src = os.path.join('../input/histopathologic-cancer-detection/train', fname_tif) # change the new file name to png fname_png = image + '.png' # destination path to image dst = os.path.join(train_dir, label, fname_png) # read the file as an array cv2_image = cv2.imread(src) # save the image at the destination as a png file cv2.imwrite(dst, cv2_image) # Transfer the val images for image in val_list: # the id in the csv file does not have the .tif extension therefore we add it here fname_tif = image + '.tif' # get the label for a certain image target = df_data.loc[image,'label'] # these must match the folder names if target == 0: label = 'a_no_met_tissue' if target == 1: label = 'b_has_met_tissue' # source path to image src = os.path.join('../input/histopathologic-cancer-detection/train', fname_tif) # change the new file name to png fname_png = image + '.png' # destination path to image dst = os.path.join(val_dir, label, fname_png) # read the file as an array cv2_image = cv2.imread(src) # save the image at the destination as a png file cv2.imwrite(dst, cv2_image) # + _uuid="71532bfc32608289b1f773ffdbc8a7cea1bfb94c" # check how many train images we have in each folder print(len(os.listdir('base_dir/train_dir/a_no_met_tissue'))) print(len(os.listdir('base_dir/train_dir/b_has_met_tissue'))) # + _uuid="897e9df543bb65b47bb00019dc681125ca08ee5d" # check how many val images we have in each folder print(len(os.listdir('base_dir/val_dir/a_no_met_tissue'))) print(len(os.listdir('base_dir/val_dir/b_has_met_tissue'))) # + _uuid="ef780ac9a9fc89b4ff4f042593eb68992f354a1d" # End of Data Preparation ### ================================================================================== ### # Start of Model Building # + [markdown] _uuid="f8dce940ee8a7a42aacb062e4c6b5a4a54dba58f" # ### Set Up the Generators # + _uuid="ef4fe7be09f11ff4badfd22d5fd5e03f8521ed58" train_path = 'base_dir/train_dir' valid_path = 'base_dir/val_dir' test_path = '../input/test' num_train_samples = len(df_train) num_val_samples = len(df_val) train_batch_size = 10 val_batch_size = 10 train_steps = np.ceil(num_train_samples / train_batch_size) val_steps = np.ceil(num_val_samples / val_batch_size) # + _uuid="68fbd9d5fbb80859a82f94a12e335ce05a93bd51" datagen = ImageDataGenerator(rescale=1.0/255) train_gen = datagen.flow_from_directory(train_path, target_size=(IMAGE_SIZE,IMAGE_SIZE), batch_size=train_batch_size, class_mode='categorical') val_gen = datagen.flow_from_directory(valid_path, target_size=(IMAGE_SIZE,IMAGE_SIZE), batch_size=val_batch_size, class_mode='categorical') # Note: shuffle=False causes the test dataset to not be shuffled test_gen = datagen.flow_from_directory(valid_path, target_size=(IMAGE_SIZE,IMAGE_SIZE), batch_size=1, class_mode='categorical', shuffle=False) # + [markdown] _uuid="79da4d0a66a90cffe40580a596dd4d0e2bc45a9b" # ### Create the Model Architecture¶ # + _kg_hide-output=true _uuid="b9835ea0fd0bca54138904895c39d38227a70c22" # Source: https://www.kaggle.com/fmarazzi/baseline-keras-cnn-roc-fast-5min-0-8253-lb kernel_size = (3,3) pool_size= (2,2) first_filters = 32 second_filters = 64 third_filters = 128 dropout_conv = 0.3 dropout_dense = 0.3 model = Sequential() model.add(Conv2D(first_filters, kernel_size, activation = 'relu', input_shape = (96, 96, 3))) model.add(Conv2D(first_filters, kernel_size, activation = 'relu')) model.add(Conv2D(first_filters, kernel_size, activation = 'relu')) model.add(MaxPooling2D(pool_size = pool_size)) model.add(Dropout(dropout_conv)) model.add(Conv2D(second_filters, kernel_size, activation ='relu')) model.add(Conv2D(second_filters, kernel_size, activation ='relu')) model.add(Conv2D(second_filters, kernel_size, activation ='relu')) model.add(MaxPooling2D(pool_size = pool_size)) model.add(Dropout(dropout_conv)) model.add(Conv2D(third_filters, kernel_size, activation ='relu')) model.add(Conv2D(third_filters, kernel_size, activation ='relu')) model.add(Conv2D(third_filters, kernel_size, activation ='relu')) model.add(MaxPooling2D(pool_size = pool_size)) model.add(Dropout(dropout_conv)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(dropout_dense)) model.add(Dense(2, activation = "softmax")) model.summary() # + [markdown] _uuid="75cfc4fcb8dd3408d1c4fcf8cd85e0e2f5b611d7" # ### Train the Model # + _uuid="9de9715f49a63b55775b10abd2f461b395e23b5d" model.compile(Adam(lr=0.0001), loss='binary_crossentropy', metrics=['accuracy']) # + _uuid="227d84a4f44c0b7256855c06ba04dabd58d89d84" # Get the labels that are associated with each index print(val_gen.class_indices) # + _uuid="a746769db61563f226288eba9aa8a6584b9e8e0b" filepath = "model.h5" checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=2, verbose=1, mode='max', min_lr=0.00001) callbacks_list = [checkpoint, reduce_lr] history = model.fit_generator(train_gen, steps_per_epoch=train_steps, validation_data=val_gen, validation_steps=val_steps, epochs=20, verbose=1, callbacks=callbacks_list) # + [markdown] _uuid="fa15a8afda3593973726e9087cbd98073041c908" # ### Evaluate the model using the val set # + _uuid="70104420ec7f400cd06203f875dbeba30f4d8a96" # get the metric names so we can use evaulate_generator model.metrics_names # + _uuid="428bdf5b24ff8cef35012205c3f2eb37006fc9e9" # Here the best epoch will be used. model.load_weights('model.h5') val_loss, val_acc = \ model.evaluate_generator(test_gen, steps=len(df_val)) print('val_loss:', val_loss) print('val_acc:', val_acc) # + [markdown] _uuid="93556c9e4b6a188cf9cb67a6519c9bc365c60caf" # ### Plot the Training Curves # + _kg_hide-input=true _uuid="385da8ba94a1079d17909790716b295fc2737584" # display the loss and accuracy curves import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.figure() plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() # + [markdown] _uuid="5636e76f23202dd1f2a27ace25e15e09619a5e4e" # ### Make a prediction on the val set # We need these predictions to calculate the AUC score, print the Confusion Matrix and calculate the F1 score. # + _uuid="652d9d6aa51dc1818d1c5171212d10e141ad7de9" # make a prediction predictions = model.predict_generator(test_gen, steps=len(df_val), verbose=1) # + _uuid="edf1866df4638ded26de2e0e3d2ba0f5e00e1ace" predictions.shape # + [markdown] _uuid="6bbf4f095fe5412561924c09c96d8dc2ea4adfc0" # **A note on Keras class index values** # # Keras assigns it's own index value (here 0 and 1) to the classes. It infers the classes based on the folder structure. # Important: These index values may not match the index values we were given in the train_labels.csv file. # # I've used 'a' and 'b' folder name pre-fixes to get keras to assign index values to match what was in the train_labels.csv file - I guessed that keras is assigning the index value based on folder name alphabetical order. # + _uuid="dc71f69944e7db83329417c5265a5bc31f9c4fc3" # This is how to check what index keras has internally assigned to each class. test_gen.class_indices # + _uuid="4a6709d73969f7fd597128223b110be077f84edb" # Put the predictions into a dataframe. # The columns need to be oredered to match the output of the previous cell df_preds = pd.DataFrame(predictions, columns=['no_met_tissue', 'has_met_tissue']) df_preds.head() # + _uuid="0954d61a4ef8bc056452b3bbad9456d45c00bed1" # Get the true labels y_true = test_gen.classes # Get the predicted labels as probabilities y_pred = df_preds['has_met_tissue'] # + [markdown] _uuid="f07c814d213e814cdac4e3d05d0a8db847fbfe28" # ### What is the AUC Score? # + _uuid="0b7b7a56c6fa47cc40764d0c06d64860580cbea1" from sklearn.metrics import roc_auc_score roc_auc_score(y_true, y_pred) # + [markdown] _uuid="9d8404e8e6b6008c8989b8184700ec5562b99366" # ### Create a Confusion Matrix # + _kg_hide-input=true _uuid="91f570e8e5f07126e5361bbf92929d786e853a09" # Source: Scikit Learn website # http://scikit-learn.org/stable/auto_examples/ # model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model- # selection-plot-confusion-matrix-py def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') plt.tight_layout() # + _uuid="c20bc358dc753020d5a560dc56f2350c7f40a4f9" # Get the labels of the test images. test_labels = test_gen.classes # + _uuid="3a537724473df19c74bb1bf6928f531dd0fcfdb3" test_labels.shape # + _uuid="0bb4323e931ff53081994bbf58e82b1ec93ab327" # argmax returns the index of the max value in a row cm = confusion_matrix(test_labels, predictions.argmax(axis=1)) # + _uuid="ec2058223eb30485898a12c0e904bb170c0aa884" # Print the label associated with each class test_gen.class_indices # + _uuid="ba26c7e718df937a18aa2035c4ba883252e44c79" # Define the labels of the class indices. These need to match the # order shown above. cm_plot_labels = ['no_met_tissue', 'has_met_tissue'] plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix') # + [markdown] _uuid="2576c1144cfe93d66f3197396990f3e43addd499" # ### Create a Classification Report # + _uuid="91dcace7eb99aca310774b7a3a55535c9127ce55" from sklearn.metrics import classification_report # Generate a classification report # For this to work we need y_pred as binary labels not as probabilities y_pred_binary = predictions.argmax(axis=1) report = classification_report(y_true, y_pred_binary, target_names=cm_plot_labels) print(report) # + [markdown] _uuid="36880545abb112946ebe48242d5da6c8517cf61b" # **Recall **= Given a class, will the classifier be able to detect it?<br> # **Precision **= Given a class prediction from a classifier, how likely is it to be correct?<br> # **F1 Score** = The harmonic mean of the recall and precision. Essentially, it punishes extreme values. # # From the confusion matrix and classification report we see that our model is equally good at detecting both classes. # + [markdown] _uuid="4b0b4f6065e0ba97d9170cc6d9e23ae3d064a73e" # ### Convert the model to from Keras to Tensorflowjs # This conversion needs to be done so that the model can be loaded into the web app. # + _uuid="1510fc5dedb309e4b03c6cbee3acf27f55739ecc" # Delete the base_dir directory we created to free up disk space to download tensorflowjs # and to prevent a Kaggle error. # Kaggle allows a max of 500 files to be saved. shutil.rmtree('base_dir') # + _kg_hide-output=true _uuid="28dd1e74473a0523885e50e8d17fd7d50036d9df" # !pip install tensorflowjs # + _uuid="d565698d4fd2e7479bb0331bf9c9c73fd97fb64d" # Use the command line conversion tool to convert the model # !tensorflowjs_converter --input_format keras model.h5 tfjs_model_2/model # + [markdown] _uuid="daf2016cea165e5aad1ad28f92a81f0ca08d800f" # ### Lessons learned while building the web app # + [markdown] _uuid="c9ae43bfacf8c8a579484705e9dce379daf1e54a" # **1.**<br> # Most web browsers don't support the tiff image format, which is contained in the dataset. While preprocessing the same for our web application we converted the images to .png format such that the model is trained with data i.e. similar to the expected input.. # # **2.**<br> # Because Tensorflowjs is a new technology, web apps bulit using it may not work in some browsers. The user will see a message saying the "Ai is loading..." but that message will never go away because the app is actually frozen. It's better to advise users to use the latest version of Chrome. # # **3.**<br> # The web app for this project uses the Javascript language for the most part. We also used Javascript to feed the images to the model. The challenge is that Javascript is very fast whereas the model isn't fast enough to keep up. This difference in speed can lead to incorrect predictions. We used async/await to fix this. # + [markdown] _uuid="eb92189bf34d85d3e5088803fc170297456da902" # # ### Resources # # # 1. <NAME>, Honey Bee Subspecies Classification <br> # https://www.kaggle.com/gpreda/honey-bee-subspecies-classification<br> # # 2. Beluga, Black and White CNN<br> # https://www.kaggle.com/gaborfodor/black-white-cnn-lb-0-77 # # 3. <NAME>, Baseline Keras CNN<br> # https://www.kaggle.com/fmarazzi/baseline-keras-cnn-roc-fast-5min-0-8253-lb # # 4. YouTube tutorial by deeplizard on how to use Tensorflowjs<br> # https://www.youtube.com/watch?v=HEQDRWMK6yY # # 5. Tutorial by jenkov.com explaining the HTML5 File API<br> # http://tutorials.jenkov.com/html5/file-api.html # # 6. Blog post by <NAME> on how to handle async/await<br> # https://blog.lavrton.com/javascript-loops-how-to-handle-async-await-6252dd3c795 # # 7. jQuery tutorial by W3Schools<br> # https://www.w3schools.com/jquery/default.asp # # + _uuid="9b789103dec6faf61148e291c59f9c7f13d1344a"
ml-helper-files/breast-cancer/metastatic-cancer-model-breast-cancer-analyzer-web-app.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # name: python3 # --- import pandas as pd df = pd.read_csv(r'D:\data\edgar\edgar_abs_since_2015.csv') df df['auto'] = df['company'].apply(lambda company: True if " auto " in company.lower() else False) df = df[df['auto'] == False] df['size'].sum() / 1024 / 1024 /1024 df # df.to_csv(r'D:\data\edgar\edgar_abs_since_2015_non_auto.csv', index=False) df['cik'].drop_duplicates().to_csv('cik.txt', index=False) xmlDf = df[df['doc_name'].str.endswith('xml')] xmlDf xmlDf['size'].sum() / 1024 /1024 / 1024 xmlDf['doc_link'].iloc[0] xml = pd.read_xml(r'C:\Users\fubiye\Desktop\woart2018bex102jun19.xml') # + # only keep import os import numpy as np import shutil def parse_lines_in_file(filename): file = open(filename, 'r') lines = list() try: lines_origin = file.readlines() for line in lines_origin: lines.append(line[:-1]) finally: file.close() return lines ciks = set(parse_lines_in_file('cik.txt')) print(len(ciks)) print('816512' in ciks) DATA_PATH = r'D:\data\edgar\Archives\edgar\data' folders = set() for folder in os.listdir(DATA_PATH): folders.add(folder) # if folder not in ciks: # shutil.rmtree(os.path.join(DATA_PATH, folder)) for cik in ciks: if cik not in folders: print(cik) # -
analysis/edgar-abs-since-2015.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # New Notebook ! # + # Scientific libraries import numpy as np import scipy # Graphic libraries import matplotlib.pyplot as plt # %matplotlib notebook # Creating alias for magic commands # LPPview Classes from LPPview import * plt.style.use("presentation") plt.rcParams['figure.figsize'] = (4, 4) qe = 1.6021766E-19 me = 9.109E-31 mi = 219.6E-27 eps0 = 8.854187817e-12 savepath = "./" sigma_0 = 0.5 sigma_max= 2.9 # - import autograd as agrd def funct_scl_tosolve(dphi, Te = 20, epsi=200, gamma=1.35): """Here, dphi is normilized by Te0, but Te0 still needs to be given for sigma """ a = -(gamma-1)/gamma * dphi Te_w = Te*(1 + a) sigma_tmp = sigma_maxw_unsat(Te_w, epsi=epsi) sigma = np.where(sigma_tmp > 0.982, 0.982, sigma_tmp) return np.power(1 + a,1/(gamma -1)) * np.sqrt( 1 - a) - np.sqrt(4*gamma*np.pi*me/mi)/(1-sigma) # + def sigma_maxw_unsat(Te, epsi): return sigma_0 + 2.0*(Te)*(1.0 - sigma_0)/epsi # - import autograd.numpy as numpy import autograd.numpy as np def funct_tosolve(dphi, Te = 20, epsi=200, gamma=1.35): """Here, dphi is normilized by Te0, but Te0 still needs to be given for sigma """ a = -(gamma-1)/gamma * dphi Te_w = Te*(1 + a) sigma = sigma_maxw_unsat(Te_w, epsi=epsi) return (1-sigma)*numpy.power(1 + a,1/(gamma -1)) * numpy.sqrt( 1 - a) - numpy.sqrt(4*gamma*numpy.pi*me/mi) grad_dphi = agrd.elementwise_grad(funct_scl_tosolve) # + plt.figure() dphi_vect = np.linspace(0.5, 3.5, 500) plt.plot(dphi_vect, funct_scl_tosolve(dphi_vect, Te=40, epsi=45)) plt.plot(dphi_vect, [grad_dphi(d, Te=40, epsi=45) for d in dphi_vect]) # - from scipy.optimize import minimize # # Using Autograd to find the double solution # + plt.figure() epsi=45 gamma=1.28 cost_fun = lambda x: np.abs(funct_tosolve(x[0], x[1], epsi, gamma)) + np.abs(grad_dphi(x[0], x[1], epsi, gamma)) dphi_vect = np.linspace(1, 3, 5000) plt.plot(dphi_vect, cost_fun([dphi_vect, 30])) plt.plot(dphi_vect, cost_fun([dphi_vect, 35])) plt.plot(dphi_vect, cost_fun([dphi_vect, 50])) plt.plot(dphi_vect, cost_fun([dphi_vect, Te_cr])) # - def messy_thresold(espi=45, gamma=1.35, Te0=30, dT0=1): """This method is messy, but should work: We itarate """ dphi_vect = np.linspace(1, 3, 5000) Te = Te0 dT = dT0 steps = 0 cost_fun = lambda x: np.abs(funct_tosolve(x[0], x[1], epsi, gamma)) + np.abs(grad_dphi(x[0], x[1], epsi, gamma)) proceed = True old_minimum = 10 def fun(Te): values = cost_fun([dphi_vect, Te]) minimum = values.min() return minimum solution = minimize(fun, Te0, method="Nelder-Mead", tol=1e-12) Te_cr = solution.x[0] if not np.isclose(fun(Te_cr), 0.0, atol=1e-4): print(fun(Te_cr)) Te_cr = np.NAN return Te_cr Te_cr = messy_thresold(45, 1.35, 30, 5) Te_cr Te_cr = messy_thresold(45, 1.35, 30, 5) Te_cr # + epsi = 45 gamma_vect = np.linspace(1.05, 1.8, 50) def f(g): return messy_thresold(epsi, g, 35, -1) Te_cr_vect_45 = p.map( f ,gamma_vect ) # + epsi = 35 Te_cr_vect_35 = [messy_thresold(epsi, g, 35, -1) for g in gamma_vect] # + plt.figure() plt.plot(gamma_vect, Te_cr_vect_45, label="$\\epsilon^*=45$ V") plt.plot(gamma_vect, Te_cr_vect_35, label="$\\epsilon^*=35$ V") plt.xlabel("Polytropic index $\\gamma$") plt.ylabel("Maximum temperature T$_{e}^1$ V") plt.legend() # + def tresold(x0, epsi=45, bounds=[0.5, 3.856], xtol=1e-26): """Overide fsove to add bounds""" r = minimize(fun=lambda x: np.abs(funct_tosolve(x, epsi)) + np.abs(grad_dphi(x[0], x[1], epsi)), x0=x0, bounds=[bounds, [0, 200]], tol=xtol) r² return r # - tresold([2, 38])
src/Chapitre4/figure/thresthold_values.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np data = np.sin(np.arange(20)).reshape(5,4) print(data) # 取每列的最大值的索引 ind = data.argmax(axis=0) print(ind) # 取每列的最大值 data_max = data[ind, range(data.shape[1])] print(data_max) all(data_max == data.max(axis=0)) a = np.arange(0, 40, 10) b = np.tile(a, (3, 5)) print(b) # a = np.arange(0, 40, 10) # print a # print '---' # b = np.tile(a, (1, 4)) # print b # #print a # #print b # a = np.array([[4, 3, 5], [1, 2, 1]]) print(a) print('--------------') # 按行的顺序从小到大排列 b = np.sort(a, axis=1) print(b) a.sort(axis=1) print(a) print('--------------') # 从小到大的索引值 a = np.array([4, 3, 1, 2]) j = np.argsort(a) print(j) # 按照从小到大的顺序打印 print(a[j])
ziliu/LeaningNumpy/code/numpy_5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import matplotlib.pyplot as plt import tensorflow as tf import pandas as pd # %matplotlib # + raw_data={'cG1': pd.read_csv('0808_112404_cbcg.csv'), 'G1' : pd.read_csv('0810_191625_bcg.csv'), 'rcG1': pd.read_csv('0821_213901_rcbcg.csv')} xlabel='Training Step' dfs=[pd.DataFrame(data={k:v['Value'].values,xlabel:v['Step'].values}) for k,v in raw_data.items()] # + raw_data={'Causal Graph 1' : pd.read_csv('0810_191625_bcg.csv'), 'complete Causal Graph 1': pd.read_csv('0808_112404_cbcg.csv'), 'edge-reversed complete Causal Graph 1': pd.read_csv('0821_213901_rcbcg.csv')} xlabel='Training Step' dfs=[pd.DataFrame(data={k:v['Value'].values,xlabel:v['Step'].values}) for k,v in raw_data.items()] # + def my_merge(df1,df2): return pd.merge(df1,df2,how='outer',on=xlabel) plot_data=reduce(my_merge,dfs) # - ax=plot_data.plot.line(x=xlabel,xlim=[0,18000],ylim=[0,1],style = ['bs-','ro-','y^-']) ax.set_ylabel('Total Variation Distance',fontsize=18) ax.set_title('TVD of Label Generation',fontsize=18) ax.set_xlabel(xlabel,fontsize=18) plt.savefig('tvd_vs_step.pdf')
assets/tvdplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="dF8Nb2m-KUcy" # ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) # + [markdown] id="DugWaXTeKX_H" # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/18.Text2SQL.ipynb) # + [markdown] id="5M6S6tfJ9u_0" # # Text2SQL (only works after enterprise v2.7) # + id="tyBHihhdKkce" outputId="6f1828a9-8bc0-4362-c1ae-b6462ef5bad7" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 109} import json from google.colab import files license_keys = files.upload() with open(list(license_keys.keys())[0]) as f: license_keys = json.load(f) license_keys.keys() # + id="yTalvEmWl_m_" import os # Install java # ! apt-get update -qq # ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] # ! java -version secret = license_keys['SECRET'] os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE'] os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY'] version = license_keys['PUBLIC_VERSION'] jsl_version = license_keys['JSL_VERSION'] # ! pip install --ignore-installed -q pyspark==2.4.4 # ! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret # ! pip install --ignore-installed -q spark-nlp==$version import sparknlp print (sparknlp.version()) import json import os from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl from pyspark.sql import functions as F from pyspark.ml import Pipeline, PipelineModel spark = sparknlp_jsl.start(secret) # sparknlp_jsl.start(secret, public=version) if you want to start with different version of public sparknlp # + [markdown] id="1mAgduB1KrHb" # ## Convert schema json from SqLite schema # + [markdown] id="ksxY2tYXPPc7" # ### explore SqLite tables # + id="rL7-4n8GA-7g" # !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/text2sql/university_basketball.sqlite # + id="EMfeny-cLgGg" outputId="6a255b63-c2c1-4d4c-8548-b3303b9db152" colab={"base_uri": "https://localhost:8080/", "height": 34} import sqlite3 conn = sqlite3.connect('university_basketball.sqlite') cursor = conn.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") tables = cursor.fetchall() tables # + id="pehoWllvLpKS" outputId="3a4b86a5-005a-4c70-e20c-96e6c0656c18" colab={"base_uri": "https://localhost:8080/", "height": 360} import pandas as pd from IPython.display import display, HTML for table_name in tables: table_name = table_name[0] table = pd.read_sql_query("SELECT * from %s" % table_name, conn) display (table) # + [markdown] id="B3K_0_AkLzBf" # ### convert to tetx2SQL format # + id="Kjm7M6M5LMpf" outputId="d088a790-1833-4a77-d6fb-c8e2833f4e88" colab={"base_uri": "https://localhost:8080/", "height": 51} from sparknlp_jsl._tf_graph_builders.text2sql.util import sqlite2json schema_json_path = 'schema_converted.json' sqlite2json("university_basketball.sqlite",schema_json_path) # + [markdown] id="3wj8TrBILWO_" # ## Prepare DB schema # + [markdown] id="-bGWsnu6LSKh" # This is a one-time process for every new db schema that you want to work on # + id="9MI-9FGeNrgt" def prepare_db_schema(schema_json_path, output_json_path): document = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") tables = Text2SQLSchemaParser() \ .setOutputCol("table_metadata_chunk") \ .setSchemaPath(schema_json_path) \ .setInputCols(["document"]) chunk2doc = Chunk2Doc() \ .setInputCols(["table_metadata_chunk"]) \ .setOutputCol("table_metadata_doc") table_tokenizer = Tokenizer() \ .setOutputCol("table_token") \ .setInputCols(["table_metadata_doc"]) table_embedding = WordEmbeddingsModel.pretrained("glove_6B_300", "xx") \ .setInputCols(["table_metadata_doc", "table_token"]) \ .setOutputCol("table_embedding") table_chunk_embeddings = ChunkEmbeddings() \ .setOutputCol("table_metadata_chunk_embedding") \ .setInputCols("table_metadata_chunk", "table_embedding") table_exporter = Text2SQLSchemaExporter()\ .setInputCols(["table_metadata_chunk_embedding","table_metadata_chunk"])\ .setOutputPath(output_json_path) table_pl = Pipeline() \ .setStages([ document, tables, chunk2doc, table_tokenizer, table_embedding, table_chunk_embeddings, table_exporter ]) data = spark.createDataFrame([ [1, ""]]) \ .toDF("id", "text").cache() table_model = table_pl.fit(data).transform(data) print (output_json_path, 'is created and saved') return table_model.show() # + id="pQkamGRwDajY" outputId="b05cc835-f60b-415d-caec-d74795f5b057" colab={"base_uri": "https://localhost:8080/", "height": 207} schema_json_path = 'schema_converted.json' output_json_path = "db_embeddings.json" prepare_db_schema(schema_json_path, output_json_path) # + [markdown] id="fSBeGVBoNCW1" # ## Prepare Text2SQL pipeline # + [markdown] id="9JMqrcGXLgBf" # This is a one-time process for every new db schema that you want to work on # + id="aLQ8orBBQN88" def get_text2sql_model (schema_json_path, output_json_path): question_document = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentence_detector = SentenceDetectorDLModel.pretrained()\ .setInputCols("document")\ .setOutputCol("sentence") tokenizer = Tokenizer()\ .setInputCols("sentence")\ .setOutputCol("token") question_embbeding = WordEmbeddingsModel.pretrained("glove_6B_300", "xx") \ .setInputCols(["sentence", "token"]) \ .setOutputCol("question_embedding") text2sql_model = Text2SQLModel.pretrained('text2sql_glove', 'en', 'clinical/models') \ .setSchemaPath(schema_json_path) \ .setTableEmbeddingPath(output_json_path)\ .setInputCols(["token", "question_embedding", "chunk_emb", "table_metadata_chunk"]) \ .setOutputCol("sql") sql_pipeline = Pipeline(stages=[ question_document, sentence_detector, tokenizer, question_embbeding, text2sql_model ]) data = spark.createDataFrame([[""]]).toDF("text") sql_prediction_model = sql_pipeline.fit(data) sql_prediction_light = LightPipeline(sql_prediction_model) print ('text2sql prediction model is built') return sql_prediction_light # + id="1Ah8WnNREKss" outputId="fc20e3d8-693f-44cb-e2b3-bc8e731c0aae" colab={"base_uri": "https://localhost:8080/", "height": 187} sql_prediction_light = get_text2sql_model (schema_json_path, output_json_path) # + [markdown] id="j6MLWSozOuZp" # ## Example queries # + id="_Kk6v-4lRK87" import sqlparse def annotate_and_print(question, sql_light=sql_prediction_light, markdown=False, param=None): sql = sql_light.annotate(question)["sql"][0] print(sqlparse.format(sql, reindent=True, keyword_case='upper')) print("\n") if markdown: print (pd.read_sql(sql,conn,params=param).to_markdown()) else: display(pd.read_sql(sql,conn,params=param)) # + id="aB9vv6nmRRGq" outputId="c758d02d-22f8-4d81-ddbb-a2d8a265fc6c" colab={"base_uri": "https://localhost:8080/", "height": 199} annotate_and_print("What are the enrollment and primary conference for the university which was founded the earliest?") # + id="JppP0IVEJV0I" outputId="296060c9-208d-4e51-bec9-2e250db167ca" colab={"base_uri": "https://localhost:8080/", "height": 187} annotate_and_print("What are the enrollment and primary conference for the university which was founded the earliest?", markdown=True) # + id="nAkIQRfVW6bV" outputId="000c1ff8-7172-4948-bc8f-b3cfbe7a83eb" colab={"base_uri": "https://localhost:8080/", "height": 165} annotate_and_print("What is the total and minimum enrollment of all schools?") # + id="aGLxQHeyOZgB" outputId="4bb9bfea-f24c-49b3-dad0-6dfe5b4b02c6" colab={"base_uri": "https://localhost:8080/", "height": 165} annotate_and_print("Return the total and minimum enrollments across all schools.") # + id="ETgHNfcUOdfm" outputId="6b670b63-2573-48ac-d713-90e5f62de800" colab={"base_uri": "https://localhost:8080/", "height": 213} annotate_and_print("Find the total student enrollment for different affiliation type schools.") # + id="ygD1T5OiOhaW" outputId="faedb51e-e803-4124-f4ed-5d539de10daa" colab={"base_uri": "https://localhost:8080/", "height": 148} annotate_and_print("Find how many different affiliation types there are.") # + [markdown] id="aJEkt8xaAbSD" # ## Use case: Hospital records # + id="aEx7T23wAzZd" # !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/text2sql/hospital_records.sqlite # + id="mG5oOsbtAv0M" outputId="6f60f271-aaeb-4abb-d258-ef6fc2ced08b" colab={"base_uri": "https://localhost:8080/", "height": 272} import sqlite3 conn = sqlite3.connect('/content/hospital_records.sqlite') cursor = conn.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") tables = cursor.fetchall() tables # + id="WC6f3ifgBldB" outputId="3825ce46-40be-4744-f2d8-5bfc22578fe0" colab={"base_uri": "https://localhost:8080/", "height": 1000} import pandas as pd from IPython.display import display, HTML for table_name in tables: table_name = table_name[0] table = pd.read_sql_query("SELECT * from %s" % table_name, conn) print (table_name) display (table.head(5)) print ('========') # + id="I7n_gwQAAfW7" outputId="c8021138-e9b3-4a1f-8847-ef501a10910e" colab={"base_uri": "https://localhost:8080/", "height": 51} from sparknlp_jsl._tf_graph_builders.text2sql.util import sqlite2json sqlite2json("hospital_records.sqlite","hospital_schema_converted.json") # + id="2WeSAMxFDCan" outputId="7a56b86e-38a6-4e1c-ad0a-79cc8a25059a" colab={"base_uri": "https://localhost:8080/", "height": 377} schema_json_path = "hospital_schema_converted.json" output_json_path = "hospital_db_embeddings.json" prepare_db_schema(schema_json_path, output_json_path) hospital_sql_prediction_light = get_text2sql_model (schema_json_path, output_json_path) # + id="h7it5o_LE0nl" outputId="8c336152-a6b4-4bff-f24d-216476b0aa92" colab={"base_uri": "https://localhost:8080/", "height": 199} annotate_and_print("Find the id of the appointment with the most recent start date", hospital_sql_prediction_light) # + id="oudUrb1oFFH8" outputId="2678174c-1edd-4e2f-d893-7c8a697d9791" colab={"base_uri": "https://localhost:8080/", "height": 187} annotate_and_print("What is the name of the patient who made the most recent appointment", hospital_sql_prediction_light, markdown=True) # + id="--ArvFIDF6aA" outputId="8ed0726a-c24b-49c7-dbcd-af0f63242fa7" colab={"base_uri": "https://localhost:8080/", "height": 216} annotate_and_print("What is the name of the nurse has the most appointments?", hospital_sql_prediction_light) # + id="wVmo0p4TcoqZ" outputId="40897ba8-f5ce-4a7c-8f8a-c3e39f5cce88" colab={"base_uri": "https://localhost:8080/", "height": 204} annotate_and_print("What is the name of the nurse has the most appointments?", hospital_sql_prediction_light, markdown=True) # + id="Vmd-dz6uF_U_" outputId="95c1b004-9c6c-4915-e46e-5abe10773ed8" colab={"base_uri": "https://localhost:8080/", "height": 261} annotate_and_print("How many patients do each physician take care of? List their names and number of patients they take care of.", hospital_sql_prediction_light) # + id="BsL3ayVJGFpe" outputId="b362ea2f-6edb-4e22-92a0-d5937f9ac25e" colab={"base_uri": "https://localhost:8080/", "height": 221} annotate_and_print("How many patients do each physician take care of? List their names and number of patients they take care of.", hospital_sql_prediction_light, markdown=True) # + id="5EzwFfbEKGn3" outputId="1a3740a8-e153-40e8-9f0b-ed0d0af4f2b1" colab={"base_uri": "https://localhost:8080/", "height": 170} annotate_and_print("Give me trhe name of the departments", hospital_sql_prediction_light, markdown=True) # + id="IeBRWOcUX93q" outputId="bdaf18f4-a7fa-46a3-e40d-ffb0ad2dcc06" colab={"base_uri": "https://localhost:8080/", "height": 182} annotate_and_print("What is the most expensive procedure?", hospital_sql_prediction_light) # + id="sP3Nvsv2YZIW" outputId="596ddf1a-98b8-4b36-8f98-3f50c92d31c7" colab={"base_uri": "https://localhost:8080/", "height": 182} annotate_and_print("What is the cheapest procedure?", hospital_sql_prediction_light) # + id="c4FIpxBXZUL9"
tutorials/Certification_Trainings/Healthcare/18.Text2SQL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn import datasets from sklearn.metrics import accuracy_score # - cancer = datasets.load_breast_cancer() X = cancer['data'] y = cancer['target'] # + def sign(a): return (-1)**(a < 0) def to_binary(y): return y > 0 def standard_scaler(X): return (X-X.mean(axis=0))/X.std(axis=0) # + class Perceptron: def fit(self, X, y, n_iter=10**3, lr=0.001, add_intercept=True, standardize = True): if standardize: X = standard_scaler(X) if add_intercept: ones = np.ones(len(X)).reshape(-1, 1) self.X = X self.N, self.D = self.X.shape self.y = y self.n_iter = n_iter self.lr = lr self.converged = False beta = np.random.randn(self.D)/5 for i in range(self.n_iter): yhat = to_binary(sign(np.dot(self.X, beta))) if np.all(yhat == sign(self.y)): self.converged = True self.iterations_until_convergence = i break # only penalize wrong predictions for n in range(self.N): yhat_n = sign(np.dot(beta, self.X[n])) if self.y[n]*yhat_n == -1: beta += self.lr*self.y[n]*self.X[n] self.beta = beta self.yhat = to_binary(sign(np.dot(self.X, self.beta))) # - perceptron = Perceptron() perceptron.fit(X, y, n_iter=10**3, lr=0.01) print("Perceptron converged? %d, acc: %.3f" % (perceptron.converged, accuracy_score(perceptron.y, perceptron.yhat)))
Traditional Algorithms/3. Discriminative Classifiers/Perceptron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: fastai2 # language: python # name: fastai2 # --- # + from fastai2.vision.all import * from pathlib import Path ROOT = Path('..').resolve() sys.path.append(f'{ROOT}/src') # + import tensorflow as tf from tfrecord.tfrecord import * import kornia from mish_cuda import MishCuda from deconvolution.models.deconv import * from xresnet_deconv import * # + def replace_model_layer(model, layer_type_old, new_layer): conversion_count = 0 for name, module in reversed(model._modules.items()): if len(list(module.children())) > 0: # recurse model._modules[name] = replace_model_layer(module, layer_type_old, new_layer) if type(module) == layer_type_old: layer_old = module model._modules[name] = new_layer return model def gem(x, p=3, eps=1e-6): return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(1./p) class GeM(nn.Module): def __init__(self, p=3, eps=1e-6): super(GeM,self).__init__() self.p = nn.Parameter(torch.ones(1)*p) self.eps = eps def forward(self, x): return gem(x, p=self.p, eps=self.eps) def __repr__(self): return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')' # - # # Get data # + def unpack_sample(feats): return { 'class' : unpack_int64_list(feats['class']), 'image' : unpack_bytes_list(feats['image']) } train_fnames = get_files(ROOT/'data/flowers/train', extensions='.tfrec') test_fnames = get_files(ROOT/'data/flowers/val', extensions='.tfrec') data = [] for name in train_fnames+test_fnames: r = Reader(str(name), unpack_sample) for sample in r: data.append([sample['image'][0], sample['class'][0]]) # + get_x = lambda o: PILImage.create(io.BytesIO(o[0])) get_y = lambda o: o[1] def get_items(noop): return data splitter = IndexSplitter(range(12753, len(data))) # + item_tfms = [RandomResizedCrop(224, min_scale=0.7)] batch_tfms = [*aug_transforms()] block = DataBlock( blocks=(ImageBlock, CategoryBlock), get_items=get_items, get_x=get_x, get_y=get_y, splitter=splitter, item_tfms=item_tfms, batch_tfms=batch_tfms ) dls = block.dataloaders('', bs=64) # + [markdown] heading_collapsed=true # # Train model # + hidden=true model = xresnet50_deconv(n_out=dls.c) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy) learn.lr_find() # + [markdown] hidden=true # ## lr=6e-3, wd=1e-2 # + hidden=true model = xresnet50_deconv(n_out=dls.c) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy) learn.fit_flat_cos(5, 6e-3, wd=1e-2) # + [markdown] hidden=true # # ## + self-attention # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy) learn.fit_flat_cos(5, 6e-3, wd=1e-2) # + [markdown] hidden=true # # ## + Mish # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy) learn.fit_flat_cos(5, 6e-3, wd=1e-2) # + [markdown] hidden=true # # ## + LabelSmoothingCrossEntropy # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy()) learn.fit_flat_cos(5, 6e-3, wd=1e-2) # + [markdown] hidden=true # # ## + maxBlurPool # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy()) learn.fit_flat_cos(5, 6e-3, wd=1e-2) # + [markdown] hidden=true # # ## + increase wd # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy()) learn.fit_flat_cos(5, 6e-3, wd=5e-2) # + [markdown] hidden=true # # ## + earlier annealing # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy()) learn.fit_flat_cos(5, 6e-3, pct_start=0.5, wd=5e-2) # + [markdown] hidden=true # # ## + early annealing -maxblurpool # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy()) learn.fit_flat_cos(5, 6e-3, pct_start=0.5, wd=5e-2) # + [markdown] hidden=true # # ## + early annealing -maxblurpool, original wd # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy()) learn.fit_flat_cos(5, 6e-3, pct_start=0.5, wd=1e-2) # + [markdown] hidden=true # # ## + lowered label smoothing, + slightly increased wd # + hidden=true model = xresnet50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 6e-3, wd=3e-2) # + [markdown] hidden=true # ## Best hyperparams: # - lr=6e-3 # - wd=3e-2? # - sa=True # - act_cls=MishCuda # - loss_func=LabelSmoothingCrossEntropy() -- perhaps eps=0.05? # - fit_flat_cos with default annealing # - # # tweaked xse_resnext9_deconv # see e.g., `mini_net` from https://github.com/pete88b/data-science/blob/master/fastai-things/train-imagenette-mininet.ipynb # def xse_resnext9_deconv(pretrained=False, **kwargs): return XResNet_deconv( SEResNeXtBlock, expansion=1, layers=[1,1,1,1], **kwargs ) model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=32, reduction=16) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) # xse_resnext9 with deconv stem model # ## Test default groups/reduction = 32/16 model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=32, reduction=16) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 6e-3, wd=3e-2) # ## try groups/reduction = 64/8 model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 6e-3, wd=3e-2) # ## Increase lr model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=3e-2) # ## Re-up labelsmoothing eps model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.1)) learn.fit_flat_cos(5, 1e-2, wd=3e-2) # ## decrease wd=1e-2 model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=1e-2) # ## decrease wd=6e-3 model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=6e-3) # ## increase lr=3e-2 model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 3e-2, wd=1e-2) # ## + maxblurpool model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=1e-2) # # Final xse_resnext9 with deconv results: # ## With MBP: 77.76% ± 0.28% for i in range(5): model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=1e-2) res = np.array([0.778825, 0.779903, 0.773168, 0.780711, 0.775593]) print('{:.2f}% +/- {:.2f}%'.format(100*res.mean(), 100*res.std())) # ## Increased wd: 77.75% ± 0.38% for i in range(5): model = xse_resnext9_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=3e-2) res = np.array([0.780981, 0.782328, 0.772090, 0.777209, 0.775054]) print('{:.2f}% +/- {:.2f}%'.format(100*res.mean(), 100*res.std())) # # Larger model (xse_resnext18): 79.53% ± 0.50% def xse_resnext18_deconv(pretrained=False, **kwargs): return XResNet_deconv( SEResNeXtBlock, expansion=1, layers=[2, 2, 2, 2], **kwargs ) for i in range(5): model = xse_resnext18_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=1e-2) res = np.array([0.804688, 0.790948, 0.795528, 0.794181, 0.791218]) print('{:.2f}% +/- {:.2f}%'.format(100*res.mean(), 100*res.std())) # # And even larger (xse_resnext34): 79.92% ± 0.72% def xse_resnext34_deconv(pretrained=False, **kwargs): return XResNet_deconv( SEResNeXtBlock, expansion=1, layers=[3, 4, 6, 3], **kwargs ) for i in range(5): model = xse_resnext34_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)) learn.fit_flat_cos(5, 1e-2, wd=1e-2) res = np.array([0.812231, 0.794450, 0.795797, 0.792026, 0.801455]) print('{:.2f}% +/- {:.2f}%'.format(100*res.mean(), 100*res.std())) # # Any larger requires fp16/lower lr (xse_resnext50): 79.95% ± 0.23% def xse_resnext50_deconv(pretrained=False, **kwargs): return XResNet_deconv( SEResNeXtBlock, expansion=4, layers=[3, 4, 6, 3], **kwargs ) for i in range(5): model = xse_resnext50_deconv(n_out=dls.c, sa=True, act_cls=MishCuda, groups=64, reduction=8) model = replace_model_layer(model, nn.MaxPool2d, kornia.contrib.MaxBlurPool2d(3, True)) learn = Learner(dls, model, opt_func=ranger, metrics=accuracy, loss_func=LabelSmoothingCrossEntropy(eps=0.05)).to_fp16() learn.fit_flat_cos(5, 6e-3, wd=1e-2) res = np.array([0.803071, 0.801185, 0.796875, 0.797953, 0.798222]) print('{:.2f}% +/- {:.2f}%'.format(100*res.mean(), 100*res.std()))
nbs/03. Network deconvolution experiments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Import Libraries # # %matplotlib ipympl # # %matplotlib inline # %matplotlib wx import matplotlib.pyplot as plt plt.ion() from pydgilib_extra import * from atprogram.atprogram import atprogram from os import getcwd, path, pardir import pickle # ## Compile and program project project_path = [path.curdir, "AES_Flash-S"] project_path atprogram("AES_Flash-S", verbose=2) # + #atprogram(path.abspath(path.join(*project_path)), verbose=2) # - # ## Data Logging live_plot = False # Create a figure for the plot. if live_plot: fig = plt.figure(figsize=(6, 4)) fig.show() # Create the configuration dictionary for `DGILibExtra`. config_dict = { "loggers": [LOGGER_OBJECT, LOGGER_CSV], "file_name_base": "experiment_aes_flash" } config_dict_plot = { "loggers": [LOGGER_OBJECT, LOGGER_PLOT, LOGGER_CSV], "plot_pins": [False, False, True, True], "plot_pins_method": "line", "plot_xmax": 5, "window_title": "Experiment AES-256 Flash", } # Stop criteria to pass to the logger: def stop_fn(logger_data): return all(logger_data.gpio.values[-1]) # Perform the measurement. # + data = [] cd = config_dict.copy() if live_plot: fig.clf() for ax in fig.get_axes(): ax.cla() cd.update(config_dict_plot) cd["fig"] = fig with DGILibExtra(**cd) as dgilib: dgilib.device_reset() dgilib.logger.log(1000,stop_fn) data = dgilib.data # - # # Store Data import pickle pickle.dump(data, open("aes_flash_logger_data.p", "wb")) # # Load Data data = pickle.load(open("aes_flash_logger_data.p", "rb")) iteration = 0 name = "AES-256_Flash" data = pickle.load(open(path.join(path.pardir, path.pardir, f"{name}_{iteration}.p"), "rb")) # ## Analysis # Create Stop Function to stop parsing the data when all pins are high. def stop_function(pin_values): return all(pin_values) # Parse the data. aes_charge, aes_time = power_and_time_per_pulse(data, 2, stop_function=stop_function) flash_charge, flash_time = power_and_time_per_pulse(data, 3, stop_function=stop_function) length = len(aes_charge) assert length == len(aes_time) assert length == len(flash_charge) assert length == len(flash_time) print(length) aes_encrypt_charge = aes_charge[0::2] aes_decrypt_charge = aes_charge[1::2] aes_encrypt_time = aes_time[0::2] aes_decrypt_time = aes_time[1::2] aes_flash_write_charge = flash_charge[0::2] aes_flash_read_charge = flash_charge[1::2] aes_flash_write_time = flash_time[0::2] aes_flash_read_time = flash_time[1::2] len(aes_encrypt_charge), len(aes_decrypt_charge), len(aes_encrypt_time), len(aes_decrypt_time), len(aes_flash_write_charge), len(aes_flash_read_charge), len(aes_flash_write_time), len(aes_flash_read_time) drop = 0 cutoff = min(len(aes_encrypt_charge), len(aes_decrypt_charge), len(aes_encrypt_time), len(aes_decrypt_time), len(aes_flash_write_charge), len(aes_flash_read_charge), len(aes_flash_write_time), len(aes_flash_read_time)) - drop aes_encrypt_charge = aes_encrypt_charge[:cutoff] aes_decrypt_charge = aes_decrypt_charge[:cutoff] aes_encrypt_time = aes_encrypt_time[:cutoff] aes_decrypt_time = aes_decrypt_time[:cutoff] aes_flash_write_charge = aes_flash_write_charge[:cutoff] aes_flash_read_charge = aes_flash_read_charge[:cutoff] aes_flash_write_time = aes_flash_write_time[:cutoff] aes_flash_read_time = aes_flash_read_time[:cutoff] length = len(aes_encrypt_charge) assert length == len(aes_decrypt_charge) assert length == len(aes_encrypt_time) assert length == len(aes_decrypt_time) assert length == len(aes_flash_write_charge) assert length == len(aes_flash_read_charge) assert length == len(aes_flash_write_time) assert length == len(aes_flash_read_time) print(length) # # Convert to Joule # + voltage = 3.33 j_scale = 1e3 # m t_scale = 1e3 # m model_j_scale = 1e6 # n model_t_scale = 1e3 # u experiment_name = 'AES-256' # + aes_encrypt_energy = aes_encrypt_charge[:cutoff] aes_flash_write_energy = aes_flash_write_charge[:cutoff] aes_flash_read_energy = aes_flash_read_charge[:cutoff] aes_decrypt_energy = aes_decrypt_charge[:cutoff] aes_encrypt_time_s = aes_encrypt_time[:cutoff] aes_flash_write_time_s = aes_flash_write_time[:cutoff] aes_flash_read_time_s = aes_flash_read_time[:cutoff] aes_decrypt_time_s = aes_decrypt_time[:cutoff] for i in range(len(aes_encrypt_energy)): aes_encrypt_energy[i] = aes_encrypt_energy[i] * voltage * j_scale for i in range(len(aes_flash_write_energy)): aes_flash_write_energy[i] = aes_flash_write_energy[i] * voltage * j_scale for i in range(len(aes_flash_read_energy)): aes_flash_read_energy[i] = aes_flash_read_energy[i] * voltage * j_scale for i in range(len(aes_decrypt_energy)): aes_decrypt_energy[i] = aes_decrypt_energy[i] * voltage * j_scale for i in range(len(aes_encrypt_time_s)): aes_encrypt_time_s[i] = aes_encrypt_time_s[i] * t_scale for i in range(len(aes_flash_write_time_s)): aes_flash_write_time_s[i] = aes_flash_write_time_s[i] * t_scale for i in range(len(aes_flash_read_time_s)): aes_flash_read_time_s[i] = aes_flash_read_time_s[i] * t_scale for i in range(len(aes_decrypt_time_s)): aes_decrypt_time_s[i] = aes_decrypt_time_s[i] * t_scale # - MBEDTLS_AES_BLOCK_SIZE = 16 STEP_SIZE = MBEDTLS_AES_BLOCK_SIZE MIN_NUM_BYTES = STEP_SIZE num_bytes = range(MIN_NUM_BYTES, MIN_NUM_BYTES + STEP_SIZE * len(aes_encrypt_energy), STEP_SIZE) print(f"MAX_NUM_BYTES: {num_bytes[-1]}") # + from lmfit import Model def line(x, slope, intercept): """a line""" return [slope*i + intercept for i in x] mod = Model(line) pars = mod.make_params(slope=0, intercept=1) # pars['intercept'].set(min=0) # - results = [] ylabels = (['Energy [mJ]'] * 2 + ['Time [ms]'] * 2) * 2 + ['Energy [mJ]'] + ['Time [ms]'] parameter_names = [ 'Encrypt Energy', 'Flash Write Energy', 'Flash Read Energy', 'Decrypt Energy', 'Encrypt Time', 'Flash Write Time', 'Flash Read Time', 'Decrypt Time', 'Total Energy', 'Total Time', ] for y in [aes_encrypt_energy, aes_flash_write_energy, aes_flash_read_energy, aes_decrypt_energy, aes_encrypt_time_s, aes_flash_write_time_s, aes_flash_read_time_s, aes_decrypt_time_s, [e + w + r + d for (e,w,r,d) in zip(aes_encrypt_energy, aes_flash_write_energy, aes_flash_read_energy, aes_decrypt_energy)], [e + w + r + d for (e,w,r,d) in zip(aes_encrypt_time_s, aes_flash_write_time_s, aes_flash_read_time_s, aes_decrypt_time_s)]]: result = mod.fit(y, pars, x=num_bytes) print(result.fit_report()) fig, grid = result.plot( xlabel='Checkpoint Size [Bytes]', ylabel=ylabels[len(results)]) fig.tight_layout(rect=(0.05, 0.05, 1, 1)) fig.set_size_inches(5, 4.5, forward=True) fig.canvas.set_window_title( f"Residuals of {experiment_name} {parameter_names[len(results)]}") fig.show() results.append(result) fig2 = plt.figure(figsize=(8, 6)) fig2.canvas.set_window_title(f"Analysis {experiment_name}") charge_color = 'r' time_color = 'b' fig2.clf() # fig2.suptitle("Energy analysis of AES") ax1 = fig2.add_subplot(1, 1, 1) ax2 = ax1.twinx() ax1.set_xlabel('Checkpoint Size [Bytes]') ax1.set_ylabel('Energy [mJ]', color=charge_color) ax2.set_ylabel('Time [ms]', color=time_color) ax1.tick_params('y', colors=charge_color) ax2.tick_params('y', colors=time_color) lines = [] lines += ax1.plot(num_bytes, aes_encrypt_energy, charge_color+'-', label=f'{parameter_names[len(lines)]}') lines += ax1.plot(num_bytes, aes_flash_write_energy, charge_color+'-.', label=f'{parameter_names[len(lines)]}') lines += ax1.plot(num_bytes, aes_flash_read_energy, charge_color+':', label=f'{parameter_names[len(lines)]}') lines += ax1.plot(num_bytes, aes_decrypt_energy, charge_color+'--', label=f'{parameter_names[len(lines)]}') lines += ax2.plot(num_bytes, aes_encrypt_time_s, time_color+'-', label=f'{parameter_names[len(lines)]}') lines += ax2.plot(num_bytes, aes_flash_write_time_s, time_color+'-.', label=f'{parameter_names[len(lines)]}') lines += ax2.plot(num_bytes, aes_flash_read_time_s, time_color+':', label=f'{parameter_names[len(lines)]}') lines += ax2.plot(num_bytes, aes_decrypt_time_s, time_color+'--', label=f'{parameter_names[len(lines)]}') ax1.legend(handles=lines) ax1.set_title( f"{parameter_names[0]}: Slope {results[0].params['slope'].value * model_j_scale:.04} nJ/B, Intercept {results[0].params['intercept'].value * model_j_scale:.04} nJ\n" + f"{parameter_names[1]}: Slope {results[1].params['slope'].value * model_j_scale:.04} nJ/B, Intercept {results[1].params['intercept'].value * model_j_scale:.04} nJ\n" + f"{parameter_names[2]}: Slope {results[2].params['slope'].value * model_j_scale:.04} nJ/B, Intercept {results[2].params['intercept'].value * model_j_scale:.04} nJ\n" + f"{parameter_names[3]}: Slope {results[3].params['slope'].value * model_j_scale:.04} nJ/B, Intercept {results[3].params['intercept'].value * model_j_scale:.04} nJ\n" + f"{parameter_names[4]}: Slope {results[4].params['slope'].value * model_t_scale:.04} $\mu$s/B, Intercept {results[4].params['intercept'].value * model_t_scale:.04} $\mu$s\n" + f"{parameter_names[5]}: Slope {results[5].params['slope'].value * model_t_scale:.04} $\mu$s/B, Intercept {results[5].params['intercept'].value * model_t_scale:.04} $\mu$s\n" + f"{parameter_names[6]}: Slope {results[6].params['slope'].value * model_t_scale:.04} $\mu$s/B, Intercept {results[6].params['intercept'].value * model_t_scale:.04} $\mu$s\n" + f"{parameter_names[7]}: Slope {results[7].params['slope'].value * model_t_scale:.04} $\mu$s/B, Intercept {results[7].params['intercept'].value * model_t_scale:.04} $\mu$s\n" + f"{parameter_names[8]}: Slope {results[8].params['slope'].value * model_j_scale:.04} nJ/B, Intercept {results[8].params['intercept'].value * model_j_scale:.04} nJ\n" + f"{parameter_names[9]}: Slope {results[9].params['slope'].value * model_t_scale:.04} $\mu$s/B, Intercept {results[9].params['intercept'].value * model_t_scale:.04} $\mu$s\n") fig2.tight_layout() fig2.show() print( f"{parameter_names[0]}: Slope {results[0].params['slope'].value * model_j_scale:.020} nJ/B, Intercept {results[0].params['intercept'].value * model_j_scale:.020} nJ\n" + f"{parameter_names[1]}: Slope {results[1].params['slope'].value * model_j_scale:.020} nJ/B, Intercept {results[1].params['intercept'].value * model_j_scale:.020} nJ\n" + f"{parameter_names[2]}: Slope {results[2].params['slope'].value * model_j_scale:.020} nJ/B, Intercept {results[2].params['intercept'].value * model_j_scale:.020} nJ\n" + f"{parameter_names[3]}: Slope {results[3].params['slope'].value * model_j_scale:.020} nJ/B, Intercept {results[3].params['intercept'].value * model_j_scale:.020} nJ\n" + f"{parameter_names[4]}: Slope {results[4].params['slope'].value * model_t_scale:.020} $\mu$s/B, Intercept {results[4].params['intercept'].value * model_t_scale:.020} $\mu$s\n" + f"{parameter_names[5]}: Slope {results[5].params['slope'].value * model_t_scale:.020} $\mu$s/B, Intercept {results[5].params['intercept'].value * model_t_scale:.020} $\mu$s\n" + f"{parameter_names[6]}: Slope {results[6].params['slope'].value * model_t_scale:.020} $\mu$s/B, Intercept {results[6].params['intercept'].value * model_t_scale:.020} $\mu$s\n" + f"{parameter_names[7]}: Slope {results[7].params['slope'].value * model_t_scale:.020} $\mu$s/B, Intercept {results[7].params['intercept'].value * model_t_scale:.020} $\mu$s\n" + f"{parameter_names[8]}: Slope {results[8].params['slope'].value * model_j_scale:.020} nJ/B, Intercept {results[8].params['intercept'].value * model_j_scale:.020} nJ\n" + f"{parameter_names[9]}: Slope {results[9].params['slope'].value * model_t_scale:.020} $\mu$s/B, Intercept {results[9].params['intercept'].value * model_t_scale:.020} $\mu$s\n" ) # Save Charge amount list into pickle file import pickle pickle.dump(aes_encrypt_energy, open("aes_flash_encrypt_energy_mJ.p", "wb")) pickle.dump(aes_decrypt_energy, open("aes_flash_decrypt_energy_mJ.p", "wb")) pickle.dump(aes_flash_write_energy, open("aes_flash_write_energy_mJ.p", "wb")) pickle.dump(aes_flash_read_energy, open("aes_flash_read_energy_mJ.p", "wb")) pickle.dump(aes_encrypt_time_s, open("aes_flash_encrypt_time_ms.p", "wb")) pickle.dump(aes_decrypt_time_s, open("aes_flash_decrypt_time_ms.p", "wb")) pickle.dump(aes_flash_write_time_s, open("aes_flash_write_time_ms.p", "wb")) pickle.dump(aes_flash_read_time_s, open("aes_flash_read_time_ms.p", "wb")) aes = [aes_encrypt_energy, aes_flash_write_energy, aes_flash_read_energy, aes_decrypt_energy, aes_encrypt_time_s, aes_flash_write_time_s, aes_flash_read_time_s, aes_decrypt_time_s] for i in aes: print(len(i), len(i)*16) # ## Write config file # + import json config = {} config["name"] = "AES-256 Flash" config["project_paths"] = [project_path] config["config_dict"] = config_dict config["config_dict_plot"] = config_dict_plot config["analysis"] = {"pins":{2: ["AES-256 Encrypt", "AES-256 Decrypt"], 3: ["AES-256 Flash Write", "AES-256 Flash Read"]}, "result_types": ["Charge", "Time"], "section_types": {"init": [], "store": ["AES-256 Encrypt", "AES-256 Flash Write"], "load": ["AES-256 Flash Read", "AES-256 Decrypt"], "exit": []}, "labels": { "Charge": {"x":"Data Size", "x_unit": "byte", "y": "Charge", "y_unit": "C"}, "Time": {"x":"Data Size", "x_unit": "byte", "y": "Time", "y_unit": "s"}, }, "x_step": MBEDTLS_AES_BLOCK_SIZE} with open("looped_experiment.json", 'w') as config_file: json.dump(config, config_file, indent=4) # - # # Write model data dump_pickle = True fit_lm = True verbose = 2 show_lm_plot = 2 # Parse data analysis_config = config.get("analysis") result_types = analysis_config.get("result_types") x_step = analysis_config.get("x_step") parsed_data = {} for pin, parameter_names in analysis_config.get("pins").items(): data2 = power_and_time_per_pulse( data, int(pin), stop_function=stop_function) num_names = len(parameter_names) for i, parameter_name in enumerate(parameter_names): end_index = -drop * num_names or None parsed_data[parameter_name] = { result_types[0]: data2[0][i:end_index:num_names], result_types[1]: data2[1][i:end_index:num_names], "x_step": x_step} if dump_pickle: pickle.dump(parsed_data, open( path.join(path.curdir, f"{config_dict.get('file_name_base')}_looped.p"), "wb")) # Fit lm if fit_lm: model = None if model is None: def line(x, intercept, slope): """a line""" return [intercept + slope*i for i in x] model = Model(line) params = model.make_params(intercept=0, slope=1) # params['intercept'].set(min=0) else: params = model.params model_results = {} labels = analysis_config.get("labels") for parameter_name in parsed_data.keys(): length = len(parsed_data[parameter_name][result_types[0]]) x_step = parsed_data[parameter_name]["x_step"] num_bytes = range(x_step, (length+1)*x_step, x_step) if verbose: print( f"Fitting model to {parameter_name} with {length} " + f"samples, from {min(num_bytes)} to {max(num_bytes)} " f"bytes in steps of {x_step}.") model_result = {} for result_type in result_types: model_result[result_type] = model.fit( parsed_data[parameter_name][result_type], params, x=num_bytes) if verbose >= 2: print(model_result[result_type].fit_report()) # Plot multiple view if show_lm_plot >= 2: fig, grid = model_result[result_type].plot( xlabel=f"{labels[result_type]['x']} " + f"[{labels[result_type]['x_unit']}]", ylabel=f"{labels[result_type]['y']} " + f"[{labels[result_type]['y_unit']}]") fig.canvas.set_window_title( f"Residuals of {parameter_name}") fig.tight_layout() fig.show() model_results[parameter_name] = model_result # Plot single view if show_lm_plot: import matplotlib.pyplot as plt fig = plt.figure(figsize=(9, 6)) fig.canvas.set_window_title(f"Analysis {config.get('name')}") colors = dict(zip(result_types, ['r', 'b'])) line_styles = ( line_style for line_style in ('-', '--', '-.', ':') * 2) # fig.suptitle(f"Energy analysis of {config.get('name')}") ax = {} ax[result_types[0]] = fig.add_subplot(1, 1, 1) ax[result_types[1]] = ax[result_types[0]].twinx() ax[result_types[0]].set_xlabel( f"{labels[result_types[0]]['x']} " + f"[{labels[result_types[0]]['x_unit']}]") for result_type in result_types: ax[result_type].set_ylabel( f"{labels[result_type]['y']} " + f"[{labels[result_type]['y_unit']}]", color=colors[result_type]) ax[result_type].tick_params('y', colors=colors[result_type]) lines = [] title_str = "" for parameter_name in parsed_data.keys(): length = len(parsed_data[parameter_name][result_types[0]]) x_step = parsed_data[parameter_name]["x_step"] num_bytes = range(x_step, (length+1)*x_step, x_step) model_result = {} line_style = next(line_styles) for result_type in result_types: label = f"{parameter_name} {labels[result_type]['y']}" lines += ax[result_type].plot( num_bytes, parsed_data[parameter_name][result_type], colors[result_type] + line_style, label=label) title_str += f"{label} " for param in params.keys(): title_str += "".join( f"{params[param].name.capitalize()}: ") title_str += "".join( f"{model_results[parameter_name][result_type].params[param].value: .03} ") title_str += "".join( f"{labels[result_type]['y_unit']}, ") title_str = title_str[:-2] + \ f" per {labels[result_type]['x_unit']}\n" ax[result_types[0]].legend(handles=lines) ax[result_types[0]].set_title(title_str[:-1]) # fig.tight_layout() fig.tight_layout(rect=(0.05, 0.05, 1, 1)) fig.set_size_inches(8, 6, forward=True) fig.show() # Save model results to file if dump_pickle: model_results_dump = {} for parameter_name in model_results.keys(): model_results_dump[parameter_name] = {} for result_type in model_results[parameter_name].keys(): model_results_dump[parameter_name][result_type] = \ model_results[parameter_name][result_type].values pickle.dump(model_results_dump, open(path.join( path.curdir, f"{config_dict.get('file_name_base')}_model.p"), "wb"))
SAM-L11 Cortex-M23/Experiments - AES Mode ECB/AES-256_Flash/Experiment_AES_Flash.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chunker de Textos - Implantação # # * Este componente requer conexão com a internet para o download online de dependências do NLTK. # * Este componente faz janelamento de textos longos, dividindo um texto em partes menores, cada parte de texto criada (chunk), possui uma quantidade de elementos (palavras ou sentenças) pré-definidas pelo usuário; como também a quantidade de elementos que se sobrepõem entre cada chunk consecutivo. # * Exemplo: # # - Texto: "Hoje o dia amanheceu ensolarado, vou fazer uma caminhada e ouvir os sons da natureza." # # - Hiperparâmetros: { # "chunkenizer": "word", # "chunk_size": 5, # "chunk_overlap": 2 # } # # - Saída: [ # "Hoje o dia amanheceu ensolarado,", # "amanheceu ensolarado, vou fazer uma", # "fazer uma caminhada e ouvir", # "e ouvir os sons da", # "sons da natureza." # ] # # # ### **Em caso de dúvidas, consulte os [tutoriais da PlatIAgro](https://platiagro.github.io/tutorials/).** # ## Declaração de Classe para Predições em Tempo Real # # A tarefa de implantação cria um serviço REST para predições em tempo-real.<br> # Para isso você deve criar uma classe `Model` que implementa o método `predict`. # + # %%writefile Model.py import joblib import numpy as np import pandas as pd from chunker import Chunker from utils import generate_dataframe class Model: def __init__(self): self.loaded = False def load(self): # Load artifacts artifacts = joblib.load("/tmp/data/chunker.joblib") self.model_parameters = artifacts["model_parameters"] self.inference_parameters = artifacts["inference_parameters"] # Initialize chunker self.chunker = Chunker(**self.model_parameters) # Set model loaded self.loaded = True print("Loaded model") def class_names(self): column_names = list(self.inference_parameters['columns']) + [self.inference_parameters['output_column_name']] return column_names def predict(self, X, feature_names, meta=None): if not self.loaded: self.load() if feature_names: # Antes de utilizar o conjunto de dados X no modelo, reordena suas features de acordo com a ordem utilizada no treinamento df = pd.DataFrame(X, columns=feature_names) X = df[self.inference_parameters['columns']] else: X = pd.DataFrame(X, columns=self.inference_parameters['columns']) # Generate Chunks chunks = self.chunker(X[self.inference_parameters['text_column_name']]) # Generate Dataframe if self.inference_parameters['replicate_data'] == 'sim': # Replicate Data output_df = generate_dataframe(X, chunks, self.inference_parameters) else: output_df = X.copy() output_df[self.inference_parameters['output_column_name']] = chunks # Output output = output_df.to_numpy() return output
tasks/chunker/Deployment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Circuit simulations # # Simphony can also chain components into circuits import matplotlib.pyplot as plt import gdslib.simphony as gs import gdslib.simphony.components as gc import gdsfactory as gf c = gf.components.mzi(delta_length=10) c c.plot_netlist() # + circuit = gs.components.mzi(delta_length=10, splitter=gs.components.mmi1x2) gs.plot_circuit( circuit, start=1500e-9, stop=1600e-9, logscale=True, ) # + circuit = gs.components.mzi(delta_length=100, splitter=gs.components.mmi1x2) gs.plot_circuit( circuit, start=1500e-9, stop=1600e-9, logscale=True, ) # - for element in circuit.elements: print(element) # Lets add grating couplers to the mzi circuit. c = gc.gc1550te() gs.plot_model(c, pin_in='port 1') mzi_layout= gf.components.mzi(delta_length=100) mzi_with_gc_layout = gf.routing.add_fiber_single(component=mzi_layout, with_loopback=False) mzi_with_gc_layout # + circuit_gc = gs.add_gc(circuit=circuit, gc=gs.components.gc1550te) gs.plot_circuit( circuit_gc, start=1520e-9, stop=1580e-9, logscale=True, ) # - # ## Montecarlo sweep variation # # We can also account for montecarlo variations of the circuits # # FIXME gs.plot_circuit_montecarlo( circuit_gc, start=1500e-9, stop=1600e-9, logscale=True, runs=10 )
docs/notebooks/10_circuits_mzi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import gym import numpy as np from IPython.display import clear_output # - env = gym.make("MountainCar-v0") # + print(f"Observation Space Low : {env.observation_space.low}") print(f"Observation Space High: {env.observation_space.high}") print(f"\nAction Space: {env.action_space}") # + discrete_os_shape = [18, 14] discrete_os_win_size = abs(env.observation_space.high - env.observation_space.low) / discrete_os_shape print(f"Discrete OS Window Size: {discrete_os_win_size}") # - def getDiscreteState(state): discrete_state = (state - env.observation_space.low) / discrete_os_win_size return tuple(discrete_state.astype(int)) def getEpsilonGreedyPolicy(Q, epsilon, nA): def policy(state): A = np.ones(nA) * (epsilon / nA) best_action = np.argmax(Q[state]) A[best_action] = A[best_action] + (1 - epsilon) return A return policy def QLearning(env, num_episodes, discount=1.0, alpha=0.1, epsilon=0.1): Q = np.zeros(discrete_os_shape + [env.action_space.n]) policy = getEpsilonGreedyPolicy(Q, epsilon, env.action_space.n) episode_rewards = np.zeros(num_episodes) episode_finished = [] for i_episode in range(num_episodes): state = env.reset() discrete_state = getDiscreteState(state) for t in range(200): # if (i_episode + 1) % 5000 == 0: # env.render() action_prob = policy(discrete_state) action_pos = [i for i in range(env.action_space.n)] action = np.random.choice(action_pos, p=action_prob) next_state, reward, done, _ = env.step(action) discrete_next_state = getDiscreteState(next_state) episode_rewards[i_episode] = episode_rewards[i_episode] + reward best_next_action = np.argmax(Q[discrete_next_state]) td_target = reward + discount * Q[discrete_next_state][best_next_action] td_delta = td_target - Q[discrete_state][action] Q[discrete_state][action] = Q[discrete_state][action] + alpha * td_delta if done: if episode_rewards[i_episode] > -200: episode_finished.append([i_episode, episode_rewards[i_episode]]) break discrete_state = discrete_next_state clear_output(True) print(f"[{i_episode + 1:>5}/{num_episodes:>5}] Episode Total Reward: {episode_rewards[i_episode]}") env.close() return Q, episode_finished Q, episode_finished = QLearning(env, 10000, alpha=0.05, epsilon=0.0005) print("Episodes Finished:") for episode in episode_finished[-5:]: print(f"[{episode[0] + 1:>5}] Total Reward: {episode[1]}") def playEnvOnce(env, Q): state = env.reset() discrete_state = getDiscreteState(state) for t in range(200): env.render() action = np.argmax(Q[discrete_state]) next_state, reward, done, _ = env.step(action) discrete_next_state = getDiscreteState(next_state) if done: break discrete_state = discrete_next_state env.close() playEnvOnce(env, Q)
MountainCar-v0/Q-Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/nin-ed/Split-Learning/blob/master/Extended_Vanilla_SplitNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="t0NdEpm45cSa" colab_type="text" # # Tutorial - Extended Vanilla Split Learning # # Extended Vanilla Split Learning is basically a combination of multi-layer splitnn and vertically partitioned data. Here all our partitioned input data goes to one location and labels to another location. But instead of just training our model in two locations we can use few trusted locations having more complex model through which we can train our model. # # ![alt text](https://media.arxiv-vanity.com/render-output/2178060/Supplem1.png) # # # + id="HtlaO14r6HhH" colab_type="code" colab={} """import necessary modules""" import syft, torch from torch import nn, optim from torchvision import datasets, transforms from torch.utils.data import DataLoader # + id="cjQF7PYTEcdm" colab_type="code" colab={} """Create hook and virtual workers.""" hook = syft.TorchHook(torch) """Bob will hold the first cut layer and the input data.""" bob = syft.VirtualWorker(hook, id='bob') """Alice will hold the last cut layer and the labels.""" alice = syft.VirtualWorker(hook, id='alice') """Secure Worker will hold the middle cut layer whom we can trust and train the model through it.""" secure_worker = syft.VirtualWorker(hook, id='secure_worker') # + [markdown] id="jnMh0MDJEmcq" colab_type="text" # ### Defining a function create_models which returns a list of models for training. # # # + id="93LQg8b8E43q" colab_type="code" colab={} def create_models(partition, input_size, hidden_sizes, output_size): models = list() """Create models for each partition in bob's location""" for i in range(partition-1): models.append(nn.Sequential(nn.Linear(int(input_size/partition), hidden_sizes[0]), nn.ReLU())) """Compute the last remaining features as features may or may not be divided equally.""" rem = input_size - int(input_size/partition * (partition-1)) models.append(nn.Sequential(nn.Linear(rem, hidden_sizes[0]), nn.ReLU())) """Create a model for secure_worker""" """Since all partition models will send information having equal second dimension which is hidden size. So we need to multiply input size with partition to match the concatenated layer dimensions.""" models.append(nn.Sequential(nn.Linear(hidden_sizes[0]*partition, hidden_sizes[1]), nn.ReLU())) """Create a model for alice""" models.append(nn.Sequential(nn.Linear(hidden_sizes[1], output_size), nn.LogSoftmax(dim=1))) return models # + [markdown] id="xlk33b7FFGvO" colab_type="text" # ### Creating SplitNN class for adding split learning functionality to our model. # + id="_ErOyWeZFYnt" colab_type="code" colab={} class SplitNN: def __init__(self, models, optimizers, partition, hidden_sizes): super().__init__() self.models = models self.optimizers = optimizers self.partition = partition self.hidden = hidden_sizes self.outputs = [None] * partition def zero_grads(self): for opt in self.optimizers: opt.zero_grad() def forward(self, x): for i in range(self.partition): self.outputs[i] = self.models[i](x[i]) """Concatenate outputs of each partitioned model""" concat_out = torch.cat(tuple(self.outputs[i] for i in range(self.partition)), dim=1) """Transfer this concatenated layer to secure_worker's location""" if concat_out.location == self.models[-2].location: secure_inp = concat_out.detach().requires_grad_() else: secure_inp = concat_out.detach().move(self.models[-2].location).requires_grad_() """Get the output from secure worker's model""" secure_out = self.models[-2](secure_inp) """Transfer this output to alice's location""" if secure_out.location == self.models[-1].location: alice_inp = secure_out.detach().requires_grad_() else: alice_inp = secure_out.detach().move(self.models[-1].location).requires_grad_() """Get the output from alice's model and return it""" alice_out = self.models[-1](alice_inp) self.concat_out = concat_out self.secure_inp = secure_inp self.secure_out = secure_out self.alice_inp = alice_inp self.alice_out = alice_out return alice_out def backward(self): """Get the gradients from alice's location and pass it to secure_worker's location""" if self.secure_out.location == self.alice_inp.location: grad1 = self.alice_inp.grad.copy() else: grad1 = self.alice_inp.grad.copy().move(self.secure_out.location) """Backpropagate and find the gradients of secure_worker's model""" self.secure_out.backward(grad1) """Get the gradients from secure_worker's location, and divide and pass it to bob's partitioned models""" if self.concat_out.location == self.secure_inp.location: grad2 = self.secure_inp.grad.copy() else: grad2 = self.secure_inp.grad.copy().move(self.models[0].location) i = 0 while i < self.partition - 1: self.outputs[i].backward(grad2[:, self.hidden[0] * i: self.hidden[0] * (i + 1)], retain_graph=True) i += 1 self.outputs[i].backward(grad2[:, self.hidden[0] * i:], retain_graph=True) def step(self): for opt in self.optimizers: opt.step() # + [markdown] id="mlvAeCPtFinH" colab_type="text" # ### Defining a function to train our model # + id="FznG7XzsFruh" colab_type="code" colab={} def training(models, splitNN, data, target, epochs): def train(data, target, splitnn): splitnn.zero_grads() pred = splitnn.forward(data) criterion = nn.NLLLoss() loss = criterion(pred, target) loss.backward() splitnn.backward() splitnn.step() return loss avg_loss = 0. for e in range(epochs): total_loss = 0. data1, data2, data3 = data for x1, x2, x3, y in zip(data1, data2, data3, target): x1, x2, x3 = x1.send(models[0].location), x2.send(models[0].location), x3.send(models[0].location) y = y.send(models[-1].location) loss = train([x1, x2, x3], y, splitNN) total_loss += loss.get() avg_loss += total_loss/len(data[0]) print(f"Epoch: {e+1}... Training Loss: {total_loss/len(data[0])}") print(f"Average Loss: {avg_loss/epochs}") # + [markdown] id="SZSSA6DDFtNo" colab_type="text" # Now lets define a main function. # + id="jmSJX3uwFxfo" colab_type="code" colab={} def main(): """Assign the number of partitions in vertically partitioned data.""" partition = 3 """Define a transform""" transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) """Import dataset and load it.""" trainset = datasets.MNIST('mnist', download=False, train=True, transform=transform) trainloader = DataLoader(trainset, batch_size=64, shuffle=True) """Lets define sizes.""" input_size = 784 hidden_sizes = [128, 256, 512] output_size = 10 """Lets create 3 lists that would act as our vertically paritioned datasets and one list for labels""" image_set1, image_set2, image_set3 = list(), list(), list() labels = list() """Assign how many data should contain in a single dataset. Since mnist dataset have a shape of [28 x 28 x 1] and we sliced the dataset in 64 batches, after reshaping it would be [64 x 784]. Now we have to divide these 784 features into three datasets""" distr = int(input_size/partition) for image, label in trainloader: image = image.view(image.shape[0], -1) image_set1.append(image[:, 0:distr]) image_set2.append(image[:, distr:distr*2]) image_set3.append(image[:, distr*2:]) labels.append(label) models = create_models(partition, input_size, hidden_sizes, output_size) optimizers = [optim.SGD(model.parameters(), lr=0.01) for model in models] """Build a list locations where each model has to be sent.""" model_locations = list() for _ in range(partition): model_locations.append(bob) model_locations.append(secure_worker) for _ in range(partition): model_locations.append(alice) """Send each model to its specific location.""" for model, location in zip(models, model_locations): model.send(location) """Create an object of SplitNN class""" splitNN = SplitNN(models, optimizers, partition, hidden_sizes) """Now to train our model, call training function.""" epochs = 20 training(models, splitNN, [image_set1, image_set2, image_set3], labels, epochs) # + id="g4g8zCo7F1DJ" colab_type="code" colab={} if __name__ == '__main__': main()
Extended_Vanilla_SplitNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp test # - #export from local.imports import * from local.notebook.showdoc import show_doc from PIL import Image # # Test # # > Helper functions to quickly write tests in notebooks # ## Simple test functions # We can check that code raises an exception when that's expected (`test_fail`). To test for equality or inequality (with different types of things) we define a simple funciton `test` that compares two object with a given `cmp` operator. #export def test_fail(f, msg='', contains=''): "Fails with `msg` unless `f()` raises an exception and (optionally) has `contains` in `e.args`" try: f() except Exception as e: assert not contains or contains in str(e) return assert False,f"Expected exception but none raised. {msg}" # + def _fail(): raise Exception("foobar") test_fail(_fail, contains="foo") def _fail(): raise Exception() test_fail(_fail) # - #export def test(a, b, cmp,cname=None): "`assert` that `cmp(a,b)`; display inputs and `cname or cmp.__name__` if it fails" if cname is None: cname=cmp.__name__ assert cmp(a,b),f"{cname}:\n{a}\n{b}" test([1,2],[1,2], operator.eq) test_fail(lambda: test([1,2],[1], operator.eq)) test([1,2],[1], operator.ne) test_fail(lambda: test([1,2],[1,2], operator.ne)) show_doc(all_equal) test(['abc'], ['abc'], all_equal) show_doc(equals) test([['abc'],['a']], [['abc'],['a']], equals) #export def nequals(a,b): "Compares `a` and `b` for `not equals`" return not equals(a,b) test(['abc'], ['ab' ], nequals) # ## test_eq test_ne, etc... # Just use `test_eq`/`test_ne` to test for `==`/`!=`. `test_eq_type` check things are equals and of the same type. We define them using `test`: #export def test_eq(a,b): "`test` that `a==b`" test(a,b,equals, '==') # + hide_input=false test_eq([1,2],[1,2]) test_eq([1,2],map(int,[1,2])) test_eq(array([1,2]),array([1,2])) test_eq(array([1,2]),array([1,2])) test_eq([array([1,2]),3],[array([1,2]),3]) test_eq(dict(a=1,b=2), dict(b=2,a=1)) test_fail(lambda: test_eq([1,2], 1), contains="==") test_eq({'a', 'b', 'c'}, {'c', 'a', 'b'}) # + df1 = pd.DataFrame(dict(a=[1,2],b=['a','b'])) df2 = pd.DataFrame(dict(a=[1,2],b=['a','b'])) test_eq(df1,df2) test_eq(df1.a,df2.a) class T(pd.Series): pass test_eq(df1.iloc[0], T(df2.iloc[0])) # - #export def test_eq_type(a,b): "`test` that `a==b` and are same type" test_eq(a,b) test_eq(type(a),type(b)) if isinstance(a,(list,tuple)): test_eq(map(type,a),map(type,b)) # + hide_input=false test_eq_type(1,1) test_fail(lambda: test_eq_type(1,1.)) test_eq_type([1,1],[1,1]) test_fail(lambda: test_eq_type([1,1],(1,1))) test_fail(lambda: test_eq_type([1,1],[1,1.])) # - #export def test_ne(a,b): "`test` that `a!=b`" test(a,b,nequals,'!=') # + hide_input=false test_ne([1,2],[1]) test_ne([1,2],[1,3]) test_ne(array([1,2]),array([1,1])) test_ne(array([1,2]),array([1,1])) test_ne([array([1,2]),3],[array([1,2])]) test_ne([3,4],array([3])) test_ne([3,4],array([3,5])) test_ne(dict(a=1,b=2), ['a', 'b']) test_ne(['a', 'b'], dict(a=1,b=2)) # - #export def is_close(a,b,eps=1e-5): "Is `a` within `eps` of `b`" if hasattr(a, '__array__') or hasattr(b,'__array__'): return (abs(a-b)<eps).all() if isinstance(a, (Iterable,Generator)) or isinstance(b, (Iterable,Generator)): return is_close(np.array(a), np.array(b), eps=eps) return abs(a-b)<eps #export def test_close(a,b,eps=1e-5): "`test` that `a` is within `eps` of `b`" test(a,b,partial(is_close,eps=eps),'close') test_close(1,1.001,eps=1e-2) test_fail(lambda: test_close(1,1.001)) test_close([-0.001,1.001], [0.,1.], eps=1e-2) test_close(np.array([-0.001,1.001]), np.array([0.,1.]), eps=1e-2) test_close(array([-0.001,1.001]), array([0.,1.]), eps=1e-2) #export def test_is(a,b): "`test` that `a is b`" test(a,b,operator.is_, 'is') test_fail(lambda: test_is([1], [1])) a = [1] test_is(a, a) #export def test_shuffled(a,b): "`test` that `a` and `b` are shuffled versions of the same sequence of items" test_ne(a, b) test_eq(Counter(a), Counter(b)) a = list(range(50)) b = copy(a) random.shuffle(b) test_shuffled(a,b) test_fail(lambda:test_shuffled(a,a)) a = 'abc' b = 'abcabc' test_fail(lambda:test_shuffled(a,b)) a = ['a', 42, True] b = [42, True, 'a'] test_shuffled(a,b) #export def test_stdout(f, exp, regex=False): "Test that `f` prints `exp` to stdout, optionally checking as `regex`" s = io.StringIO() with redirect_stdout(s): f() if regex: assert re.search(exp, s.getvalue()) is not None else: test_eq(s.getvalue(), f'{exp}\n' if len(exp) > 0 else '') test_stdout(lambda: print('hi'), 'hi') test_fail(lambda: test_stdout(lambda: print('hi'), 'ho')) test_stdout(lambda: 1+1, '') test_stdout(lambda: print('hi there!'), r'^hi.*!$', regex=True) #export TEST_IMAGE = 'images/puppy.jpg' im = Image.open(TEST_IMAGE).resize((128,128)) im #export def test_fig_exists(ax): "Test there is a figure displayed in `ax`" assert ax and len(np.frombuffer(ax.figure.canvas.tostring_argb(), dtype=np.uint8)) fig,ax = plt.subplots() ax.imshow(array(im)); test_fig_exists(ax) # ## Export - #hide from local.notebook.export import notebook2script notebook2script(all_fs=True)
dev/00_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Introduction To PyBrain # # This is a tutorial to introduce basic knowledge of Pybrain and follow that by implementing rudimentary neural networks to solve simple classification problems. While neural networks generally takes a while to understand, this tutorial provides an simple and fast way to experience some of the capabilities neural networks have. PyBrain is short for **Py**thon-**B**ased **R**einforcement Learning, **A**rtificial **I**ntelligence and **N**eural Network Library. Though no longer actively developed, PyBrain is simpler than PyLearn and other packages out there since the library features perceptron as well as neural networks. # ### Table of Contents # - [Required Packages](#Required-Packages) # - [Understanding Our Dataset](#Understanding-Our-Dataset) # - [Getting Data In](#Getting-Data-In) # - [Creating the Neural Network](#Creating-the-Neural-Network) # - [Summary and Further Resources](#Summary-and-Further-Resources) # # # ## Required Packages # # - PyBrain # - Numpy # - Matplotlib # - Scikit-Learn # # Before getting started, you'll need to install the various libraries that we will be using. You can install all of the required pacakges through `pip`: # # > pip install --upgrade pybrain # > pip install --upgrade numpy # > pip install --upgrade pandas # > pip install --upgrade matplotlib # > pip install --upgrade scikit-learn # # # import pybrain import numpy as np import pandas as pd import matplotlib.pyplot as plt import sklearn # ## Understanding Our Dataset # # The Iris flower data set also known as Fisher's Iris data set is a data set created by <NAME> in 1936. The data set contains 50 samples from three species of Iris flower. For each sample, four features were used to measure the flower: length and width of the sepals and petals (cm). To understand our data better, let's take a look into the Iris flower data set included in Scikit-Learn. from sklearn import datasets iris = datasets.load_iris() X = iris.data y = iris.target # + # Print first 5 samples of the data set print ["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"] print X[:5] # Print dimensions of the data set print ["Rows", "Cols"] print X.shape # - # Stored in our variable ```X```, we have our data, where each row was a sample and the columns' indices being Sepal Length, Sepal Width, Petal Length and Petal Width. Here we see the first 5 samples in the data set. The entire data set has 150 samples, with 4 features per sample. # + # Print target results print y.tolist() yList = y.tolist() # List of unique elements print np.unique(y) # Counts of each type of Iris print "Count of Iris setosa: " + str(yList.count(0)) print "Count of Iris versicolor: " + str(yList.count(1)) print "Count of Iris virginica: " + str(yList.count(2)) # - # Our particular data set includes only three types of irises shown in ```y``` as values from 0 to 2 representing a particular type. The type of irises include: Iris setosa, Iris versicolor, and Iris virginica. In our data set we have 50 samples of each type of Iris. # # To understand what each flower type's characteristics are like, we can do some exploratory data analysis on our data. Let's first split our data set into separate lists based on its type. # + # Split iris.data to 3 lists of length 50 listOfIrises = np.split(X, 3) # Load the data into Panda DataFrames IrisOne = pd.DataFrame(listOfIrises[0], columns=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"], index=range(50)) IrisTwo = pd.DataFrame(listOfIrises[1], columns=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"], index=range(50)) IrisThree = pd.DataFrame(listOfIrises[2], columns=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"], index=range(50)) # Retrieve sepal length and petal length sepalLengthOne = IrisOne['Sepal Length'].values sepalLengthTwo = IrisTwo['Sepal Length'].values sepalLengthThree = IrisThree['Sepal Length'].values petalLengthOne = IrisOne['Petal Length'].values petalLengthTwo = IrisTwo['Petal Length'].values petalLengthThree = IrisThree['Petal Length'].values sepalWidthOne = IrisOne['Sepal Width'].values sepalWidthTwo = IrisTwo['Sepal Width'].values sepalWidthThree = IrisThree['Sepal Width'].values petalWidthOne = IrisOne['Petal Width'].values petalWidthTwo = IrisTwo['Petal Width'].values petalWidthThree = IrisThree['Petal Width'].values print("Iris setosa Statistics (Blue)") print(IrisOne.describe()) print print("Iris versicolor Statistics (Green)") print(IrisTwo.describe()) print print("Iris virginica Statistics (Red)") print(IrisThree.describe()) print ############# ## PLOT #1 ## ############# _, ax = plt.subplots() # Plot points in scatter plot ax.scatter(sepalLengthOne.tolist(), petalLengthOne.tolist(), color='blue') ax.scatter(sepalLengthTwo.tolist(), petalLengthTwo.tolist(), color='green') ax.scatter(sepalLengthThree.tolist(), petalLengthThree.tolist(), color='red') # Set x and y labels ax.set_xlabel('Sepal Length (cm)') ax.set_ylabel('Petal Length (cm)') ax.set_title('Sepal Length v Petal Length') plt.show() # - # By plotting sepal length on the x-axis and petal length on the y-axis, we can see noticeable differences in the types of Iris flower. # # The blue-labeled Irises have a mean petal length of 1.46 while the mean petal lengths of green and red labeled Irises each have values of 4.26 and 5.55 respectively. We can also see a difference in the range of sepal lengths of the blue compared to red and green, where blue has a range of (4.3, 5.8) while red and green have (4.9, 7.0) and (4.9, 7.9) respectively. # # Now let's plot sepal width with petal width and analyze the scatter plot. # # + ############# ## PLOT #2 ## ############# import matplotlib.pyplot as plt _ , ax2 = plt.subplots() # Plot points in scatter plot ax2.scatter(sepalWidthOne.tolist(), petalWidthOne.tolist(), color='blue') ax2.scatter(sepalWidthTwo.tolist(), petalWidthTwo.tolist(), color='green') ax2.scatter(sepalWidthThree.tolist(), petalWidthThree.tolist(), color='red') # Set x and y labels ax2.set_xlabel('Sepal Width (cm)') ax2.set_ylabel('Petal Width (cm)') ax2.set_title('Sepal Width v Petal Width') plt.show() # - # By plotting sepal width on the x-axis and petal width on the y-axis, we can see noticeable differences in the types of Iris flower. # # Similar to the previous plot, we see a discrepancy of blue with red and green. Red in this plot seems to have a little bit more of a distance from green's scatter. The blue-labeled Irises have a mean petal width of 0.24 while the mean petal width of green and red labeled Irises each have values of 1.32 and 2.02 respectively. # # Similar to how we would discriminate the differences that we see in these plots, a neural network would use similar logic to determine what to classify an input. # ## Getting Data In # # Let's first import the Python modules from Pybrain that we'll need for creating our neural network. # from pybrain.datasets.classification import ClassificationDataSet from pybrain.tools.shortcuts import buildNetwork from pybrain.utilities import percentError from pybrain.supervised.trainers import BackpropTrainer from pybrain.structure.modules import SoftmaxLayer # Now that we have taken a look at the data, and we have imported the necessary components of our neural network, let's now load the data and specify the parameters for the target data. # + # initialize empty data set data = ClassificationDataSet(inp=4, nb_classes=3, class_labels=["Iris setosa", "Iris versicolor", "Iris virginica"]) # append data samples to the data set for i in range(len(X)): data.addSample(X[i], y[i]) # - # Here we instantiate our data set to take in our input. The parameter `inp` specifies the dimensionality of our input, which is 4 in our case the four features of the flower. The parameter `nb_classes` is used to explicitly state the number of target classes for our outputs. Lastly, the parameter `class_labels` is used to name the three classes we have targeted previously. # # After initializing our empty data set, we input the data and target by row into the dataset. # # Common in training neural networks, we'll split our datasets in three: `training`, `validation`, and `testing` data. # - The `training` data is used to train our neural network, letting our algorithm adjust parameters based on the data. # - The `validation` data is used to eliminate which models paired with their parameters overfits the data. Here we are verifying that an increase in accuracy over the training data, will correspond to an increase in accuracy over data not shown to the neural network before (ie the validation data). Any decrease in accuracy means that we are overfitting to the data in our training dataset. # - The `testing` data is used for testing the final model to see the predictive power of the model (usually a float between 0, 1). # # We can split the data using Pybrain's own function `splitWithProportion`. We will use the proportion of 70%, 15%, 15% respectively as recommended by sources like Mathworks: # + # Split by 70% followed by 50% training, rest = data.splitWithProportion(0.7) validation, testing = rest.splitWithProportion(0.5) # Check the ratios of our data sets print "Data ratio:" print len(training), ":", len(validation), ":", len(testing) print print "Percentage training data:" print 1.0*len(training)/len(data) print "Percentage validation data:" print 1.0*len(validation)/len(data) print "Percentage testing data:" print 1.0*len(testing)/len(data) # - # Since we are doing a classification problem with neural networks, its best that we encode the classes using one output neuron per class. This is because we will eventually use a Softmaxlayer activation function to determine the class, which returns a binary output. # Convert target data into a binary representation that is suitable for our classification problem training._convertToOneOfMany() validation._convertToOneOfMany() testing._convertToOneOfMany() # Doing this converts the target classes into a list of three binary outputs, an encoding that is suitable for three neurons. # + # Take a look at the first 10 target outputs after the encoding targets = training['target'] for i in range(10): print targets[i] # - # Lastly let's just check all of our data sets for the right dimensionalities. They should all have inputs of 4 dimensions, and outputs of 3 (the three classes we defined). Then let's check if we properly imported all the data from the iris data set into our blank data set. # + print training.indim, training.outdim print validation.indim, validation.outdim print testing.indim, testing.outdim print len(data['input']), len(data['target']) # - # Everything looks good, so lets move on! # ## Creating the Neural Network # Finally reaching the part where we create the neural network, let's first define our neural network. In the code below, we instantiate our network to have 4 inputs, 3 hidden neurons, and 3 output neurons. # # Neurons are key structures in neural networks, that simulate how our brains work. They take in multiple binary inputs, $n_1, n_2, n_3, ...$ and produces a singular binary output. Our brains work in the same way because we choose to take in certain information when making decisions. To mimic even further how our brains work, there are weights associated to each input $w_1, w_2, w_3, ...$, similar to how we value certain information more than others. To create the binary output, the neuron would get a sum of the weighted inputs: $\sum{n_i w_i}$ and case on if it reached a certain total called the $threshold$ $value$. These neurons are the basic building blocks of the construction of neural networks. # # Lastly, we use the outclass called a `SoftmaxLayer` because we are trying to solve a classification problem. The Softmax activation function used in the final layer of the network converts a vector and an index to a real value, in our case 0/1. # # Next, we instantiate our backpropagation trainer. This trainer will run training data and calculates the gradient of a loss function for all the weights used in the network. Then the trainer will attempt to optimize the weights using the loss function gradient to minimize the loss function. To think about this, imagine the errors that occur in the later stages of the neural network and how they are closely related to the errors in the previous neurons. This trainer works backwards through the layers to optimize and minimize losses. # + # Creating our neural network neuralNet = buildNetwork(4, 3, 3, outclass=SoftmaxLayer) # Creating our backpropagation Trainer trainer = BackpropTrainer(neuralNet, dataset=training, momentum=0.1, verbose=False, weightdecay=0.01) # Here we train our backpropagation trainer on our training data set for 100 cycles trainingError, validationError = trainer.trainUntilConvergence(dataset=training, maxEpochs=100) # Let's plot the error with the number of cycles the trainer has gone through _ , ax3 = plt.subplots() ax3.plot(trainingError, 'g', validationError, 'r') ax3.set_xlabel('Epochs') ax3.set_ylabel('Error (Percentage)') ax3.set_title('Training Error (Green) v Validation Error (red)') plt.show() # - # In the graph above, we can see the error percentage trend towards 0, which is a good sign because it mean we are getting better accuracy. In the graph above, we are only training the network for 100 epochs, which is equivalent to 100 cycles. Generally, the more cycles that one iterates through, the better accuracy one gets. Let's try that below by running the trainer on the training set for 500 cycles. You can also turned on verbosity, to see the percentage errors at each cycle. The final total error I had after 500 cycles hovered around 0.02. trainer.trainOnDataset(training, 500) # The reason why our percentage error at the end hovered around 0.02 and never went significantly below that level was because of our training data set. Imagine learning from only one textbook. At a certain point, you'll be saturated with information from the one textbook. However, since you only understand ideas from your textbook, when you find information from the web that you've never seen before, you can only get so much right. # # Lastly, let's take a look at the percent error our model has on the remaining part of the data set we never touched: the `testing` data. output = neuralNet.activateOnDataset(testing).argmax(axis=1) percentError(output, testing['class']) # From this part, I received 4.34% error, meaning that I have a 95.66% score for the test data! That's a really good start. However, we can do better. Think about the various parameters you inputted for creating the neural network, as well as the number of cycles for the Backpropagation trainer. Try it out with 6 hidden neurons and instead of running it for 500 cycles, do 1000. What differences do you see? # # A lot of machine learning and creating neural networks requires users to tweak and play around with the parameters. There's a balance of weighting certain parameters heavier than others that could cause the model to better predict the training set, but estimate poorly for unseen data. Thus, it is up to the user to decide what parameters to put in and to evaluate the outcomes. # ## Summary and Further Resources # # This tutorial highlighted just a few elements of what is possible to do with neural networks in Python. Much more detail about the libraries and some other interesting projects you could try out are listed below: # # 1. PyBrain: http://pybrain.org/docs/ # 2. Sklearn: http://scikit-learn.org/stable/documentation.html # 3. Iris Data Set: https://en.wikipedia.org/wiki/Iris_flower_data_set # 4. Classifying faces (olivetti dataset): https://goo.gl/Z7Bnbb # 5. Recognizing handwritten digits: https://goo.gl/N4fLal # 6. Financial trading with neural nets: https://goo.gl/jiEfSq
2016/tutorial_final/127/PyBrain and Iris .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/SanghunOh/test_visuallization/blob/main/autompg_xgboost.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="G5X9MZRrMFeA" # ## 데이터 로딩 # + id="NpSFfXXFtwIy" colab={"base_uri": "https://localhost:8080/"} outputId="94d5a89f-0e46-4e75-d631-c81f942cc003" import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() # + colab={"base_uri": "https://localhost:8080/"} id="SKbdHh4Xuepa" outputId="cc5b8547-9ff1-4e75-dd1b-80cf7106ec4a" df[['horsepower','name']].describe(include='all') # + [markdown] id="4B0YQvRQMjZm" # ## replace() # + colab={"base_uri": "https://localhost:8080/"} id="iK6E24d2vKZS" outputId="298e22cd-121b-4b52-c010-8d8fb1c5bd97" df['horsepower'].value_counts() # + colab={"base_uri": "https://localhost:8080/"} id="QHDSskZRxNvf" outputId="1635b7a6-33ba-4661-a786-2d137f015b7f" df['horsepower'].unique() # + id="tKczWxAcyMTQ" colab={"base_uri": "https://localhost:8080/"} outputId="485b0212-267f-4cf8-96b5-ecb53f3e6f86" df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() # + colab={"base_uri": "https://localhost:8080/"} id="JXMKxsUKym1w" outputId="5e5bcbd8-ac9b-496e-f65a-e52dbc75eef6" df_horsepower = df_horsepower.astype('float') df_horsepower.mean() # + colab={"base_uri": "https://localhost:8080/"} id="aJ1Iqr5T032g" outputId="fcc11b1c-4b53-4f8a-c270-185b3f1a7ef3" df['horsepower'] = df_horsepower.fillna(104) df.info() # + id="Z9QVMX561iOv" colab={"base_uri": "https://localhost:8080/"} outputId="6706912b-eebc-40b2-f78c-2c785f55d5ac" df['name'].unique() # + colab={"base_uri": "https://localhost:8080/"} id="_j2SOU391wYu" outputId="49e95da7-ea1e-4410-9b43-9a61e933e4e8" df.head() # + [markdown] id="0y8AD95RLzQX" # ## 분류와 연속 컬럼 구분 # + id="t1pOz2Oq2j9e" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="00950417-5496-48bc-bc3b-3d7c6d9950a6" df.head(8) # + [markdown] id="c3t8-vQDM6cu" # ### check columns # - 연속형 : displacement, horsepower, weight, acceleration, mpg # - 분류형 : model year, name, cylinders, origin # + colab={"base_uri": "https://localhost:8080/"} id="crrCV2smV_Bb" outputId="b8356e73-6f88-425d-93d8-80abd2a4d956" df['name'].value_counts() # + colab={"base_uri": "https://localhost:8080/"} id="dR33CZz8Ps4t" outputId="af3aa403-8bb4-4af4-ecb2-f2868c87c256" df['origin'].value_counts() # + colab={"base_uri": "https://localhost:8080/"} id="nAmKrnH4M06P" outputId="ca7c47a0-88f0-4a3d-8942-749ba0a2df39" df['mpg'].describe(include='all') # + colab={"base_uri": "https://localhost:8080/"} id="ZxxhjhpTOgX2" outputId="dc3ae19b-82b2-4ca2-8726-aa7cd35a4424" df['mpg'].value_counts() # + [markdown] id="ZND5xAAlQLM1" # ## 정규화 단계 # + id="FFBoB_kbPCb2" Y = df['mpg'] X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year', 'cylinders', 'origin']] # + id="v8ZWm20iYIkZ" from sklearn import preprocessing # + colab={"base_uri": "https://localhost:8080/"} id="GTA2BfxEYu8b" outputId="6f3b4df1-ca64-4895-a1bc-c43268850cca" scaler = preprocessing.StandardScaler() type(scaler) # + colab={"base_uri": "https://localhost:8080/"} id="QcFCc48MYfcR" outputId="27e9c31a-1026-4e02-a1f0-f9997e37b706" scaler.fit(X_contiuns) # + id="RM3sv8eVZaao" X = scaler.transform(X_contiuns) # + id="HIefUHGAZvZo" from sklearn.linear_model import LinearRegression # + colab={"base_uri": "https://localhost:8080/"} id="eXCUt886aI94" outputId="c9ea74da-651b-4379-a3f3-207477f9010b" lr = LinearRegression() type(lr) # + colab={"base_uri": "https://localhost:8080/"} id="rzCcG_ZpaSYY" outputId="d770a326-5778-4191-bbb1-9b127cc8a810" lr.fit(X,Y) # + colab={"base_uri": "https://localhost:8080/"} id="okkO8aIoabOg" outputId="47f264fd-00ed-41d4-e78e-877566560221" lr.score(X,Y) # + id="bd-MVmiFn8y6" df.head(1) # + [markdown] id="3q-HAG51q38C" # ### X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] # # + id="v7f_CfHJr7Lq" x_cusmter = scaler.transform([[307.0,130.0,3504.0,12.0]]) x_cusmter.shape # + id="q-0CMXvjqz55" lr.predict(x_cusmter) # + [markdown] id="FzEBB48fR-34" # ### XGboost # + id="1kWANYVJR-ay" import xgboost as xgb model_xgb = xgb.XGBRegressor() model_xgb.fit(X, Y) # + colab={"base_uri": "https://localhost:8080/"} id="9ROvbAniS17S" outputId="ee1bf2a4-2609-404d-8a7a-5114de5ba317" model_xgb.score(X,Y) # + colab={"base_uri": "https://localhost:8080/"} id="LGeRP1OaSsgV" outputId="275cd76b-fcbc-4e10-fdb3-ea7feaa3b4fb" model_xgb.predict(x_cusmter) # + [markdown] id="LfdCPRfeWHOW" # ### LightXGboost # + id="x1YAJmpqWGgd" from lightgbm import LGBMRegressor # + colab={"base_uri": "https://localhost:8080/"} id="PNLeYGneWOb9" outputId="d78a176a-05ca-48e8-b7f7-a1dae3457231" model_lxgb = LGBMRegressor() model_lxgb.fit(X, Y) model_lxgb.score(X, Y) # + [markdown] id="zJ-K6FZFnlez" # ## pickle # + id="IU4Jv-1XavZ4" import pickle pickle.dump(lr, open('./autompg_lr.pkl','wb')) # + [markdown] id="KtL2gRboHOBn" # # + colab={"base_uri": "https://localhost:8080/"} id="VbdldP0tfs1g" outputId="6d2564cb-73f0-41bb-a016-629512e86b4b" # !ls -l ./saves/autompg_lr.pkl # + colab={"base_uri": "https://localhost:8080/"} id="eRiW3TRsgk2e" outputId="ba64de5f-2428-4b5f-d962-03524d074b5c" pickle.load(open('./saves/autompg_lr.pkl', 'rb')) # + id="GYMh64wyg1Me" pickle.dump(scaler, open('./autompg_standardscaler.pkl','wb')) # + id="YGyQbr5ptuuI" # + [markdown] id="1ZyrqMgeHWQu" # ## One hot encoding # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="9TeCcRmMfqhZ" outputId="bbeeddae-6139-4100-8f26-1badf85c58f4" X_category.head(3) # + colab={"base_uri": "https://localhost:8080/"} id="Zs_r29JQgKk5" outputId="7e4a64ec-ff6f-4429-c100-69d761a280e3" X_category['origin'].value_counts() # 1, 2, 3 #? | ? | ? # 1 | 0 | 0 -> 1 # 0 | 1 | 0 -> 2 # 0 | 0 | 1 _ 3 # 3 1 -> 1 # + id="i9PG7HwjhHgy" # data, prefix=None df_origin = pd.get_dummies(X_category['origin'], prefix='origin') # + id="9VUC92BYlFBe" df_cylinders = pd.get_dummies(X_category['cylinders'], prefix='cylinders') # + id="JyaG8XTZmCQ_" df_origin.shape, df_cylinders.shape # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="Rok2im7ImafW" outputId="0c4bb1af-20d4-4429-9ed9-21183cbb1fef" X_contiuns.head(3) # + colab={"base_uri": "https://localhost:8080/", "height": 205} id="JBLswBFVmsJ2" outputId="502f4265-037f-47f7-a504-4fdf1b9f30d2" # X_contiuns + df_cylinders + df_origin # objs, axis=0 X = pd.concat([X_contiuns, df_cylinders, df_origin], axis='columns') X.head(5) # + id="ONvAr3aou805" scaler_xgb = preprocessing.StandardScaler() scaler_xgb.fit(X) X = scaler_xgb.transform(X) # + id="5MCAWavS32jw" import pickle pickle.dump(scaler_xgb,open('./scaler_xgb.pkl', 'wb')) # + colab={"base_uri": "https://localhost:8080/"} id="5robv836oGnG" outputId="a25ab2b4-0138-4472-d6ac-78ae8a9888f3" from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, Y) x_train.shape, x_test.shape, y_train.shape, y_test.shape # + id="K2zW94sxpdk9" import xgboost # + colab={"base_uri": "https://localhost:8080/"} id="juD7TV_IsHI1" outputId="59cc51c6-72b2-4952-a81f-bd0a9e3e928c" xgb = xgboost.XGBRegressor() xgb # + colab={"base_uri": "https://localhost:8080/"} id="zcb9xl15skHT" outputId="03f3e4bf-0910-4f69-fb61-89a3701aa28d" xgb.fit(x_train, y_train) # + id="UnO0Za0-4iIB" pickle.dump(xgb,open('./xgb_model.pkl','wb')) # + colab={"base_uri": "https://localhost:8080/"} id="Kgu1EF-Ds1H7" outputId="6a509786-8616-4912-99c4-4f0ae8aff6e7" xgb.score(x_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="FYaay3x9tCzz" outputId="eb37dc18-e5e4-4e0f-9afd-27fbafa05f0d" xgb.score(x_test, y_test) # + colab={"base_uri": "https://localhost:8080/"} id="BFboN0za0-fK" outputId="0c091c18-b915-4ede-a921-025a4f425f57" X[0] # + id="5QtlxQzr0ZcX" xgb.predict('????')
autompg_xgboost.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from sklearn.manifold import TSNE # %matplotlib inline from matplotlib import pyplot as plt from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.linear_model import LinearRegression import h5py from pytorch_pretrained_bert import BertTokenizer from collections import defaultdict # - fi_preds = np.load("/u/scr/ethanchi/relationOutputs/fi-en-8/train-pred.npy", allow_pickle=True) is_adjunct = np.vectorize(lambda x: x and ('PBArgM' in x or 'AM' in x) and '|' not in x and 'R' not in x) fi_indices = is_adjunct(fi_preds) fi_preds = fi_preds[fi_indices] fi_words = np.load("/u/scr/ethanchi/relationOutputs/fi-en-8/train-words.npy", allow_pickle=True)[fi_indices] fi_words = list(fi_words) # + subword_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased') subwords = subword_tokenizer.wordpiece_tokenizer.tokenize(' '.join(fi_words)) subword_mapping = defaultdict(int) for subword in subwords: subword = subword.replace('##', '') subword_mapping[subword] += 1 print(sorted(subword_mapping.items(), key=lambda kv: -subword_mapping[kv[0]])) # - total_subwords = sum(subword_mapping[k] for k in subword_mapping) print("Total number of subwords is", total_subwords) # + en_preds = np.load("/u/scr/ethanchi/relationOutputs/fi-en-8/pred.npy", allow_pickle=True) is_adjunct = np.vectorize(lambda x: x and ('PBArgM' in x or 'AM' in x) and '|' not in x and 'R' not in x) en_indices = is_adjunct(en_preds) en_preds = en_preds[en_indices] en_words = np.load("/u/scr/ethanchi/relationOutputs/fi-en-8/words.npy", allow_pickle=True)[en_indices] en_words = list(en_words) en_logit_file = h5py.File('/u/scr/ethanchi/relationOutputs/fi-en-8/logits.hdf5') en_logits = en_logit_file.get('logits')[()] en_logits = en_logits[en_indices] en_logit_file.close() en_labels = np.load("/u/scr/ethanchi/relationOutputs/fi-en-8/labels.npy") en_labels = en_labels[en_indices] # - softmax = np.exp(en_logits) / np.sum(np.exp(en_logits), axis=1, keepdims=True) predicted_prob = softmax[np.arange(len(en_logits)), en_labels.astype(int)] tokenized = [subword_tokenizer.wordpiece_tokenizer.tokenize(word) for word in en_words] freq = [sum(subword_mapping[token] for token in token_seq) / (len(token_seq)) for token_seq in tokenized] freq = np.array(freq) plt.scatter(freq, predicted_prob, s=0.1) # + linreg = LinearRegression() linreg.fit(freq.reshape(-1, 1), predicted_prob) plt.figure(figsize=(6, 6)) plt.scatter(freq, predicted_prob, s=0.1) plt.plot(freq_data[:, 0], linreg.predict(freq_data[:, 0].reshape(-1, 1)), color="blue") # + plt.figure(figsize=(6, 6)) freq_data = {} for possible_freq in np.unique(freq): freq_data[possible_freq] = np.average(predicted_prob[freq == possible_freq]) freq_data = np.array([kv for kv in freq_data.items()]) print(freq_data.shape) plt.scatter(freq_data[:, 0], freq_data[:, 1], c="black") linreg = LinearRegression() linreg.fit(freq_data[:, 0].reshape(-1, 1), freq_data[:, 1]) plt.plot(freq_data[:, 0], linreg.predict(freq_data[:, 0].reshape(-1, 1)), color="blue") # -
structural-probes/notebooks/Subword Similarity Metric Evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1 align="center"> PCA + Logistic Regression (MNIST) </h1> # The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image. # <br> # It is a good database for people who want to try learning techniques and pattern recognition methods on real-world data while spending minimal efforts on preprocessing and formatting. # Parameters | Number # --- | --- # Classes | 10 # Samples per class | ~7000 samples per class # Samples total | 70000 # Dimensionality | 784 # Features | integers values from 0 to 255 # The MNIST database of handwritten digits is available on the following website: [MNIST Dataset](http://yann.lecun.com/exdb/mnist/) # %matplotlib inline from sklearn.datasets import fetch_mldata from sklearn.decomposition import PCA import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler # ## Download and Load the Data # You can add the parameter data_home to wherever to where you want to download your data mnist = fetch_mldata('MNIST original') mnist # These are the images mnist.data.shape # These are the labels mnist.target.shape # ### Originally I didnt standardize the data (You should uncomment line) # + #scaler = StandardScaler() # Fit on training set only. #mnist.data = scaler.fit_transform(mnist.data) # - # Make an instance of PCA pca = PCA(.95) # Reduce the dimensionality of your data lower_dimensional_data = pca.fit_transform(mnist.data) pca.n_components_ # The idea with going from 784 components to 154 is to reduce the running time of a supervised learning algorithm (in this case logistic regression) which we will see at the end of the tutorial. One of the cool things about PCA is that we can go from a compressed representation (154 components) back to an approximation of the original high dimensional data (784 components). approximation = pca.inverse_transform(lower_dimensional_data) # + plt.figure(figsize=(8,4)); # Original Image plt.subplot(1, 2, 1); plt.imshow(mnist.data[1].reshape(28,28), cmap = plt.cm.gray, interpolation='nearest', clim=(0, 255)); plt.xlabel('784 components', fontsize = 14) plt.title('Original Image', fontsize = 20); # 154 principal components plt.subplot(1, 2, 2); plt.imshow(approximation[1].reshape(28, 28), cmap = plt.cm.gray, interpolation='nearest', clim=(0, 255)); plt.xlabel('154 components', fontsize = 14) plt.title('95% of Explained Variance', fontsize = 20); # - # ## Showing Graph of Explained Variance vs Number of Principal Components # if n_components is not set all components are kept (784 in this case) pca = PCA() pca.fit(mnist.data) pca.n_components_ # Summing explained variance tot = sum(pca.explained_variance_) tot var_exp = [(i/tot)*100 for i in sorted(pca.explained_variance_, reverse=True)] print(var_exp[0:5]) tot = sum(pca.explained_variance_) tot var_exp = [(i/tot)*100 for i in sorted(pca.explained_variance_, reverse=True)] print(var_exp[0:5]) # Cumulative explained variance cum_var_exp = np.cumsum(var_exp) # Plot can help you understand the level of redundancy present in multiple dimensions. # PLOT OUT THE EXPLAINED VARIANCES SUPERIMPOSED plt.figure(figsize=(10, 5)) plt.step(range(1, 785), cum_var_exp, where='mid',label='cumulative explained variance') plt.title('Cumulative Explained Variance as a Function of the Number of Components') plt.ylabel('Cumulative Explained variance') plt.xlabel('Principal components') plt.axhline(y = 95, color='k', linestyle='--', label = '95% Explained Variance') plt.axhline(y = 90, color='c', linestyle='--', label = '90% Explained Variance') plt.axhline(y = 85, color='r', linestyle='--', label = '85% Explained Variance') plt.legend(loc='best') plt.show() # ## Number of Principal Components for 99%, 95%, 90%, and 85% of Explained Variance # Indices corresponding to the first occurrence are returned with the np.argmax function # Adding 1 to the end of value in list as principal components start from 1 and indexes start from 0 (np.argmax) componentsVariance = [784, np.argmax(cum_var_exp > 99) + 1, np.argmax(cum_var_exp > 95) + 1, np.argmax(cum_var_exp > 90) + 1, np.argmax(cum_var_exp >= 85) + 1] componentsVariance # + from sklearn.decomposition import PCA # This is an extremely inefficient function. Will get to why in a later tutorial def explainedVariance(percentage, images): # percentage should be a decimal from 0 to 1 pca = PCA(percentage) pca.fit(images) components = pca.transform(images) approxOriginal = pca.inverse_transform(components) return approxOriginal # + plt.figure(figsize=(20,4)); # Original Image (784 components) plt.subplot(1, 5, 1); plt.imshow(mnist.data[5].reshape(28,28), cmap = plt.cm.gray, interpolation='nearest', clim=(0, 255)); plt.xlabel('784 Components', fontsize = 12) plt.title('Original Image', fontsize = 14); # 331 principal components plt.subplot(1, 5, 2); plt.imshow(explainedVariance(.99, mnist.data)[5].reshape(28, 28), cmap = plt.cm.gray, interpolation='nearest', clim=(0, 255)); plt.xlabel('331 Components', fontsize = 12) plt.title('99% of Explained Variance', fontsize = 14); # 154 principal components plt.subplot(1, 5, 3); plt.imshow(explainedVariance(.95, mnist.data)[5].reshape(28, 28), cmap = plt.cm.gray, interpolation='nearest', clim=(0, 255)); plt.xlabel('154 Components', fontsize = 12) plt.title('95% of Explained Variance', fontsize = 14); # 87 principal components plt.subplot(1, 5, 4); plt.imshow(explainedVariance(.90, mnist.data)[5].reshape(28, 28), cmap = plt.cm.gray, interpolation='nearest', clim=(0, 255)); plt.xlabel('87 Components', fontsize = 12) plt.title('90% of Explained Variance', fontsize = 14); # 59 principal components plt.subplot(1, 5, 5); plt.imshow(explainedVariance(.85, mnist.data)[5].reshape(28, 28), cmap = plt.cm.gray, interpolation='nearest', clim=(0, 255)); plt.xlabel('59 Components', fontsize = 12) plt.title('85% of Explained Variance', fontsize = 14); # - mnist.target
Sklearn/PCA/PCA_Image_Reconstruction_and_such.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Test from __future__ import print_function from __future__ import division import torch torch.manual_seed(0) import torch.nn as nn import torch.optim as optim import numpy as np import torchvision from torchvision import datasets, models, transforms from torch.utils.data import DataLoader, Dataset import matplotlib.pyplot as plt # %matplotlib inline import time import os import copy import pandas as pd print("PyTorch Version: ",torch.__version__) print("Torchvision Version: ",torchvision.__version__) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # ### Dataset b_sz = 4 class ColorizeDataset(Dataset): def __init__(self, root_dir, transform=None): """ Args: root_dir (string): Directory with all the images; make sure the directory only contains images. transform (callable, optional): Optional transform to be applied on a sample. """ self.root_dir = root_dir self.transform = transform self._files = [x for x in os.listdir(self.root_dir) if x.find('.ipynb') == -1] def __len__(self): return len(self._files) def __getitem__(self, idx): img_name = os.path.join(self.root_dir, self._files[idx]) sample = Image.open(img_name) if self.transform: sample = self.transform(sample) return sample[0,:,:], sample[1:,:,:] data for x, y in from PIL import Image from skimage.color import rgb2lab, lab2rgb, rgb2gray, xyz2lab from skimage.io import imsave def transform_rgb2lab(image): image = image.convert("RGB") image = np.asarray(image) image = rgb2lab(image/255) return image from torchvision.transforms import Lambda from torchvision.transforms import RandomAffine from torchvision.transforms import RandomHorizontalFlip from torchvision.transforms import RandomResizedCrop # shear, zoom, rotation and horizontal flip transform = transforms.Compose([RandomAffine(degrees=0.2, scale=(0.8, 1.0), shear=0.2), RandomHorizontalFlip(p=0.5), Lambda(lambda image: transform_rgb2lab(image)), transforms.ToTensor(), ]) dataset = ColorizeDataset('../Full-version/Train/', transform) # TODO: Resolve error `num_workers` > 1 dataloader = DataLoader(dataset, batch_size=b_sz, shuffle=True, num_workers=1) # ### Network
Alpha-version/test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Creating a Bar Chart Using Matplotlib import matplotlib.pyplot as plt % matplotlib inline # There are two required arguments in pyplot's `bar` function: the x-coordinates of the bars, and the heights of the bars. plt.bar([1, 2, 3], [224, 620, 425]); # You can specify the x tick labels using pyplot's `xticks` function, or by specifying another parameter in the `bar` function. The two cells below accomplish the same thing. # + # plot bars plt.bar([1, 2, 3], [224, 620, 425]) # specify x coordinates of tick labels and their labels plt.xticks([1, 2, 3], ['a', 'b', 'c']); # - # plot bars with x tick labels plt.bar([1, 2, 3], [224, 620, 425], tick_label=['a', 'b', 'c']); # Set the title and label axes like this. plt.bar([1, 2, 3], [224, 620, 425], tick_label=['a', 'b', 'c']) plt.title('Some Title') plt.xlabel('Some X Label') plt.ylabel('Some Y Label');
matplotlib_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Load the age descriptions of population within camp and save them in the format of the population paramter template import numpy as np import pandas as pd np.random.seed(seed=42) age=pd.read_csv('age_and_sex.csv') template=pd.read_csv('camp_params_template.csv') template['Camp']=['Moria']*len(template) N=18700 age_sampled=np.random.choice(age.V1.values,N) percentage=[] start=0 for i in range(8): if i==7: #include 80+ to 70=80 bucket percentage.append(((start <= age_sampled) & (age_sampled < start+20)).sum()/N*100) else: percentage.append(((start <= age_sampled) & (age_sampled < start+10)).sum()/N*100) start+=10 percentage.append(N) template['Value']=percentage template template.to_csv('moria_params.csv')
Parameters/.ipynb_checkpoints/Prepare camp parameters-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- library(readr) library(here) library(aTSA) library(data.table) library(xtable) # ### Simulated data simu_df = read_delim(here('src', 'data', 'simu_data_lecture5.csv'), ";", escape_double = FALSE, trim_ws = TRUE) head(simu_df) y1 = simu_df$y1 par(mfrow=c(2,1)) acf.inven <- list('acf' = acf(y1, lag.max = 15, main=" ACF for y1"), 'pacf' = pacf(y1, lag.max = 15, main=" PACF for y1")) adf.inven <- adf.test(y1) # + ic.inven <- list('AIC' = data.table(), 'BIC' = data.table()) for (ar.lag in 0:11) { arma.stat <- rep(0, 6) for (ma.lag in 0:2) { arma.fit <- arima(y1, order = c(ar.lag, 0, ma.lag)) # arma.fit # AIC arma.stat[ma.lag + 1] <- arma.fit$aic # BIC arma.stat[ma.lag + 4] <- -2 * arma.fit$loglik + (ar.lag + ma.lag) * log(length(y1)) } ic.inven$AIC <- rbindlist(list(ic.inven$AIC, data.table(t(arma.stat[1:3])))) ic.inven$BIC <- rbindlist(list(ic.inven$BIC, data.table(t(arma.stat[4:6])))) } setnames(ic.inven$AIC, c('MA0', 'MA1', 'MA2')) ic.inven$AIC[, AR := 0:11] setnames(ic.inven$BIC, c('MA0', 'MA1', 'MA2')) ic.inven$BIC[, AR := (0:11)] BIC_selec.mat <- rbind(ic.inven$BIC[, AR := (0:11)]) print(xtable(BIC_selec.mat)) # - arma_y1 <- arima(simu_df$y1, order = c(2, 0, 0)) library('xts') as.ts(simu_df$y1) install.packages("devtools") devtools::install_github('IRkernel/repr') install.packages('rlang') # ### Model diagnostic acf.inven$resid <- list('acf' = acf(arma_y1$residuals, lag.max = 12, main="ACF for Residual ARMA(2,0) for y1"), 'pacf' = pacf(arma_y1$residuals, lag.max = 12, main="PACF for Residual ARMA(2,0) for y1")) # + white_test_df = cbind() arima.inven[1:72, eps := as.numeric(arma.fit$rcpi$residuals)] arima.inven[2:72, 'eps1' := diff(arima.inven[1:72, eps], 1)] arima.inven[3:72, 'eps2' := diff(arima.inven[2:72, eps1], 1)] # - summary(lm(eps ~ eps1 + eps2, data = arima.inven[3:72])) white.test(arma.fit$rcpi$residuals)
src/.ipynb_checkpoints/Time_series_models-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pal = [(39/255,95/255,153/255),(80/255,176/255,220/255), (237/255,244/255,248/255), (146/255,144/255,142/255), (78/255,78/255,80/255), (235/255,154/255,88/255),(87/255,126/255,130/255)] # imports from statannot import add_stat_annotation from scipy.stats import chi2 import statsmodels.stats.multitest as mt import sys from matplotlib import rcParams sys.path.append("/Users/rcarlson/Documents/GitHub/lasagna3/snakes") from ops.imports_ipython import * from ops.firedove_barplots_v2 import * import snf # + ## IRF3 translocation upon SeV stimulation df = pd.read_hdf('m120.hdf') df.gene = df.gene.astype('category') sorter = ['nontargeting','DDX58','MAVS','ATP13A1','CAPN15','ATP2B1','MAU2','MED16','MED24', 'TADA2B'] df.gene.cat.set_categories(sorter, inplace=True) df = df.sort_values(["gene"]) df.gene = df.gene.astype('string') aucs, grped = produce_barplot_general(df, df_name = 'm120', pshow = False, genes = ['ATP13A1','ATP2B1','CAPN15','MAU2','MED16','MED24','TADA2B','DDX58','MAVS','nontargeting'], gate = '100 < i <1380 & 100 < j < 1380 & area_nuclear <= 350 & dapi_median_nuclear < 50000 ', replist = [1,2,3,4], nbins=50, feature='dapi_gfp_corr_nuclear', pal = 'green', range_vals = (-1,1), alpha=.05) ## # + ## RIG-I induction upon SeV stimulation df = pd.read_hdf('m136.hdf') df.gene = df.gene.astype('category') sorter = ['nontargeting','DDX58','MAVS','ATP13A1','CAPN15','ATP2B1','MAU2','MED16','MED24', 'TADA2B'] df.gene.cat.set_categories(sorter, inplace=True) df = df.sort_values(["gene"]) df.gene = df.gene.astype('string') aucs, grped = produce_barplot_general(df, df_name = 'm136', pshow = False, genes = df.gene, gate = '100 < i <1380 & 100 < j < 1380 & area < 3000 & channel_cell_median < 50000', replist = [1,2,3,4], nbins=50, feature='channel_cell_median', pal = 'red', plot_nt = True, range_vals = (500, 7000), alpha=.05, pval_sort = False) # + ## IRF3 translocation upon SeV stimulation in U937 cells df = pd.read_hdf('m105.hdf') df.gene = df.gene.astype('category') sorter = ['nontargeting','DDX58','MAVS','ATP13A1','CAPN15','ATP2B1','MAU2','MED16','MED24', 'TADA2B'] df.gene.cat.set_categories(sorter, inplace=True) df = df.sort_values(["gene"]) df.gene = df.gene.astype('string') aucs, grped = produce_barplot_general(df, df_name = 'm105', pshow = False, genes = pd.unique(df.gene), gate = '100 < i < 1380 & 100 < j < 1380 & area_nuclear <= 350 & dapi_median_nuclear < 40000 ', replist = [1,2,3], nbins=50, feature='dapi_gfp_corr_nuclear', pal = 'green', plot_nt = True, range_vals = (-1,1), alpha=.05, pval_sort = False) ## # + df = pd.read_hdf('m139.hdf') df = df.sort_values('well') df.gene = df.gene.astype('category') sorter = ['nontargeting','DDX58','MAVS','ATP13A1','CAPN15','ATP2B1','MAU2','MED16','MED24', 'TADA2B'] df.gene.cat.set_categories(sorter, inplace=True) df = df.sort_values(["gene"]) df.gene = df.gene.astype('string') aucs, grped = produce_barplot_general(df, df_name = 'm139', pshow = False, genes = sorter, gate = '100 < i <1380 & 100 < j < 1380 & area < 500 & area > 50 & channel_cell_median < 40000', replist = [1,2,3,4], nbins=50, feature='channel_cell_median', pal = 'red', plot_nt = True, range_vals = (200,5000), alpha=.05, pval_sort = False) ##
Code/Figure 4-a-d.ipynb
import os import numpy as np import matplotlib.pyplot as plt import pandas as pd from random import shuffle from math import floor # + #image_dir = '/mnt/RAW/FILES/SYNAPSE/POC/UDACITY/PRODUCT_IMAGES_CLASSIFIER/' image_dir = '/local_disk0/tmp/PRODUCT_IMAGES_CLASSIFIER/' train_split = .8 image_dir_dbfs = '/dbfs' + image_dir image_dir_train = image_dir + 'TRAIN/' image_dir_test = image_dir + 'TEST/' image_dir_valid = image_dir + 'VALID/' # + image_counter = dict() path, dirs, files = next(os.walk(image_dir)) for dir in dirs: file_list = os.listdir(image_dir + dir) # dir is your directory path number_files = len(file_list) image_counter[dir] = number_files # - #df_photo_counter = pd.DataFrame(image_counter.items(), columns=['Item #', 'Image Counter']) df_photo_counter = pd.DataFrame.from_dict(image_counter, orient='index', dtype='str') df_photo_counter.columns = ['Count'] df_photo_counter = df_photo_counter.astype({'Count': 'int32'}) df_photo_counter.sort_values(by='Count', ascending=True, na_position='first') df_photo_counter print('Average Number of Photos by Item: {}'.format(df_photo_counter.mean(axis = 0)[0])) df_photo_counter_100_1000 = df_photo_counter[df_photo_counter.Count > 100] df_photo_counter_100_1000 = df_photo_counter_100_1000[df_photo_counter_100_1000.Count < 1000] df_photo_counter_100_1000 ax = df_photo_counter_100_1000.reset_index().plot.bar(x='index', y='Count', rot=90, figsize=(20, 5)) display(ax.figure) # + #For Local Storage: import shutil for index, row in df_photo_counter.iterrows(): if ( row['Count'] < 100 ) or ( row['Count'] > 999 ): path_to_delete = os.path.join(image_dir, index) print(path_to_delete) #dbutils.fs.rm(path_to_delete, True) shutil.rmtree(path_to_delete) # + #For Data Lake: #dbutils.fs.rm(image_dir_train, True) #dbutils.fs.rm(image_dir_test, True) #dbutils.fs.rm(image_dir_valid, True) #dbutils.fs.mkdirs(image_dir_train) #dbutils.fs.mkdirs(image_dir_test) #dbutils.fs.mkdirs(image_dir_valid) #For Local Storage: if os.path.exists(image_dir_train) and os.path.isdir(image_dir_train): shutil.rmtree(image_dir_train) if os.path.exists(image_dir_test) and os.path.isdir(image_dir_test): shutil.rmtree(image_dir_test) if os.path.exists(image_dir_valid) and os.path.isdir(image_dir_valid): shutil.rmtree(image_dir_valid) os.mkdir(image_dir_train) os.mkdir(image_dir_test) os.mkdir(image_dir_valid) # + image_counter = dict() #path, dirs, files = next(os.walk(image_dir_dbfs)) path, dirs, files = next(os.walk(image_dir)) for dir in dirs: dir = str(dir) if dir.lower() != 'train' and dir.lower() != 'test' and dir.lower() != 'valid': print('--') print('Directory: {}'.format(dir)) #file_list = os.listdir(image_dir_dbfs + dir) file_list = os.listdir(image_dir + dir) shuffle(file_list) #randomize list of files split_index_train = floor(len(file_list) * train_split) training_file_list = file_list[:split_index_train] testing_and_validation_file_list = file_list[split_index_train:] split_index_test = floor(len(testing_and_validation_file_list) * 0.5) testing_file_list = testing_and_validation_file_list[split_index_test:] validation_file_list = testing_and_validation_file_list[:split_index_test] print('# of TRAIN: {}'.format(len(training_file_list))) print('# of TEST: {}'.format(len(testing_file_list))) print('# of VALID {}'.format(len(validation_file_list))) print('Total # of FILES: {}'.format(len(file_list))) #copy files to TRAIN #dbutils.fs.mkdirs(image_dir_train + dir) os.mkdir(image_dir_train + dir) for train_file in training_file_list: #dbutils.fs.cp(os.path.join(image_dir, dir, train_file), os.path.join(image_dir_train, dir, train_file)) shutil.copy(os.path.join(image_dir, dir, train_file), os.path.join(image_dir_train, dir, train_file)) #copy file to TEST #dbutils.fs.mkdirs(image_dir_test + dir) os.mkdir(image_dir_test + dir) for test_file in testing_file_list: #dbutils.fs.cp(os.path.join(image_dir, dir, test_file), os.path.join(image_dir_test, dir, test_file)) shutil.copy(os.path.join(image_dir, dir, test_file), os.path.join(image_dir_test, dir, test_file)) #copy files to VALID dbutils.fs.mkdirs(image_dir_valid + dir) os.mkdir(image_dir_valid + dir) for valid_file in validation_file_list: #dbutils.fs.cp(os.path.join(image_dir, dir, valid_file), os.path.join(image_dir_valid, dir, valid_file)) shutil.copy(os.path.join(image_dir, dir, valid_file), os.path.join(image_dir_valid, dir, valid_file)) # - # %sh ###COMPRESS ALL FOLDERS & COPY TO DATA LAKE pushd /local_disk0/tmp/PRODUCT_IMAGES_CLASSIFIER/ zip -r /local_disk0/tmp/PRODUCT_IMAGES_CLASSIFIER_DATA_SPLIT.ZIP TRAIN/ zip -r /local_disk0/tmp/PRODUCT_IMAGES_CLASSIFIER_DATA_SPLIT.ZIP TEST/ zip -r /local_disk0/tmp/PRODUCT_IMAGES_CLASSIFIER_DATA_SPLIT.ZIP VALID/ # cp /local_disk0/tmp/PRODUCT_IMAGES_CLASSIFIER_DATA_SPLIT.ZIP /dbfs/mnt/RAW/FILES/SYNAPSE/POC/UDACITY/PRODUCT_IMAGES_CLASSIFIER/PRODUCT_IMAGES_CLASSIFIER_DATA_SPLIT.ZIP popd
build_model/Data Explore and Prep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- import pandas # + df = pandas.read_excel('s3://wei-ia241-2021spring-orth/Diamonds.xls') df[:10] # - df.describe() df['PRICE'] df[1:5] df.loc[df['PRICE']>1500] df['COLOR'].value_counts() df['COLOR'].count() df['PRICE'] df['PRICE'].sem() df.groupby('COLOR').std() df[:5] df['unit_price']=df['PRICE']/df['WEIGHT'] df[:5] df['unit_price'].mean() from scipy import stats # + result = stats.linregress( df['WEIGHT'],df['PRICE'] ) print('Slope is {}'.format(result.slope)) print('Intercept is {}'.format(result.intercept)) print('R Square is {}'.format(result.rvalue *result.rvalue )) print('P value is {}'.format(result.pvalue)) # - print(' The price of a diamond with the weight of {} is ${}'.format(0.9,0.9*result.slope+result.intercept)) # !pip install textblob # + from textblob import TextBlob result = TextBlob('I hate dog') # - print('The polarity is {}'.format(result.sentiment.polarity)) print('The subjectivity is {}'.format(result.sentiment.subjectivity))
lec12.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 import pandas as pd # + an_pa_in = pd.read_csv( "data/titanic.tsv", "\t", usecols=["sex", "age", "fare", "survived"] ).dropna() an_pa_in.index = ["Passenger {}".format(la + 1) for la in an_pa_in.index] an_pa_in # + ge_ = (an_pa_in["sex"] == "male").astype(int) ge_.name = "Gender" ge_.sort_values() # + ag_ = an_pa_in["age"] ag_.name = "Age" ag_.sort_values() # - import kwat # + fa_ = kwat.array.log(an_pa_in["fare"] + 1) fa_.name = "Fare" fa_.sort_values() # + su_ = an_pa_in["survived"] su_.name = "Survival" su_.sort_values() # - import numpy as np def package(se_): di_ = [se.name for se in se_] return ( se_, kwat.probability.get_posterior_probability( np.array(se_).T, ta=0, co__=[kwat.grid.make_1d_grid(se.min(), se.max(), 1 / 8, 8) for se in se_], pl=False, di_=di_, ), kwat.probability.get_posterior_probability( np.array(se_).T, ta=1, co__=[kwat.grid.make_1d_grid(se.min(), se.max(), 1 / 8, 8) for se in se_], pl=True, di_=di_, ), ) # + ma_pa = {} for se_ in [[ge_], [ag_], [fa_], [ge_, ag_], [ge_, fa_], [ag_, fa_]]: ma_pa[", ".join(se.name for se in se_)] = package(se_ + [su_]) # - kwat.probability.plot( (su_ == 0).sum() / su_.size, (su_ == 1).sum() / su_.size, list(ma_pa.keys()), [t[1][1] for t in ma_pa.values()], [t[2][1] for t in ma_pa.values()], ) from sklearn import metrics # + ma_ro = {} for ma, (se_, _, (co_po_dino, pono_)) in ma_pa.items(): co__ = kwat.grid.get_1d_grid(co_po_dino) po_po_di = pono_.reshape([co_.size for co_ in co__]) xxx = np.full(su_.size, np.nan) for ie in range(xxx.size): xxx[ie] = po_po_di[ tuple(np.argmin(np.absolute(co_ - se[ie])) for co_, se in zip(co__, se_)) ] fpr, tpr = metrics.roc_curve(su_, kwat.array.normalize(xxx, "0-1"))[:2] ma_ro[ma] = {"fpr": fpr, "tpr": tpr, "auc": metrics.auc(fpr, tpr)} # - kwat.plot.plot_plotly( { "data": [ { "name": "Random = 0.50", "x": [0, 1], "y": [0, 1], "mode": "lines", "marker": {"color": "#d8d8d8"}, }, *( { "name": "{} = {:.2f}".format(ma, ro["auc"]), "x": ro["fpr"], "y": ro["tpr"], "mode": "lines", } for ma, ro in ma_ro.items() ), ], "layout": { "height": 640, "width": 800, "title": {"text": "Receiver Operating Characteristic"}, "xaxis": {"title": "False Positive Rate"}, "yaxis": {"title": "True Positive Rate"}, }, } )
nb/titanic.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.1.0 # language: julia # name: julia-1.1 # --- # # Introduction to [JuMP](https://github.com/JuliaOpt/JuMP.jl/blob/master/README.md) # # February 27, 2020 # # *Uses JuMP version 0.21* # # ### What is JuMP? # JuMP stands for **Ju**lia for **M**athematical Programming. It is a "domain-specific modeling language for mathematical optimization embedded in Julia." # # What does "domain-specific" mean? It means that some algorithms are useful in some knowledge domains (e.g. supply chain management) that may not be useful in other knowledge domains (e.g. estimating statistical model parameters). # # What is great about JuMP is that it allows the user to change algorithms with little or no change to the modeling code. This means that someone without domain-specific knowledge can easily utilize JuMP to accomplish a task that would otherwise be impossible. # # ### What goes into JuMP? # To use JuMP, a user must specify the following components: # # - An objective function (you gotta solve something!) # - Variables # - Constraints # # JuMP does the rest! (I'll show you examples shortly of how you can do this.) # # ### What kinds of problems can JuMP handle? # JuMP provides the ability to solve the following types of objective functions: # # - Linear # - Convex Quadratic # - Nonlinear (convex and nonconvex) # # It also provides the ability to use constraints of the following types: # # - Linear # - Convex Quadratic # - Second-order Conic # - Semidefinite # - Nonlinear (convex and nonconvex) # # Finally, it provides support for any of the following types of variables: # # - Continuous # - Integer-valued # - Semicontinuous # - Semi-integer # # ### What examples will we go through today? # Today I'll go through four examples: # # 1. Solve a very simple constrained system of equations (adapted from JuMP's Github repository [here](https://github.com/JuliaOpt/JuMP.jl/blob/master/examples/basic.jl)) # 2. Solve a Sudoku puzzle (adapted from JuMP's Github repository [here](https://github.com/JuliaOpt/JuMP.jl/blob/master/examples/sudoku.jl)) # 3. Solve a professional sports team's salary budget problem # 4. Estimate the parameters of a linear regression model # # --- # ## 1. A simple constrained system of equations # Suppose you want to solve the following (constrained) system of equations: # # \begin{equation*} # \max\,\, 5x + 3y # \end{equation*} # # subject to the following constraints: # # \begin{align} # x + 5y & \leq 3 \\ # x & \geq 0 \\ # y & \geq 0 \\ # x & \leq 2 \\ # y & \leq 30 \\ # \end{align} # # In economics, this problem may represent a utility maximization problem, where utility is linear in $x$ and $y$, the individul has an income of 3, and the price of good $x$ is 1, with the price of good $y$ equal to 5. (And there are supply constraints on $x$ and $y$ governed by the market in general.) # # If we wanted to, we could solve this using a Lagrangian (although in this case with linear utility, we know we will have a corner solution, so no need to bother with calculus). # # But we want the computer to do this, so let's see how it's done. # # ### Specifying the model components # As explained above, JuMP needs an objective function, variables, and constraints. Additionally, we need to tell JuMP which optimization algorithm to use. # # #### Optimizer # The first step is to declare a model and attach an optimizer. We are going to use the GLPK optimizer: # ```julia # model = Model(GLPK.Optimizer) # ``` # # #### Variables # Next, we need to tell it what the variables are, and any constraints on those variables: # ```julia # @variable(model, 0 <= x <= 2) # @variable(model, 0 <= y <= 30) # ``` # # #### Objective function # Next, we tell it the objective function (and whether we want to maximize or minimize): # ```julia # @objective(model, Max, 5x + 3y) # ``` # # #### Constraints # Finally, we give it the constraints. Note that the single constraints on each variable were incorporated when we declared the variables themselves, but we have one additional constraint that is a function of both variables. We could also impose a constraint that the optimal values of $x$ and $y$ be integers (if these represented indivisible objects, for example). # ```julia # @constraint(model, 1x + 5y <= 3) # ``` # # ### Optimizing the model # Once we have all of the components declared, we can optimize the model: # ```julia # JuMP.optimize!(model) # ``` # # We can then look at the output of the optimization as follows: # ```julia # obj_value = JuMP.objective_value(model) # x_value = JuMP.value(x) # y_value = JuMP.value(y) # ``` # We'll put all of the code together below so it can be run on your machine. # # Following Julia protocol, we will also wrap everything in a function and then call that function. # # ### All of the code together # + using JuMP, GLPK # wrap all of our code inside a function (for better performance) function example_basic() # define model and optimizer model = Model(GLPK.Optimizer) # define variables @variable(model, 0 <= x <= 2) @variable(model, 0 <= y <= 30) # define objective function @objective(model, Max, 5x + 3y) # add additional constraints @constraint(model, 1x + 5y <= 3.0) # display the model print(model) # optimize the model JuMP.optimize!(model) # return and print objective function and optimal values of variables obj_value = JuMP.objective_value(model) x_value = JuMP.value(x) y_value = JuMP.value(y) println("Objective value: ", obj_value) println("x = ", x_value) println("y = ", y_value) end # call the function defined above example_basic() # - # So our optimal values are: # # \begin{align} # x^* & = 2 \\ # y^* & = 0.2 \\ # \text{Objective} & = 10.6 # \end{align} # # As I mentioned before we did any programming, this problem would yield a corner solution (where, for one of the goods, the optimal value hits one of the constraints). In this case, it was $x^* = 2$. # # #### Other types of constraints # As I mentioned above, we can also add other constraints, for example, that the optimal values be integers (e.g. if $x$ and $y$ are indivisible). In this case, we would add `@constraint(model, x in MOI.Integer())` and similar for $y$. # # We could also put a constraint on the objective function itself (e.g. that the objective value be no larger than 10). To do this we would add the following: # ```julia # @expression(model, objval, 5x + 3y) # @constraint(model, objval <= 10) # ``` # --- # ## 2. Solving a Sudoku puzzle # Now that we are more comfortable with JuMP, we can solve a Sudoku puzzle. All we need to do is appropriately tell JuMP how to understand the puzzle board. # # ### Sudoku objective # In this case, the objective function is to fill in the puzzle board given a starting grid that has some numbers filled in. We won't have a formal objective function for this; we will just give JuMP a starting grid and tell it to satisfy all of the constraints, where one of the constraints is that the board gets filled. (A blank board has an infinite number of solutions, but a partially completed board should have just one unique solution.) # # ### Sudoku variables # We will communicate the state of the puzzle board with an array of variables. The variables will be put in a 3-dimensional array, where the first two dimensions tell the "latitude" and "longitude" of the cell on the puzzle board, and the third dimension keeps track of which of the numbers 1-9 will fill that cell. # # Mathematically, we have # ```julia # x[i, j, k] # ``` # which, if equal to 1, indicates that cell $(i,j)$ should contain the number $k$. The indices $(i,j,k)$ each must take on integer values from 1 to 9, since the puzzle board has 81 squares. So our `x` array is a 9 x 9 x 9 cube. # # ### Sudoku constraints # The constraints of Sudoku are as follows: # # 1. Each cell can only contain one number (duh, but we have to explain this to the computer!) # 2. Each row contains each number exactly once # 3. Each column contains each number exactly once # 4. Each 3x3 subgrid contains each number exactly once # # ### Code # + using JuMP, GLPK function example_sudoku() # input the initial puzzle board (0s mean blanks) initial_grid = [ 3 1 0 0 5 8 0 0 4; 0 0 9 3 2 0 0 0 0; 0 2 5 1 0 4 0 9 0; 0 0 0 0 0 0 3 8 9; 0 0 8 0 0 0 5 0 0; 5 4 6 0 0 0 0 0 0; 0 8 0 2 0 3 6 5 0; 0 0 0 0 7 1 4 0 0; 7 0 0 4 8 0 0 2 1 ] # use GLPK Optimizer model = Model(GLPK.Optimizer) # Set up the variables: each one can only take on binary values, so we add "Bin" to the end as a constraint @variable(model, x[1:9, 1:9, 1:9], Bin) # Add the constraints @constraints(model, begin # Constraint 1 - Only one value appears in each cell cell[i in 1:9, j in 1:9], sum(x[i, j, :]) == 1 # Constraint 2 - Each value appears in each row once only row[i in 1:9, k in 1:9], sum(x[i, :, k]) == 1 # Constraint 3 - Each value appears in each column once only col[j in 1:9, k in 1:9], sum(x[:, j, k]) == 1 # Constraint 4 - Each value appears in each 3x3 subgrid once only subgrid[i=1:3:7, j=1:3:7, val=1:9], sum(x[i:i + 2, j:j + 2, val]) == 1 end) # Add additional constraints that reflect the starting point of the puzzle board # (i.e. don't attempt to update the numbers that were given as part of the puzzle) for row in 1:9, col in 1:9 if initial_grid[row, col] != 0 @constraint(model, x[row, col, initial_grid[row, col]] == 1) end end # Solve it JuMP.optimize!(model) term_status = JuMP.termination_status(model) primal_status = JuMP.primal_status(model) is_optimal = term_status == MOI.OPTIMAL # Check solution if is_optimal mip_solution = JuMP.value.(x) sol = zeros(Int, 9, 9) for row in 1:9, col in 1:9, val in 1:9 if mip_solution[row, col, val] >= 0.9 sol[row, col] = val end end return sol else error("The solver did not find an optimal solution.") end end function print_sudoku_solution(solution) println("Solution:") println("[-----------------------]") for row in 1:9 print("[ ") for col in 1:9 print(solution[row, col], " ") if col % 3 == 0 && col < 9 print("| ") end end println("]") if row % 3 == 0 println("[-----------------------]") end end end sol = example_sudoku() print_sudoku_solution(sol) # - # --- # # 3. Solving a salary cap problem # NBA general managers want to build a championship team. What is their objective function? To win a championship. Well, that's difficult to exactly write down, but we can look at other more easily measurable outputs (like points scored, points allowed, etc.) # # Suppose we want to find the team that will have the best statistics, but at the cheapest price. # # ### Data # I obtained the data using [this R script](https://github.com/tyleransom/DScourseS20/blob/master/WebData/getNBAplayerStats.R), which makes use of the `nbastatR` package. The data is in CSV format, which we will directly read into Julia using the `HTTP` and `JuliaDB` packages. # # ### Objective function # It's not clear what objective function we should use, but let's start with a simple one: maximize points scored. # # ### Constraints # We have at least two constraints: our team can only have 15 players on it, and the total team salary must be below the luxury tax threshold (\\$132.6m). We will talk about other important constraints after trying things out with this most basic constraint. # # ### Variables # In this case, the variables are the players that we pick. # # ### Code # + using HTTP, JuliaDB, JuMP, GLPK # first function: read in the player data from the class GitHub repository function read_in_data(url) newtable = csvread(IOBuffer(HTTP.get(url).body), skiplines_begin=0, header_exists=true) players = newtable[1][2] salaries = newtable[1][4]./1000000 ppg = newtable[1][11] return players,salaries,ppg end function SolveModel(players,salary,points) N = length(salary) m = Model(GLPK.Optimizer) # define the variables: they are 0 if the player did not make the team, 1 if the player did make the team @variable(m, picked[1:N], Bin) # categories: @objective(m, Max, sum( points[i] * picked[i] for i in 1:N)) @constraints m begin # Constraint 1 - payroll <= 132.6m sum(salary[i] * picked[i] for i in 1:N) <= 132.6 # Constraint 2 - must have exactly 15 players on roster sum(picked[i] for i in 1:N) == 15 end # Solve it JuMP.optimize!(m); pck = convert(BitArray,JuMP.value.(picked)) lineup = players[pck] points = JuMP.objective_value(m) payroll = sum(salary[pck]) return lineup,points,payroll end # call first function (to import data) players,salaries,pts = read_in_data("https://raw.githubusercontent.com/tyleransom/DScourseS20/master/WebData/playerSalaryStats.csv") # pass data into second function to get optimal lineup lineup,total_points,payroll = SolveModel(players,salaries,pts) println("team: ",lineup) println("total points scored per game: ",total_points) println("payroll: ",payroll) # - # The output of our optimization tells us that with the team listed above, we should expect them to score more than 354 points per game! Unfortunately, that answer makes no sense. What went wrong? # # Two major things that went wrong: # # 1. We didn't account for the fact that there are only 240 minutes in an NBA game (48 minutes times 5 players on the floor) # 2. We didn't account for the fact that teams can typically attempt no more than 80 field goals in an NBA game # # Let's adjust our code so that we account for these important constraints and see if we get anything more reasonable. We will need to add minutes and field goal attampts into our data import function, and we will need to add a constraint on total minutes and total field goal attempts. # + # first function: read in the player data from the class GitHub repository function read_in_data(url) newtable = csvread(IOBuffer(HTTP.get(url).body), skiplines_begin=0, header_exists=true) players = newtable[1][2] salaries = newtable[1][4]./1000000 mpg = newtable[1][5] fgaG5 = 1.0.*((newtable[1][9]).>5) fga = newtable[1][9] ppg = newtable[1][11] return players,salaries,mpg,fgaG5,fga,ppg end function SolveModel(players,salary,minutes,field_goals_over5,field_goals,points) N = length(salary) m = Model(GLPK.Optimizer) # define the variables: they are 0 if the player did not make the team, 1 if the player did make the team @variable(m, picked[1:N], Bin) # categories: @objective(m, Max, sum( points[i] * picked[i] for i in 1:N)) @constraints m begin # Constraint 1 - payroll <= 132.6m sum(salary[i] * picked[i] for i in 1:N) <= 132.6 # Constraint 2 - must have exactly 15 players on roster sum(picked[i] for i in 1:N) == 15 # Constraint 3 - total minutes must not exceed 240 sum(minutes[i] * picked[i] for i in 1:N) <= 240 # Constraint 4 - total shot attempts must be lower than 80 sum(field_goals[i] * picked[i] for i in 1:N) <= 80 end # Solve it JuMP.optimize!(m); pck = convert(BitArray,JuMP.value.(picked)) lineup = players[pck] totmin = sum(minutes[pck]) payroll = sum(salary[pck]) totshots = sum(field_goals[pck]) points = JuMP.objective_value(m) return lineup,points,totmin,payroll,totshots end # call first function (to import data) players,salaries,minutes,over5fg,fga,pts = read_in_data("https://raw.githubusercontent.com/tyleransom/DScourseS20/master/WebData/playerSalaryStats.csv") # pass data into second function to get optimal lineup lineup,total_points,total_minutes,payroll,totshots = SolveModel(players,salaries,minutes,over5fg,fga,pts) println("team: ",lineup) println("total points scored per game: ",total_points) println("payroll: ",payroll) println("total shots per game: ",totshots) println("total minutes per game: ",total_minutes) # - # The results gives us a much mroe reasonable number of 121 points, 80 field goal attempts, and a super-cheap payroll of \\$84m, which is \\$25m lower than the lowest in the NBA right now. # # --- # # # 4. Estimating Linear Regression Coefficients # We can also use JuMP to estimate linear regression coefficients. In this case, we must use the `Ipopt` (pronounced eye-PEE-opt) optimizer. Why? Because our objective function is nonlinear (we are minimizing the sum of the squared residuals) and the optimizers we have used above are only valid for linear objective functions. # # ### Objective function # The objective function for OLS is # # \begin{equation} # \min_{\beta} \sum_{i} (y_i - \beta_0 - \beta_1 x_1 - \beta_2 x_2 - \cdots - \beta_k x_k)^2 # \end{equation} # # ### Variables # The variables in this case are the parameters we want to estimate---the $\beta$'s. # # ### Code # + using HTTP, JuliaDB, JuMP, Ipopt function import_auto(url) newtable = csvread(IOBuffer(HTTP.get(url).body), skiplines_begin=0, header_exists=true) depvar = log.(newtable[1][2]) # log price indepvars = cat(ones(size(depvar)),newtable[1][3],newtable[1][5],newtable[1][6]; dims=2) # constant, mpg, headroom, trunk return depvar,indepvars end Y,X = import_auto("https://tyleransom.github.io/teaching/MetricsLabs/auto.csv") function jumpOLS(Y,X,startval=zeros(size(X,2),1)) OLS = Model(Ipopt.Optimizer) # Declare the variables you are optimizing over @variable(OLS, b[i=1:size(X,2)], start = startval[i]) # Write your objective function @NLobjective(OLS, Min, sum( (Y[i]-sum( X[i,k]*b[k] for k in 1:size(X,2) ))^2 for i in 1:size(X,1) ) ) # Solve the objective function JuMP.optimize!(OLS) SSR = JuMP.objective_value(OLS) b_value = JuMP.value.(b) println("Objective value: ", SSR) println("beta hat = ", b_value) println("RMSE = ", sqrt(SSR/(size(X,1)-size(X,2)))) end jumpOLS(Y,X) # - # We can check our answer in R with the following code: # ```r # df <- read.csv("https://tyleransom.github.io/teaching/MetricsLabs/auto.csv") %>% as_tibble %>% # mutate(logprice = log(price)) %>% # drop_na(foreign) # summary(lm(logprice ~ mpg + headroom + trunk, data=df)) # ``` # # which gives us # # ``` # Call: # lm(formula = log(price) ~ mpg + headroom + trunk, data = df) # # Residuals: # Min 1Q Median 3Q Max # -0.6017 -0.2521 -0.1082 0.2104 1.0445 # # Coefficients: # Estimate Std. Error t value Pr(>|t|) # (Intercept) 9.278489 0.314134 29.537 < 2e-16 *** # mpg -0.029650 0.008434 -3.515 0.000775 *** # headroom -0.115783 0.062605 -1.849 0.068619 . # trunk 0.024728 0.013857 1.785 0.078667 . # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # # Residual standard error: 0.3388 on 70 degrees of freedom # Multiple R-squared: 0.2842, Adjusted R-squared: 0.2535 # F-statistic: 9.263 on 3 and 70 DF, p-value: 3.082e-05 # ```
ModelingOptimization/JuMPintro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # ## Pools Liquidity in GBOOT # + pycharm={"name": "#%%\n"} import pandas as pd import numpy as np from math import isnan from IPython.core.display import display, HTML import matplotlib.pyplot as plt from src.data_extractors import get_pools, get_prices, get_price_enriched # + [markdown] pycharm={"name": "#%% md\n"} # #### Get Bostrom and Osmosis Pools Data # + pycharm={"name": "#%%\n"} pools_df = get_pools(display_data=True) # - # #### Calculate Prices # + pycharm={"name": "#%%\n"} price_df = get_prices(pools_df=pools_df, display_data=True) # - # #### Prices without References to the Networks # + pycharm={"name": "#%%\n"} price_enriched_df = get_price_enriched(price_df=price_df, display_data=True) # + [markdown] pycharm={"name": "#%% md\n"} # #### Get Pools Liquidity # + pycharm={"name": "#%%\n"} def get_pool_liquidity(balances: list, target_denom: str = 'boot', main_liquidity_denom: str = 'hydrogen', price_enriched_df: pd.DataFrame = price_enriched_df) -> float: denoms = [item['denom'] for item in balances] balances_dict = {item['denom']:int(item['amount']) for item in balances} if target_denom in denoms: return balances_dict[target_denom] * 2 elif not isnan(price_enriched_df.loc[target_denom, denoms[0]]): return balances_dict[denoms[0]] * price_enriched_df.loc[target_denom, denoms[0]] * 2 elif not isnan(price_enriched_df.loc[target_denom, denoms[1]]): return balances_dict[denoms[1]] * price_enriched_df.loc[target_denom, denoms[1]] * 2 elif not isnan(price_enriched_df.loc[target_denom, main_liquidity_denom]) and not isnan(price_df.loc[main_liquidity_denom, denoms[0]]): return balances_dict[denoms[0]] * price_enriched_df.loc[target_denom, main_liquidity_denom] * price_enriched_df.loc[main_liquidity_denom, denoms[0]] * 2 elif not isnan(price_enriched_df.loc[target_denom, main_liquidity_denom]) and not isnan(price_df.loc[main_liquidity_denom, denoms[1]]): return balances_dict[denoms[1]] * price_enriched_df.loc[target_denom, main_liquidity_denom] * price_enriched_df.loc[main_liquidity_denom, denoms[1]] * 2 return pools_df['liquidity, GBOOT'] = pools_df['balances'].map(get_pool_liquidity)/1e9 display(HTML(pools_df[['network', 'id', 'reserve_coin_denoms', 'liquidity, GBOOT']] .to_html(index=False, notebook=True, show_dimensions=False, float_format='{0:7,.1f}'.format) .replace('text-align: left;', 'text-align: right') .replace('<tr>', '<tr align="right">'))) fig, ax = plt.subplots(figsize=(10,10)) size = 0.3 cm = plt.get_cmap("jet", 20) cout = cm(np.arange(2)*10) pools_df['denoms'] = pools_df.reserve_coin_denoms.map(lambda x: f'{x[0]} - {x[1]}') outer = pools_df[pools_df.denoms != 'uatom in osmosis - uosmo'].sort_values('liquidity, GBOOT').groupby('network')['liquidity, GBOOT'].sum().sort_values() inner = pools_df[pools_df.denoms != 'uatom in osmosis - uosmo'].sort_values('liquidity, GBOOT').groupby(['network', 'denoms'])['liquidity, GBOOT'].sum().sort_values() inner_labels = ['{1} {2:>,.1f} GBOOT'.format(i[0], i[1],j) for i,j in zip(inner.index, inner)] outer_labels = ['{0} {1:>,.1f} GBOOT'.format(i, j) for i, j in zip(outer.index, outer)] ax.pie(outer.values.flatten(), radius=1, labeldistance=0.73, wedgeprops=dict(width=size), colors=cout) cin = cm(np.array([1,2,3,4,5,6,7,8,9,14,12,13])) ax.pie(inner.values.flatten(), radius=1-size, wedgeprops=dict(width=size), labeldistance=0.5, colors=cin) ax.set(aspect="equal", title='Pools Liquidity in GBOOT') plt.legend(outer_labels + inner_labels, loc=(0.2, 0.32)) handles, labels = ax.get_legend_handles_labels() plt.show() # + pycharm={"name": "#%%\n"}
pools_liquidity_in_gboot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- mkcap_2017=pd.read_csv("mkcap_2017.csv") esg_co = pd.read_csv("filter_co.csv") mkcap_2017.iloc[:,0] mkcap_2017.index = pd.to_datetime(mkcap_2017.iloc[:,0]) mkcap_2017.drop(columns='Symbol Name',inplace =True) mkcap_2017 rank = [mkcap_2017.iloc[-1:,:].quantile(round(0.1*i,1),axis=1) for i in range(1,11,1)] rank mkcap_2017 # + trash =['진에어', '경남은행', '셀트리온헬스케어', '동양피스톤', 'PI첨단소재', '현대증권', '하림지주', '전북은행', 'KB손해보험', '오렌지라이프', '광주은행'] esg_ls = list(esg_co['0']) for i in range(len(trash)): esg_ls.remove(trash[i]) esg_ls # - df_esg = mkcap_2017[esg_ls] df_esg rank[-2][0] final_esg = df_esg.iloc[-1,:][df_esg.iloc[-1,:] <= rank[-3][0]] df_esgstock = pd.DataFrame(final_esg) print(df_esgstock.index) print(len(df_esgstock))
esg_stockpicking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 4.3 PyTorchでMNIST # + # 手書き数字の画像データMNISTをダウンロード from sklearn.datasets import fetch_mldata mnist = fetch_mldata('MNIST original', data_home=".") # data_homeは保存先を指定します # + # 1. データの前処理(画像データとラベルに分割し、正規化) X = mnist.data / 255 # 0-255を0-1に正規化 y = mnist.target # + # MNISTのデータの1つ目を可視化する import matplotlib.pyplot as plt % matplotlib inline plt.imshow(X[0].reshape(28, 28), cmap='gray') print("この画像データのラベルは{:.0f}です".format(y[0])) # + # 2. DataLoderの作成 import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split # 2.1 データを訓練とテストに分割(6:1) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=1/7, random_state=0) # 2.2 データをPyTorchのTensorに変換 X_train = torch.Tensor(X_train) X_test = torch.Tensor(X_test) y_train = torch.LongTensor(y_train) y_test = torch.LongTensor(y_test) # 2.3 データとラベルをセットにしたDatasetを作成 ds_train = TensorDataset(X_train, y_train) ds_test = TensorDataset(X_test, y_test) # 2.4 データセットのミニバッチサイズを指定した、Dataloaderを作成 # Chainerのiterators.SerialIteratorと似ている loader_train = DataLoader(ds_train, batch_size=64, shuffle=True) loader_test = DataLoader(ds_test, batch_size=64, shuffle=False) # + # 3. ネットワークの構築 # Keras風の書き方 from torch import nn model = nn.Sequential() model.add_module('fc1', nn.Linear(28*28*1, 100)) model.add_module('relu1', nn.ReLU()) model.add_module('fc2', nn.Linear(100, 100)) model.add_module('relu2', nn.ReLU()) model.add_module('fc3', nn.Linear(100, 10)) print(model) # + # 4. 誤差関数と最適化手法の設定 from torch import optim # 誤差関数の設定 loss_fn = nn.CrossEntropyLoss() # 変数名にはcriterionが使われることも多い # 重みを学習する際の最適化手法の選択 optimizer = optim.Adam(model.parameters(), lr=0.01) # + # 5. 学習と推論の設定 # 5-1. 学習1回でやることを定義します # Chainerのtraining.Trainer()に対応するものはない def train(epoch): model.train() # ネットワークを学習モードに切り替える # データローダーから1ミニバッチずつ取り出して計算する for data, targets in loader_train: optimizer.zero_grad() # 一度計算された勾配結果を0にリセット outputs = model(data) # 入力dataをinputし、出力を求める loss = loss_fn(outputs, targets) # 出力と訓練データの正解との誤差を求める loss.backward() # 誤差のバックプロパゲーションを求める optimizer.step() # バックプロパゲーションの値で重みを更新する print("epoch{}:終了\n".format(epoch)) # + # 5. 学習と推論の設定 # 5-2. 推論1回でやることを定義します # Chainerのtrainer.extend(extensions.Evaluator())に対応するものはない def test(): model.eval() # ネットワークを推論モードに切り替える correct = 0 # データローダーから1ミニバッチずつ取り出して計算する with torch.no_grad(): # 微分は推論では必要ない for data, targets in loader_test: outputs = model(data) # 入力dataをinputし、出力を求める # 推論する _, predicted = torch.max(outputs.data, 1) # 確率が最大のラベルを求める correct += predicted.eq(targets.data.view_as(predicted)).sum() # 正解と一緒だったらカウントアップ # 正解率を出力 data_num = len(loader_test.dataset) # データの総数 print('\nテストデータの正解率: {}/{} ({:.0f}%)\n'.format(correct, data_num, 100. * correct / data_num)) # - # 学習なしにテストデータで推論してみよう test() # + # 6. 学習と推論の実行 for epoch in range(3): train(epoch) test() # + # 例えば2018番目の画像データを推論してみる index = 2018 model.eval() # ネットワークを推論モードに切り替える data = X_test[index] output = model(data) # 入力dataをinputし、出力を求める _, predicted = torch.max(output.data, 0) # 確率が最大のラベルを求める print("予測結果は{}".format(predicted)) X_test_show = (X_test[index]).numpy() plt.imshow(X_test_show.reshape(28, 28), cmap='gray') print("この画像データの正解ラベルは{:.0f}です".format(y_test[index])) # + #----------------------------------------------- # + # 3. ネットワークの構築 # ニューラルネットワークの設定(Chainer風の書き方) import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self, n_in, n_mid, n_out): super(Net, self).__init__() self.fc1 = nn.Linear(n_in, n_mid) # Chainerと異なり、Noneは受けつけない self.fc2 = nn.Linear(n_mid, n_mid) self.fc3 = nn.Linear(n_mid, n_out) def forward(self, x): # 入力xに合わせてforwardの計算を変えられる h1 = F.relu(self.fc1(x)) h2 = F.relu(self.fc2(h1)) output = self.fc3(h2) return output model = Net(n_in=28*28*1, n_mid=100, n_out=10) # ネットワークのオブジェクトを生成 print(model) # -
JSTfair/Deep-Reinforcement-Learning-Book-master/program/4_3_PyTorch_MNIST.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from miscpy.utils.sympyhelpers import * init_printing() from sympy.utilities.codegen import codegen # ## Set up rotation matrices representing a 3-1-3 $(\psi,\theta,\phi)$ Euler angle set. aCi = rotMat(3,psi) cCa = rotMat(1,th) bCc = rotMat(3,ph) aCi,cCa,bCc bCi = bCc*cCa*aCi; bCi #3-1-3 rotation bCi_dot = difftotalmat(bCi,t,{th:thd,psi:psid,ph:phd}); bCi_dot # ## $\tilde{\omega} = {}^\mathcal{B}C^{\mathcal{I}} {}^\mathcal{B}{\dot{C}}^{\mathcal{I}}$ omega_tilde = bCi*bCi_dot.T; omega_tilde # ## $\left[{}^\mathcal{I}\boldsymbol{\omega}^{\mathcal{B}}\right]_\mathcal{B} = \left[ {}^\mathcal{B}C^{\mathcal{I}}_{32} \quad {}^\mathcal{B}C^{\mathcal{I}}_{13} \quad {}^\mathcal{B}C^{\mathcal{I}}_{21} \right]$ omega = simplify(Matrix([omega_tilde[2,1],omega_tilde[0,2],omega_tilde[1,0]])) omega w1,w2,w3 = symbols('omega_1,omega_2,omega_3') s0 = solve(omega - Matrix([w1,w2,w3]),[psid,thd,phd]); s0 # ## Find EOM (second derivatives of Euler Angles) I1,I2,I3 = symbols("I_1,I_2,I_3",real=True,positive=True) iWb_B = omega I_G_B = diag(I1,I2,I3) I_G_B diffmap = {th:thd,psi:psid,ph:phd,thd:thdd,psid:psidd,phd:phdd} diffmap t1 = I_G_B*difftotalmat(iWb_B,t,diffmap) t2 = skew(iWb_B)*I_G_B*iWb_B t1,t2 dh_G_B = t1+t2 dh_G_B t3 = expand(dh_G_B[0]*cos(ph)*I2 - dh_G_B[1]*sin(ph)*I1) sol_thdd = simplify(solve(t3,thdd)) sol_thdd t4= expand(dh_G_B[0]*sin(ph)*I2 + dh_G_B[1]*cos(ph)*I1) t4 sol_psidd = simplify(solve(t4,psidd)) sol_psidd sol_phdd = solve(dh_G_B[2],phdd) sol_phdd # ## Find initial orientation such that $\mathbf h$ is down-pointing h = sqrt(((I_G_B*Matrix([w1,w2,w3])).transpose()*(I_G_B*Matrix([w1,w2,w3])))[0]);h eqs1 = simplify(bCi.transpose()*I_G_B*Matrix([w1,w2,w3]) - Matrix([0,0,-h])); eqs1 #equal 0 simplify(solve(simplify(eqs1[0]*cos(psi) + eqs1[1]*sin(psi)),ph)) #phi solution solve(simplify(expand(simplify(-eqs1[0]*sin(psi) + eqs1[1]*cos(psi)).subs(ph,atan(I1*w1/I2/w2)))),th) #th solution simplify(eqs1[2].subs(ph,atan(I1*w1/I2/w2))) # ### Generate MATLAB Code out = codegen(("eom1",sol_psidd[0]), 'Octave', argument_sequence=[th,thd,psi,psid,ph,phd,I1,I2,I3]);out codegen(("eom1",sol_thdd[0]), 'Octave', argument_sequence=[th,thd,psi,psid,ph,phd,I1,I2,I3]) codegen(("eom1",sol_phdd[0]), 'Octave', argument_sequence=[th,thd,psi,psid,ph,phd,I1,I2,I3,psidd]) codegen(("eom1",[s0[psid],s0[thd],s0[phd]]), 'Octave', argument_sequence=[w1,w2,w3,th,thd,psi,psid,ph,phd,I1,I2,I3,psidd]) codegen(("eom1",bCi), 'Octave', argument_sequence=[th,thd,psi,psid,ph,phd,I1,I2,I3,psidd]) codegen(("eom1",omega), 'Octave', argument_sequence=[w1,w2,w3,th,thd,psi,psid,ph,phd,I1,I2,I3,psidd])
Notebooks/Torque Free 3-1-3 Body Dynamics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: misc-exp # kernelspec: # display_name: misc-exp # language: python # name: misc-exp # --- # + [markdown] inputHidden=false outputHidden=false # # Bandits from Logged Data # # - <NAME>, and <NAME>, "[Doubly Robust Policy Evaluation and Learning](https://arxiv.org/abs/1103.4601)" (2017). # - <NAME> and <NAME>, "[The Self-Normalized Estimator for Counterfactual Learning](https://www.microsoft.com/en-us/research/publication/self-normalized-estimator-counterfactual-learning/)" (2015). # - <NAME> and <NAME>, "[The Offset Tree for Learning with Partial Labels](https://arxiv.org/abs/0812.4044)" (2008). # + [markdown] inputHidden=false outputHidden=false # ## Turning a classification task into a bandit problem # # This section follows "Doubly Robust Policy Evaluation and Learning". # # Assume a dataset with features $x$ and class $c$. This dataset can be turned into a bandit problem. The actions are $a(x)$ are to select a the class for instance $x$. The Reward is $1$, when the correct class is selected and $0$ otherwise. # # To build an off-policy dataset, fit a classifactor on a subset of the dataset and use the predicted class probability as a policy. I.e., if the classificator preditcs $\hat{p}(c|x)$, select action $a(x) = c$ with proability $\hat{p}(c|x)$. # # For the [census dataset](http://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld), this transformation is implemented in `chmp.app.causality.dataset.census`. It contains the following additional columns: # # - `value_mean`: a scalar reward, that depends the correctness of the choice and on the true class. # - `value`: `value_mean` with additional Gaussian nose. # - # %matplotlib inline # + import pathlib import matplotlib.pyplot as plt import numpy as np import sklearn.datasets import sklearn.ensemble import sklearn.metrics # - from chmp.ds import shuffled # + n_samples = 2_000 base_x, y = sklearn.datasets.make_classification( n_samples=n_samples, n_features=10, n_informative=2, n_repeated=2, shuffle=False, n_classes=2, n_clusters_per_class=2, random_state=21, ) idx = shuffled(131_313, np.arange(n_samples)) base_x = base_x[idx].astype('float32') y = y[idx].astype('int64') # + est = sklearn.ensemble.GradientBoostingClassifier(n_estimators=20, learning_rate=0.02, random_state=32_032) est.fit(base_x[:10], y[:10]) logging_policy = est.predict_proba(base_x) logging_action = est.predict(base_x) #logging_policy = 0.5 + 0.0 * logging_policy print('accuracy {:.3}'.format((logging_action == y).mean())) print('auc score {:.3}'.format(sklearn.metrics.roc_auc_score(y, logging_policy[:, 1]))) # + action = np.random.binomial(n=1, p=logging_policy[:, 1], size=n_samples).astype('int64') reward = (action == y).astype('float32') p_action = logging_policy[np.arange(n_samples), action].astype('float32') # include the action inside the features x = np.concatenate((base_x, action[:, None]), axis=1) x = x.astype('float32') # + x_train = x[:1000] y_train = { 'propensity': p_action[:1000], 'action': action[:1000], 'reward': reward[:1000], 'optimal_action': y[:1000], } x_test = x[1000:] y_test = { 'propensity': p_action[1000:], 'action': action[1000:], 'reward': reward[1000:], 'optimal_action': y[1000:], } # + _, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) plt.sca(ax1) plt.plot(x[y == 0, 0], x[y == 0, 1], '.', alpha=0.2) plt.plot(x[y == 1, 0], x[y == 1, 1], '.', alpha=0.2) plt.sca(ax2) plt.plot(x[action == 0, 0], x[action == 0, 1], '.', alpha=0.2) plt.plot(x[action == 1, 0], x[action == 1, 1], '.', alpha=0.2) # - from chmp.ds import reload # + inputHidden=false outputHidden=false import functools as ft import numpy as np import pandas as pd reload('chmp.app.causality.dataset.census') from chmp.app.causality.dataset.census import features, columns import torch import torch.nn.functional as F from chmp.torch_utils.model import Model # + inputHidden=false outputHidden=false #df = pd.read_parquet('data/census.parquet') # + inputHidden=false outputHidden=false #p = np.asarray(df['action_p'], dtype='float32') #a = np.asarray(df['action'], dtype='int64') #r = np.asarray(df['value'], dtype='float32') #x = np.asarray(pd.get_dummies(df[[*features, 'action']]), dtype='float32') #y = (p, a, r) # - n_samples, n_features = x_train.shape n_classes = 2 # + inputHidden=false outputHidden=false print(f'{n_samples:,d} samples, {n_features} features') # - # Two problems: # # - Policy optimization: optimize a policy based on historic observations # - Policy evalulation: given an existing policy evaluate the expected reward based on historic observations # # Interactive vs. Non interactive # ## Offset tree reduction # Basic idea: turn the bandit problem into a weighted-classificaiton problem. The predicted class is the optimal action. The classifier with the lowest classification error, will also be the optimial policy. # # Problem description: # # - $x$: features # - $a \in \{-1, +1\}$: action # - $r(a) \in [0, 1]$: reward for action $a$ # # Goal is to chose $\pi$ as to maximze: # # $$ # \mathbb{E}_{x,a \sim \pi(a|x)} \left[ r(a) \right] # $$ # # Note: alternative view (hence the name partially labelled): all rewards are generated on-block, but only the reward for the chosen action is revealed. # # Note for non-binary action spaces, decompose the action space to binary problems and apply the same technique. # + [markdown] inputHidden=false outputHidden=false # ## Inverse propensity estimator (IPS) # # $$ # \begin{aligned} # \hat{V}^\pi_\mathrm{IPS} # &= \sum_{x,a,r_a,p_a} \frac{r_a \mathbb{1}(\pi(x) = a)}{p_a} # \end{aligned} # $$ # # Turn it into a loss function # # $$ # \begin{aligned} # \mathcal{L}_\mathrm{IPS}(\pi) # &= \sum_{x,a,r_a,p_a} \frac{p^{\pi}(a|x)}{p_a} r_a # \end{aligned} # $$ # + def ips_loss(p_model, truth): """The inverse propensity score loss""" p_new = ( truth['action'].type(torch.float32) * p_model[:, 1] + (1 - truth['action']).type(torch.float32) * p_model[:, 0] ) weights = p_new / truth['propensity'] return -(truth['reward'] * weights).mean() def snips_loss(p_model, truth): """The Self-Normalized Estimator for Counterfactual Learning""" p_new = ( truth['action'].type(torch.float32) * p_model[:, 1] + (1 - truth['action']).type(torch.float32) * p_model[:, 0] ) weights = p_new / truth['propensity'] return -(truth['reward'] * weights).sum() / weights.sum() def offset_tree_loss(p_model, truth): """Loss to learn according the binary offset-tree reduction. Based on <NAME> and <NAME>, "The Offset Tree for Learning with Partial Labels" (2008). """ weights = abs(truth['reward'] - 0.5) / truth['propensity'] target = 0.5 * (1 + torch.sign((2 * truth['action'].type(torch.float32) - 1) * (truth['reward'] - 0.5))) nll = (1 - target) * torch.log(p_model[:, 0]) + target * torch.log(p_model[:, 1]) return -(weights * nll).mean() def true_reward(p_model, truth): action = p_model.argmax(dim=1) return (action == truth['optimal_action']).type(torch.float32).mean() # - reload('chmp.torch_utils.model') from chmp.torch_utils.model import Model from distributed import Client, LocalCluster def _train(loss, weight_decay): model = Model( module=torch.nn.Sequential( torch.nn.Linear(n_features, n_classes), torch.nn.Softmax(dim=1), ), # loss=offset_tree_loss, # loss=snips_loss, loss=loss, optimizer_kwargs=dict(lr=5e-3, weight_decay=weight_decay), ) history = model.fit( x_train, y_train, epochs=2_000, metrics=true_reward, batch_size=len(x_train), validation_data=(x_test, y_test), verbose=True, ) return model, history from chmp.ds import cached pathlib.Path('cache').mkdir(exist_ok=True) result = [] for loss in offset_tree_loss, ips_loss, snips_loss: for decay in 0, 1e-4, 3.2e-4, 1e-3, 3.2e-3, 1e-2, 3.2e-2, 1e-1, 3.2e-1, 1: cache_name = f'cache/{loss.__name__}_{int(decay * 1_000_00):06d}.pickle' model, history = cached(cache_name)(ft.partial(_train, loss, decay)) result.append( { 'loss': loss.__name__, 'decay': decay, 'history': history, 'model': model, } ) # + _, axes = plt.subplots(1, 3, figsize=(16, 4)) for l, ax in zip(['offset_tree_loss', 'ips_loss', 'snips_loss'], axes): plt.sca(ax) for r in [r for r in result if r['loss'] == l]: plt.plot(r['history']['epoch'], r['history']['metrics', 'true_reward'], label=repr(r['decay'])) plt.title(l) plt.legend(loc='best') # - for l in 'offset_tree_loss', 'ips_loss', 'snips_loss': subset = [r for r in result if r['loss'] == l] plt.plot( [r['decay'] for r in subset], [r['history']['metrics', 'true_reward'][-1] for r in subset], '.-', label=l, ) plt.xscale('log') plt.xlabel('Weight decay') plt.ylabel('True reward') plt.legend(loc='best') from chmp.ds import mpl_set # + c0, c1 = get_color_cycle(2) _, axes = plt.subplots(1, 3, figsize=(16, 5)) for l, ax in zip(['offset_tree_loss', 'ips_loss', 'snips_loss'], axes): plt.sca(ax) example, *_ = [r for r in result if r['loss'] == l] plt.plot( example['history']['epoch'], -np.asarray(example['history']['loss']), label='Loss', color=c0, ) plt.plot( example['history']['epoch'], np.asarray(example['history']['metrics', 'true_reward']), label='Truth', color=c1, ) mpl_set( xlabel='Epoch', ylabel='Mean reward', legend=True, ylim=(0, 1), title=l, ) # - # # Todo # # - use test set # - shifted rewards # - Lagrange multiplier ... # - More complex reward model ... # - Unblanced data set # # https://portal.ds.microsoft.com/ # + outputHidden=false inputHidden=false
20180107-Causality/02_BanditsLoggedData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mohameddhameem/TensorflowCertification/blob/main/Tensorflow_2_x_CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="vuDEK-18Qb7j" # # Tensorflow 2.x CNN # + id="7UlpggItQLTJ" import tensorflow as tf from tensorflow.keras import datasets, layers, models, optimizers EPOCHS = 20 BATCH_SIZE = 128 VERBOSE = 1 OPTIMIZER = tf.keras.optimizers.Adam() VALIDATION_SPLIT = 0.3 IMG_ROWS, IMG_COLS = 28, 28 INPUT_SHAPE = (IMG_ROWS, IMG_COLS, 1) # We use only 1 channel NB_CLASSES = 10 # + id="Ui0JhfZEQ-XG" #Lets build the model def build(input_shape, classes): model = models.Sequential() # CONV => RELU => POOL model.add(layers.Convolution2D(20, (2,2), activation='relu', input_shape = input_shape)) model.add(layers.MaxPool2D(pool_size=(2,2), strides=(2,2))) # CONV => RELU => POOL model.add(layers.Convolution2D(50, (5, 5), activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) # Flatten => RELU layers - before going for final classifier flatten it model.add(layers.Flatten()) model.add(layers.Dense(500, activation='relu')) # a softmax classifier - final layer model.add(layers.Dense(classes, activation="softmax")) return model # + colab={"base_uri": "https://localhost:8080/"} id="ib3pbfRqSVgw" outputId="41f57a73-ada1-49e5-9766-dcc5a0b4e3ef" # Build the data set (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data() print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) #reshape X_train = X_train.reshape((60000, 28, 28, 1)) X_test = X_test.reshape((10000, 28, 28, 1)) #normalize X_train, X_test = X_train / 255.0, X_test / 255.0 #convert / cast to float 32 X_train = X_train.astype('float32') X_test = X_test.astype('float32') # convert class vectors to binary class matrices y_train = tf.keras.utils.to_categorical(y_train, NB_CLASSES) y_test = tf.keras.utils.to_categorical(y_test, NB_CLASSES) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) model = build(INPUT_SHAPE, NB_CLASSES) model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=["accuracy"]) model.summary() #custom callback callbacks = [ tf.keras.callbacks.TensorBoard(log_dir='./logs') ] history = model.fit(X_train, y_train, batch_size = BATCH_SIZE, epochs = EPOCHS, verbose = VERBOSE, validation_split=VALIDATION_SPLIT, callbacks = callbacks) score = model.evaluate(X_test,y_test, verbose=VERBOSE) print("\nTest score:", score[0]) print('Test accuracy:', score[1]) # + [markdown] id="eRb6HJBOfgHD" # # CIFAR Dataset # + id="1ab2nLvpTVUA" import tensorflow as tf from tensorflow.keras import datasets, layers, models, regularizers, optimizers from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np EPOCHS = 50 NUM_CLASSES = 10 def load_data(): (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data() x_train = x_train.astype('float32') x_test = x_test.astype('float32') # normalize mean = np.mean(x_train,axis=(0,1,2,3)) std = np.std(x_train,axis=(0,1,2,3)) x_train = (x_train-mean)/(std+1e-7) x_test = (x_test-mean)/(std+1e-7) y_train = tf.keras.utils.to_categorical(y_train,NUM_CLASSES) y_test = tf.keras.utils.to_categorical(y_test,NUM_CLASSES) return x_train, y_train, x_test, y_test def build_model(): model = models.Sequential() # 1st block model.add(layers.Conv2D(32, (3,3), padding='same', input_shape=x_train.shape[1:], activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.Conv2D(32, (3,3), padding='same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.2)) # 2nd block model.add(layers.Conv2D(64, (3,3), padding='same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.Conv2D(64, (3,3), padding='same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.3)) # 3d block model.add(layers.Conv2D(128, (3,3), padding='same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.Conv2D(128, (3,3), padding='same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.4)) # dense model.add(layers.Flatten()) model.add(layers.Dense(NUM_CLASSES, activation='softmax')) return model # + colab={"base_uri": "https://localhost:8080/"} id="1kf_epoOgKio" outputId="6e14a455-f853-4c08-8889-6f3711d20c16" # Then we need to have a part to train the network: (x_train, y_train, x_test, y_test) = load_data() model = build_model() model.summary() model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy']) # train batch_size = 64 model.fit(x_train, y_train, batch_size=batch_size, epochs=EPOCHS, validation_data=(x_test,y_test)) score = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE) print("\nTest score:", score[0]) print('Test accuracy:', score[1]) # + [markdown] id="5WRz8PV8iab5" # ## Keras image augmentation # + id="Pak0oh9-gixW" from tensorflow.keras.preprocessing.image import ImageDataGenerator datagen = ImageDataGenerator( rotation_range = 30, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, ) datagen.fit(x_train) # + colab={"base_uri": "https://localhost:8080/"} id="6dFPjW3wi5tr" outputId="4c8c1306-1a6d-4aaa-b8aa-598c91ad889b" batch_size = 64 model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=EPOCHS, verbose=1,validation_data=(x_test,y_test)) # + colab={"base_uri": "https://localhost:8080/"} id="eHGpoTTujYET" outputId="8f456bc5-6add-4f37-843e-919633637aa5" # save to disk model_json = model.to_json() with open('cifar10_architecture.json', 'w') as json_file: json_file.write(model_json) model.save_weights('cifar10_weights.h5') # test scores = model.evaluate(x_test, y_test, batch_size=128, verbose=1) print('\nTest result: %.3f loss: %.3f' % (scores[1]*100,scores[0])) # + [markdown] id="QxGcQNAKTX4g" # # Use Pretrained model for prediction # + id="n1oSrk4JUo8b" import numpy as np from skimage.transform import resize from imageio import imread from tensorflow.keras.models import model_from_json from tensorflow.keras.optimizers import SGD # + id="flqnqLzWS_xT" dog = 'https://raw.githubusercontent.com/PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras/master/Chapter%204/dog.jpg' cat = 'https://raw.githubusercontent.com/PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras/master/Chapter%204/cat-standing.jpg' # + id="HdyigaRdUs3z" # load model model_architecture = 'cifar10_architecture.json' model_weights = 'cifar10_weights.h5' model = model_from_json(open(model_architecture).read()) model.load_weights(model_weights) # + colab={"base_uri": "https://localhost:8080/"} id="Ao_jp_CZUvtj" outputId="ca72c59c-f688-431c-ff33-d384dfb546cc" # load images img_names = [cat, dog] imgs = [resize(imread(img_name), (32, 32)).astype("float32") for img_name in img_names] imgs = np.array(imgs) / 255 print("imgs.shape:", imgs.shape) # + colab={"base_uri": "https://localhost:8080/"} id="4ZGHnIY6U9s4" outputId="110137de-3089-490d-e8d1-b84bb5b417b8" # train optim = SGD() model.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy']) # predict predictions = model.predict_classes(imgs) print("predictions:", predictions) # + [markdown] id="Bc-iPlwKTpS1" # # tf.keras built-in VGG16 Net # + id="oinXgKpsVCse" import tensorflow as tf from tensorflow.keras.applications.vgg16 import VGG16 import matplotlib.pyplot as plt import numpy as np import cv2 from imageio import imread # + colab={"base_uri": "https://localhost:8080/"} id="CpcV5fNGTsvK" outputId="43154d2e-1e1f-4174-fea6-a1b20ddf7ca5" # prebuild model with pre-trained weights on imagenet model = VGG16(weights='imagenet', include_top=True) model.compile(optimizer='sgd', loss='categorical_crossentropy') # + id="14NW-8leUIHc" IMG_PATH = "https://raw.githubusercontent.com/PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras/master/Chapter%204/steam-locomotive.jpg" # resize into VGG16 trained images' format im = cv2.resize(imread(IMG_PATH), (224, 224)) im = np.expand_dims(im, axis=0) im = im.astype(np.float32) # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="xKOcGTcoUZ_Z" outputId="07ba42a3-a436-4a8f-a7be-65294a507c23" # predict out = model.predict(im) index = np.argmax(out) print(index) plt.plot(out.ravel()) plt.show() # + [markdown] id="KBjJoGAJnCsj" # # Recycling prebuilt deep learning models for extracting features # + colab={"base_uri": "https://localhost:8080/"} id="mNY7cXgjm4Qm" outputId="e9eb7c03-02f2-4c01-b34e-89304b6175b8" import tensorflow as tf from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras import models from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.vgg16 import preprocess_input import numpy as np import cv2 # prebuild model with pre-trained weights on imagenet base_model = VGG16(weights='imagenet', include_top=True) base_model.summary # + colab={"base_uri": "https://localhost:8080/"} id="tkml9RrunNQW" outputId="c1b6a382-da10-49e7-88dc-eda57be9b85a" for i, layer in enumerate(base_model.layers): print (i, layer.name, layer.output_shape) # + id="xHvHO27OnZGv" # extract features from block4_pool block model = models.Model(inputs=base_model.input, outputs=base_model.get_layer('block4_pool').output) # + colab={"base_uri": "https://localhost:8080/"} id="FeBrRnr5ng0G" outputId="29521fd8-2ac4-4920-a793-ad1a0f949d69" img_path = 'https://raw.githubusercontent.com/PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras/master/Chapter%204/cat.jpg' import urllib.request urllib.request.urlretrieve(img_path, "cat.jpg") img = image.load_img('cat.jpg', target_size=(224, 224, 3)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) # get the features from this block features = model.predict(x) print(features) # + id="-8i9oGzcnw1L"
Tensorflow_2_x_CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Load packages import tensorflow as tf import pandas as pd from tensorflow import keras import numpy as np import pandas as pd import os import scipy as scp import scipy.stats as scps import time from datetime import datetime # Load my own functions import dnnregressor_train_eval_keras as dnnk from kde_training_utilities import kde_load_data from kde_training_utilities import kde_make_train_test_split import make_data_wfpt as mdw # + # Handle some cuda business os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"]="1" from tensorflow.python.client import device_lib print(device_lib.list_local_devices()) # + # Make dnnk class (cpm for choice probability model) cpm = dnnk.dnn_trainer() # Define folder in which dataset lies data_folder = '/media/data_cifs/afengler/data/kde/full_ddm/train_test_data/' # - # Make train test split kde_make_train_test_split(folder = data_folder, p_train = 0.8) # Load train test split cpm.data['train_features'], cpm.data['train_labels'], cpm.data['test_features'], cpm.data['test_labels'] = kde_load_data(folder = data_folder) cpm.data['test_features'].shape cpm.data['train_features'].shape cpm.data['train_features'].iloc[171247010, :] cpm.data['train_features']['log_l'] = cpm.data['train_labels'] cpm.data['train_features'].sort_values(by = 'log_l') cpm.data['train_features'] cpm.data['train_features'].iloc[22428, :] cpm.data['train_labels'][22428, ] # Make all parameters we can specify explicit # Model parameters cpm.model_params # Parameters governing training cpm.train_params # Parameters concerning data storage cpm.data_params # + # If necessary, specify new set of parameters here: # Model params cpm.model_params['output_activation'] = 'linear' cpm.model_params['hidden_layers'] = [20, 40, 60, 80, 100, 120] cpm.model_params['hidden_activations'] = ['relu', 'relu', 'relu', 'relu', 'relu', 'relu'] cpm.model_params['input_shape'] = cpm.data['train_features'].shape[1] # cpm.model_params['l1_activation'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # cpm.model_params['l2_activation'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] cpm.model_params['l1_kernel'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] cpm.model_params['l2_kernel'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Train params cpm.train_params['batch_size'] = 1000000 cpm.train_params['max_train_epochs'] = 250 cpm.train_params['min_delta'] = 0.00001 # Data params cpm.data_params['data_type'] = 'kde' cpm.data_params['data_type_signature'] = '_full_ddm_' cpm.data_params['training_data_size'] = cpm.data['train_features'].shape[0] cpm.data_params['timestamp'] = datetime.now().strftime('%m_%d_%y_%H_%M_%S') cpm.data_params['model_directory'] = '/media/data_cifs/afengler/data/kde/full_ddm/keras_models/' # - # Make model cpm.keras_model_generate(save_model = True) # Train model cpm.run_training(save_history = True, warm_start = False)
trash/keras_fit_model_kde_full_ddm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Deep AI # language: python # name: dl # --- # + import numpy as np import torch from dcj_comp import dcj_dist # - # version 2.0 # distance between result sequence and the median sequence for seqlen in [10, 50, 100]: print(f'seqlen: {seqlen}') print(f'\t lower_bound \t dist_mean \t ratio \t\t std \t\t number') for rate in list(range(1, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') source, target = torch.load(fname) lower_bound = np.array([np.ceil(np.sum([dcj_dist(g1, g2)[-1], dcj_dist(g1, g3)[-1], dcj_dist(g2, g3)[-1]])/2) for g1, g2, g3 in source]) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') # dist_list = np.array([d[-1] for d in tmp]) # dist_diff = np.abs(dist_list - lower_bound) dist_diff = np.abs([dcj_dist(a, b[0])[-1] for a, b in zip(target, tmp)]) print(f'{rate} & \t' f'{np.mean(lower_bound):>7.4f} &\t' f'{dist_diff.mean():>7.4f} &\t' f'{dist_diff.mean()/np.mean(lower_bound) :>7.4f} &\t' f'{dist_diff.std():>7.4f} &\t' f'{np.sum(dist_diff == 0):>4} &\t \\\\ \hline') print('*************') import matplotlib as mpl mpl.rcParams['pdf.fonttype'] = 42 import matplotlib.pyplot as plt # for seqlen in [10, 50, 100]: result = [] seqlen = 10 for rate in list(range(1, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') source, target = torch.load(fname) # lower_bound = np.array([np.ceil(np.sum([dcj_dist(g1, g2)[-1], # dcj_dist(g1, g3)[-1], # dcj_dist(g2, g3)[-1]])/2) # for g1, g2, g3 in source]) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') # dist_list = np.array([d[-1] for d in tmp]) # dist_diff = np.abs(dist_list - lower_bound) dist_diff = np.abs([dcj_dist(a, b[0])[-1] for a, b in zip(target, tmp)]) # tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') # dist_list = np.array([d[-1] for d in tmp]) # dist_diff = np.abs(dist_list - lower_bound) result.append(dist_diff) bins = np.arange(np.max(result) + 1) # x = np.arange(np.max(result) + 1) plt.figure() H = plt.hist(result, bins = 11) plt.xticks(np.arange(np.ceil(np.max(H[1])) + 1)) # plt.xticklabels(x) plt.savefig('g3val_dist_10.svg', bbox_inches = 'tight') plt.show() np.sum(H[0], axis = 1) # for seqlen in [10, 50, 100]: result = [] seqlen = 50 for rate in list(range(1, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') source, target = torch.load(fname) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') dist_diff = np.abs([dcj_dist(a, b[0])[-1] for a, b in zip(target, tmp)]) result.append(dist_diff) # bins = np.arange(np.max(result) + 1) # x = np.arange(11) plt.figure() H = plt.hist(result, bins = 11) plt.xticks(np.arange(0, np.ceil(np.max(H[1])) + 1, 3)) # plt.xticklabels(x) plt.savefig('g3val_dist_50.svg', bbox_inches = 'tight') plt.show() np.sum(H[0], axis = 1) # for seqlen in [10, 50, 100]: result = [] seqlen = 100 for rate in list(range(1, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') source, target = torch.load(fname) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') dist_diff = np.abs([dcj_dist(a, b[0])[-1] for a, b in zip(target, tmp)]) result.append(dist_diff) x = np.arange(11) plt.figure() H = plt.hist(result, bins = 11) plt.xticks(np.arange(0, np.ceil(np.max(H[1])) + 1, 5)) # plt.xticklabels(x) plt.savefig('g3val_dist_100.svg', bbox_inches = 'tight') plt.show() np.sum(H[0], axis = 1) for seqlen in [10, 50, 100]: print(f'seqlen: {seqlen}') print(f'\t\t lower_bound \t dist_mean \t ratio \t\t std \t\t number') for rate in list(range(1, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') seq = torch.load(fname)[0] lower_bound = np.array([np.ceil(np.sum([dcj_dist(g1, g2)[-1], dcj_dist(g1, g3)[-1], dcj_dist(g2, g3)[-1]])/2) for g1, g2, g3 in seq]) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') dist_list = np.array([d[-1] for d in tmp]) dist_diff = np.abs(dist_list - lower_bound) print(f'{rate} & \t' f'{np.mean(lower_bound):>7.4f} &\t' f'{dist_diff.mean():>7.4f} &\t' f'{dist_diff.mean()/np.mean(lower_bound) :>7.4f} &\t' f'{dist_diff.std():>7.4f} &\t' f'{np.sum(dist_list == lower_bound):>4} &\t \\\\ \hline') print('*************') import matplotlib as mpl mpl.rcParams['pdf.fonttype'] = 42 import matplotlib.pyplot as plt # for seqlen in [10, 50, 100]: result = [] seqlen = 10 for rate in list(range(2, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') seq = torch.load(fname)[0] lower_bound = np.array([np.ceil(np.sum([dcj_dist(g1, g2)[-1], dcj_dist(g1, g3)[-1], dcj_dist(g2, g3)[-1]])/2) for g1, g2, g3 in seq]) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') dist_list = np.array([d[-1] for d in tmp]) dist_diff = np.abs(dist_list - lower_bound) result.append(dist_diff) bins = np.arange(np.max(result) + 1) # x = np.arange(np.max(result) + 1) plt.figure() H = plt.hist(result, bins = 11) plt.xticks(np.arange(np.ceil(np.max(H[1])) + 1)) # plt.xticklabels(x) plt.savefig('g3val_dist_10.svg', bbox_inches = 'tight') plt.show() np.sum(H[0], axis = 1) # for seqlen in [10, 50, 100]: result = [] seqlen = 50 for rate in list(range(2, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') seq = torch.load(fname)[0] lower_bound = np.array([np.ceil(np.sum([dcj_dist(g1, g2)[-1], dcj_dist(g1, g3)[-1], dcj_dist(g2, g3)[-1]])/2) for g1, g2, g3 in seq]) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') dist_list = np.array([d[-1] for d in tmp]) dist_diff = np.abs(dist_list - lower_bound) result.append(dist_diff) # bins = np.arange(np.max(result) + 1) # x = np.arange(11) plt.figure() H = plt.hist(result, bins = 11) plt.xticks(np.arange(0, np.ceil(np.max(H[1])) + 1, 5)) # plt.xticklabels(x) plt.savefig('g3val_dist_50.svg', bbox_inches = 'tight') plt.show() np.sum(H[0], axis = 1) # for seqlen in [10, 50, 100]: result = [] seqlen = 100 for rate in list(range(2, 10)) + [10, 15, 20]: rate = rate / 10 fname = (f'val_seq_g3m_3_' f'{seqlen}_{int(seqlen * rate)}' f'_200/raw/g3raw_' f'{seqlen}_{int(seqlen * rate)}.pt') seq = torch.load(fname)[0] lower_bound = np.array([np.ceil(np.sum([dcj_dist(g1, g2)[-1], dcj_dist(g1, g3)[-1], dcj_dist(g2, g3)[-1]])/2) for g1, g2, g3 in seq]) tmp = torch.load(f'dist_result_pt/{seqlen:0>4}_{rate}.pt') dist_list = np.array([d[-1] for d in tmp]) dist_diff = np.abs(dist_list - lower_bound) result.append(dist_diff) x = np.arange(11) plt.figure() H = plt.hist(result, bins = 11) plt.xticks(np.arange(0, np.ceil(np.max(H[1])) + 1, 5)) # plt.xticklabels(x) plt.savefig('g3val_dist_100.svg', bbox_inches = 'tight') plt.show() np.sum(H[0], axis = 1)
result_illustration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.6 64-bit # metadata: # interpreter: # hash: 0cd5d5c698fda9e224ecedcd720182965d67d027171e40fb256e465c8ee4d634 # name: python3 # --- # # Libraries # + import pandas as pd from IPython.core.display import HTML import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.utils import formatdate # - # # Read HTML File body = open('html/card.html', 'r', encoding='utf8').read() HTML(body) # # Import Recipients df = pd.read_csv('recipients.csv') df # # Email Settings my_username = '<EMAIL>' my_password = '<PASSWORD>' send_from = '<EMAIL>' sender_name = '<NAME>' email_subject = 'Happy New Year!' # # Function for Sending Email def send_email(send_from, sender_name, subject, body, recipients, username, password): """Perform email transaction to emails in a dataframe """ recipients_list = [i for i in zip(recipients['email'], recipients['message'], recipients['recipient'])] for (email, message, recipient) in recipients_list: msg = MIMEMultipart() msg['From'] = send_from msg['Subject'] = subject msg['Date'] = formatdate(localtime=True) body_temp = body.replace('{sender name}', sender_name) body_temp = body_temp.replace('{recipient name}', recipient) body_temp = body_temp.replace('{text}', message) msg['To'] = email msgText = MIMEText(body_temp, 'html') msg.attach(msgText) smtp = smtplib.SMTP('smtp.office365.com', 587) smtp.ehlo() smtp.starttls() smtp.login(username, password) smtp.sendmail(send_from, [email], msg.as_string()) smtp.quit() # # Send to recipients send_email(send_from, sender_name, email_subject, body, df, username=my_username, password=<PASSWORD>)
develop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In this notebook, we calculate some generalized network statistics about the input network, and save the output as a .csv. # Import the usual suspects for network / data analysis import networkx as nx import os, sys, time import pandas as pd import numpy as np # ### Defining the Main Function # In the next block, we define the main process which is applied against an input network, passed to the function. This function does all of the calculations we are interested in. Note the in-line comments for specifics. def NetStats(g, ISO): # g is a passed in path to a networkx object, in string format. # ISO is the ISO-3 country code for the country. It is used mainly as a device for keeping track of the output. # all results will be loaded into a results dictionary. We define an empty dict here. results = {} # load the Graph G = nx.read_gpickle(g) # the first of our results - load the number of nodes and edges in as entries to the results dict results['number_of_edges'] = G.number_of_edges() results['number_of_nodes'] = G.number_of_nodes() # genertae the list of connected sub-graphs - usually 1, but often more. Gs = list(nx.strongly_connected_component_subgraphs(G)) # Here, we identify the sub-graphs worthy of analysis (set thresholds appropriately!) iterator = 0 # we create empty buckets for the edges and nodes, and one for the iterator counts, edges, nodes = [],[],[] for g in Gs: counts.append(iterator) edges.append(g.number_of_edges()) nodes.append(g.number_of_nodes()) iterator+=1 # After iterating through all sub-graphs, we load into a dataframe the results. # Each graph is summarized as a line in this df df = pd.DataFrame({'id':counts,'edges':edges,'nodes':nodes}) # we sort by the number of edges, largest graph first df = df.sort_values(by = 'edges', ascending = False) # We set the threshold for graph analysis here. The threshold is - half the number of edges of the largest graph. thresh = df.edges.iloc[0] * 0.5 # we remove any sub-graphs that don't meet this newly impoed standard df = df.loc[df.edges >= thresh] print(df) # we generate a list of the graph IDs through which to iterate in the actual summary statistics stage id_list = list(df.id) # now, we are ready to calculate some stats. We do the following process for each interesting subgraph: for i in range(0, len(id_list)): ### Section 1 # note that all results are appended to the dictionary with 'i' - the graph ID - to # allow multiple results for each major-network # set up timing start = time.time() i = 0 # pick out current graph from Gs, the list of graphs curr_G = Gs[id_list[i]] # generate an UNdirected graph from the current graph for this stage of calcs undirected_G = nx.Graph(curr_G) # calculate cyclomatic number # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.cycles.cycle_basis.html circuits = nx.cycle_basis(undirected_G) cyclomatic_number = len(circuits) results['G%s_cyclomatic_number' % i] = cyclomatic_number # get simple number of nodes and edges e = undirected_G.number_of_edges() v = undirected_G.number_of_nodes() results['G%s_number_of_edges'% i] = e results['G%s_number_of_nodes'% i] = v # print out elapsed time for the above calculations print('\tTime elapsed for Section 1: %s seconds' % (time.time() - start)) ### Section 2 start = time.time() # calculate the network's alpha, beta and gamma as derivatives from the cyclomatic number + number of nodes and edges results['G%s_alpha'% i] = cyclomatic_number / ((2 * v) - 5) results['G%s_beta'% i] = e / v results['G%s_gamma'% i] = e / (3 * (v - 2)) print('\tTime elapsed for Section 2: %s seconds' % (time.time() - start)) ### Section 3 # here, you see some cell magic with the '%' symbol. These commands will only work in a jupyter env. start = time.time() # calculate eccentricity # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.distance_measures.eccentricity.html # %time ecc = nx.eccentricity(undirected_G) # calculate network diameter # https://networkx.github.io/documentation/networkx-1.7/reference/generated/networkx.algorithms.distance_measures.diameter.html # %time results['G%s_diameter' %i] = nx.diameter(undirected_G, ecc) # calculate network radius # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.distance_measures.radius.html # %time results['G%s_radius' %i] = nx.radius(undirected_G, ecc) # calculate average clustering # https://networkx.github.io/documentation/networkx-1.9/reference/generated/networkx.algorithms.cluster.average_clustering.html # %time results['G%s_average_clustering' %i] = nx.average_clustering(undirected_G) # calculate this massive monster of a mouthful # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.assortativity.degree_assortativity_coefficient.html # %time results['G%s_degree_assortativity_coefficient' %i] = nx.degree_assortativity_coefficient(undirected_G) # the following two calcs were very time-expensive, so were commented out # #%time results['G%s_global_efficiency' %i] = nx.global_efficiency(undirected_G) # #%time results['G%s_av_node_connectivity' %i] = nx.average_node_connectivity(undirected_G) print('\tTime elapsed for Section 3: %s seconds' % (time.time() - start)) ### Section 4 start = time.time() # here, we calculate some by-node stats (mean). # We also calculate the median, 1st quartile and 3rd quartile of these results # https://networkx.github.io/documentation/networkx-1.9/reference/generated/networkx.algorithms.centrality.degree_centrality.html Z = list(nx.degree_centrality(undirected_G).values()) results['G%s_av_degree_centrality' % i] = np.mean(Z) results['G%s_0.25_degree_centrality' % i] = np.percentile(Z, 25) results['G%s_0.50_degree_centrality' % i] = np.percentile(Z, 50) results['G%s_0.75_degree_centrality' % i] = np.percentile(Z, 75) print('\tTime elapsed for Section 4: %s seconds' % (time.time() - start)) ### Section 5 # same idea as for Section 4 here, but with closeness centrality. # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.centrality.closeness_centrality.html start = time.time() Z = list(nx.closeness_centrality(undirected_G).values()) results['G%s_av_closeness_centrality' % i] = np.mean(Z) results['G%s_0.25_closeness_centrality' % i] = np.percentile(Z, 25) results['G%s_0.50_closeness_centrality' % i] = np.percentile(Z, 50) results['G%s_0.75_closeness_centrality' % i] = np.percentile(Z, 75) print('\tTime elapsed for Section 5: %s seconds' % (time.time() - start)) # Section 6 # same idea as for Section 4 here, but with betweenness centrality. # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.centrality.betweenness_centrality.html start = time.time() Z = list(nx.betweenness_centrality(undirected_G).values()) results['G%s_av_betweenness_centrality' % i] = np.mean(Z) results['G%s_0.25_betweenness_centrality' % i] = np.percentile(Z, 25) results['G%s_0.50_betweenness_centrality' % i] = np.percentile(Z, 50) results['G%s_0.75_betweenness_centrality' % i] = np.percentile(Z, 75) print('\tTime elapsed for Section 6: %s seconds' % (time.time() - start)) # Section 7 # same idea as for Section 4 here, but with eigenvector centrality. # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.centrality.eigenvector_centrality.html start = time.time() try: Z = list(nx.eigenvector_centrality(undirected_G).values()) results['G%s_av_eigenvector_centrality' % i] = np.mean(Z) results['G%s_0.25_eigenvector_centrality' % i] = np.percentile(Z, 25) results['G%s_0.50_eigenvector_centrality' % i] = np.percentile(Z, 50) results['G%s_0.75_eigenvector_centrality' % i] = np.percentile(Z, 75) except: pass print('\tTime elapsed for Section 7: %s seconds' % (time.time() - start)) """ This section was again commented out as it was slowing the code to a crawl. # Section 8 start = time.time() try: Z = list(nx.communicability_betweenness_centrality(undirected_G).values()) results['G%s_av_communicability_betweenness_centrality' % i] = np.mean(Z) results['G%s_0.25_av_communicability_betweenness_centrality' % i] = np.percentile(Z, 25) results['G%s_0.50_av_communicability_betweenness_centrality' % i] = np.percentile(Z, 50) results['G%s_0.75_av_communicability_betweenness_centrality' % i] = np.percentile(Z, 75) except: pass print('\tTime elapsed for Section 8: %s seconds' % (time.time() - start)) """ # generate a dataframe of the results we have calculated, by network df = pd.DataFrame(results, index = ['value']) # here the dataframe is transposed, the index reset, and the index # renamed to 'var_name' to make it a 2D data table that can be efficiently sliced df = df.transpose().reset_index().rename(columns = {'index':'var_name'}) # we add the country of interest as an additional column - the only time we use the ISO variable df['country'] = ISO # return the results df return df # ### Execution # + # set the root path to all of the networks which will be abalyzed. root = r'D:\Criticality II\country_networks' # we walk the root path, picking out the pickles. This will look different depending on your file structure. Q = [] for q, t, folder in os.walk(root): if q[-6:] == 'output': Q.append(q) # we iterate through each of our valid paths for q in Q: # from my folder structure we pick out the ISO code. Again, this is user-specific to their file structure ISO = q[-10:-7] if ISO not in ['ABW','AFG']: print('...processing %s' % ISO) # we define g as the path to the pickled networkx object g = os.path.join(q, '{}_processed.pickle'.format(ISO)) # we assign D to be the output dataframe from the netstats function D = NetStats(g, ISO) # we save the stats for this network down to the path location as a .csv path = r'C:\Users\charl\Documents\CE\Criticality\Netstats' D.to_csv(os.path.join(path, '%s_processed_netstats.csv' % ISO))
Implementations/FY20/ACC_Criticality II/NetworkStatistics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib matplotlib.use('tkAgg') import matplotlib.pyplot as plt import glob import sys from matplotlib.ticker import MultipleLocator import json import os import pandas as pd import numpy as np import pdb from IPython.display import Image gpus = ['t4', 'v100'] models = ['bert', 'resnet', 'vgg'] modes = ['train', 'inf'] batch = [256, 512, 512] #script false --no-raise-error ######## training ########### for gpu in gpus: for model in models: fig, axs = plt.subplots(5, 1, gridspec_kw={'hspace': 0.1, 'wspace': 0.2, 'bottom': 0.13, 'top': 0.95, 'right':0.995, 'left':0.17}, figsize=(6,10)) fig.suptitle(f'{model} training on {gpu}', fontsize=14) with open(f'../{gpu}_duration_{model}_train.json') as f: duration = json.load(f) df = pd.read_csv(f'../power/{gpu}_{model}_train.csv') batch_time = [] for epoch, times in duration.items(): batch_time += times[1:-1] batch_time = [k for k in batch_time if k < 1] max_time = max(batch_time) batch_time = [k/max_time for k in batch_time] # plotting axs[0].plot(batch_time, label='mini-batch time') temp_csv = list(df[' temperature.gpu']) axs[1].plot(temp_csv, label='temprature') axs[2].plot(list(df[' power.draw [W]']), label='power') axs[3].plot(list(df[' clocks.current.graphics [MHz]']), label='clk_gr') axs[3].plot(list(df[' clocks.current.sm [MHz]']), label='clk_sm') #axs[3].plot(list(df[' clocks.current.memory [MHz]']), label='clk_memory') axs[4].plot(list(df[' ecc.errors.corrected.volatile.total']), label='ECC_single') axs[4].plot(list(df[' ecc.errors.uncorrected.volatile.total']), label='ECC_double') # config axs[0].set_ylabel('Time (Norm.)', fontsize=14) axs[1].set_ylabel('Temp. (degC)', fontsize=14) axs[2].set_ylabel('Power (W)', fontsize=14) axs[3].set_ylabel('Freq. (MHz)', fontsize=14) axs[4].set_ylabel('Err. Count', fontsize=14) axs[4].set_xlabel('Training Timestamps (1=200ms)', fontsize=14) for ax in axs: if ax != axs[4]: ax.set_xticklabels([]) ax.grid(which='major', axis='y', ls='dashed') ax.legend(loc=4) axs[1].set_ylim(30,85) axs[1].yaxis.set_major_locator(MultipleLocator(10)) #plt.show() fig.savefig(f'plots_train/{model}_{gpu}.png', bbox_inches='tight') ######## inference ########### for gpu in gpus: for model in models: ind = models.index(model) fig, axs = plt.subplots(5, 1, gridspec_kw={'hspace': 0.1, 'wspace': 0.2, 'bottom': 0.13, 'top': 0.95, 'right':0.995, 'left':0.17}, figsize=(6,10)) fig.suptitle(f'{model} inference on {gpu}', fontsize=14) with open(f'../{gpu}_duration_{model}_inf{batch[ind]}.json') as f: duration = json.load(f) df = pd.read_csv(f'../power/{gpu}_{model}_inf.csv') batch_time = [] for epoch, times in duration.items(): batch_time += times[:-1]#[1:-1] #pdb.set_trace() thr = np.percentile(batch_time,99.5) #pdb.set_trace() batch_time = [k for k in batch_time if k <= thr] # running average series = pd.Series(batch_time) window=100 batch_time = series.rolling(window).mean().tolist()[window-1:] max_time = max(batch_time) batch_time = [k/max_time for k in batch_time] # plotting axs[0].plot(batch_time, label='mini-batch latency') temp_csv = list(df[' temperature.gpu']) axs[1].plot(temp_csv, label='temprature') axs[2].plot(list(df[' power.draw [W]']), label='power') axs[3].plot(list(df[' clocks.current.graphics [MHz]']), label='clk_gr') axs[3].plot(list(df[' clocks.current.sm [MHz]']), label='clk_sm') #axs[3].plot(list(df[' clocks.current.memory [MHz]']), label='clk_memory') axs[4].plot(list(df[' ecc.errors.corrected.volatile.total']), label='ECC_single') axs[4].plot(list(df[' ecc.errors.uncorrected.volatile.total']), label='ECC_double') # config axs[0].set_ylabel('Latency (Norm.)', fontsize=14) axs[1].set_ylabel('Temp. (degC)', fontsize=14) axs[2].set_ylabel('Power (W)', fontsize=14) axs[3].set_ylabel('Freq. (MHz)', fontsize=14) axs[4].set_ylabel('Err. Count', fontsize=14) axs[4].set_xlabel('Inference Timestamps (1=200ms)', fontsize=14) for ax in axs: if ax != axs[4]: ax.set_xticklabels([]) ax.grid(which='major', axis='y', ls='dashed') ax.legend(loc=4) axs[0].set_ylim(0.5,1.05) axs[1].set_ylim(30,85) axs[0].yaxis.set_major_locator(MultipleLocator(0.1)) axs[1].yaxis.set_major_locator(MultipleLocator(10)) #plt.show() fig.savefig(f'plots_inf/{model}_{gpu}.png', bbox_inches='tight')
examples/pwr_run/mit_supercloud/characterization/logs/archive/visual/.ipynb_checkpoints/figure-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # import pathlib # temp = pathlib.PosixPath # pathlib.PosixPath = pathlib.WindowsPath # import drqa.tokenizers # - #this helps to solve error of notimplemented import pathlib temp = pathlib.PosixPath pathlib.PosixPath = pathlib.WindowsPath import drqa.tokenizers drqa.tokenizers.set_default('corenlp_classpath', '/your/corenlp/classpath/*') pip install wexpect from drqa.tokenizers import CoreNLPTokenizer # tok = CoreNLPTokenizer() # tok.tokenize('hello world').words() tok = CoreNLPTokenizer() tok.tokenize('hello world').words()
DrQA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:PromoterArchitecturePipeline] * # language: python # name: conda-env-PromoterArchitecturePipeline-py # --- import pandas as pd from pybedtools import BedTool select_genes_file = '../../data/genomes/ara_housekeeping_list.out' select_genes_file_100_random = '../../data/genes/constitutive-variable-random_100_each.csv' genes_overlap_bedfile = '../../data/genomes/promoterandgenes_only_overlap.bed' #this is actually genome features overlap promoters_overlapping_bedfile = '../../data/genomes/promoters_overlapping.bed' select_genes = pd.read_table(select_genes_file, sep='\t', header=None) cols = ['gene','gene_type'] select_genes.columns = cols genes_overlap = pd.read_table(genes_overlap_bedfile, sep='\t', header=None) cols = ['chr', 'start', 'stop', 'gene', 'dot', 'strand', 'source', 'type', 'dot2', 'details'] genes_overlap.columns = cols promoters_overlapping = pd.read_table(promoters_overlapping_bedfile, sep='\t', header=None) promoters_overlapping.columns = cols random_100_only = pd.read_csv(select_genes_file_100_random, header=0) del random_100_only['Unnamed: 0'] random_100_only.rename(columns={'promoter_AGI':'gene'}, inplace=True) random_100_only select_genes # ### filter out unwanted genes merged = pd.merge(select_genes, genes_overlap, on='gene') merged2 = pd.merge(select_genes, promoters_overlapping, on='gene') merged_random100 = pd.merge(random_100_only, genes_overlap, on='gene') merged2_random100 = pd.merge(random_100_only, promoters_overlapping, on='gene') merged ## how many of each gene_type, genes_overlap merged_counts = merged_random100['gene_type'].value_counts() merged_counts ## how many of each gene_type, genome features overlap merged_counts2 = merged2_random100['gene_type'].value_counts() merged_counts2 merged2 #check if any NaN - there won't be NaN as did inner merge merged[pd.isna(merged.chr)] # ### change column order to bed file format filtered_proms = merged.loc[:, ['chr', 'start', 'stop', 'gene', 'dot', 'strand', 'source', 'type', 'dot2', 'details']] filtered_proms2 = merged2.loc[:, ['chr', 'start', 'stop', 'gene', 'dot', 'strand', 'source', 'type', 'dot2', 'details']] #sort the df by chromosome then by motif start position filtered_proms_sorted = filtered_proms.sort_values(['chr','start']) filtered_proms_sorted2 = filtered_proms2.sort_values(['chr','start']) filtered_proms_sorted # ### write out bed file of merged dfs BedTool.from_dataframe(filtered_proms_sorted).saveas('../../data/genomes/promoterandgenes_only_overlap_const_var.bed') BedTool.from_dataframe(filtered_proms_sorted2).saveas('../../data/genomes/promoters_overlapping__const_var.bed')
src/data_sorting/filter_by_geneID_overlapping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demonstrating the SuperMarket Class # # ### First import libraries import sys # sys.path.insert(0, '../') from calculation_library import Customer, SuperMarket import pandas as pd import datetime import numpy as np # ### We get our desired transition matrix MATRIX_PATH = '../data/generated_matrices/' MATRIX_FILENAME = 'mm_monday.csv' matrix_monday = pd.read_csv(f'{MATRIX_PATH}{MATRIX_FILENAME}', index_col = 0).T matrix_monday = np.array(matrix_monday) # ### We initiate a market object from the class, it takes the transition matrix as a parameter market = SuperMarket(matrix_monday) # ### The Market has a simulate method which simulates customers # # We pass the initial (total) number of customers, the open time and the close time of the market market.simulate(3,'8:00','15:00') # While the market is running it has a `current_state` and a ``total_state` attribute, `current_state` is the state of the market at the current time and will be useful for an animation if we do that. `total_state` keeps track of the complete market state over time # # The number of customers in the market is constant if `n` customers leave we insert `n` customers # # The `current_state` and `total_state` is kept as numpy arrays, this makes it a little tedius but faster. We can also keep these as data frames but there is a risk will be slower # ### After a simulation finishes, we can access the `total_state` and see what happened over time, there is also a results() method which returns the `total_state` as a dataframe a = market.total_state market.results()
jupyter_notebooks/supermaket_class_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Omaam/gcl/blob/main/gcl/hardlag_sim.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="9sOy_3viCR4_" import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set(style='whitegrid', context='talk') # + id="GhVAh85rDzSH" def symccf(a, base_side='left'): # make array corr_sym = np.copy(a) if base_side == 'left' else np.copy(a[::-1]) # get center index + 1 idx_med_m2 = int(np.floor(len(corr_sym)/2)) - 1 idx_med_p1 = int(np.ceil(len(corr_sym)/2)) # substitute corr_sym[idx_med_p1:] = corr_sym[idx_med_m2::-1] # substraction corr_rest = a - corr_sym # arrangement out = corr_rest if base_side == 'left' else corr_rest[::-1] return out # + colab={"base_uri": "https://localhost:8080/"} id="CKXTwJ_NCk9W" outputId="0e9cb20f-6ed3-435e-b8a1-f210c5a225f5" t = np.linspace(-4, 4, 9) base = np.logspace(-1, 4, 9) base = np.append(base, base[-2::-1]) t, base # + id="cUxb88nQQZEO" c0 = 8*base[4:13] c1 = base[3:12] # + id="ol6lxE2dEOOF" b = 2.5*c0 + 1.0*c1 c = 2.0*c0 + 1.5*c1 d = 1.5*c0 + 2.0*c1 b_sym = symccf(b) c_sym = symccf(c) d_sym = symccf(d) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="GW8QNL7aDJG8" outputId="80c57065-f21f-4b06-975b-e1129855a918" fig, ax = plt.subplots(2, figsize=(7, 9), sharex=True, dpi=300) ax[0].plot(t, c0, t, c1, ls=':') ax[0].plot(t, b, color='m', label='band1') ax[0].plot(t, c, color='c', label='band2') ax[0].plot(t, d, color='y', label='band3') ax[0].set_ylabel('CCF') ax[0].legend(['C0', 'C1']) ax[1].plot(t, b_sym, color='m') ax[1].plot(t, c_sym, color='c') ax[1].plot(t, d_sym, color='y') ax[1].set_ylabel('Hard lag') ax[1].set_xlabel('Lag (s)') # ax[1].legend(['band 1 (2.5xC0+1.0xC1)', 'band 2 (2.0xC0+1.5xC1)', 'band 3 (1.5xC0+2.0xC1)'], ax[1].legend(['band 1', 'band 2', 'band 3'], loc='best') plt.show()
gcl/hardlag_sim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Using XGBoost in pipelines # > Take your XGBoost skills to the next level by incorporating your models into two end-to-end machine learning pipelines. You'll learn how to tune the most important XGBoost hyperparameters efficiently within a pipeline, and get an introduction to some more advanced preprocessing techniques. This is the Summary of lecture "Extreme Gradient Boosting with XGBoost", via datacamp. # # - toc: true # - badges: true # - comments: true # - author: <NAME> # - categories: [Python, Datacamp, Machine_Learning] # - image: import pandas as pd import numpy as np import matplotlib.pyplot as plt import xgboost as xgb # ## Review of pipelines using sklearn # - Pipeline review # - Takes a list of 2-tuples (name, pipeline_step) as input # - Tuples can contain any arbitrary scikit-learn compatible estimator or transformer object # - Pipeline implements fit/predict methods # - Can be used as input estimator into grid/randomized search and `cross_val_score` methods # ### Encoding categorical columns I - LabelEncoder # Now that you've seen what will need to be done to get the housing data ready for XGBoost, let's go through the process step-by-step. # # First, you will need to fill in missing values - as you saw previously, the column LotFrontage has many missing values. Then, you will need to encode any categorical columns in the dataset using one-hot encoding so that they are encoded numerically. # # The data has five categorical columns: MSZoning, PavedDrive, Neighborhood, BldgType, and HouseStyle. Scikit-learn has a [LabelEncoder](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) function that converts the values in each categorical column into integers. You'll practice using this here. df = pd.read_csv('./dataset/ames_unprocessed_data.csv') df.head() # > Warning: Below Method is depreciated. Instead, Use ColumnTransformer # + from sklearn.preprocessing import LabelEncoder # Fill missing values with 0 df.LotFrontage = df.LotFrontage.fillna(0) # Create a boolean mask for categorical columns categorical_mask = (df.dtypes == 'object') # Get list of categorical columns names categorical_columns = df.columns[categorical_mask].tolist() # Print the head of the categorical columns print(df[categorical_columns].head()) # Create LabelEncoder object: le le = LabelEncoder() # Apply LabelEncode to categorical columns df[categorical_columns] = df[categorical_columns].apply(lambda x: le.fit_transform(x)) # Print the head of the LabelEncoded categorical columns print(df[categorical_columns].head()) # - # ### Encoding categorical columns II - OneHotEncoder # Okay - so you have your categorical columns encoded numerically. Can you now move onto using pipelines and XGBoost? Not yet! In the categorical columns of this dataset, there is no natural ordering between the entries. As an example: Using `LabelEncoder`, the CollgCr Neighborhood was encoded as 5, while the Veenker Neighborhood was encoded as 24, and Crawfor as 6. Is Veenker "greater" than Crawfor and CollgCr? No - and allowing the model to assume this natural ordering may result in poor performance. # # As a result, there is another step needed: You have to apply a one-hot encoding to create binary, or "dummy" variables. You can do this using scikit-learn's [OneHotEncoder](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html). # # > Warning: Instead using LabelEncoder, use make_column_transformer # + from sklearn.preprocessing import OneHotEncoder from sklearn.compose import make_column_transformer df = pd.read_csv('./dataset/ames_unprocessed_data.csv') # Fill missing values with 0 df.LotFrontage = df.LotFrontage.fillna(0) # Create a boolean mask for categorical columns categorical_mask = (df.dtypes == 'object') # Get list of categorical columns names categorical_columns = df.columns[categorical_mask].tolist() # Generate unique list of each categorical columns unique_list = [df[c].unique().tolist() for c in categorical_columns] # Create OneHotEncoder: ohe ohe = OneHotEncoder(categories=unique_list) # Create preprocess object for onehotencoding preprocess = make_column_transformer( (ohe, categorical_columns), ('passthrough', categorical_mask[~categorical_mask].index.tolist()) ) # apply OneHotEncoder to categorical columns - output is no longer a dataframe: df_encoded df_encoded = preprocess.fit_transform(df) # Print first 5 rows of the resulting dataset - again, this will no longer be a pandas dataframe print(df_encoded[:5, :]) # Print the shape fo the original DataFrame print(df.shape) # Print the shape of the transformed array print(df_encoded.shape) # - # ### Encoding categorical columns III: DictVectorizer # Alright, one final trick before you dive into pipelines. The two step process you just went through - `LabelEncoder` followed by `OneHotEncoder` - can be simplified by using a [DictVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.DictVectorizer.html). # # Using a `DictVectorizer` on a DataFrame that has been converted to a dictionary allows you to get label encoding as well as one-hot encoding in one go. # # Your task is to work through this strategy in this exercise! # + from sklearn.feature_extraction import DictVectorizer # Convert df into a dictionary: df_dict df_dict = df.to_dict("records") # Create the DictVectorizer object: dv dv = DictVectorizer(sparse=False) # Apply dv on df: df_encoded df_encoded2 = dv.fit_transform(df_dict) # Print the resulting first five rows print(df_encoded2[:5, :]) # Print the vocabulary print(dv.vocabulary_) # - # Besides simplifying the process into one step, DictVectorizer has useful attributes such as vocabulary_ which maps the names of the features to their indices. With the data preprocessed, it's time to move onto pipelines! # ### Preprocessing within a pipeline # Now that you've seen what steps need to be taken individually to properly process the Ames housing data, let's use the much cleaner and more succinct DictVectorizer approach and put it alongside an XGBoostRegressor inside of a scikit-learn pipeline. df = pd.read_csv('./dataset/ames_unprocessed_data.csv') X, y = df.iloc[:, :-1], df.iloc[:, -1] # + from sklearn.pipeline import Pipeline # Fill LotFrontage missing values with 0 X.LotFrontage = X.LotFrontage.fillna(0) # Setup the pipeline steps: steps steps = [('ohe_onestep', DictVectorizer(sparse=False)), ('xgb_model', xgb.XGBRegressor())] # Create the pipeline: xgb_pipeline xgb_pipeline = Pipeline(steps) # Fit the pipeline xgb_pipeline.fit(X.to_dict("records"), y) # - # ## Incorporating XGBoost into pipelines # - Additional components introduced for pipelines # - `sklearn_pandas`: # - `DataFrameMapper` - Interoperability between pandas and scikit-learn # - `CategoricalImputer` - Allow for imputation of categorical variables before conversion to integers # - `sklearn.preprocessing`: # - `Imputer` - Native imputation of numerical columns in scikit-learn # - `sklearn.pipeline`: # - `FeatureUnion` - combine multiple pipelines of features into a single pipeline of features # ### Cross-validating your XGBoost model # In this exercise, you'll go one step further by using the pipeline you've created to preprocess and cross-validate your model. # # df = pd.read_csv('./dataset/ames_unprocessed_data.csv') X, y = df.iloc[:, :-1], df.iloc[:, -1] # + from sklearn.feature_extraction import DictVectorizer from sklearn.pipeline import Pipeline from sklearn.model_selection import cross_val_score # Fill LotFrontage missing values with 0 X.LotFrontage = X.LotFrontage.fillna(0) # Setup the pipeline steps: steps steps = [("ohe_onestep", DictVectorizer(sparse=False)), ("xgb_model", xgb.XGBRegressor(max_depth=2, objective='reg:squarederror'))] # Create the pipeline: xgb_pipeline xgb_pipeline = Pipeline(steps) # Cross-validate the model cross_val_scores = cross_val_score(xgb_pipeline, X.to_dict('records'), y, scoring='neg_mean_squared_error', cv=10) # Print the 10-fold RMSE print("10-fold RMSE: ", np.mean(np.sqrt(np.abs(cross_val_scores)))) # - # ### Kidney disease case study I - Categorical Imputer # You'll now continue your exploration of using pipelines with a dataset that requires significantly more wrangling. The [chronic kidney disease dataset](https://archive.ics.uci.edu/ml/datasets/chronic_kidney_disease) contains both categorical and numeric features, but contains lots of missing values. The goal here is to predict who has chronic kidney disease given various blood indicators as features. # # As Sergey mentioned in the video, you'll be introduced to a new library, [sklearn_pandas](https://github.com/pandas-dev/sklearn-pandas), that allows you to chain many more processing steps inside of a pipeline than are currently supported in scikit-learn. Specifically, you'll be able to impute missing categorical values directly using the `Categorical_Imputer()` class in sklearn_pandas, and the `DataFrameMapper()` class to apply any arbitrary sklearn-compatible transformer on DataFrame columns, where the resulting output can be either a NumPy array or DataFrame. # # We've also created a transformer called a `Dictifier` that encapsulates converting a DataFrame using `.to_dict("records")` without you having to do it explicitly (and so that it works in a pipeline). Finally, we've also provided the list of feature names in kidney_feature_names, the target name in kidney_target_name, the features in X, and the target in y. # # In this exercise, your task is to apply the `CategoricalImputer` to impute all of the categorical columns in the dataset. You can refer to how the numeric imputation mapper was created as a template. Notice the keyword arguments `input_df=True` and `df_out=True`? This is so that you can work with DataFrames instead of arrays. By default, the transformers are passed a numpy array of the selected columns as input, and as a result, the output of the DataFrame mapper is also an array. Scikit-learn transformers have historically been designed to work with numpy arrays, not pandas DataFrames, even though their basic indexing interfaces are similar. X = pd.read_csv('./dataset/chronic_kidney_X.csv') y = pd.read_csv('./dataset/chronic_kidney_y.csv').to_numpy().ravel() # + from sklearn_pandas import DataFrameMapper, CategoricalImputer from sklearn.impute import SimpleImputer # Check number of nulls in each feature columns nulls_per_column = X.isnull().sum() print(nulls_per_column) # Create a boolean mask for categorical columns categorical_feature_mask = X.dtypes == object # Get list of categorical column names categorical_columns = X.columns[categorical_feature_mask].tolist() # Get list of non-categorical column names non_categorical_columns = X.columns[~categorical_feature_mask].tolist() # Apply numeric imputer numeric_imputation_mapper = DataFrameMapper( [([numeric_feature], SimpleImputer(strategy='median')) for numeric_feature in non_categorical_columns], input_df=True, df_out=True ) # Apply categorical imputer categorical_imputation_mapper = DataFrameMapper( [(category_feature, CategoricalImputer()) for category_feature in categorical_columns], input_df=True, df_out=True ) # - # ### Kidney disease case study II - Feature Union # Having separately imputed numeric as well as categorical columns, your task is now to use scikit-learn's [FeatureUnion](http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.FeatureUnion.html) to concatenate their results, which are contained in two separate transformer objects - `numeric_imputation_mapper`, and `categorical_imputation_mapper`, respectively. # # Just like with pipelines, you have to pass it a list of `(string, transformer)` tuples, where the first half of each tuple is the name of the transformer. # + from sklearn.pipeline import FeatureUnion # Combine the numeric and categorical transformations numeric_categorical_union = FeatureUnion([ ("num_mapper", numeric_imputation_mapper), ("cat_mapper", categorical_imputation_mapper) ]) # - # ### Kidney disease case study III - Full pipeline # It's time to piece together all of the transforms along with an `XGBClassifier` to build the full pipeline! # # Besides the `numeric_categorical_union` that you created in the previous exercise, there are two other transforms needed: the `Dictifier()` transform which we created for you, and the `DictVectorizer()`. # # After creating the pipeline, your task is to cross-validate it to see how well it performs. # + from sklearn.base import BaseEstimator, TransformerMixin # Define Dictifier class to turn df into dictionary as part of pipeline class Dictifier(BaseEstimator, TransformerMixin): def fit(self, X, y=None): return self def transform(self, X): if type(X) == pd.core.frame.DataFrame: return X.to_dict("records") else: return pd.DataFrame(X).to_dict("records") # + # Create full pipeline pipeline = Pipeline([ ("featureunion", numeric_categorical_union), ("dictifier", Dictifier()), ("vectorizer", DictVectorizer(sort=False)), ("clf", xgb.XGBClassifier(max_depth=3)) ]) # Perform cross-validation cross_val_scores = cross_val_score(pipeline, X, y, scoring='roc_auc', cv=3) # Print avg. AUC print("3-fold AUC: ", np.mean(cross_val_scores)) # - # ## Tuning XGBoost hyperparameters # # ### Bringing it all together # Alright, it's time to bring together everything you've learned so far! In this final exercise of the course, you will combine your work from the previous exercises into one end-to-end XGBoost pipeline to really cement your understanding of preprocessing and pipelines in XGBoost. # # Your work from the previous 3 exercises, where you preprocessed the data and set up your pipeline, has been pre-loaded. Your job is to perform a randomized search and identify the best hyperparameters. # + from sklearn.model_selection import RandomizedSearchCV # Create the parameter grid gbm_param_grid = { 'clf__learning_rate': np.arange(0.05, 1, 0.05), 'clf__max_depth': np.arange(3, 10, 1), 'clf__n_estimators': np.arange(50, 200, 50) } # Perform RandomizedSearchCV randomized_roc_auc = RandomizedSearchCV(estimator=pipeline, param_distributions=gbm_param_grid, n_iter=2, scoring='roc_auc', cv=2, verbose=1) # Fit the estimator randomized_roc_auc.fit(X, y) # Compute metrics print('Score: ', randomized_roc_auc.best_score_) print('Estimator: ', randomized_roc_auc.best_estimator_) # - # ## Final Thoughts # - Advanced Topic # - Using XGBoost for ranking/recommandation problems (Netflix/Amazon problem) # - Using more sophisticated hyperparamter tuning strategies for tuning XGBoost model (Bayesian Optimization) # - Using XGBoost as part of an ensemble of other models for regression/classification
_notebooks/2020-07-07-03-Using-XGBoost-in-pipelines.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from simpletransformers.ner import NERModel import pandas as pd import logging import sklearn import sklearn_crfsuite # Creating train_df and eval_df for demonstrat # Evaluate the model from sklearn_crfsuite import metrics # + # path to files path_2_en_train = "en_format_train.conll" path_2_en_test = "en_format_test.conll" path_2_es_train = "es_format_train.conll" path_2_es_test = "es_format_test.conll" path_2_th_train = "th_format_train.conll" path_2_th_test = "th_format_test.conll" path_2_checkpoints = "BA/slot_filling_models" # - labels = ['B-alarm/alarm_modifier', 'I-reminder/reference', 'B-reminder/reminder_modifier', 'I-reminder/todo', 'NoLabel', 'B-timer/attributes', 'B-datetime', 'B-reminder/todo', 'B-reminder/recurring_period', 'B-timer/noun', 'I-weather/noun', 'B-negation', 'B-reminder/noun', 'I-weather/attribute', 'I-alarm/alarm_modifier', 'B-weather/noun', 'I-datetime', 'B-weather/attribute', 'I-reminder/recurring_period', 'I-location', 'B-demonstrative_reference', 'B-location', 'I-reminder/reminder_modifier', 'B-reminder/reference', 'B-weather/temperatureUnit', 'I-reminder/noun', 'B-news/type', 'I-demonstrative_reference', 'I-negation', 'B-alarm/recurring_period', "I-alarm/recurring_period"] args = {'fp16': False, 'reprocess_input_data': True,'evaluate_during_training':False, "evaluate_during_training_verbose": False,\ 'overwrite_output_dir': True, 'num_train_epochs': 5} macro = lambda x,y: metrics.flat_f1_score(x,y, average= 'macro') micro = lambda x,y: metrics.flat_f1_score(x,y, average= 'micro') report = lambda x,y: metrics.flat_classification_report(x,y,digits = 5) report_dict = lambda x,y: metrics.flat_classification_report(x,y,digits = 5,output_dict = True,labels = list(range(len(labels))),target_names = labels) accuracy = lambda x,y: metrics.flat_accuracy_score(x,y) seq_accuracy = lambda x,y: metrics.sequence_accuracy_score(x,y) model = NERModel('xlmroberta','xlm-roberta-base', labels = labels, args=args) # Train the model model.train_model(path_2_en_train,output_dir = path_2_checkpoints) result, model_outputs, predictions = model.eval_model\ (path_2_en_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) result, model_outputs, predictions = model.eval_model\ (path_2_es_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) result, model_outputs, predictions = model.eval_model\ (path_2_th_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) model.train_model(path_2_es_train,output_dir = path_2_checkpoints) result, model_outputs, predictions = model.eval_model\ (path_2_en_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) result, model_outputs, predictions = model.eval_model\ (path_2_es_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) result, model_outputs, predictions = model.eval_model\ (path_2_th_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) model.train_model(path_2_th_train,output_dir = path_2_checkpoints) result, model_outputs, predictions = model.eval_model\ (path_2_th_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) result, model_outputs, predictions = model.eval_model\ (path_2_es_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"]) result, model_outputs, predictions = model.eval_model\ (path_2_en_test, macro=macro, micro=micro,accuracy=accuracy, report=report, seq_accuracy = seq_accuracy) print(result["report"]) print(result["seq_accuracy"])
deprecated/preliminary_experiments/Slot-filling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style('white') sns.set_context('notebook', font_scale=1.5) # %matplotlib inline # In today's demonstration, we will show you how to generate the predicted BOLD signal of a simple block design fMRI experiment. In fact, we will be making the predicted BOLD signal for an experiment run by a couple of PNI graduent students a few years ago (more on that later). # ## Section 1: Load and inspect the fMRI data # # In this first section, we will load and inspect fMRI data from one participant. As a reminder, this participant completed an experiment where they watched alternating blocks of a [visual checkerboard](https://www.youtube.com/watch?v=xEd1h_lz4rA) (warning: flashing lights) and an empty black background. Each stimulus (i.e. checkerboard, blank screen) was presented for 20 seconds a time. # # We will use a new python package, `nibabel`, to open this participant's raw data file. # + import nibabel as nib ## Specify path to file. f = 'sub-01_task-visualcontrol_space-T1w_desc-preproc_bold.nii.gz' ## Load functional data. func = nib.load(f).get_fdata() print('Functional data dim:\t(X=%s, Y=%s, Z=%s, T=%s)' %func.shape) # - # As the data's dimensions note, the data is a 4d matrix: three spatial dimensions and time. The x-axis defines the space between the left to right hemispheres. The y-axis defines the space from anterior to posterior (front to back). The z-axis defines the space from dorsal to ventral (top to bottom). Each element of this matrix is known as a **voxel**, or a 3d pixel, and is a measurement of the BOLD response at a given point and time during the scan. For this scan, each voxel was approximately $2 \text{mm}^3$. # # As you may have noticed, the z-axis is only one voxel deep. To simplify the analysis today, you will be analyzing only one **axial slice** (i.e. one horizontal slice) of this participant's brain. # # Let's begin by inspecting the brain. We can do this averaging over all time points (i.e. the 4th dimension). After averaging (and squeezing), use either Matplotlib's (`plt.imshow`) or Seaborn's (`sns.heatmap`) heatmap function to visualize the brain. We recommend setting `cmap=binary_r` for best results. # + ## Average and squeeze. avg = func.mean(axis=-1).squeeze() ## Plot. sns.heatmap(avg, cmap='binary_r', cbar=False, square=True, xticklabels=[], yticklabels=[]); # - # In the image above, can you find the visual cortex? (Hint: in raw, unmasked fMRI data, the eyeballs are still visually prominent.) # # The image above suggests we need to perform **masking**. That is, we need to remove the voxels corresponding to (oxygen-rich) non-neural tissues including the eyes and skull layers. Fortunately for us, we have an anatomical reference image for this participant that has undergone **skull-stripping** (i.e. digital removal of non-neural tissues). # # We load in this image below. # + ## Specify path to file. f = 'sub-01_task-visualcontrol_space-T1w_desc-boldref.nii.gz' ## Load anatomical data. anat = nib.load(f).get_fdata() print('Anatomical data dim:\t(X=%s, Y=%s, Z=%s)' %anat.shape) # - # As you can see, the anatomical reference image is a single snapshot with the same spatial dimensions as the functional data. To confirm that skull-stripping has worked, plot the anatomical reference image below using a heatmap function just like in the previous step. ## Plot. sns.heatmap(anat.squeeze(), cmap='binary_r', cbar=False, square=True, xticklabels=[], yticklabels=[]); # The anatomical reference looks good! The skull layers and eyes have been successfully removed. We will use this image to mask our functional data. # # Define a new matrix `mask` that indicates where any voxel in our anatomical reference image `anat` is greater than zero. Visualize the mask template to make sure your method worked. # + ## Define mask image. mask = anat > 0 ## Plot. sns.heatmap(mask.squeeze(), cmap='binary_r', cbar=False, square=True, xticklabels=[], yticklabels=[]); # - # ## Section 2: Filtering and Percent Signal Change # # Just like electrophysiological data, fMRI data is also usually filtered prior to analysis. Highpass filtering fMRI data is useful as it removes low frequency drifts in the fMRI signal. (For a great overview of sources of noise in fMRI data, see [Greve et al., 2013](https://doi.org/10.1007/s11336-012-9294-0).) Highpass filtering can be performed in a number of ways, but thankfully `nilearn` makes this very easy for us. We will use the `clean` function to highpass filter the data. The appropriate frequency cutoff may depend on your particular experiment, but standard values are 1/100 (0.01) Hz, 1/128 Hz (0.0078), and 1/200 (0.005) Hz. # # If you do not have the `nilearn` package installed, open a terminal and run: # # ```bash # pip install nilearn # ``` # # Importantly, applying a highpass filter will demean our data (i.e. subtract the mean from each voxel). This is not ideal if we are interested in converting our data to percent signal change ([Chen et al. 2017](https://www.sciencedirect.com/science/article/pii/S1053811916305432)). Thus, we will store the mean intensities of each voxel prior to filtering. # # # + from nilearn.signal import clean ## Define filtering parameters. high_pass = 1 / 100. tr = 1 ## Reshape the data for filtering. nx, ny, nz, nt = func.shape raw = func.reshape(-1,nt).T ## Compute mean signal. mu = raw.mean(axis=0) ## Apply highpass data. filt = clean(raw, detrend=True, standardize=False, high_pass=high_pass, t_r=tr) ## Convert to percent signal change. filt = filt / mu * 100 ## Shape back into original dimensions. filt = filt.T.reshape(nx,ny,nz,nt) # - # Let's inspect the filtered brain. average over all the time points (i.e. the 4th dimension) of the new matrix `filt`. After averaging (and squeezing), make a heatmap of the averaged image. How does the image look now? # + ## Average and squeeze. avg = filt.mean(axis=-1).squeeze() ## Plot. sns.heatmap(avg, cmap='binary_r', cbar=False, square=True, xticklabels=[], yticklabels=[]); # - # If the brain is harder to see now, that's good! That's because what normally separates the brain from the background image is the average voxel intensity (average BOLD signal) which is much higher in biological tissue. After filtering, however, we've demeaned and normalized the brain so that this average intensity has been removed. This is helpful for our analysis because it now makes measurements of BOLD signal change comparable across the brain. # # Let's now use our brain mask to mask all voxels not corresponding to neural tissue. Use the brain mask to index into `filt` and set all non-neural voxels (i.e. all voxels in `mask` with a value less than one) to zero. ## Mask data. filt[~mask] = 0 # To confirm that this procedure worked, remake the image from the step above. It should be easier now to tell brain from non-brain. # + ## Average and squeeze. avg = filt.mean(axis=-1).squeeze() ## Plot. sns.heatmap(avg, cmap='binary_r', cbar=False, square=True, xticklabels=[], yticklabels=[]); # - # ## Section 3: Design Matrix & Regression # In this step, similar to what you did last time, we will make the predicted BOLD signal in response to our alternating visual checkerboard stimulus. Now that you know how to make the predicted BOLD signal, we will not have you repeat every step. Instead, we will have you use a new function, `fmritools.design.design_matrix`, that will generate the BOLD timeseries for you. # # Importantly, the function `design_matrix` requires three important pieces of information: (1) the repetition time (TR), or length of time to collect one image, (2) the total number of images collected, and (3) a 3-column matrix, `events`, where each column corresponds to a stimulus onset, offset, and condition, respectively. # # Fortunately, you have all of this information! We know the TR=1, and from above, we know our data has T=250 total timepoints. From last session, you know the event onsets and offsets. **Important:** For this participant's data, the first checkerboard stimulus appeared 10s after the start of the scan. That is, all of your onsets/offsets from last time should be shifted forwards by 10s. # # Make the 3-column `events` matrix, where the first column is *onsets*, the second column is *offsets*, and the third column is an array of ones. # + ## Define events. onsets = [10,50,90,130,170,210] offsets = [30,70,110,150,190,230] conds = [1,1,1,1,1,1] events = np.column_stack([onsets, offsets, conds]) # - # Now we will generate our design matrix. Note, we are also having the `design_matrix` function return our generated boxcar function for comparison. # + from fmritools.design import design_matrix ## Define task metadata. n_acq = 250 tr = 1 ## Make design matrix. times, bold, boxcars = design_matrix(tr, n_acq, events, return_boxcars=True) # - # To confirm that the function worked, plot the boxcar function and predicted BOLD signal on the same plot. # + ## Intialize canvas. fig, ax = plt.subplots(1,1,figsize=(12,3)) ## Plot. ax.plot(times, boxcars) ax.plot(times, bold) ax.set(xlim=(times.min(), times.max()), xlabel='Time (s)') sns.despine() plt.tight_layout() # - # The moment has arrived! We will now regress our predicted BOLD signal against our participant's observed BOLD signal. # # Like last time, we need to construct our design matrix, `X`. Our design matrix will be comprised of two arrays: an *intercept* and `bold` timeseries. The intercept is just an array made entirely of 1s. Using `np.ones_like` and `np.column_stack`, make our design matrix `X` in the cell below. X = np.column_stack([np.ones_like(bold), bold]) # Let's perform regression! Note, we perform some ugly data reshaping in order to regress our design matrix against every voxel in our participant's brain slice simultaneously. # + ## Define image dimensions. nx, ny, nz, nt = filt.shape ## Perform regression. coef, _, _, _ = np.linalg.lstsq(X,filt.reshape(-1,nt).T,rcond=-1) ## Reshape regression coefficients. coef = coef.T.reshape(nx,ny,nz,-1)[...,-1] # - # Now let's plot the regression coefficients. They will be easier to interpret if we overlay them on top of the reference anatomical image. To do this, we will make two heatmaps in the same subplot: first the anatomical image, then the regression coefficients. Importantly, we will threshold the coefficients such that values beneath the threshold will not be visualized. # + ## Define threshold. threshold = 1 ## Mask coefficients. viz = np.where(np.abs(coef) > threshold, coef, np.nan) ## Plot. sns.heatmap(anat.squeeze(), cmap='binary_r', cbar=False, square=True, xticklabels=[], yticklabels=[]); sns.heatmap(viz.squeeze(), vmin=-6, vmax=6, center=0, square=True, xticklabels=[], yticklabels=[]); # - # Questions to ponder: # 1. The units of the coefficients are in percent signal change. What does this mean? What does a threshold of 1 mean? # 2. Is there one area of the brain that seems more active (i.e. greater change in BOLD signal)? Is this in line with your expectations? # ## Section 4: Region of Interest Analysis # # By now, it should be clear that this task robustly activates visual cortex. This is unsurprising: that's exactly what visual checkerboards are good at! # # In this final step, let's extract the BOLD signal from voxels strongly activated by the task. To do this mask, use the `coef` matrix to index into the `filt` timeseries. Extract all voxels from `filt` with a corresponding coefficient greater than 5. ## Extract BOLD signal. roi = filt[coef > 5] # Now, average over each voxel in this new matrix and plot the resulting timeseries. What does it look like? # + ## Intialize canvas. fig, ax = plt.subplots(1,1,figsize=(12,3)) ## Plot. ax.plot(times, roi.mean(axis=0)) ax.set(xlim=(times.min(), times.max()), xlabel='Time (s)') sns.despine() plt.tight_layout() # - # ## Figure Ideas # 1. Plot the observed BOLD signal change in visual cortex against the predicted BOLD signal. How do they compare? # 2. Plot the observed BOLD signal change in visual cortex compared to a collection of voxels outside of visual cortex. How do they compare? # # In describing your figures, remember to briefly describe the experiment (e.g. what the participant saw and for how long). Please also interpret the units; that is, describe what percent signal change means.
fmri-02/fmri-02-solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from urllib.request import urlopen html = urlopen('http://pythonscraping.com/pages/page1.html') print(html.read()) # + from urllib.request import urlopen from bs4 import BeautifulSoup html = urlopen('http://www.pythonscraping.com/pages/page1.html') bs = BeautifulSoup(html.read(), 'html.parser') print(bs.h1) # + from urllib.request import urlopen from urllib.error import HTTPError from urllib.error import URLError try: html = urlopen("https://pythonscrapingthisurldoesnotexist.com") except HTTPError as e: print("The server returned an HTTP error") except URLError as e: print("The server could not be found!") else: print(html.read()) # + from urllib.request import urlopen from urllib.error import HTTPError from bs4 import BeautifulSoup def getTitle(url): try: html = urlopen(url) except HTTPError as e: return None try: bsObj = BeautifulSoup(html.read(), "lxml") title = bsObj.body.h1 except AttributeError as e: return None return title title = getTitle("http://www.pythonscraping.com/pages/page1.html") if title == None: print("Title could not be found") else: print(title) # -
Chapter01_BeginningToScrape.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="EMefrVPCg-60" # ### Importing the libraries # + [markdown] id="x_ivG6XcqgZq" # Mounting with Gdrive # + id="lQonwVot6WUp" import os import cv2 import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras #from google.colab import drive from tensorflow.keras import layers from tensorflow.keras.preprocessing import image # + colab={"base_uri": "https://localhost:8080/"} id="yUFSB1c3qd_R" outputId="7d7f9317-8d3f-4205-f323-a3e68f5216dd" # drive.mount('/content/gdrive') # os.environ['KAGGLE_CONFIG_DIR'] = "/content/gdrive/My Drive/Kaggle" # #changing the working directory # # %cd /content/gdrive/My Drive/Kaggle # #pwd # + id="ly8H5lgMGqEK" # #!kaggle datasets download -d spandanpatnaik09/face-mask-detectormask-not-mask-incorrect-mask # + id="NptlMwJTGqG5" #unzipping the zip files and deleting the zip files # #!unzip \*.zip && rm *.zip # + id="nLFLGX7mGqM7" print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU'))) # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="FIleuCAjoFD8" outputId="32200819-cd68-4cf5-97a2-0d34afbccb50" tf.__version__ # + id="W7p236PnBjfX" # + colab={"base_uri": "https://localhost:8080/"} id="qP-TUKsIBjfX" outputId="15e97cc8-cca3-4587-bf90-67ed34565daf" train = tf.keras.preprocessing.image_dataset_from_directory( "D:\learn\Projects\My Pjts\Mask\dataset", labels='inferred', label_mode='categorical', class_names=None, color_mode='rgb', batch_size=64, image_size=(64,64), shuffle=True, seed=45, validation_split=0.2, subset= "training", interpolation='bilinear', follow_links=False, smart_resize=True ) test= tf.keras.preprocessing.image_dataset_from_directory( "D:\learn\Projects\My Pjts\Mask\dataset", labels='inferred', label_mode='categorical', class_names=None, color_mode='rgb', batch_size=64, image_size=(64,64), shuffle=True, seed=45, validation_split=0.2, subset= "validation", interpolation='bilinear', follow_links=False, smart_resize=True ) # + [markdown] id="af8O4l90gk7B" # ## Part 2 - Building the CNN # + [markdown] id="ces1gXY2lmoX" # ### Initialising the CNN # + id="SAUt4UMPlhLS" cnn = tf.keras.models.Sequential() # + [markdown] id="u5YJj_XMl5LF" # ### Step 1 - Convolution # + id="XPzPrMckl-hV" cnn.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu', input_shape=[64,64, 3])) # + [markdown] id="tf87FpvxmNOJ" # ### Step 2 - Pooling # + id="ncpqPl69mOac" cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=1)) # + [markdown] id="xaTOgD8rm4mU" # ### Adding a second convolutional layer # + id="i_-FZjn_m8gk" # cnn.add(tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu')) # cnn.add(tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu')) # cnn.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu')) # cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2)) # + id="atWCfhsKylcg" # cnn.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu')) # cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2)) # + [markdown] id="tmiEuvTunKfk" # ### Step 3 - Flattening # + id="6AZeOGCvnNZn" cnn.add(tf.keras.layers.Flatten()) # + [markdown] id="dAoSECOm203v" # ### Step 4 - Full Connection # + id="8GtmUlLd26Nq" # cnn.add(tf.keras.layers.Dense(units=256, activation='relu')) # cnn.add(tf.keras.layers.Dropout(0.5)) # + id="tYxJ8WIZxdHJ" # cnn.add(tf.keras.layers.Dense(units=128, activation='relu')) # cnn.add(tf.keras.layers.Dropout(0.2)) # + id="lPRCf6I13VVc" # cnn.add(tf.keras.layers.Dense(units=32, activation='relu')) cnn.add(tf.keras.layers.Dense(units=32, activation='relu')) # + id="6JJ6mUjx2zVV" # + [markdown] id="yTldFvbX28Na" # ### Step 5 - Output Layer # + id="1p_Zj1Mc3Ko_" cnn.add(tf.keras.layers.Dense(units=3, activation='softmax')) # + [markdown] id="D6XkI90snSDl" # ## Part 3 - Training the CNN # + [markdown] id="vfrFQACEnc6i" # ### Compiling the CNN # + id="NALksrNQpUlJ" cnn.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy']) # + [markdown] id="ehS-v3MIpX2h" # ### Training the CNN on the Training set and evaluating it on the Test set # + id="XUj1W4PJptta" cnn.fit(x = train, validation_data = test, epochs = 30) # + id="fvoLc2_KS5hW" cnn.save('mask7.h5') # + id="Xd58EpiOjzgq" sv = tf.keras.models.load_model('mask7.h5') # + id="fJS1ry2HUcZ1" msk = cv2.imread("mask.jpg") fac = cv2.imread("face.jpg") wrg = cv2.imread("wrong.jpg") # + id="zAd_wGg6UccF" nam = ["wrong","mask","face"] # + [markdown] id="U3PZasO0006Z" # ## Part 4 - Making a single prediction # + colab={"base_uri": "https://localhost:8080/"} id="gsSiWEJY1BPB" outputId="dcf2e193-b5c6-4556-a698-f19846d9e18c" test_image = image.load_img('wrong.jpg', target_size = (64,64)) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) result = sv.predict(test_image) print(result) result = np.argmax(result) print(nam[result])
Mask_CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Python Basics - Part 1 # # > This is Part 1 of a Python tutorial for beginners. # # - toc: true # - branch: master # - badges: true # - comments: true # - categories: [python] # As a self-taught Data Scientist and programmer, I always get asked about how I started my path towards learning, and a lot of non-coders ask me about how they can learn more about Data Science. And while I tell them about the umpteen Data Analytics and Data Visualization tools, various Machine Learning algorithms, and which Deep Learning frameworks to choose, it all starts with learning Python. Python is an interpreted high-level general-purpose programming language. In my view, Python is the best programming language to learn, in order to become a Data Scientist, owing to its readability, non-complexity, its large standard libraries, and its huge community. # I have been the beneficiary of several of books and YouTube tutorials that have helped me become a better Python Developer. This blog post is my way of giving back to the community. This might not be the best place to start learning how to code in Python. However, this blog post aims to be a good cheat sheet for beginners trying to look something up or if they want a refresher as to how certain objects perform. # --- # ### Display Output # You can use the `print` function to display output to your console. You can use either single or double quotes; just make sure that you stick to one for consistency. print('Hello, World!') print("Python is great") # #### Displaying blank lines # Blank lines make the output more readable. For a blank line, you can insert a `print` function with nothing inside. Each `print` function prints on a new line by default. # You can also use `\n` (the newline character) to print a new line at the end of a string or right in the middle of a string. print('Hello') print() print('Above is a blank line\n') print('Blank line \nin the middle of a string') # --- # ### Getting information from the user # You can use the `input` to ask for information from your user. We pass in a message with the `input` function. name = input('What is your name? ') print(name) # Here, whatever value is typed in by the user will be stored in the variable `name` and can be used as needed. We've chosen to print the value of `name` on the screen. # --- # ### Comments # Comments are a way of documenting your code. Comments can be added using `#`. These lines of code will not execute. # print('Hello') # the above code of line will not execute # but the below one will print('How are you?') # You can also use `''' '''` for multi-line comments. ''' Python is an important programming language in your Data Science journey ''' print('Python') # It's a good idea to write comments before your function explaining what that function does. # Commenting out lines can also help debug your code. # --- # ### String concepts # Strings can be stored in variables. Variables are just placeholders for some value inside our code. first_name = 'Pranav' print(first_name) # #### Concatenate strings # You can combine strings with the `+` operator. first_name = 'John' last_name = 'Doe' print(first_name + last_name) print('Hello, ' + first_name + ' ' + last_name) # #### Functions to modify strings # Below we have used functions to # - convert a string to uppercase # - convert a string to lowercase # - capitalize just the first word # - count all of the instances of a particular string. sentence = 'My name is <NAME>' print(sentence.upper()) print(sentence.lower()) print(sentence.capitalize()) print(sentence.count('o')) # You can use the escape character (backslash) `\` to insert characters that are illegal in a string. An example of an illegal character is a single quote inside a string that is surrounded by single quotes. first_name = input('What\'s your first name? ') last_name = input('What\'s is your last name? ') print('Hello, ' + first_name.capitalize() + ' ' + last_name.capitalize()) # #### Custom string formatting # To infuse things in strings dynamically, you can use string formatting. first_name = 'John' last_name = 'Doe' # There are two ways you can do this: # - formatting with `.format()` string method # name = 'Hello, {} {}'.format(first_name, last_name) print(name) # - formatting with string literals, called f-strings name = f'Hello, {first_name} {last_name}' print(name) # --- # ### Working with numbers # Numbers can be stored in variables. Make sure the variables have meaningful names. We can pass those variables inside functions. pi = 3.14159 print(pi) # #### Math with Numbers # - `+` for addition # - `-` for subtraction # - `*` for multiplication # - `/` for division # - `**` for exponent num1 = 9 num2 = 5 print(num1 + num2) print(num1 ** num2) # #### Type Conversion # You cannot combine strings with numbers in Python. For e.g., executing the code below will result in an error: # days_in_Dec = 31 print(days_in_Dec + ' days in December') # When displaying a string that contains numbers, you must convert the numbers into strings. days_in_Dec = 31 print(str(days_in_Dec) + ' days in December') # Numbers can be stored as strings. However, numbers stored as strings are treated as strings. num1 = '10' num2 = '20' print(num1 + num2) # Also, the input function always returns a string. num1 = input('Enter the first number: ') num2 = input('Enter the second number: ') print(num1 + num2) # But here you can see that you have a number stored in a string. What if you want to treat it as a number and do math with it? # You can do another data type conversion. The `int` function will convert it to a whole number, while the `float` function will convert it into a floating point number that might have decimal places. num1 = input('Enter the first number: ') num2 = input('Enter the second number: ') print(int(num1) + int(num2)) print(float(num1) + float(num2)) # --- # ### Working with Dates # We often need current date and time when logging errors and saving data. To get the current date and time, we need to use the `datetime` library. # + from datetime import datetime # the now function returns a datetime object current_date = datetime.now() print('Today is: ' + str(current_date)) # - # There are a whole bunch of functions you can use with `datetime` objects to manipulate dates. # `timedelta` is used to define a period of time. # + from datetime import datetime, timedelta today = datetime.now() print('Today is: ' + str(today)) one_day = timedelta(days=1) one_week = timedelta(weeks=1) yesterday = today - one_day past_week = today - one_week print('Yesterday was: ' + str(yesterday)) print('One week ago was: ' + str(past_week)) # - # You can also control the format of the date displayed on the screen. You can request just the day, month, year, hour, minutes and even seconds. print('Day: ' + str(current_date.day)) print('Month: ' + str(current_date.month)) print('Year: ' + str(current_date.year)) # Sometimes, you can receive a date as a string, and you might need to store it as a date. You'll need to convert it to a `datetime` object. # + birthday = input('When is your birthday (dd/mm/yyyy)? ') # the strptime function allows you to mention the # format in which you'll be receiving the date birthday_date = datetime.strptime(birthday, '%d/%m/%Y') print('Birthday: ' + str(birthday_date)) # - # So what date was it three days before you were born? birthday = input('When is your birthday (dd/mm/yyyy)? ') birthday_date = datetime.strptime(birthday, '%d/%m/%Y') print('Birthday: ' + str(birthday_date)) three_days = timedelta(days=3) three_before = birthday_date - three_days print('Date three days before birthday: ' + str(three_before)) # --- # ### Error Handling # *Error handling* is when you have a problem with your code that is running, and its not something that you're going to be able to predict when you push your code to production. For e.g., permissions issue, database change, server being down, etc. Basically things that happen in the wild, which you have no control over. # *Debugging* is when you know that there's something wrong (a bug) with your code because you did something incorrectly, and you're going to have to go in and correct it. # # The following tools we're going to talk about are concerned with error handling. There are three types of errors: # - syntax errors # - runtime errors # - logic errors # # # #### Syntax errors # With syntax errors, your code is not going to run at all. This type of error is easiest to track down. # this code won't run at all x = 35 y = 75 if x == y print('x = y') # We're missing a colon after `y`, which is why we're getting the error above. # #### Runtime errors # With runtime errors, your code will run, but it will fail when it encounters the error. # this code will fail when run x = 5 y = 0 print(x / y) # We're trying to divide by zero, which is not possible. Python tells you why you're getting the error and points towards the line which needs to be fixed. It's good practice to start from the line mentioned and work your way up to the error. Runtime errors can also be caused due to an error in the framework you're using, but the chances of that happening are extremely rare. Most probably, if you have a runtime error, it's because there's something wrong in your code. # #### Catching runtime errors # When a runtime error occurs, Python generates an exception during the execution and that can be handled, which avoids your program to interrupt. # Exception handling: # - `try`: this block will test the excepted error to occur # - `except`: here you can handle the error # - `else`: if there is no exception, then this block will be executed # - `finally`: finally block always gets executed whether exception is generated or not # # These tools are not used for finding bugs. # + x = 5 y = 0 try: print(x / y) except ZeroDivisionError as e: print('Sorry, something went wrong') except: print('Something really went wrong') finally: print('This line always runs, on success or failure') # - # #### Logic errors # Logic errors occur when the code compiles properly, doesn't give any syntax or runtime errors, but it doesn't give you the response you're looking for. # this code won't run at all x = 10 y = 20 if x > y: print(str(x) + ' is less than ' + str(y)) # In the code above, `x` is less than `y`; but the `if` statement includes `x > y`, instead of `x < y`. # # When you're figuring out what went wrong with your code, just make sure that you reread your code. You can check the documentation and also search the internet on sites like StackOverflow and Medium. # --- # ### Handling Conditions # Your code might need the ability to take different actions based on different conditions. Below are the operations that you'll need for comparisons: # - `>`: greater than # - `<`: less than # - `>=`: greater than or equal to # - `<=`: less than or equal to # - `==`: is equal to # - `!=`: is not equal to # # #### if statement # The `if` statement contains a logical expression using which the data is compared and a decision is made based on the result of the comparison. # + price = 250.0 if price >= 100.00: tax = 0.3 print(tax) # - # #### if - else statement # You can add a default action using `else`. An `else` statement contains the block of code that executes if the conditional expression in the `if` statement resolves to `0` or a `False` value. # + price = 50 if price >= 100.00: tax = 0.3 else: tax = 0 print(tax) # - # Be careful when comparing strings. String comparisons are case sensitive. country = 'INDIA' if country == 'india': print('Namaste') else: print('Hello') country = 'INDIA' if country.lower() == 'india': print('Namaste') else: print('Hello') # #### if - elif - else statement # You may need to check multiple conditions to determine the correct action. The `elif` statement allows you to check multiple expressions for `True` and execute a block of cide as soon as one of the conditions evaluates to `True`. # + # income tax percentage by state state = input('Which state do you live in? ') if state == 'Georgia': tax = 5.75 elif state == 'California': tax = 13.3 elif state == 'Texas' or state == 'Florida': tax = 0.0 else: tax = 4.0 print(tax) # - # #### OR statements # | first condition | second condition | evaluation | # |-----------------|------------------|------------| # |True |True |True | # |True |False |True | # |False |True |True | # |False |False |False | # # #### AND statements # | first condition | second condition | evaluation | # |-----------------|------------------|------------| # |True |True |True | # |True |False |False | # |False |True |False | # |False |False |False | # #### in operator # If you have a list of possible values to check, you can use the `in` operator. # + # income tax rates by state state = input('Which state do you live in? ') if state in ('Texas', 'Florida', 'Alaska', 'Wyoming', 'South Dakota'): tax = 0.0 elif state == 'California': tax = 13.3 elif state == 'Georgia': tax = 5.75 else: tax = 4.0 print(tax) # - # #### Nested if statement # There may be a situation when you want to check for another condition after a condition resolves to `True`. If an action depends on a combination of conditions, you can nest `if` statements. # + country = input("What country do you live in? ") if country.lower() == 'canada': province = input("What province/state do you live in? ") if province in('Alberta', 'Nunavut','Yukon'): tax = 0.05 elif province == 'Ontario': tax = 0.13 else: tax = 0.15 else: tax = 0.0 print(tax) # - # Sometimes you can combine conditions with `and` instead of nested `if` statements. # Let's assume that you're trying to calculate which students in a college have made the honor roll. The requirements for making the honor roll are a minimum 85% GPA and maintaining all your grades at at least 70%. # + # convert strings into float gpa = float(input('What\'s your GPA? ')) lowest_grade = float(input('What was your lowest grade? ')) if gpa >= 0.85 and lowest_grade >= 0.7: print('You made the honor roll') else: print('You\'re really stupid') # - # If you have a very complicated `if` statement, rather than copying and pasting it in different parts of you code to do different things, we can remember what happened the last time we looked at the `if` statement with a Boolean variable. # + gpa = float(input('What\'s your GPA? ')) lowest_grade = float(input('What was your lowest grade? ')) if gpa >= 0.85 and lowest_grade >= 0.7: honor_roll = True else: honor_roll = False ''' Somewhere later in your code if you need to check if a student is on honor roll, all you need to do is check the boolean variable set earlier in the code''' if honor_roll: # True by default print('You made the honor roll') # - # --- # ### Collections # # #### Lists # Lists are a collection of items. # + # prepopulate a list names = ['John', 'Will', 'Max'] # start with an empty list scores = [] # add new item to the end scores.append(90) scores.append(91) print(names) print(scores) # lists are zero-indexed print(scores[1]) # - # You can get the number of items in a list using `len`. # + names = ['John', 'Will', 'Max'] # get the number of items using len print(len(names)) # - # You can insert an item in a list using `insert`. This will insert the item at the specific index that you mention. # Bill will be inserted at index 0, i.e. the first item names.insert(0, 'Bill') print(names) # You can use `sort` to sort strings in alphabetical order. In case of numbers, it sorts them in the ascending order. Remember that using `sort` will modify the list! names.sort() print(names) # You can retrieve a range within the list by indicating the start and end index; the end index being exclusive, i.e. it will not be included in the list. # + names = ['Amy', 'Susan', 'Jackie', 'Kylie', 'Ellen'] # start and end index presenters = names[1:3] # all names up to but not including index 3 hosts = names[:3] # all names from 3 onwards, including index 3 judges = names[3:] print(names) print(presenters) print(hosts) print(judges) # - # #### Arrays # Arrays are a collection of numbered data types. Unlike a list, in order for you to use an array, you have to create an array object by importing it from the `array` library. # + from array import array # indicate the numerical type you'll use scores = array('d') # d indicates a double scores.append(80) scores.append(81) print(scores) print(scores[0]) # - # So what's the difference between an array and a list? # Arrays are only numerical data types and everything inside the array must be of the same data type. They can help add extra structure to your code. # Lists can store anything you want, can store any data type, and can have mixed data types. They give more flexibility to your code. # #### Dictionaries # Dictionaries give you the ability to put together a group of items; but instead of using numeric indexes, you can use key-value pairs. person = {'first': 'John'} person['last'] = 'Wick' print(person) print(person['first']) # + identity = { 'Batman': '<NAME>', 'Superman': '<NAME>', 'Spiderman': '<NAME>', 'Iron Man': '<NAME>' } print(identity) # - # When to use a dictionary vs a list? # It depends on whether you want to name things and whether you want items to be in a guaranteed order. # A dictionary will let you name key-value pairs but it does not guarantee you a specific order. # A list does guarantee you a specific order since it has a zero-based index. # --- # # ### Random Module # # One way to introduce random numbers in your code is to use the `random` module. # First you need to import the `random` module. # + import random # generate a random whole number between 1 and 50 # inclusive of 1 and 50 random_integer = random.randint(1, 50) print(random_integer) # + # generate a random floating point number between 0.0 and 1.0 # exclusive of 1.0 random_float = random.random() print(random_float) # generate a random floating point number between 0.0 and 5.0 print(random_float * 5) # - # There are so many more methods to the `random` module and you can check out the Python documentation to find out about all the things you can do with this module. # --- # ### Loops # # Loops are a concept that is used when you need to have things happening over and over again. # # #### for loops # `for` loops are used to loop through a collection. With a `for` loop, you can go through each item in a list and perform some action with each individual item in the list. # # > `for item in list_of_items: # # do something to each item` # go through the list of names for name in ['John', 'Will', 'Max']: print(name) wildcats = ['lion', 'tiger', 'puma', 'jaguar', 'cheetah', 'leopard'] for wildcat in wildcats: print(wildcat + ' is a wildcat.') # You can loop a particular number of times using `range`. `range` automatically creates a list of numbers for you. Remember that for the `range` function, the end index is exclusive. # # > `for number in range(a, b): # # do something # print(number)` # end index is exclusive for index in range(0, 5): print(index) # If you want the range to increase by any other number, you can add a step to the function after the starting and ending indices. for index in range(0, 15, 3): print(index) # #### while loop # `while` loops are used to loop with a condition. As long as something is `True`, the code will stay inside of the `while` loop i.e. the loop will continue going while the condition is true. You need to make sure that at some point you change the condition and it must result to `False`; otherwise the program will be stuck in an infinite loop, resulting in an error. # # > `while something_is_true: # # do something repeatedly` names = ['John', 'Will', 'Max'] index = 0 while index < len(names): print(names[index]) # change the condition index += 1 print(index) # + x = True while x: print('This is an example of a while loop.') # change the condition x = False while not x: print('This is another example of a while loop.') x = True # - # `for` loops are great when you want to iterate over something and you need to do something with each thing that you're iterating over. In cases like above, when you have a list, you almost always want use a `for` loop. # # `while` loops are useful when you don't care about the number in the sequence or about the item you're iterating through in a list, and you just simply want to carry out a functionality many times until a condition is met. You want to typically use a `while` loop when something is going to change automatically, e.g. when you need to read through a list of lines in a file, skip every alternate line, or if you're looking for something. # `while` loops are more dangerous because they can lead to infinite loops if the condition is not met. # --- # ### Functions # A function is a block of organized, reusable code that is used to perform a single, related action. Function provide better modularity for your application and a high degree of code reusing, e.g. the `print()` function. You can create your own functions, called *user-defined functions*. # Programming is all about copying and pasting code from one place to another. If you find yourself copying and pasting the exact same lines of code to more places in your program, you should probably move that into a function. # Functions must be declared before the line of code where the function is called. # # **Defining Functions** # > `def my_function(): # # do this # # then do this # # finally do this` # # **Calling Functions** # > `my_function()` # # # #### Functions with Inputs # # The input to a function is something that can be passed over when we call the function. # # > `def my_function(something): # # do this with something # # then do this # # finally do this` # # > `my_function(123)` # #### Functions with Outputs # # The output keyword for a function is `return`. The `return` line must be the last line of the function. You can have multiple return keywords or even a blank return keyword in a function. # + def my_function(): result = 5 * 4 return result my_function() # - # When you call a function that has an output, the returned output is what replaces the function call, and the output can be stored as a variable. # + def my_function(): return 5 * 8 output = my_function() print(output) # - # Imagine that you're trying to figure out why your program is taking a long time to run. So you write some print statements inside your code to tell you what time it is when the code is running, so you can see what time it is at different stages when your code is running. # + import datetime # print timestamps to see how long # sections take to run first_name = 'John' print('task completed') print(datetime.datetime.now()) print() for x in range(0, 10): print(x) print('task completed') print(datetime.datetime.now()) print() # - # The above code can be rewritten using a function. You can define the function using `def` keyword, followed by the name of the function, and then a colon (`:`). Remember to use indentation which determines what code belongs to that function. # + # import datetime class from datetime library from datetime import datetime # print the current time def print_time(): print('task completed') # no need for the extra datetime prefix # since the class is imported above print(datetime.now()) print() first_name = 'John' print_time() for x in range(0, 10): print(x) print_time() # - # Sometimes when you copy/paste your code, we want to change some part of it. In the above example, what if you want to display a different message each time you run it. Say you want to display a specific message depending on the command you were running. This is where function parameters come in. *Parameters* or *arguments* are placed or defined within the parentheses of a function. # + from datetime import datetime # print the current time and task name def print_time(task_name): print(task_name) print(datetime.now()) print() first_name = 'John' # pass in the task_name as a parameter print_time('first name assigned') for x in range(0, 10): print(x) # pass in the task_name as a parameter print_time('loop completed') # - # Let's take another example where the code looks different but we're using the same logic. Suppose you're interested in getting initials for a user ID after the user enters their name. # + first_name = input('Enter your first name: ') # get only the first letter of input first_name_initial = first_name[0:1] last_name = input('Enter your last name: ') last_name_initial = last_name[0:1] print('Your initials are: ' + first_name_initial + last_name_initial) # - # The above code can be written using a function. # + def get_initial(name): initial = name[0:1] # the return function returns a value return initial first_name = input('Enter your first name: ') first_name_initial = get_initial(first_name) last_name = input('Enter your last name: ') last_name_initial = get_initial(last_name) # nested function in another call print('Your initials are: ' + get_initial(first_name) + get_initial(last_name)) # - # Functions can accept multiple parameters. In the above example, suppose you want to the user initials to only be uppercase for a user ID but lowercase for an email ID. # + def get_initial(name, force_uppercase=True): # default to True if force_uppercase: initial = name[0:1].upper() else: initial = name[0:1] return initial first_name = input('Enter your first name: ') first_name_initial = get_initial(first_name) last_name = input('Enter your last name: ') last_name_initial = get_initial(last_name, False) print('Your initials are: ' + first_name_initial + last_name_initial) # - # When calling a function, you have to pass the parameters in the same order as when you defined the function. An exception to this is when you use named parameters, which offer better readability. # `first_name_initial = get_initial(force_uppercase=True, name=first_name)` # Functions make the code more readable if you use good function names. They make the code less clunky. Always add comments to explain the purpose of your function. # The main advantage of functions is that if you ever need to change your function code, you only need to change it in one place. You also reduce rework and the chance to introduce bugs when you change the code you copied. # ---
_notebooks/2020-12-19-python-basics-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img style="float: left;" src="earth-lab-logo-rgb.png" width="150" height="150" /> # # # Earth Analytics Education - EA Python Course Spring 2021 # ## Important - Assignment Guidelines # # 1. Before you submit your assignment to GitHub, make sure to run the entire notebook with a fresh kernel. To do this first, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart & Run All) # 2. Always replace the `raise NotImplementedError()` code with your code that addresses the activity challenge. If you don't replace that code, your notebook will not run. # # ``` # # YOUR CODE HERE # raise NotImplementedError() # ``` # # 3. Any open ended questions will have a "YOUR ANSWER HERE" within a markdown cell. Replace that text with your answer also formatted using Markdown. # 4. **DO NOT RENAME THIS NOTEBOOK File!** If the file name changes, the autograder will not grade your assignment properly. # 6. When you create a figure, comment out `plt.show()` to ensure the autograder can grade your plots. For figure cells, DO NOT DELETE the code that says `DO NOT REMOVE LINE BELOW`. # # ``` # ### DO NOT REMOVE LINE BELOW ### # student_plot1_ax = nb.convert_axes(plt) # ``` # # * Only include the package imports, code, and outputs that are required to run your homework assignment. # * Be sure that your code can be run on any operating system. This means that: # 1. the data should be downloaded in the notebook to ensure it's reproducible # 2. all paths should be created dynamically using the `os.path.join` # # ## Follow to PEP 8 Syntax Guidelines & Documentation # # * Run the `autopep8` tool on all cells prior to submitting (HINT: hit shift + the tool to run it on all cells at once! # * Use clear and expressive names for variables. # * Organize your code to support readability. # * Check for code line length # * Use comments and white space sparingly where it is needed # * Make sure all python imports are at the top of your notebook and follow PEP 8 order conventions # * Spell check your Notebook before submitting it. # # For all of the plots below, be sure to do the following: # # * Make sure each plot has a clear TITLE and, where appropriate, label the x and y axes. Be sure to include UNITS in your labels. # # ### Add Your Name Below # **Your Name:** <NAME> # <img style="float: left;" src="colored-bar.png"/> # --- # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "482b6a6fad5a6b7297cd1f14b52b28e1", "grade": false, "grade_id": "hw-instructions", "locked": true, "schema_version": 3, "solution": false, "task": false} # # Week 04 and 05 Homework - Automate NDVI Workflow # # For this assignment, you will write code to generate a plot of the mean normalized difference vegetation index (NDVI) for two different sites in the United States across one year of data: # # * San Joaquin Experimental Range (SJER) in Southern California, United States # * Harvard Forest (HARV) in the Northeastern United States # # The data that you will use for this week is available from **earthpy** using the following download: # # `et.data.get_data('ndvi-automation')` # # ## Assignment Goals # # Your goal in this assignment is to create the most efficient and concise workflow that you can that allows for: # # 1. The code to scale if you added new sites or more time periods to the analysis. # 2. Someone else to understand your workflow. # 3. The LEAST and most efficient (i.e. runs fast, minimize repetition) amount of code that completes the task. # # ### HINTS # # * Remove values outside of the landsat valid range of values as specified in the metadata, as needed. # * Keep any output files SEPARATE FROM input files. Outputs should be created in an outputs directory that is created in the code (if needed) and/or tested for. # * Use the functions that we demonstrated during class to make your workflow more efficient. # * BONUS - if you chose - you can export your data as a csv file. You will get bonus points for doing this. # # # ## Assignment Requirements # # Your submission to the GitHub repository should include: # * This Jupyter Notebook file (.ipynb) with: # * The code to create a plot of mean NDVI across a year for 2 NEON Field Sites: # * NDVI on the x axis and formatted dates on the y for both NEON sites on one figure/axis object # * The **data should be cleaned to remove the influence of clouds**. See the [earthdatascience website for an example of what your plot might look like with and without removal of clouds](https://www.earthdatascience.org/courses/earth-analytics-python/create-efficient-data-workflows/). # * BONUS: Create one output `.csv` file that has 3 columns - NDVI, Date and Site Name - with values for SJER and HARV. # # Your notebook should: # * Have *at least* 2 well documented and well named functions with docstrings. # * Include a Markdown cell at the top of the notebook that outlines the overall workflow using pseudocode (i.e. plain language, not code) # * Include additional Markdown cells throughout the notebook to describe: # * the data that you used - and where it is from # * how data are being processing # * how the code is optimized to run fast and be more concise # - # # Workflow overview # ## Assignment outputs # 1. Mean NDVI over a year for two sites # 2. A .csv file of dataframe used to create plots that includes site name, date, and mean NDVI values # # ## Steps required to complete project # 1. Get a list of all directories with Landsat data for two sites of interest # 2. Open each site directory, get Landsat scenes, and calculate mean NDVI for each scene for that site. # * Get a list of all GeoTIFF files # * Subset the list of files to just what is needed to calculate NDVI (files with bands) # * Sort the list to have the bands in the same order # * Open and crop the bands needed to calculate NDVI # ** Write a function to open files and combine into an xarray object # ** Write function to calculate NDVI from Landsat bands NIR = band 5, Red = band 4 # * Mask cloud cover from the scenes # * Calculate mean NDVI for a scene # 3. Output variables (mean NDVI, date, and site name) into a dataframe indexed on date. # 4. Export the dataframe to a .csv file # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "ca51bc48f62e7d3602d0567f742e1b15", "grade": false, "grade_id": "pseudo-code", "locked": true, "points": 15, "schema_version": 3, "solution": false, "task": true} # # Replace this cell with your pseudocode for this workflow # # If you happen to be a diagram person a diagram is ok too # # # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "9c7cd3e2e5089092e06ba301f2719a63", "grade": false, "grade_id": "core-imports", "locked": true, "schema_version": 3, "solution": false, "task": false} # Autograding imports - do not modify this cell import matplotcheck.autograde as ag import matplotcheck.notebook as nb import matplotcheck.timeseries as ts from datetime import datetime # + deletable=false nbgrader={"cell_type": "code", "checksum": "3c4d1141999885a9a9b09772962b180a", "grade": true, "grade_id": "student-imports-answer", "locked": false, "points": 10, "schema_version": 3, "solution": true, "task": false} tags=["hide", "hide_output"] # Import needed packages in PEP 8 order # and no unused imports listed (10 points total) # YOUR CODE HERE import os from glob import glob import matplotlib.pyplot as plt from matplotlib import patches as mpatches, colors from matplotlib.dates import DateFormatter import seaborn as sns import numpy as np from numpy import ma import pandas as pd import geopandas as gpd import xarray as xr import rioxarray as rxr from rasterio.plot import plotting_extent import earthpy as et import earthpy.spatial as es import earthpy.plot as ep import earthpy.mask as em # Prettier plotting with seaborn sns.set_style('white') sns.set(font_scale=1.5) # Download data and set working directory data = et.data.get_data('ndvi-automation') os.chdir(os.path.join(et.io.HOME, 'earth-analytics', 'data')) # + deletable=false editable=false hideCode=false hidePrompt=false nbgrader={"cell_type": "code", "checksum": "dcf5b59326bf066172ff61520b658a3d", "grade": true, "grade_id": "student-download-tests", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} # DO NOT MODIFY THIS CELL # Tests that the working directory is set to earth-analytics/data path = os.path.normpath(os.getcwd()) student_wd_parts = path.split(os.sep) if student_wd_parts[-2:] == ['earth-analytics', 'data']: print("\u2705 Great - it looks like your working directory is set correctly to ~/earth-analytics/data") else: print("\u274C Oops, the autograder will not run unless your working directory is set to earth-analytics/data") # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "35205d12dc9e8fa05a26fb927c0a2307", "grade": false, "grade_id": "ndvi-mean-site-instructions", "locked": true, "schema_version": 3, "solution": false, "task": false} # # Figure 1: Plot 1 - Mean NDVI For Each Site Across the Year (50 points) # # Create a plot of the mean normalized difference vegetation index (NDVI) for the two different sites in the United States across the year: # # * NDVI on the x axis and formatted dates on the y for both NEON sites on one figure/axis object. # * Each site should be identified with a different color in the plot and legend. # * The final plot **data should be cleaned to remove the influence of clouds**. # * Be sure to include appropriate title and axes labels. # # Add additional cells as needed for processing data (e.g. defining functions, etc), but be sure to: # * follow the instructions in the code cells that have been provided to ensure that you are able to use the sanity check tests that are provided. # * include only the plot code in the cell identified for the final plot code below # - # # Data description and processing # I compared normalized difference vegetation index (NDVI) values from two NEON research sites with different regional characteristics. The San Joaquin Experimental Range (SJER) site is 18.2 km2 and located north of Fresno, CA at the foothills of the Sierra Nevada Mountains. The SJER climate is characterized as a Mediterranean climate with cool, wet winters (October-April) and hot, dry summers. SJER vegetation is characterized as grassy open oak woodland. # The Harvard Forest (HARV) site is 48.1 km2 and located west of Boston, MA that spans a urban-rural gradient from the suburbs of Boston to northeast wildland. The HARV climate is temperate with stable annual precipitation. HARV vegetation is dominated by northern hardwood and coniferous forest and some agriculture. # Information on the sites: <a href="https://www.neonscience.org/field-sites/sjer" target="_blank">SJER</a> and <a href="https://www.neonscience.org/field-sites/harv" target="_blank">HARV</a> # # I calculated NDVI from Landsat 8 multispectral data collected at the SJER site from 1/7/2017 to 12/25/2017 and HARV site from 1/12/2017 to 12/30/2017. I collected the near infrared (NIR) and red bands from each date that were available. Bands were processed for each site and date by setting the valid pixel range and masking out pixels that were classified as cloud cover. NDVI was calculated by taking the difference between NIR and red spectral values and dividing by the sum NIR and red bands. Mean NDVI was calculated for each scene by date. # # # Code optimization # To accomplish the tasks, I developed code that could gather NIR and red bands for each of the dates and for each of the sites of interest to plot mean NDVI over the year. I created functions to open and clean bands and looped through scenes for each of the sites grabing the date from the file name. I was not able to get a nested loop to work to loop through each site but rather processed each site separately. This is an area that can be improved to reduce redundancy and simplify the code. To optimize the time it takes to run the code, I first calculated NDVI and then applied the cloud mask to that single layer - as opposed to applying the mask to each band layer separately. I also specified "from_disk = True" from rio.xarray to only open and load the specified crop layer. These steps helped to improve the speed and performance of the code. # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "ce17d4d685cd4c7034bd7b0bb389342a", "grade": false, "grade_id": "single-scene-instructions", "locked": true, "schema_version": 3, "solution": false, "task": false} # ## Task 1: # # In the cell below, create a single dataframe containing MEAN NDVI, the site name, # and the date of the data for the HARV site # scene `HARV/landsat-crop/LC080130302017031701T1-SC20181023151837`. The column names for the final # DataFrame should be`mean_ndvi`, and `site`, and the data should be **indexed on the date**. # # Use the functions that we reviewed in class (or create your own versions of them) to implement your code # # ### In the Cell below Place All Functions Needed to Run this Notebook (20 points) # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "0ee340cd9af6c949a08eb4b325716ae0", "grade": false, "grade_id": "cell-618e3588853f3ed8", "locked": true, "schema_version": 3, "solution": false, "task": false} ### DO NOT REMOVE THIS LINE OR EDIT / MOVE THIS CELL ### start_time = datetime.now() # + deletable=false nbgrader={"cell_type": "code", "checksum": "653ebd5db668245408615979f6c20944", "grade": true, "grade_id": "function-definitions-check", "locked": false, "points": 40, "schema_version": 3, "solution": true, "task": false} # In this cell place all of the functions needed to run your notebook # You will be graded here on function application, docstrings, efficiency so ensure # All functions are placed here! # YOUR CODE HERE # FUNCTION TO OPEN A SINGLE LANDSAT BAND AND SPECIFY VALID PIXEL ATTRIBUTE RANGE def open_clean_bands(band_path, crop_extent, valid_range=None,): """Open and mask a single landsat band using a pixel_qa layer. Parameters ----------- band_path : string A path to the array to be opened valid_range : tuple (optional) A tuple of min and max range of values for the data. Default = None Returns ----------- arr : xarray DataArray An xarray DataArray with values that should be masked set to 1 for True (Boolean) """ # TODO add tests to ensure the arrays are the same .shape band = rxr.open_rasterio(band_path, masked=True).rio.clip(crop_extent.geometry, from_disk=True).squeeze() # Only run this step if a valid range tuple is provided if valid_range: mask = ((band < valid_range[0]) | (band > valid_range[1])) band = band.where(~xr.where(mask, True, False)) return band ############### # FUNCTION TO OPEN and CLEAN UP SPECIFIC LANDSAT BANDS # AND MASK USING PIXEL QA LAYER (CLOUD COVER) AND CALCULATE NDVI def mask_crop_ndvi(band_paths, crop_bound, pixel_qa_path, vals): """Open and mask a single landsat band using the open_clean_bands function, calculate NDVI and mask using a pixel_qa layer. Parameters ----------- band_paths : string A path to the landsat scene of interest all_bands : list A list containing two xarray objects for landsat bands 4 and 5 crop_bound: geopandas GeoDataFrame A geopandas dataframe to crop the raster data using rasterio mask(). pixel_qa_path: string A path to a pixel qa tif file. vals: list A list of values needed to create the cloud mask Returns ----------- ndvi_crop : Xarray Dataset a cropped and masked xarray object containing NDVI values """ # For loop using open_clean_bands function all_bands = [] for aband in band_paths: cleaned_band = open_clean_bands(band_path=aband, crop_extent=crop_bound, valid_range=(0, 10000)) all_bands.append(cleaned_band) crop_json = crop_bound.geometry # Open and clip qa layer pixel_qa = rxr.open_rasterio(pixel_qa_path[0], masked=True).rio.clip(crop_json, from_disk=True).squeeze() # Calculate NDVI ndvi_xr = (all_bands[1]-all_bands[0]) / (all_bands[1]+all_bands[0]) # Apply cloud mask to NDVI ndvi_mask = ndvi_xr.where(~pixel_qa.isin(vals)) return ndvi_mask ############### # FUNCTION to create a pandas dataframe from NDVI and grab site and date from file path def mean_ndvi_df(ndvi_mask, dir_path, file_path): """Calculate the mean NDVI for a single site and scene with NDVI calculated and clouds masked out Parameters ----------- ndvi_mask : Xarray Dataset A cropped and masked xarray object containing NDVI values. dir_path: string Path to the landsat site folders to grab the site name. file_path: string The name of a individual landsat scene file to grab the date. Returns ----------- mean_ndvi_df : Pandas dataframe a dataframe of mean NDVI by site name and indexed on the date """ # Calculate mean NDVI for the scene mean_ndvi = np.nanmean(ndvi_mask) # Grab site name from file path site = os.path.basename(os.path.normpath(dir_path)) # Grab date from file name file_name = os.path.basename(file_path) date = file_name[10:18] # Create pandas dataframe data = {'date': [date], 'site': [site], 'mean_ndvi': [mean_ndvi]} mean_ndvi_df = pd.DataFrame(data, columns=['date', 'site', 'mean_ndvi']) mean_ndvi_df['date'] = pd.to_datetime(mean_ndvi_df['date']) # date format mean_ndvi_df = mean_ndvi_df.set_index('date') # index dataframe on date return mean_ndvi_df # Note: "from_disk" needs updated rioxarray package # + deletable=false nbgrader={"cell_type": "code", "checksum": "d124eef1d9cf2d0063ab450c9a20dc8e", "grade": false, "grade_id": "single-scene-answer", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide"] # Create dataframe of mean NDVI in this cell using the functions created above # Important: to use the ungraded tests below as a sanity check, # name your columns: mean_ndvi and site # Call the dataframe at the end of the cell so the tests run on it! # Be sure that the date column is an index of type date # HINT: the time series lessons may help you remember how to do this! # Get a list of each directory path = os.path.join("ndvi-automation", "sites") # Get a list of both site directories sites = glob(path + "/*/") # Get the site name site_name = os.path.basename(os.path.normpath(sites[0])) # Open up the shapefile for clipping your landsat data to the study area for just HARV=sites[0] vector_dir = os.path.join(sites[0], "vector") # Open crop boundary shapefile site_boundary_path = os.path.join(vector_dir, site_name + "-crop.shp") crop_bound = gpd.read_file(site_boundary_path) # Landsat cropped files for HARV site landsat_dir = os.path.join(sites[0], "landsat-crop") # Open bands for specific Harvard site of interest band_paths = sorted(glob(os.path.join( landsat_dir, "LC080130302017031701T1-SC20181023151837", "*band*[4-5].tif"))) # Cloud no data vals for Landsat 8 - vals = [328, 392, 840, 904, 1350, 352, 368, 416, 432, 480, 864, 880, 928, 944, 992, 480, 992] # Open cloud mask layer pixel_qa_path = glob(os.path.join( landsat_dir, "LC080130302017031701T1-SC20181023151837", "*qa*")) # Run function to open, clean, and calculate NDVI and mask cloud pixels ndvi_clean = mask_crop_ndvi(band_paths=band_paths, crop_bound=crop_bound, pixel_qa_path=pixel_qa_path, vals=vals) # Run function to create a dataframe of mean NDVI values by site and date mean_ndvi = mean_ndvi_df(ndvi_mask=ndvi_clean, dir_path=site_name, file_path=os.path.join(landsat_dir, "LC080130302017031701T1-SC20181023151837")) mean_ndvi # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "6121e3a0293ed64f09521b5d248496c3", "grade": true, "grade_id": "single-scene-tests", "locked": true, "points": 15, "schema_version": 3, "solution": false, "task": false} # This cell is testing your data output above student_ndvi_ts_single_site = _ single_scene_points = 0 # Ensure the data is stored in a dataframe. if isinstance(student_ndvi_ts_single_site, pd.DataFrame): print('\u2705 Your data is stored in a DataFrame!') single_scene_points += 1 else: print('\u274C It appears your data is not stored in a DataFrame. ', 'To see what type of object your data is stored in, check its type with type(object)') # Ensure that the date column is the index if isinstance(student_ndvi_ts_single_site.index, pd.core.indexes.datetimes.DatetimeIndex): print('\u2705 You have the index set to the date column!') single_scene_points += 2 else: print('\u274C You do not have the index set to the date column.') # Ensure that the date column is datetime if isinstance(student_ndvi_ts_single_site.index[0], pd._libs.tslibs.timestamps.Timestamp): print('\u2705 The data in your date column is datetime!') single_scene_points += 2 else: print('\u274C The data in your date column is not datetime.') # Ensure the site name is correct if student_ndvi_ts_single_site.site.values[0] == 'HARV': print('\u2705 You have the correct site name!') single_scene_points += 5 else: print('\u274C You do not have the correct site name.') if np.allclose(0.281131628228094, student_ndvi_ts_single_site.mean_ndvi.values[0]): print('\u2705 You have the correct mean NDVI value!') single_scene_points += 5 else: print('\u274C You do not have the correct mean ndvi value.') print("\n \u27A1 You received {} out of 15 points for creating a dataframe.".format( single_scene_points)) single_scene_points # - # ## Task 2: # # In the cell below, process all of the landsat scenes. Create a DataFrame that contains the following # information for each scene # # # | | index | site | mean_ndvi | # |---|---|---|---| # | Date | | | | # | 2017-01-07 | 0 | SJER | .4 | # # Be sure to call your dataframe at the end of the cell to ensure autograding works. # HINT: FOR THIS STEP, leave any rows containing missing values (`NAN`). # + deletable=false nbgrader={"cell_type": "code", "checksum": "848dd486333246e15b6b8f0dff745a4b", "grade": false, "grade_id": "cleaned_dataframes_answer", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide"] # Create dataframe of NDVI including the cleaning data to deal with clouds # Important: to use the ungraded tests below as a sanity check, # name your columns: mean_ndvi and site # Don't forget to set date as the index and make the values of type datetime # YOUR CODE HERE # RUN FOR EACH SITE SEPARATELY BC HAVING TROUBLE WITH NESTED FOR LOOP # Get a list of each directory #path = os.path.join("ndvi-automation", "sites") # Get a list of both site directories #sites = glob(path + "/*/") # Cloud no data vals for Landsat 8 - # vals = [328, 392, 840, 904, 1350, 352, 368, 416, # 432, 480, 864, 880, 928, 944, 992, 480, 992] # HARVARD # Get the site name site_name = os.path.basename(os.path.normpath(sites[0])) # Open up the shapefile for clipping your landsat data to the study area for just HARV=sites[0] vector_dir = os.path.join(sites[0], "vector") # Open crop boundary shapefile site_boundary_path = os.path.join(vector_dir, site_name + "-crop.shp") crop_bound = gpd.read_file(site_boundary_path) # Landsat cropped files for HARV site landsat_dir = os.path.join(sites[0], "landsat-crop") # FOR LOOP THROUGH SCENES IN HARVARD SITE scene_paths = sorted(glob(os.path.join(landsat_dir, "LC08*"))) mean_ndvi_all_harv = [] for ascene in scene_paths: ndvi_clean = mask_crop_ndvi(band_paths=sorted(glob(os.path.join(ascene, "*band*[4-5].tif"))), crop_bound=crop_bound, pixel_qa_path=glob( os.path.join(ascene, "*qa*")), vals=vals) # Create dataframe of mean ndvi values from the cleaned up ndvi scene site_mean_ndvi = mean_ndvi_df(ndvi_clean, dir_path=site_name, file_path=ascene) mean_ndvi_all_harv.append(site_mean_ndvi) mean_ndvi_all_harv_df = pd.concat(mean_ndvi_all_harv) # mean_ndvi_all_harv_df # SAN JOAQUIN EXPERIMENTAL STATION site_name_sjer = os.path.basename(os.path.normpath(sites[1])) # Directory to shapefile vector_dir_sjer = os.path.join(sites[1], "vector") # Open crop boundary shapefile site_boundary_path_sjer = os.path.join( vector_dir_sjer, site_name_sjer + "-crop.shp") crop_bound_sjer = gpd.read_file(site_boundary_path_sjer) # Landsat cropped files for HARV site landsat_dir_sjer = os.path.join(sites[1], "landsat-crop") # Path to site scenes scene_paths_sjer = sorted(glob(os.path.join(landsat_dir_sjer, "LC08*"))) # FOR LOOP THROUGH SCENES IN SJER SITE scene_paths = sorted(glob(os.path.join(landsat_dir, "LC08*"))) mean_ndvi_all_sjer = [] for ascene in scene_paths_sjer: ndvi_clean_sjer = mask_crop_ndvi(band_paths=sorted(glob(os.path.join(ascene, "*band*[4-5].tif"))), crop_bound=crop_bound_sjer, pixel_qa_path=glob( os.path.join(ascene, "*qa*")), vals=vals) # Create dataframe of mean ndvi values from the cleaned up ndvi scene site_mean_ndvi_sjer = mean_ndvi_df(ndvi_clean_sjer, dir_path=site_name_sjer, file_path=ascene) mean_ndvi_all_sjer.append(site_mean_ndvi_sjer) mean_ndvi_all_sjer_df = pd.concat(mean_ndvi_all_sjer) # Concatenate two dataframes from each site into a single dataframe mean_ndvi_all_sites = pd.concat( [mean_ndvi_all_harv_df, mean_ndvi_all_sjer_df], axis=0) mean_ndvi_all_sites # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "1ce5d7d7519d5e569e6cf7c5927c6ffb", "grade": true, "grade_id": "cleaned_dataframes_test", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false} # Last sanity check before creating your plot (10 points) # Ensure that you call your dataframe at the bottom of the cell above # and that it has columns called: mean_ndvi and site # Ensure the data is stored in a dataframe. student_ndvi_df = _ df_points = 0 if isinstance(student_ndvi_df, pd.DataFrame): print('\u2705 Your data is stored in a DataFrame!') df_points +=2 else: print('\u274C It appears your data is not stored in a DataFrame. ', 'To see what type of object your data is stored in, check its type with type(object)') # Check that dataframe contains the appropriate number of NAN values if student_ndvi_df.mean_ndvi.isna().sum() == 15: print('\u2705 Correct number of masked data values!') df_points +=2 else: print('\u274C The amount of null data in your dataframe is incorrect.') # Ensure that the date column is the index if isinstance(student_ndvi_df.index, pd.core.indexes.datetimes.DatetimeIndex): print('\u2705 You have the index set to the date column!') df_points +=3 else: print('\u274C You do not have the index set to the date column.') # Ensure that the date column is datetime if isinstance(student_ndvi_df.index[0], pd._libs.tslibs.timestamps.Timestamp): print('\u2705 The data in your date column is datetime!') df_points +=3 else: print('\u274C The data in your date column is not datetime.') # Output for timer, # DO NOT MODIFY end_time = datetime.now() total_time = end_time - start_time print( "Your total run time for processing the data was {0}.".format(total_time)) print("\n \u27A1 You received {} out of 10 points for creating a dataframe.".format( df_points)) df_points # + caption="Plot showing NDVI for each time period at both NEON Sites. In this example the cloudy pixels were removed using the pixel_qa cloud mask. Notice that this makes a significant different in the output values. Why do you think this difference is so significant?" deletable=false nbgrader={"cell_type": "code", "checksum": "f9d5ebf0557e366fa6f1727fd85a7e45", "grade": false, "grade_id": "plot_cleaned_dataframes_answer", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide"] # Add only the plot code to this cell # This is the final figure of mean NDVI # for both sites across the year # with data cleaned to deal with clouds # YOUR CODE HERE # Resample by month test = mean_ndvi_all_sites.groupby( ['site'])[['mean_ndvi']].resample('M').mean() # Reset so only index on date test.reset_index(inplace=True) mean_ndvi_for_plot = test.set_index('date') fig, ax = plt.subplots(figsize=(10, 10)) fig.suptitle( "Mean Normalized Difference Vegetation Index (NDVI)\n Jan 2017 - Dec 2017", fontsize=16) mean_ndvi_for_plot.groupby('site')['mean_ndvi'].plot(legend=True, ax=ax, marker='o') ax.set(xlabel="Month", ylabel="Mean NDVI", title="Mean NDVI 2017") # Define the date format date_form = DateFormatter("%b") # %m-%d ax.xaxis.set_major_formatter(date_form) ### DO NOT REMOVE LINES BELOW ### final_masked_solution = nb.convert_axes(plt, which_axes="current") # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "d2bc7d91b553a74e6382776fface9c70", "grade": true, "grade_id": "plot_cleaned_dataframes_test_answers", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} # Ignore this cell for the autograding tests # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "23a1c68916e304be754ea15d9495e781", "grade": true, "grade_id": "plot_cleaned_dataframes_tests", "locked": true, "points": 50, "schema_version": 3, "solution": false, "task": false} # Ignore this cell for the autograding tests # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "c660ce8da16752276c4b16e35c7d2726", "grade": false, "grade_id": "question-1", "locked": true, "schema_version": 3, "solution": false, "task": false} # # Question 1 (10 points) # # Imagine that you are planning NEON’s upcoming flight season to capture remote sensing data in these locations and want to ensure that you fly the area when the vegetation is the most green. # # When would you recommend the flights take place for each site? # # Answer the question in 2-3 sentences in the Markdown cell below. # + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "26a85257b913135d401b6dc4fd2a4fc3", "grade": true, "grade_id": "question-1-answer", "locked": false, "points": 10, "schema_version": 3, "solution": true, "task": false} # Mean NDVI values are the greatest from June to end of September at the Harvard site and from Februar to April at the San Joaquin Experimental Range. These time periods are when the vegetation at each respective site is most green. # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "603922a2076d0940962432ebc5069ef9", "grade": false, "grade_id": "question-2", "locked": true, "schema_version": 3, "solution": false, "task": false} # # Question 2 (10 points) # # How could you modify your workflow to look at vegetation changes over time in each site? # # Answer the question in 2-3 sentences in the Markdown cell below. # + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "f4ae5b1f3a47c9bf44714a2de486da54", "grade": true, "grade_id": "question-2-answer", "locked": false, "points": 10, "schema_version": 3, "solution": true, "task": false} # Adding Landsat scenes for other years would allow one to look at vegetation changes over time at each site. The workflow can be modified by adding another layer to loop through each year of data that is available. # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "df53001e9821bf3baef478a3b29bde33", "grade": false, "grade_id": "additional-markdown-cell-check", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": true} # # Do not edit this cell! (10 points) # # The notebook includes: # * additional Markdown cells throughout the notebook to describe: # * the data that you used - and where it is from # * how data are being processing # * how the code is optimized to run fast and be more concise # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "bcc0e446306a9db445d1ab243227c563", "grade": false, "grade_id": "pep8-formatting-check", "locked": true, "points": 30, "schema_version": 3, "solution": false, "task": true} # # Do not edit this cell! (20 points) # # The notebook will also be checked for overall clean code requirements as specified at the **top** of this notebook. Some of these requirements include (review the top cells for more specifics): # # * Notebook begins at cell [1] and runs on any machine in its entirety. # * PEP 8 format is applied throughout (including lengths of comment and code lines). # * No additional code or imports in the notebook that is not needed for the workflow. # * Notebook is fully reproducible. This means: # * reproducible paths using the os module. # * data downloaded using code in the notebook. # * all imports at top of notebook. # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "67969627ed2d8a81a168d0ed1831224d", "grade": false, "grade_id": "cell-bf1766fe2443b94a", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": true} # ## BONUS - Export a .CSV File to Share (10 points possible) # # This is optional - if you export a **.csv** file with the columns specified above: Site, Date and NDVI Value you can get an additional 10 points. # # * FULL CREDIT: File exists in csv format and contains the columns specified. # We will check your github repo for this file! # # + # Export mean NDVI dataframe as .csv file in "outputs" folder # If the dir does not exist, create it output_path = os.path.join("ndvi-automation", "outputs") if not os.path.isdir(output_path): os.mkdir(output_path) # Export the buffered point layer as a shapefile to use in zonal stats mean_ndvi_path = os.path.join(output_path, "mean_ndvi_all_sites.csv") mean_ndvi_all_sites.to_csv(mean_ndvi_path) #mean_ndvi_all_sites #df.to_csv('hrdata_modified.csv')
fergus_emi_ea-2021-04-ndvi-automation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import copy import matplotlib.pyplot as plt # %matplotlib nbagg from chickpea import Waveform, Element, Sequence, Segment # ## Introducing: Segments # # A segment is part of a 'waveform' which represents a time slice on one channel of waveform generator with up to two markers also specified. Many segments can be put together to make the waveform on one channel. A good example would be to use a segment to specify a gate and then string together many gates on one waveform. # # A segment needs: # * a name (optional) # * a generator funtion OR a points_array # * if a generator function is specified then the arguments for this function should be supplied in a dictionary before you can readonably expect to get the points in the function. Sample rate ("SR") will always be one of these # * markers (optional) - These should be given as a dictionary with keys in [1, 2] for marker 1 and 2. If the points were specified in a points array the markers dictionary should spoecified by the raw_markers argument, if a generator function was given the markers can be specified by time_markers or points_markers. For both of these the delay and durations for each 'marker on' should be specified in nested dictionaries (see example below). Whichever you specify will be converted to points markers. # # A segment gives you: # * name (str) # * points (array) # * markers (dict) # ### First define some nice functions for segment use # + def ramp(start, stop, dur, SR): points = int(round(SR * dur)) return np.linspace(start, stop, points) def gaussian(sigma, sigma_cutoff, amp, SR): points = int(SR * 2 * sigma_cutoff * sigma) t = np.linspace(-1 * sigma_cutoff * sigma, sigma_cutoff * sigma, num=points) return amp * np.exp(-(t**2 / (2 * sigma**2))) def flat(amp, dur, SR): points = int(SR * dur) return amp * np.ones(points) def gaussian_derivative(sigma, sigma_cutoff, amp, SR): points = int(SR * 2 * sigma_cutoff * sigma) t = np.linspace(-1 * sigma_cutoff * sigma, sigma_cutoff * sigma, num=points) return -amp * t / sigma * np.exp(-(t / (2 * sigma))**2) # - # ### Now make some segments flat_segment = Segment(name='test_flat', gen_func=flat, func_args={'amp':0, 'dur':3, 'SR':10}) ramp_segment = Segment(name='test_ramp', gen_func=ramp, func_args={'start': 0, 'stop':1, 'dur':2, 'SR':10}, time_markers={1: {'delay_time':[-0.5], 'duration_time':[1]}}) # Segments have length, duration, markers, points etc ramp_segment.duration plt.figure() plt.plot(ramp_segment.points) # It is also possible to add markers to segments and clear all markers on a segment: flat_segment.add_bound_marker(2, 10, 20) flat_segment.markers len(flat_segment) flat_segment.duration flat_segment.clear_markers() flat_segment.markers # ## Introducing: Waveforms # # A Waveform is made up of a wave and up to 2 markers and represents what runs on one channel of a waveform generator at one time. # # A waveform needs: # * a length (optional) to tell it how many points it should have, this can be specified later or generated by adding segments # * a channel (optional) is necessary to set if you want to put the waveform together with others onto an 'element' which is what a multi-channel waveform generator will know about # * a segment_list (optional) can be used to generate the wave but it is also possible to specify the wave manually after waveform creation. # * a sample_rate (optional) is useful if you want to add lots of segments and don't want to bother setting their sample rates individually # # A waveform gives you: # * wave (an array) # * markers (a dict of arrays) # * channel (int) test_wf = Waveform(segment_list=[flat_segment, ramp_segment, flat_segment]) test_wf.sample_rate pl = test_wf.plot() # Waveforms also have length and (if you have set the sample rate) duration. Setting the sample rate will set the "SR" func_arg on all segments in the waveform. # # Markers can be added manually and cleared (either the ones bound to the waveform or all markers on segment and waveform). # # Segments can be added. test_wf.add_segment(flat_segment) test_wf.add_marker(1, 0, 10) test_wf.channel = 2 test_wf.clear_wave_markers() test_wf.clear_all_markers() plt.figure() plt.plot(test_wf.wave) plt.plot(test_wf.markers[1]) # lets make a few spares (using the raw points method rather then segment list) to use later another_test_wf = Waveform(length=len(test_wf), channel=1) another_test_wf.wave[0:20] = 1 # + yet_another_test_wf = Waveform(length=len(test_wf), channel=1) yet_another_test_wf.wave[20:40] = 1 the_last_test_wf = Waveform(length=len(test_wf), channel=2) the_last_test_wf.wave[50:60] = 1 the_last_test_wf.add_marker(2, 10, 15) # - plt.figure() plt.plot(the_last_test_wf.wave) plt.plot(the_last_test_wf.markers[2]) # ### Introducing: Elements # # An element is a bunch of waveforms which run simultaneously on different channels of the waveform generator. Hence it is just a dictionary of the waveforms with the channels as keys. # # An element needs: # * nothin really, its chill # * sample_rate (optional) handy if you want to set the sample_rate of all the waveforms at once and then get it later # # An element gives you: # * waveforms. In fact it should be used just like a dictionary # * duration (if you set the sample rate) test_element = Element() test_element.add_waveform(test_wf) test_element.add_waveform(another_test_wf) test_element # Let's make a spare for later another_test_element = Element() another_test_element.add_waveform(yet_another_test_wf) another_test_element.add_waveform(the_last_test_wf) test_element # ### Introducing: Sequences # # A sequence is a list of elements which should be executed one after another on the waveform generator. This is pretty useful if you have an AWG4014C (and really how could you not) and want to use sequencing mode which is much faster than manually stepping through elements. It's also good for things like varying one parameter on one waveform because you can just make a sequence where the only difference between the elements is the value of that one parameter. # # A sequence needs: # * a name (optional) # * a variable (optional) - This is useful if you want to vary one param across the sequence as mentioned above, this way you can easily ask the sequence what it varied across it's elements # * a variable_label (optional) - Good for labeling axes ;) # * a variable_unit (optional) - As above # * start (optional) is the start value of the variable specified which is used together with stop to make a 'variable_array' which you can use however you would like but I use to specify what changed throughout the sequence # * stop (optional) is used with 'start' to make the 'variable_array' # * step (optional) is the step size of the variable array # * nreps (default 1) is the number of times you want each element played before going to the next element # * trig_waits (default 0) is whether or not you want the elements to wait for a trigger # * goto_states (default 'the next element') is which element you want up next # * jump_tos (default 'the first element') is where you want to go if there is an 'event'. Don't ask # * labels (optional) is any other metadata you want to stick on the sequence in dict form # # A sequence gives you: # * a nice list of elements :) it's basically just a list with bells # * a variable_array (see above) # * a method for 'unwrapping' your element list into a format which your waveform generator wants to suck up and play test_seq = Sequence(name='test_seq') test_seq.add_element(test_element) test_seq.add_element(another_test_element) test_seq pl = test_seq.plot(elemnum=0) pl2 = test_seq.plot(elemnum=1) # Great, so that worked. I know I didn't use much of the 'varying a parameter in a sequence part' but head over to one of the other example notebooks for some examples of that and in the meantime let's wrap up with a quick demo of uploading to our favourite AWG5014C using QCoDeS (also our favourite). # ### Upload and Play import qcodes as qc import qcodes.instrument_drivers.tektronix.AWG5014 as awg awg1 = awg.Tektronix_AWG5014('AWG1', 'TCPIP0::172.20.3.170::inst0::INSTR', timeout=40) awg1.make_send_and_load_awg_file(*test_seq.unwrap()) awg1.ch1_state(1) awg1.ch2_state(1) awg1.run() wf = Waveform(length=10) wf.wave
examples/Pulse Building Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="bHHxWxcC1zvl" # # Hate speech classification using Twitter dataset # # Consists of: in-domain results and domain adaptation on movies dataset results # # The class labels depict the following: # # 0: Normal speech, # 1: Offensive speech # 2: Hate speech # - # #### To work with this, the following folder paths needs to be created in the directory of this notebook: # # classification_reports/ : This will contain all the classification reports generated by the model # # data/ : Contains twitter.csv annotation file # # movies/ : contains all_movies.csv file # # movies/for_training/: contains 6 movies used for cross validation training and testing # # training_checkpoints/in_domain/twitter/cp_twitter.ckpt : for storing the weights of execution # + colab={"base_uri": "https://localhost:8080/"} id="JsvBIx41liLy" outputId="b0d440ee-7b6b-4f08-e3c9-3a554b0548ea" # ! pip install transformers==2.6.0 # + [markdown] id="qNLCw68nFu8W" # ## Training on twitter dataset # + id="3YwUM7V6OewJ" import pandas as pd from matplotlib import pyplot as plt import numpy as np import re import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import os import glob # + [markdown] id="jrG4iXRJrAvE" # #### Initialize bert classification model for 3 labels # + colab={"base_uri": "https://localhost:8080/", "height": 330, "referenced_widgets": ["77a302bb0fee4bb9859beb40c300461f", "<KEY>", "<KEY>", "a9655fe3fdfe4957a232a44f5a61eef2", "<KEY>", "<KEY>", "e4e0d3a879a74514af83e2adf49dd017", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9f3673f0fa7f4cf48c976dee14582c2c", "9be1b3874c794c5dbacd5767785cade0", "57ae6d880e6a4632ad649b1c5e35ac6f", "<KEY>", "c327daef5bf349329eba9145a0cc196a", "34b8f35f8cc84a9687b7e144f78a7bc5", "<KEY>", "<KEY>", "2b8283368e634eaeab5380ec35f263c2", "471db887c8384b12a95ff74abb8039e3", "c53566cfa56c43eabd05f018e78dde61", "44aadc2560be495b8e9220e82293904a", "2df07e19db5e4a62bb4cd941c42d2b6e", "<KEY>", "<KEY>", "<KEY>", "c33e8985a00e4c40974710b378abe633", "8ec03ebc88e149ba816b6a97e61dd40a", "<KEY>", "<KEY>", "<KEY>", "37f94a91b3ed4e74bbe87c05a2a043a7", "<KEY>", "94f802ef369f4492a4ef2fdb5f37b94a", "70ff435f15ea4d6d88ffa424925dfa8e", "<KEY>", "ec832b6bbeb14e669d373e8bfd506e88", "aad70965e8df469f99e5eee178ba65e6", "7104b66b3b074355bd325ce1d4d99fab"]} id="SI4UAcYcOpxc" outputId="9c2fdbf8-b06f-4c5f-8431-f68c91c58dcc" from transformers import BertTokenizer, TFBertForSequenceClassification from transformers import InputExample, InputFeatures model = TFBertForSequenceClassification.from_pretrained("bert-base-uncased", trainable=True, num_labels=3) tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") # + [markdown] id="97-sBX4P0Quk" # Initialize checkpoints # + id="tXpzylZOGzpT" checkpoint_path = "training_checkpoints/in_domain/twitter/cp_twitter.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1) # + [markdown] id="DaTErsaJst7w" # Read hate dataset and convert it into train and test # + colab={"base_uri": "https://localhost:8080/"} id="kd0PtS8gQqaq" outputId="3825cc11-bd3b-4073-b01d-727143a5a61c" df = pd.read_csv("data/twitter.csv") df = df.drop(columns=['Unnamed: 0']) df['tweet'] = df['tweet'].str.strip() df.count() # + colab={"base_uri": "https://localhost:8080/"} id="6p4uB34gSwbI" outputId="09cb9ff0-4055-46e5-aaef-b1dfa93fb9d8" df.info() # + id="0zjTXZXeHWg0" def get_dataset(df, seed, test_size): return train_test_split(df, test_size=test_size, random_state=seed, shuffle=True) # + id="CgefoUK4RODW" train, test = get_dataset(df, 11, 0.2) # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="bbG3RppguKks" outputId="80e9cb4f-9aa8-4745-c7b7-fe142f047012" train.head() # + colab={"base_uri": "https://localhost:8080/"} id="fKcwAZ0SIAmS" outputId="413366e4-b4fa-473d-b975-360a6d180362" train.info() # + id="fjBp7e80WqCQ" train.columns = ['DATA_COLUMN', 'LABEL_COLUMN'] test.columns = ['DATA_COLUMN', 'LABEL_COLUMN'] # + id="_cdRKItSW0Cz" def convert_data_to_examples(train, test, DATA_COLUMN, LABEL_COLUMN): train_InputExamples = train.apply(lambda x: InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this case text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1) validation_InputExamples = test.apply(lambda x: InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this case text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1) return train_InputExamples, validation_InputExamples train_InputExamples, validation_InputExamples = convert_data_to_examples(train, test, 'DATA_COLUMN', 'LABEL_COLUMN') def convert_examples_to_tf_dataset(examples, tokenizer, max_length=128): features = [] # -> will hold InputFeatures to be converted later for e in examples: # Documentation is really strong for this method, so please take a look at it input_dict = tokenizer.encode_plus( e.text_a, add_special_tokens=True, max_length=max_length, # truncates if len(s) > max_length return_token_type_ids=True, return_attention_mask=True, pad_to_max_length=True, # pads to the right by default # CHECK THIS for pad_to_max_length truncation=True ) input_ids, token_type_ids, attention_mask = (input_dict["input_ids"], input_dict["token_type_ids"], input_dict['attention_mask']) features.append( InputFeatures( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=e.label ) ) def gen(): for f in features: yield ( { "input_ids": f.input_ids, "attention_mask": f.attention_mask, "token_type_ids": f.token_type_ids, }, f.label, ) return tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "token_type_ids": tf.TensorShape([None]), }, tf.TensorShape([]), ), ) DATA_COLUMN = 'DATA_COLUMN' LABEL_COLUMN = 'LABEL_COLUMN' # + colab={"base_uri": "https://localhost:8080/"} id="mOm1oM-HXaby" outputId="ae56d840-787a-4402-a7e9-05781d50525f" train_InputExamples, validation_InputExamples = convert_data_to_examples(train, test, DATA_COLUMN, LABEL_COLUMN) train_data = convert_examples_to_tf_dataset(list(train_InputExamples), tokenizer) train_data = train_data.batch(32) validation_data = convert_examples_to_tf_dataset(list(validation_InputExamples), tokenizer) validation_data = validation_data.batch(32) # + id="ujXS3VX8XjpE" model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-6, epsilon=1e-08, clipnorm=1.0), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')]) # + colab={"base_uri": "https://localhost:8080/"} id="hhGM8DV9VEsc" outputId="b5f1c8d8-3447-4d86-f39f-92acb19e546f" hist = model.fit(train_data, epochs=4, validation_data=validation_data, callbacks=[cp_callback]) # + colab={"base_uri": "https://localhost:8080/"} id="IWogEL_YnDJT" outputId="75e23845-9950-4c8a-a1d5-c36243408e1d" preds = model.predict(validation_data) # + id="VZFSpUcQvNz2" cr = classification_report(test['LABEL_COLUMN'],np.argmax(preds[0],axis=1),output_dict=True) # + id="JJLTRyWYvT_N" pd.DataFrame(cr).transpose().to_csv('classification_reports/classification_bert_twitter_indomain.csv') # + [markdown] id="aJwpxJTO2Ldy" # #### In-domain classification report for twitter # + colab={"base_uri": "https://localhost:8080/", "height": 235} id="LHZeKp0k7cqp" outputId="7b92b7f7-c053-404f-93ee-b56f4b2d4da8" pd.DataFrame(cr).transpose() # 0: Normal speech, 1: Offensive speech, 2: Hate speech # + [markdown] id="Of6cSDFrjTUq" # # # --- # # # # --- # # # # --- # # # # --- # # # #### Domain Adaptation, predicting on movies with the twitter trained model on 3 labels # + id="f-qYIKfU0Hbx" def convert_data_to_examples_valid(data, DATA_COLUMN, LABEL_COLUMN): train_InputExamples = data.apply(lambda x: InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this case text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1) return train_InputExamples # + id="bWO0Gd0kJcjc" df_movies = pd.read_csv('movies/all_movies.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="qE7LKSArCTwl" outputId="149e4276-f958-4d99-dbe9-925118c7d61b" df_movies.head(2) # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="kPCquRoxzgbg" outputId="68e54500-be0e-423a-eec0-6eb7e9d6f491" df_movies = df_movies.rename(columns={"text": "DATA_COLUMN", "majority_answer": "LABEL_COLUMN"}) df_movies.head() # + id="4crkpHOjzSU-" movie_InputExamples = convert_data_to_examples_valid(df_movies, DATA_COLUMN, LABEL_COLUMN) # + colab={"base_uri": "https://localhost:8080/"} id="Eye0NOKZ0X60" outputId="1072f16e-ef57-4cc7-8b1f-6550cb14b09c" movie_data = convert_examples_to_tf_dataset(list(movie_InputExamples), tokenizer) movie_data = movie_data.batch(32) # + id="Y7Hj48JM0dQZ" preds_movie = model.predict(movie_data) # + id="LfGZDwCu1U4z" cr_movies = classification_report(df_movies['LABEL_COLUMN'], np.argmax(preds_movie[0], axis=1), output_dict=True) # + id="cyZGGzhH4hbU" pd.DataFrame(cr_movies).transpose().to_csv('classification_reports/bert_twitter_domain_adap_movies.csv') # + [markdown] id="aRPGsVc42e18" # #### Domain adaptation classification report from twitter on the movies dataset # + colab={"base_uri": "https://localhost:8080/", "height": 235} id="JxlniuH5jv88" outputId="5ccb283f-7f7e-4df1-c39c-310503990034" pd.DataFrame(cr_movies).transpose() # 0: None, 1: offensive, 2:hate # + [markdown] id="2J1kcn3M5nEi" # # # --- # # # # --- # # # # --- # ### Cross validation # # + [markdown] id="ccJl8pYO5yI-" # #### 6-fold cross validation on movies by fine tuning on above twitter dataset # + id="6K6Nab3AAMPg" def convert_data_to_examples_cv(train, DATA_COLUMN, LABEL_COLUMN): train_InputExamples = train.apply( lambda x: InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this case text_a=x[DATA_COLUMN], text_b=None, label=x[LABEL_COLUMN]), axis=1) return train_InputExamples def convert_examples_to_tf_dataset_cv(examples, tokenizer, max_length=128): features = [] # -> will hold InputFeatures to be converted later for e in examples: # Documentation is really strong for this method, so please take a look at it input_dict = tokenizer.encode_plus( e.text_a, add_special_tokens=True, max_length=max_length, # truncates if len(s) > max_length return_token_type_ids=True, return_attention_mask=True, pad_to_max_length=True, # pads to the right by default # CHECK THIS for pad_to_max_length truncation=True ) input_ids, token_type_ids, attention_mask = (input_dict["input_ids"], input_dict["token_type_ids"], input_dict['attention_mask']) features.append( InputFeatures( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=e.label ) ) def gen(): for f in features: yield ( { "input_ids": f.input_ids, "attention_mask": f.attention_mask, "token_type_ids": f.token_type_ids, }, f.label, ) return tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "token_type_ids": tf.TensorShape([None]), }, tf.TensorShape([]), ), ) def train_bert(df_train, df_test, load_training = False): model = TFBertForSequenceClassification.from_pretrained("bert-base-uncased", trainable=True, num_labels=3) tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") if load_training: model.load_weights('training_checkpoints/in_domain/twitter/cp_twitter.ckpt') train = df_train[['text', 'majority_answer']] train.columns = ['DATA_COLUMN', 'LABEL_COLUMN'] test = df_test[['text', 'majority_answer']] test.columns = ['DATA_COLUMN', 'LABEL_COLUMN'] DATA_COLUMN = 'DATA_COLUMN' LABEL_COLUMN = 'LABEL_COLUMN' train_InputExamples = convert_data_to_examples_cv(train, DATA_COLUMN, LABEL_COLUMN) test_InputExamples = convert_data_to_examples_cv(test, DATA_COLUMN, LABEL_COLUMN) train_data = convert_examples_to_tf_dataset_cv(list(train_InputExamples), tokenizer) train_data = train_data.batch(32) test_data = convert_examples_to_tf_dataset_cv(list(test_InputExamples), tokenizer) test_data = test_data.batch(32) # compile and fit model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-6, epsilon=1e-08, clipnorm=1.0), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')]) model.fit(train_data, epochs=6) print('predicting') preds = model.predict(test_data) # classification return classification_report(test['LABEL_COLUMN'], np.argmax(preds[0], axis=1), output_dict=True) # + id="3-8P6vh8Np7i" # + id="DWsFTqlHlgv3" def load_movies_to_df(path): df_movies = [] for filename in glob.glob(path + '*.csv'): df_movies.append(pd.read_csv(filename)) return df_movies # + id="Xz1rCrpvB2RA" df_movies = load_movies_to_df('movies/for_training/') classification_reports = [] df_main = pd.DataFrame() # + colab={"base_uri": "https://localhost:8080/"} id="DpchIXr0lciQ" outputId="1cfdd5ef-5860-4738-d4a8-0782492b2cf6" # perform cross folding for i in range(len(df_movies)): df_train = pd.concat(df_movies[0:i] + df_movies[i + 1:]) df_test = df_movies[i] train_movies = df_train['movie_name'].unique() test_movie = df_test['movie_name'].unique() print(','.join(train_movies)) print(test_movie[0]) classification_reports.append(train_bert(df_train, df_test, True)) print('Train movies: ', str(','.join(train_movies))) print('Test movie: ', str(test_movie[0])) print('Classification report: \n', classification_reports[i]) print('------------------------------------------------') df_cr = pd.DataFrame(classification_reports[i]).transpose() df_cr['movie_train'] = str(','.join(train_movies)) df_cr['movie_test'] = str(test_movie[0]) df_cr.to_csv('classification_reports/'+'bert_twitter_cv_finetune_testmovie_'+str(test_movie[0])+'.csv') df_main = df_main.append(df_cr) # + id="LyfPoHfVB63k" df_main.to_csv('classification_reports/bert_crossvalid_finetune_twitter.csv') # + colab={"base_uri": "https://localhost:8080/"} id="e9c-yeiBHfGG" outputId="e5071a61-8489-4ad3-dfb0-c9e475004788" print(df_main) # + colab={"base_uri": "https://localhost:8080/"} id="iD3D9odZB7de" outputId="8f0d8e1f-14cb-492d-a3c6-4c4be80f68e6" len(classification_reports[0]) # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="68L-CNl6GGg8" outputId="56633258-ebc6-4225-9630-4a1cabec8ba3" df_main.head() # + id="NRGObHolGHHF" def get_precision_recall_f1(category, result_df): precision = result_df[result_df.label==category].precision.mean() recall = result_df[result_df.label==category].recall.mean() f1 = result_df[result_df.label==category]['f1-score'].mean() return {'label': category, 'precision': precision, 'recall': recall, 'f1': f1} # + id="PVRT0b1thTtB" df_cv= pd.read_csv('classification_reports/bert_crossvalid_finetune_twitter.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="SnXkEFJlmfSU" outputId="d7f90212-fe72-456a-e4bc-a2cc14d33e72" df_cv = df_cv.rename(columns={'Unnamed: 0': 'label', 'b': 'Y'}) df_cv.head() # + id="V81usTykmsnl" normal_dict = get_precision_recall_f1('0', df_cv) offensive_dict = get_precision_recall_f1('1',df_cv) hate_dict = get_precision_recall_f1('2',df_cv) # - # #### Aggregated results of all 6 folds # + colab={"base_uri": "https://localhost:8080/", "height": 143} id="9xnoFFrOmgu1" outputId="791e4815-ea99-4f08-b497-fd2b6a046fe5" df_result = pd.DataFrame([normal_dict, offensive_dict, hate_dict]) df_result # + colab={"base_uri": "https://localhost:8080/"} id="cLHfd4BFnloL" outputId="bc8f2090-258f-46b6-a8cf-2145b770750f" for cr in classification_reports: print(cr) # + id="rPVfWb2BDZ3z"
models/BERT/bert_twitter_domain_adap_cv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import random import pickle import pandas as pd from copy import deepcopy import torch from tqdm.notebook import tqdm from collections import defaultdict data_path = 'train, valid and test csv files path' save_path = 'pickle data save path' encode_dict = {"Br": 'Y', "Cl": 'X', "Si": 'A', 'Se': 'Z', '@@': 'R', 'se': 'E'} decode_dict = {v: k for k, v in encode_dict.items()} # + # Function definition for making data to be used for SMILES-MaskGAN def get_pair(tokens, mask_idxs, mask_id): idxs = [vocab[atom] for atom in tokens] def _pad(ls, pad_index, max_length=100): padded_ls = deepcopy(ls) while len(padded_ls) <= max_length: padded_ls.append(pad_index) return padded_ls srcs = deepcopy(idxs) srcs.append(vocab['<eos>']) # append eos id in srcs last tgts = deepcopy(idxs) tgts.insert(0, vocab['<eos>']) # insert eos id in tgts first srcs_pad = _pad(srcs, vocab['<pad>'], max_length=100) tgts_pad = _pad(tgts, vocab['<pad>'], max_length=100) mask = torch.zeros(len(tgts_pad)) for mask_idx in mask_idxs: offset = 1 mask[mask_idx + offset] = 1 srcs[mask_idx] = mask_id return srcs_pad, tgts_pad, len(srcs), mask def encode(smiles: str) -> str: """ Replace multi-char tokens with single tokens in SMILES string. Args: smiles: SMILES string Returns: sanitized SMILE string with only single-char tokens """ temp_smiles = smiles for symbol, token in encode_dict.items(): temp_smiles = temp_smiles.replace(symbol, token) return temp_smiles class Mask: mask_token = '__<m>__' def __call__(self, n): idxs = self.forward(n) # Verify indices are okay. assert (len(idxs) < n) valid_set = set(list(range(n))) for i in idxs: assert (i in valid_set) return idxs class StochasticMask(Mask): def __init__(self, probability): self.p = probability self.r = random.Random(42) def forward(self, n): # Starting from one, since masks are messed, k = int(n * self.p) idxs = self.r.sample(range(1, n), k) return idxs class VocabBuilder: def __init__(self, mask_builder): self.vocab_path = os.path.join(data_path, 'vocab', 'vocab.pt') self.mask_builder = mask_builder self._vocab = None def vocab(self): if self._vocab is None: self.build_vocab() return self._vocab def build_vocab(self): if os.path.exists(self.vocab_path): self._vocab = torch.load(self.vocab_path) else: self.rebuild_vocab() def rebuild_vocab(self): self.forbidden_symbols = {'Ag', 'Al', 'Am', 'Ar', 'At', 'Au', 'D', 'E', 'Fe', 'G', 'K', 'L', 'M', 'Ra', 'Re', 'Rf', 'Rg', 'Rh', 'Ru', 'T', 'U', 'V', 'W', 'Xe', 'Y', 'Zr', 'a', 'd', 'f', 'g', 'h', 'k', 'm', 'si', 't', 'te', 'u', 'v', 'y'} self._vocab = {'<unk>': 0, '<pad>': 1, '<eos>': 2, '#': 20, '%': 22, '(': 25, ')': 24, '+': 26, '-': 27, '.': 30, '0': 32, '1': 31, '2': 34, '3': 33, '4': 36, '5': 35, '6': 38, '7': 37, '8': 40, '9': 39, '=': 41, 'A': 7, 'B': 11, 'C': 19, 'F': 4, 'H': 6, 'I': 5, 'N': 10, 'O': 9, 'P': 12, 'S': 13, 'X': 15, 'Y': 14, 'Z': 3, '[': 16, ']': 18, 'b': 21, 'c': 8, 'n': 17, 'o': 29, 'p': 23, 's': 28, "@": 42, "R": 43, '/': 44, "\\": 45, 'E': 46, '__<m>__': 47 } self.idx_char = {v: k for k, v in self._vocab.items()} if self.vocab_path is not None: torch.save(self._vocab, self.vocab_path) # - dir_ = os.path.join(data_path, 'data.csv') # change data name data = pd.read_csv(dir_) smiles_list = [line.strip() for line in data['canonical_smiles']] encoded_list = [encode(line) for line in smiles_list if len(line) <= 100] # + rmask = StochasticMask(0.1) vocab = None if vocab is None: builder = VocabBuilder(rmask) vocab = builder.vocab() # + final_data = defaultdict( list, { k: [] for k in ('train_srcs', 'train_tgts', 'train_lengths', 'train_mask')}) with tqdm(total=len(encoded_list)) as tbar: for i, tokens in enumerate(encoded_list): seq_len = len(tokens) mask_idxs = rmask(seq_len) mask_id = vocab['__<m>__'] src, tgt, length, mask = get_pair(tokens, mask_idxs, mask_id) final_data['train_srcs'].append(src) final_data['train_tgts'].append(tgt) final_data['train_lengths'].append(length) final_data['train_mask'].append(mask) tbar.update(1) # - # Save pickle file with open(os.path.join(data_path, 'chembl26_canon_train_0.1.pkl'),'wb') as f: pickle.dump(final_data, f, pickle.HIGHEST_PROTOCOL) # load pickle file with open(os.path.join(data_path, 'chembl26_canon_train_0.1.pkl'),'rb') as f: val_data = pickle.load(f)
make_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns; import numpy as np import pandas as pd # # Why use Manifold Learning? # PCA is good at reducing the dimensionality, but it sucks when the data has nonlinear relationships within it. # # To fix this problem we can use a class of methods known as Manifold Learning - a class of unsupervised estimators that seeks to describe datasets as low-dimensional manifolds embedded in high-dimensional spaces. When you think of a manifold, I'd suggest imagining a sheet of paper: this is a two-dimensional object that lives in our familiar three-dimensional world, and can be bent or rolled in that two dimensions. In the parlance of manifold learning, we can think of this sheet as a two-dimensional manifold embedded in three-dimensional space. # + # создадим двумерные данные def make_hello(N=1000, rseed=42): # Make a plot with "HELLO" text; save as PNG fig, ax = plt.subplots(figsize=(4, 1)) fig.subplots_adjust(left=0, right=1, bottom=0, top=1) ax.axis('off') ax.text(0.5, 0.4, 'HELLO', va='center', ha='center', weight='bold', size=85) fig.savefig('hello.png') plt.close(fig) # Open this PNG and draw random points from it from matplotlib.image import imread data = imread('hello.png')[::-1, :, 0].T rng = np.random.RandomState(rseed) X = rng.rand(4 * N, 2) i, j = (X * data.shape).astype(int).T mask = (data[i, j] < 1) X = X[mask] X[:, 0] *= (data.shape[0] / data.shape[1]) X = X[:N] return X[np.argsort(X[:, 0])] X = make_hello(1000) colorize = dict(c=X[:, 0], cmap=plt.cm.get_cmap('rainbow', 5)) plt.scatter(X[:, 0], X[:, 1], **colorize) plt.axis('equal'); # - # # MDS - Multidimentional Scaling # # Looking at data like this, we can see that the particular choice of x and y values of the dataset are not the most fundamental description of the data: we can scale, shrink, or rotate the data, and the "HELLO" will still be apparent. For example, if we use a rotation matrix to rotate the data, the x and y values change, but the data is still fundamentally the same: # + def rotate(X, angle): theta = np.deg2rad(angle) R = [[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]] return np.dot(X, R) X2 = rotate(X, 20) + 5 plt.scatter(X2[:, 0], X2[:, 1], **colorize) plt.axis('equal'); # конкретные значения x и y неважны, важные расстояния между х и y # + # визуализируем расстояния между точками через .pairwise_distances, строящую матрицу N на N # где N=числу точек, а в ячейках матрицы лежат расстояния между соотв. точками from sklearn.metrics import pairwise_distances D = pairwise_distances(X) # визуализация матрицы plt.style.use('dark_background') plt.imshow(D, zorder=2, cmap='Blues', interpolation='nearest') plt.colorbar(); # заодно проверим будут ли равны матрицы для исходной картинки и повёрнутой D2 = pairwise_distances(X2) print('D == D2: ', np.allclose(D, D2)) # - # This distance matrix gives us a representation of our data that is invariant to rotations and translations, but the visualization of the matrix above is not entirely intuitive (ничего не понятно, слова HELLO на ней нет и в помине). # # Создать матрицу расстояний легко, а вот трансформировать её обратно в x и y координаты сложно. Это и делает MDS - given a distance matrix between points, it recovers a $D$-dimensional coordinate representation of the data. # востановим исходный вид данных (двумерный вид) from sklearn.manifold import MDS model = MDS(n_components=2, dissimilarity='precomputed', random_state=1) out = model.fit_transform(D) plt.scatter(out[:, 0], out[:, 1], **colorize) plt.axis('equal'); # MDS algorithm recovers one of the possible two-dimensional coordinate representations of our data, using only the $N\times N$ distance matrix describing the relationship between the data points # # Важный момент - матрицу расстояний можно построить из данных ЛЮБОЙ размерности и затем получить из неё "версию" этих данных с МЕНЬШЕЙ размерностью. # # Пример преобразования 2D -> 3D -> 2D, с использованием MDS: # + # Преобразуем исходную картинку в 3D def random_projection(X, dimension=3, rseed=42): assert dimension >= X.shape[1] rng = np.random.RandomState(rseed) C = rng.randn(dimension, dimension) e, V = np.linalg.eigh(np.dot(C, C.T)) return np.dot(X, V[:X.shape[1]]) X3 = random_projection(X, 3) # визуализируем результат преобразования from mpl_toolkits import mplot3d ax = plt.axes(projection='3d') ax.scatter3D(X3[:, 0], X3[:, 1], X3[:, 2], **colorize) ax.view_init(azim=70, elev=50) # - # применим MDS model = MDS(n_components=2, random_state=1) out3 = model.fit_transform(X3) plt.scatter(out3[:, 0], out3[:, 1], **colorize) plt.axis('equal'); # # Manifold Learning Goal: # Given high-dimensional embedded data, it seeks a low-dimensional representation of the data that preserves certain relationships within the data. # # In the case of MDS, the quantity preserved is the distance between every pair of points. # # # Nonlinear Embeddings: Where MDS Fails # # MDS отлично работает с линейными преобразованиями (вращения, изменение размера, т.д.) однако при нелинейных преобразованиях он лажает. # # Пример - трансформируем исходную картинку в форму буквы S и затем скормим её MDSу. # + def make_hello_s_curve(X): t = (X[:, 0] - 2) * 0.75 * np.pi x = np.sin(t) y = X[:, 1] z = np.sign(t) * (np.cos(t) - 1) return np.vstack((x, y, z)).T XS = make_hello_s_curve(X) from mpl_toolkits import mplot3d ax = plt.axes(projection='3d') ax.scatter3D(XS[:, 0], XS[:, 1], XS[:, 2], **colorize); # важно - исходные "отношения" между точками всё ещё остались здесь, # однако трансформация была произведенна нелинейная # - # применим MDS к нелинейно трансформированным данным from sklearn.manifold import MDS model = MDS(n_components=2, random_state=2) outS = model.fit_transform(XS) plt.scatter(outS[:, 0], outS[:, 1], **colorize) plt.axis('equal'); # MDS не смог восстановить исходные данные из нелинейно-трансформированных. Он просто отбросил ось Y, что является ошибкой. # # Причина в том, что MDS пытается сохранить расстояния между всеми парами точек. Однако если мы изменим его алгоритм так, чтобы он сохранял только расстояния между ближайшими точками то результат будет гораздо лучше. Такой алгоритм уже есть, и называется от locally linear embedding # # # LLE - locally linear embedding # # It instead tries to preserve only the distances between neighboring points. LLE comes in a number of flavors. # # Here we will use the modified LLE algorithm to recover the embedded two-dimensional manifold. In general, modified LLE does better than other flavors of the algorithm at recovering well-defined manifolds with very little distortion: # + # применим LLE к нелинейно изменённым данным from sklearn.manifold import LocallyLinearEmbedding model = LocallyLinearEmbedding(n_neighbors=100, n_components=2, method='modified', eigen_solver='dense') out = model.fit_transform(XS) fig, ax = plt.subplots() ax.scatter(out[:, 0], out[:, 1], **colorize) ax.set_ylim(0.15, -0.15); # - # # Summary of Manifold Methods # Несмотря на все свои плюсы эти алгоритмы в большинстве своём очень карпизны и редко используются для чего-то кроме простых визуализаций данных большой размерности. # # #### Их минусы (в сравнении с PCA): # # 1) In manifold learning, there is no good framework for handling missing data. In contrast, there are straightforward iterative approaches for missing data in PCA. # 2) In manifold learning, the presence of noise in the data can "short-circuit" the manifold and drastically change the embedding. In contrast, PCA naturally filters noise from the most important components. # 3) The manifold embedding result is generally highly dependent on the number of neighbors chosen, and there is generally no solid quantitative way to choose an optimal number of neighbors. In contrast, PCA does not involve such a choice. # 4) In manifold learning, the globally optimal number of output dimensions is difficult to determine. In contrast, PCA lets you find the output dimension based on the explained variance # 5) In manifold learning, the meaning of the embedded dimensions is not always clear. In PCA, the principal components have a very clear meaning. # 6) In manifold learning the computational expense of manifold methods scales as O[N^2] or O[N^3]. For PCA, there exist randomized approaches that are generally much faster (though see the megaman package for some more scalable implementations of manifold learning). # # Из-за этих недостатков и единственного плюса - возможности сохранять нелинейные отношения в данных имеет смысл использовать Manifold Learning Methods только после исследования данных с помошью РСА (т.е. manifold будет как дополнительное исследование). # # # Краткий гайд по выбору Manifold Learning Method: # # 1) Для простых задач (типо нашего S-преобразования) locally linear embedding (LLE) and its variants (especially modified LLE), perform very well. This is implemented in sklearn.manifold.LocallyLinearEmbedding. # # 2) For high-dimensional data from real-world sources, LLE often produces poor results, and isometric mapping (IsoMap) seems to generally lead to more meaningful embeddings. This is implemented in sklearn.manifold.Isomap # # 3) For data that is highly clustered, t-distributed stochastic neighbor embedding (t-SNE) seems to work very well, though can be very slow compared to other methods. This is implemented in sklearn.manifold.TSNE. # # # Пример - Isomap # # Manifold Learning чаще всего используется для понимания отношений между high-dimensional data points. A common case of high-dimensional data is images: for example, a set of images with 1,000 pixels each can be thought of as a collection of points in 1,000 dimensions – the brightness of each pixel in each image defines the coordinate in that dimension. # # В этом примере мы используем Isomap на сете с лицами политиков. # + from sklearn.datasets import fetch_lfw_people faces = fetch_lfw_people(min_faces_per_person=30) faces.data.shape # we have 1701 images, each with 2,914 pixels. So our data points is in the 2914-dimentional space # - fig, ax = plt.subplots(4, 8, subplot_kw=dict(xticks=[], yticks=[])) for i, axi in enumerate(ax.flat): axi.imshow(faces.images[i], cmap='gray') # We would like to plot a low-dimensional embedding of the 2,914-dimensional data to learn the fundamental relationships between the images. One useful way to start is to compute a PCA, and examine the explained variance ratio, which will give us an idea of how many linear features are required to describe the data: from sklearn.decomposition import PCA model = PCA(100).fit(faces.data) plt.plot(np.cumsum(model.explained_variance_ratio_)) plt.xlabel('n components') plt.ylabel('cumulative variance'); # We see that for this data, nearly 100 components are required to preserve 90% of the variance: this tells us that the data is intrinsically very high dimensional—it can't be described linearly with just a few components. # # When this is the case, nonlinear manifold embeddings like LLE and Isomap can be helpful. We can compute an Isomap embedding on these faces using the same pattern shown before: # from sklearn.manifold import Isomap model = Isomap(n_components=2) proj = model.fit_transform(faces.data) proj.shape # + # функция выводка маленьких картинок рядом с точками получеными от РСА from matplotlib import offsetbox def plot_components(data, model, images=None, ax=None, thumb_frac=0.05, cmap='gray'): ax = ax or plt.gca() proj = model.fit_transform(data) ax.plot(proj[:, 0], proj[:, 1], '.k') if images is not None: min_dist_2 = (thumb_frac * max(proj.max(0) - proj.min(0))) ** 2 shown_images = np.array([2 * proj.max(0)]) for i in range(data.shape[0]): dist = np.sum((proj[i] - shown_images) ** 2, 1) if np.min(dist) < min_dist_2: # don't show points that are too close continue shown_images = np.vstack([shown_images, proj[i]]) imagebox = offsetbox.AnnotationBbox( offsetbox.OffsetImage(images[i], cmap=cmap), proj[i]) ax.add_artist(imagebox) # вызов этой функции fig, ax = plt.subplots(figsize=(10, 10)) plot_components(faces.data, model=Isomap(n_components=2), images=faces.images[:, fc00:db20:35b:7399::5, ::2]) # - # The result is interesting: the first two Isomap dimensions seem to describe global image features: the overall darkness or lightness of the image from left to right, and the general orientation of the face from bottom to top. This gives us a nice visual indication of some of the fundamental features in our data.
Books and Courses/Python Data Science Handbook by Jake VanderPlas/SciKit-Learn/09 ML - Manifold Learning - unsupervised learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # ### A recap on Scikit-learn's estimator interface # Scikit-learn strives to have a uniform interface across all methods. Given a scikit-learn *estimator* # object named `model`, the following methods are available (not all for each model): # # - Available in **all Estimators** # + `model.fit()` : fit training data. For supervised learning applications, # this accepts two arguments: the data `X` and the labels `y` (e.g. `model.fit(X, y)`). # For unsupervised learning applications, ``fit`` takes only a single argument, # the data `X` (e.g. `model.fit(X)`). # - Available in **supervised estimators** # + `model.predict()` : given a trained model, predict the label of a new set of data. # This method accepts one argument, the new data `X_new` (e.g. `model.predict(X_new)`), # and returns the learned label for each object in the array. # + `model.predict_proba()` : For classification problems, some estimators also provide # this method, which returns the probability that a new observation has each categorical label. # In this case, the label with the highest probability is returned by `model.predict()`. # + `model.decision_function()` : For classification problems, some estimators provide an uncertainty estimate that is not a probability. For binary classification, a decision_function >= 0 means the positive class will be predicted, while < 0 means the negative class. # + `model.score()` : for classification or regression problems, most (all?) estimators implement # a score method. Scores are between 0 and 1, with a larger score indicating a better fit. # + `model.transform()` : For feature selection algorithms, this will reduce the dataset to the selected features. For some classification and regression models such as some linear models and random forests, this method reduces the dataset to the most informative features. These classification and regression models can therefor also be used as feature selection methods. # # - Available in **unsupervised estimators** # + `model.transform()` : given an unsupervised model, transform new data into the new basis. # This also accepts one argument `X_new`, and returns the new representation of the data based # on the unsupervised model. # + `model.fit_transform()` : some estimators implement this method, # which more efficiently performs a fit and a transform on the same input data. # + `model.predict()` : for clustering algorithms, the predict method will produce cluster labels for new data points. Not all clustering methods have this functionality. # + `model.predict_proba()` : Gaussian mixture models (GMMs) provide the probability for each point to be generated by a given mixture component. # + `model.score()` : Density models like KDE and GMMs provide the likelihood of the data under the model. # Apart from ``fit``, the two most important functions are arguably ``predict`` to produce a target variable (a ``y``) ``transform``, which produces a new representation of the data (an ``X``). # The following table shows for which class of models which function applies: # # # <table> # <tr style="border:None; font-size:20px; padding:10px;"><th>``model.predict``</th><th>``model.transform``</th></tr> # <tr style="border:None; font-size:20px; padding:10px;"><td>Classification</td><td>Preprocessing</td></tr> # <tr style="border:None; font-size:20px; padding:10px;"><td>Regression</td><td>Dimensionality Reduction</td></tr> # <tr style="border:None; font-size:20px; padding:10px;"><td>Clustering</td><td>Feature Extraction</td></tr> # <tr style="border:None; font-size:20px; padding:10px;"><td>&nbsp;</td><td>Feature selection</td></tr> # # </table> # # #
notebooks/02.5 Review of Scikit-learn API.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/aaronyu888/mat-494-notebooks/blob/main/K_Means_SVM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="sEeBxqJRl0OZ" # # K-Means # # --- # # # + id="AeC0cmLMl4ie" import numpy as np import pandas as pd import statsmodels.api as sm import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.cluster import KMeans from sklearn.datasets.samples_generator import make_blobs # + colab={"base_uri": "https://localhost:8080/", "height": 285} id="jIokScfG9HUN" outputId="1d2f7a4a-19c9-42ec-d22b-5ded84636b55" # number of clusters should be 4 X, y_true = make_blobs(n_samples = 300, centers = 4, cluster_std = 0.60, random_state = 0) plt.scatter(X[:, 0], X[:, 1]) # + colab={"base_uri": "https://localhost:8080/", "height": 301} id="BDMe8IzIBxj6" outputId="3aecfa49-ff1c-4a21-ac30-81a48d6202ce" # use wcss to verify that optimal clusters is 4 wcss = [] for i in range(1, 11): kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) kmeans.fit(X) wcss.append(kmeans.inertia_) plt.plot(range(1, 11), wcss) plt.title('Elbow Method') plt.xlabel('Number of clusters') plt.ylabel('WCSS') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 285} id="SeFootrl_jhz" outputId="97be8b44-2992-4c67-db41-65e0163ebe46" # kmeans = KMeans(n_clusters = 4) kmeans.fit(X) y_kmeans = kmeans.predict(X) plt.scatter(X[:, 0], X[:, 1], c = y_kmeans, s = 50, cmap = 'viridis') centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c = 'black', s = 299, alpha = 0.5) # + [markdown] id="Vd7w9SeuDPxN" # # Support Vector Machine # from sklearn import dataset # # cancer = datasets.load_breast_cancer() # # print("Features: " # + id="qJukm8FjEqC6" from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn import svm from sklearn import metrics cancer = datasets.load_breast_cancer() X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size = 0.3, random_state = 109) clf = svm.SVC(kernel = 'linear') clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="LKJQWVPiFkBg" outputId="220cb1d1-e5d5-46d4-cccf-9d8b78c85dbb" print("Accuracy:", metrics.accuracy_score(y_test, y_pred)) print("Precision:", metrics.precision_score(y_test, y_pred)) print("Recall:", metrics.recall_score(y_test, y_pred))
K_Means_SVM.ipynb
# + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/TobiasSunderdiek/my_udacity_deep_learning_solutions/blob/master/intro-neural-networks/student_admissions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="9z0eKFKr8neG" # # Predicting student admissions # # # This notebook is based on the udacity deep learning nanodegree exercise for gradient descent, which can be found here: # # https://github.com/udacity/deep-learning-v2-pytorch/blob/master/intro-neural-networks/student-admissions/StudentAdmissions.ipynb # # The original version is implemented with python and numpy, I try to implement it with swift-only as an exercise to learn swift. # # Additionally to the implementation, to better understand the underlying calculus, I do some derivation of the formula for updating the weights. # + [markdown] colab_type="text" id="wwJXqDDcL7Xl" # ## Math - backpropagation with mean-squared-error as error function within perceptron # # The underlying single-layer perceptron [1] of this notebook can be shown as follows: # # # <p><a href="https://commons.wikimedia.org/wiki/File:Perceptron.svg#/media/File:Perceptron.svg"><img src="https://upload.wikimedia.org/wikipedia/commons/3/31/Perceptron.svg" alt="Perceptron.svg" height="353" width="440"></a><br>By <a href="https://en.wikipedia.org/wiki/User:Mat_the_w" class="extiw" title="wikipedia:User:Mat the w">Mat the w</a> at <a href="https://en.wikipedia.org/wiki/" class="extiw" title="wikipedia:">English Wikipedia</a>, <a href="https://creativecommons.org/licenses/by-sa/3.0" title="Creative Commons Attribution-Share Alike 3.0">CC BY-SA 3.0</a>, <a href="https://commons.wikimedia.org/w/index.php?curid=23766733">Link</a></p> # # In our case, we have inputs $greScaled$, $gpaScaled$ and $encodedRank$ and their weights. Due to the hot-encoding of rank, we have input size of 7 ($w_1 x_1 + w_2 x_2...+w_7 x_7$), so n=7, and we add a bias b and o is output (= $\hat{y}$): $$\hat{y} = f(w_1 x_1 + w_2 x_2... + b)$$ # The function f is our activation function, which is sigmoid: $$\hat{y} = \sigma(w_1 x_1 + w_2 x_2... + b)$$ # # #### Updating the weights # # Generally, gradient descent[2] describes the change in each weight for multilayer perceptrons as: # # $$\Delta w_{ji} (n) = -\eta\frac{\partial\mathcal{E}(n)}{\partial v_j(n)} y_i(n)$$ # # $n$ is the node, in this case we have a single-layer perceptron which has 1 node # # $v_j(n)$ is the weighted sum of the input connections in the node, in this case $w_1 x_1 + w_2 x_2...w_7 x_7 + b$, including bias # # $y_i(n)$ is the output of the previous node, the i-th node, in this case we don't have a previous node, our inputs are the general inputs of the perceptron $x_i$ # # $\mathcal{E}(n)$ is the error function, which in this case is the mean-squared-error $\mathcal{E}(n)=\frac{1}{2}\sum_i (y-\hat{y})_i^2(n)$ # # $\eta$ is the learning rate # # Given the information above, with one node $i=1$: # # $$\Delta w_{j} (n) = -\eta\frac{\partial (\frac{1}{2} (y-\hat{y})^2(n))}{\partial v_j} x$$ # # # How much of the total error can be influenced by an individual $v_j$ is calculated by getting the partial derivative of the loss-function with respect to $v_j$: # # $$\frac{\partial (\frac{1}{2} (y-\hat{y})^2)}{\partial v_j}$$ # # # As $\hat{y} = \sigma(w_1 x_1 + w_2 x_2... + b)$, and $v_j = w_1 x_1 + w_2 x_2... + b$, we have $\hat{y} = \sigma(v_j)$ # # $$\frac{\partial (\frac{1}{2} (y-\sigma(v_j))^2)}{\partial v_j}$$ # # Now, we use the chain-rule $\frac{\partial}{\partial z}p(q(z)) = \frac{\partial p}{\partial q} \frac{\partial q}{\partial z}$, where $q(z) = y-\sigma(v_j)$ and $p=\frac{1}{2}q^2$: # # $$\frac{\partial \frac{1}{2}q^2}{\partial q} \frac{\partial (y-\sigma(v_j))}{\partial v_j}=q\frac{\partial }{\partial v_j}(y-\sigma(v_j))=q(0- (\sigma(v_j)(1-\sigma(v_j))))=(y-\sigma(v_j))(-(\sigma(v_j)(1-\sigma(v_j)))=-(y-\hat{y})(\sigma^\prime(v_j))$$ # # Put this back in: # # $$\Delta w_{j} (n) = \eta(y-\hat{y})(\sigma^\prime(v_j)) x$$ # # This is the derivative of the formula for the weight update, same for bias: # # $$w_i \longrightarrow w_i + \eta(y-\hat{y})(\sigma^\prime(v_i)) x_i$$ # # [1] https://en.wikipedia.org/wiki/Perceptron # # [2] https://en.wikipedia.org/wiki/Multilayer_perceptron # + [markdown] colab_type="text" id="jb5V5Sb09V4W" # ## Loading dataset from github # The original dataset is located here: # # https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/intro-neural-networks/student-admissions/student_data.csv # # which originally came from: http://www.ats.ucla.edu/ # + colab={} colab_type="code" id="kZRlD4utdPuX" import Foundation let url = "https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/intro-neural-networks/student-admissions/student_data.csv" // author of this query function: https://gist.github.com/groz/85b95f663f79ba17946269ea65c2c0f4 func query(address: String) -> String { let url = URL(string: address) let semaphore = DispatchSemaphore(value: 0) var result: String = "" let task = URLSession.shared.dataTask(with: url!) {(data, response, error) in result = String(data: data!, encoding: String.Encoding.utf8)! semaphore.signal() } task.resume() semaphore.wait() return result } let rawData = query(address: url) # + [markdown] colab_type="text" id="MBCYDv0gPiHI" # ### Convert data # - Make data an array of Tensors # # - Drop first row with column header # # # |admit| gre| gpa| rank| # |-|-|-|-| # |0 |380 |3.61 |3| # |1 |660 |3.67 |3| # |1 |800 |4.00 |1| # |1 |640 |3.19 |4| # |0 |520 |2.93 |4| # # - One-hot encode the numerical values of column rank, which contains values from 1-4: # # > 1 -> [0.0, 1.0, 0.0, 0.0, 0.0] # # > 3 -> [0.0, 0.0, 0.0, 1.0, 0.0] # # > 4 -> [0.0, 0.0, 0.0, 0.0, 1.0] # # |admit|gre| gpa| encodedRank| # |-|-|-|-| # |0 |380 |3.61 |0.0 0.0 0.0 1.0 0.0| # |1 |660 |3.67 |0.0 0.0 0.0 1.0 0.0| # |1 |800 |4.00 |0.0 1.0 0.0 0.0 0.0| # |1 |640 |3.19 |0.0 0.0 0.0 0.0 1.0| # |0 |520 |2.93 |0.0 0.0 0.0 0.0 1.0| # # > **Difference to the pandas get_dummies-function used in the original solution: oneHotAtIndices starts counting indices at 0 and this results in 5 instead of 4 columns for the encoded rank** # # - Scaling the data # # > *gre* has values between 200-800, *gpa* has values between 1.0-4.0, so the features have to be scaled to be normalized. # # > Fit features into range within 0-1 by dividing *gre*/800 and *gpa*/4 # # |admit|greScaled| gpaScaled| encodedRank| # |-|-|-|-| # |0 |0.475 |0.9025 |0.0 0.0 0.0 1.0 0.0| # |1 |0.825 |0.9175 |0.0 0.0 0.0 1.0 0.0| # |1 |1.0 |1.0 |0.0 1.0 0.0 0.0 0.0| # |1 |0.8 |0.7975 |0.0 0.0 0.0 0.0 1.0| # |0 |0.65 |0.7325|0.0 0.0 0.0 0.0 1.0| # + colab={} colab_type="code" id="QThtcqTjMRyi" import TensorFlow let rows = rawData.components(separatedBy: "\n") let featuresAndTargetsAsString = rows.dropFirst().map({ $0.components(separatedBy: ",") }).filter {$0[0] != ""} var data = [Tensor<Double>]() for featureWithTarget in featuresAndTargetsAsString { let admit = Double(featureWithTarget[0])! let gre = Double(featureWithTarget[1])! let gpa = Double(featureWithTarget[2])! let rank = Int32(featureWithTarget[3])! let greScaled = gre/800 let gpaScaled = gpa/4 let encodedRank = Tensor<Double>(oneHotAtIndices: Tensor<Int32>(rank), depth:5) let admitGreGpa = Tensor<Double>([admit, greScaled, gpaScaled]) let feature = admitGreGpa.concatenated(with: encodedRank) data.append(feature) } # + [markdown] colab_type="text" id="nFXhUrU9895a" # ### Split into training and test set # Testing set will be 10% of total size # + colab={} colab_type="code" id="A8GPA_xB8zE2" let dataShuffled = data.shuffled() let ninetyPercentCount = data.count * 90 / 100 let dataTrain = dataShuffled.prefix(upTo: ninetyPercentCount) let dataTest = dataShuffled.suffix(from: ninetyPercentCount) let featuresTrain = dataTrain.map { $0.slice(lowerBounds: [1], upperBounds: [8]) } let targetsTrain = dataTrain.map { $0.slice(lowerBounds: [0], upperBounds: [1]).scalars[0] } let featuresTest = dataTest.map { $0.slice(lowerBounds: [1], upperBounds: [8]) } let targetsTest = dataTest.map { $0.slice(lowerBounds: [0], upperBounds: [1]) } # + [markdown] colab_type="text" id="cY-ZPkcRVC0M" # ### Output (prediction) formula, sigmoid # This functions are the same as in my `gradient_descent.ipynb` in # # https://github.com/TobiasSunderdiek/my_udacity_deep_learning_solutions/blob/master/intro-neural-networks/gradient_descent.ipynb # # Calculation of the output in the udacity version of this notebook is different than in my version: a bias is added here # + colab={} colab_type="code" id="X2FWf5CuVjpk" func mySigmoid(_ x: Tensor<Double>) -> Tensor<Double> { return 1 / (1 + exp(-x)) } func myOutputFormula(_ features: Tensor<Double>, _ weights: Tensor<Double>, _ bias: Tensor<Double>) -> Double { let res = mySigmoid((features * weights).sum() + bias) return res.scalar! } # + [markdown] colab_type="text" id="9RHfS4zURnNx" # ### Error function, error term formula, gradient descent step, backpropagation, sigmoid prime # # Error formula (result of error formula) is not used in original version. I use MSE for calculating loss as in original version and put it in error formula function instead. I sum up the errors and calculate mean during and after iteration of epoch. # # For updating the weights, the gradient descent step of the backpropagation, mean-squared error is used. In the original version, the update is calculated within each iteration in `error_term_formula` and performed after each epoch-iteration. Here it is performed at each step of iteration in `myUpdateWeights`. # # During this step, the bias is updated. # # I choose this approach just to keep the train function the same as in the `gradient_descent.ipynb`. # + colab={} colab_type="code" id="5aiSHPAxjZ39" func sigmoidPrime(_ x: Tensor<Double>) -> Tensor<Double> { return sigmoid(x) * (1 - sigmoid(x)) } func myErrorFormula(_ y: Double, _ output: Double) -> Double { return pow(y-output, 2) } func myUpdateWeights(_ features: Tensor<Double>, _ targets: Double, _ weights: Tensor<Double>, _ bias: Tensor<Double>, _ learningRate: Double) -> (Tensor<Double>, Tensor<Double>) { let delta = learningRate * (targets - myOutputFormula(features, weights, bias)) * sigmoidPrime(features).sum() //sum here because sigmoidPrime give shape of 7, we need a scalar here let updatedWeights = weights + delta * features let updatedBias = bias + delta return (updatedWeights, updatedBias) } # + [markdown] colab_type="text" id="DqgcSRLE_8W_" # ## Training function # Initialization of weights in the course # # `weights = np.random.normal(scale=1 / n_features**.5, size=n_features)` # # is different than in my version # # `var weights = Tensor<Double>(randomNormal: [n_features])` # # Training function is same as in https://github.com/TobiasSunderdiek/my_udacity_deep_learning_solutions/blob/master/intro-neural-networks/gradient_descent.ipynb # + colab={} colab_type="code" id="hWYCXa5S_-34" func train(_ features: [Tensor<Double>], _ targets: [Double], epochs: Int, learningRate: Double) { //make learningRate Double because swift's won't multiply Double with Float let numberRecords = Double(features.count) var weights = Tensor<Double>(randomNormal: features[0].shape) var bias = Tensor<Double>.zero var lastLoss = Double.infinity for epoch in 0...epochs { var errors = 0.0 var correctPredictions = 0.0 var prediction = 0.0 for (x, y) in zip(features, targets) { let output = myOutputFormula(x, weights, bias) errors += myErrorFormula(y, output) (weights, bias) = myUpdateWeights(x, y, weights, bias, learningRate) if (output > 0.5) { prediction = 1.0 } else { prediction = 0.0 } if (prediction == y) { correctPredictions+=1 } } let loss = errors / numberRecords if epoch % (epochs / 10) == 0 { print("Epoch: \(epoch)") let warning = lastLoss < loss ? "WARNING - Loss increasing" : "" print("Train loss: \(loss) \(warning)") lastLoss = loss let accuracy = correctPredictions / numberRecords print("Accuracy: \(accuracy)") print("Errors: \(errors)") } } } # + [markdown] colab_type="text" id="TcnGsWhpAFpl" # ## Train # + colab={"base_uri": "https://localhost:8080/", "height": 765} colab_type="code" id="PCfVTFS6AHbp" outputId="4053953d-9452-41d5-b06f-8f544f471308" train(featuresTrain, targetsTrain, epochs: 1000, learningRate: 0.5) # + [markdown] colab_type="text" id="RXJPnQuRtf8I" # Compare with python calculation # # Epoch: 0 # Train loss: 0.27151046424991654 # # Epoch: 100 # Train loss: 0.20925670061926063 # # Epoch: 900 # Train loss: 0.203646868060691 # # Prediction accuracy: 0.725 # + colab={} colab_type="code" id="UvERTaqT_o5r"
intro-neural-networks/student_admissions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import os os.listdir('/home/jupyter-k.sanzhar-13/shared/Res_Tree') # getting rid of system files (starts with '.') [x for x in os.listdir('/home/jupyter-k.sanzhar-13/shared/Res_Tree') if not x.startswith('.')] # desired data is in F000545 path = '/home/jupyter-k.sanzhar-13/shared/Res_Tree/F000545/res_2019.09.11_0.0_6493B6_Container-dat_2125_91-105-165_F000545/meals_list_2.txt' # + # we are dealing with such unordered data # necessary data starts with 'Non-deleted saved meals' and ends with '--------' for i in open(path).readlines(): print(i) # + # x = ['1', '2', 'start', '3', '4', 'end', '5', '6', 'start', '7', '8', 'end', '9', '10'] # scanning = False # for i in x: # if i == 'start': # scanning = True # continue # if i == 'end': # scanning = False # continue # if scanning: # print(i) # - all_data = [] scanning = False for i in open(path).readlines(): if 'Non-deleted saved meals' in i: scanning = True continue if i.startswith('------------------'): scanning = False continue if scanning: # now we need to deal with columns if 'Meal#' in i: col_data = i continue # now add necessary data to dataframe all_data.append([value.strip() for value in i.split(';')]) col_data # since there is some space between names, and ';' in col_data, we need to organize it final_columns = [x.strip() for x in col_data.split(';')] final_columns # finish with dataframe meal_df = pd.DataFrame(all_data, columns=final_columns[0:17]) meal_df
How extract only the necessary data_.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Actual Vs Expected Analysis # # # This example demonstrates how you can slice triangle objects to perform a # typical 'Actual vs Expected' analysis. We will use Medical Malpractice # payment patterns for the demo. # # # + import chainladder as cl import seaborn as sns sns.set_style('whitegrid') # Load the data tri_1997 = cl.load_dataset('clrd') tri_1997 = tri_1997.groupby('LOB').sum().loc['medmal']['CumPaidLoss'] # Create a triangle as of the previous valuation and build IBNR model tri_1996 = tri_1997[tri_1997.valuation < '1997'] model_1996 = cl.Chainladder().fit(cl.TailCurve().fit_transform(tri_1996)) # Slice the expected losses from the 1997 calendar period of the model ave = model_1996.full_triangle_.dev_to_val() ave = ave[ave.development == '1997'].rename('columns', 'Expected') # Slice the actual losses from the 1997 calendar period for prior AYs ave['Actual'] = tri_1997.latest_diagonal[tri_1997.origin < '1997'] ave['Actual - Expected'] = ave['Actual'] - ave['Expected'] # Plotting ave.to_frame().T.plot(y='Actual - Expected', kind='bar', legend=False) \ .set(title='Calendar Period 1997 Performance', xlabel='Accident Period', ylabel='Actual - Expected');
docs/auto_examples/plot_ave_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/gagan-iitb/DataAnalyticsAndVisualization/blob/main/Tutorial/DataExploration.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="UvpcJFbbyoN5" import numpy as np import pandas as pd import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import datasets # + id="UudPZ_Wv6Ly4" from google.colab import data_table data_table.enable_dataframe_formatter() # + id="fp1DuD8qyzR2" # import some data to play with iris = datasets.load_iris() # Features are Sepal Length, Sepal Width, Petal Length and Petal Width X = iris.data[:, :3] # we only take the first two features. y = iris.target # + colab={"base_uri": "https://localhost:8080/", "height": 389} id="gFmkDuety6ot" outputId="4b367452-a996-4cd8-dbae-4b800e169654" plt.figure(2, figsize=(8, 6)) plt.clf() # Plot the training points plt.scatter(X[:, 2], X[:, 1], c=y, cmap=plt.cm.Set1, edgecolor="k") plt.xlabel("Sepal/Petal length") plt.ylabel("Sepal width") x1_min, x1_max = X[:, 2].min() - 0.5, X[:, 2].max() + 0.5 x2_min, x2_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 plt.xlim(x1_min, x1_max) plt.ylim(x2_min, x2_max) plt.xticks(()) plt.yticks(()) # + colab={"base_uri": "https://localhost:8080/", "height": 819} id="u7WSt_tF32G6" outputId="3145db3a-ac06-4dcf-b5f0-039000f127bf" from vega_datasets import data cars=data.cars() cars # + colab={"base_uri": "https://localhost:8080/", "height": 237} id="hqoINUtT4qBy" outputId="36b68a19-1bab-472a-ddc0-b59eff958a56" cars.corr() # + id="c7bj8SZk6StB" import seaborn as sns; sns.set_theme() # + colab={"base_uri": "https://localhost:8080/", "height": 636} id="aRLB-NFJ7gxx" outputId="106b68ae-a751-4779-a401-3ba843ad65f4" cars[['Acceleration','Miles_per_Gallon','Cylinders','Displacement']] # + colab={"base_uri": "https://localhost:8080/", "height": 355} id="tDaT83fC6ahH" outputId="427472ac-c19f-4dcf-a916-2a02469bf4ed" ax = sns.heatmap(cars[['Acceleration','Miles_per_Gallon', 'Cylinders', 'Displacement', 'Horsepower', 'Weight_in_lbs']].corr()) # + colab={"base_uri": "https://localhost:8080/", "height": 299} id="eao11Db-5ts6" outputId="f9fe7f8d-d978-4bcf-cc45-7dd89bfa5dad" pd.scatter_matrix(cars) # + colab={"base_uri": "https://localhost:8080/", "height": 463} id="DZEqhk2Qy6wG" outputId="f9b5bb6e-21bf-43cd-9a0b-5b1ff059a1ef" # To getter a better understanding of interaction of the dimensions # plot the first three PCA dimensions from sklearn.decomposition import PCA fig = plt.figure(1, figsize=(8, 6)) ax = Axes3D(fig, elev=-150, azim=110) X_reduced = PCA(n_components=3).fit_transform(iris.data) ax.scatter( X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y, cmap=plt.cm.Set1, edgecolor="k", s=40, ) ax.set_title("First three PCA directions") ax.set_xlabel("1st eigenvector") ax.w_xaxis.set_ticklabels([]) ax.set_ylabel("2nd eigenvector") ax.w_yaxis.set_ticklabels([]) ax.set_zlabel("3rd eigenvector") ax.w_zaxis.set_ticklabels([]) plt.show()
Tutorial/DataExploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Problem: Can rooks attack one another? # # You are given a configuration of a chessboard with rooks in a 2 dimensional array. # # Example Input: # [[1, 0, 0, 0], # [0, 1, 0, 0], # [0, 0, 0, 1], # [0, 0, 0, 0]] # # 1 represents that a rook is in the corresponding space on the board, and 0 represents that there's nothing there. # # ***Remember, rooks are able to move horizontally and vertically over any number of spaces.*** # # Write a function, rooks_are_safe(input) which returns True if none of the rooks can attack each other. # ### NOTE: Make sure to run your program once you write it :) # # Implement your function below. # + # Input: # chessboard: A 2-dimensional array that represents. Example below. # [[1, 0, 0, 0], # [0, 1, 0, 0], # [0, 0, 0, 1], # [0, 0, 0, 0]] # Returns: # True if none of the rooks can attack each other. # False if there is at least one pair of rooks that can attack each other. def rooks_are_safe(chessboard): tracker = {} for i in range(len(chessboard)): for j in range(len(chessboard[i])): row_value = tracker.get(f"row{i}", 0) col_value = tracker.get(f"col{j}", 0) if chessboard[i][j] == 1: row_value += 1 col_value += 1 # print("i:", i, ", j:", j, "|| row_value: ", row_value, ", col_value: ", col_value) # print(tracker) tracker[f"row{i}"] = row_value tracker[f"col{j}"] = col_value if row_value > 1 or col_value > 1: return False return True # - # # Use the code below to test your function. # + print(""" Are rooks safe in this board? (Should be True.) [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]] """) rooks_are_safe( [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]]) # + print(""" Are rooks safe in this board? (Should be True.) [[1]] """) rooks_are_safe([[1]]) # + print(""" Are rooks safe in this board? (Should be False.) [[1, 0], [1, 0]] """) rooks_are_safe( [[1, 0], [1, 0]]) # + print(""" Are rooks safe in this board? (Should be False.) [[0, 0, 0], [1, 0, 1], [0, 0, 0]] """) rooks_are_safe( [[0, 0, 0], [1, 0, 1], [0, 0, 0]])
10.Python Code Challenges/03_GettingReady_TwoDimensionalArray/Sample Question 3 - 2D Array.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd user_info = pd.read_json('user_info.json') user_info.head() user_info.info() user_info.groupby('favoriteFruit')['_id'].count() user_info.groupby('isActive')['_id'].count() user_info['age'].mean() user_info.groupby('gender')['_id'].count()
Chapter01/Exercise1.04/Exercise1.04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Sentiment Prediction in IMDB Reviews using an LSTM import tempfile import os import numpy as np import torch import torch.nn as nn import fastestimator as fe from fastestimator.dataset.data import imdb_review from fastestimator.op.numpyop.univariate.reshape import Reshape from fastestimator.op.tensorop.loss import CrossEntropy from fastestimator.op.tensorop.model import ModelOp, UpdateOp from fastestimator.trace.io import BestModelSaver from fastestimator.trace.metric import Accuracy from fastestimator.backend import load_model # + tags=["parameters"] MAX_WORDS = 10000 MAX_LEN = 500 batch_size = 64 epochs = 10 train_steps_per_epoch = None eval_steps_per_epoch = None # - # <h2>Building components</h2> # ### Step 1: Prepare training & evaluation data and define a `Pipeline` # We are loading the dataset from tf.keras.datasets.imdb which contains movie reviews and sentiment scores. All the words have been replaced with the integers that specifies the popularity of the word in corpus. To ensure all the sequences are of same length we need to pad the input sequences before defining the `Pipeline`. train_data, eval_data = imdb_review.load_data(MAX_LEN, MAX_WORDS) pipeline = fe.Pipeline(train_data=train_data, eval_data=eval_data, batch_size=batch_size, ops=Reshape(1, inputs="y", outputs="y")) # ### Step 2: Create a `model` and FastEstimator `Network` # First, we have to define the neural network architecture, and then pass the definition, associated model name, and optimizer into fe.build: class ReviewSentiment(nn.Module): def __init__(self, embedding_size=64, hidden_units=64): super().__init__() self.embedding = nn.Embedding(MAX_WORDS, embedding_size) self.conv1d = nn.Conv1d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.maxpool1d = nn.MaxPool1d(kernel_size=4) self.lstm = nn.LSTM(input_size=125, hidden_size=hidden_units, num_layers=1) self.fc1 = nn.Linear(in_features=hidden_units, out_features=250) self.fc2 = nn.Linear(in_features=250, out_features=1) def forward(self, x): x = self.embedding(x) x = x.permute((0, 2, 1)) x = self.conv1d(x) x = torch.relu(x) x = self.maxpool1d(x) output, _ = self.lstm(x) x = output[:, -1] # sequence output of only last timestamp x = torch.tanh(x) x = self.fc1(x) x = torch.relu(x) x = self.fc2(x) x = torch.sigmoid(x) return x # `Network` is the object that defines the whole training graph, including models, loss functions, optimizers etc. A `Network` can have several different models and loss functions (ex. GANs). `fe.Network` takes a series of operators, in this case just the basic `ModelOp`, loss op, and `UpdateOp` will suffice. It should be noted that "y_pred" is the key in the data dictionary which will store the predictions. model = fe.build(model_fn=lambda: ReviewSentiment(), optimizer_fn="adam") network = fe.Network(ops=[ ModelOp(model=model, inputs="x", outputs="y_pred"), CrossEntropy(inputs=("y_pred", "y"), outputs="loss"), UpdateOp(model=model, loss_name="loss") ]) # ### Step 3: Prepare `Estimator` and configure the training loop # `Estimator` is the API that wraps the `Pipeline`, `Network` and other training metadata together. `Estimator` also contains `Traces`, which are similar to the callbacks of Keras. # In the training loop, we want to measure the validation loss and save the model that has the minimum loss. `BestModelSaver` is a convenient `Trace` to achieve this. Let's also measure accuracy over time using another `Trace`: model_dir = tempfile.mkdtemp() traces = [Accuracy(true_key="y", pred_key="y_pred"), BestModelSaver(model=model, save_dir=model_dir)] estimator = fe.Estimator(network=network, pipeline=pipeline, epochs=epochs, traces=traces, train_steps_per_epoch=train_steps_per_epoch, eval_steps_per_epoch=eval_steps_per_epoch) # <h2>Training</h2> # + jupyter={"outputs_hidden": true} tags=[] estimator.fit() # - # <h2>Inferencing</h2> # For inferencing, first we have to load the trained model weights. We previously saved model weights corresponding to our minimum loss, and now we will load the weights using `load_model()`: model_name = 'model_best_loss.pt' model_path = os.path.join(model_dir, model_name) load_model(model, model_path) # Let's get some random sequence and compare the prediction with the ground truth: selected_idx = np.random.randint(10000) print("Ground truth is: ",eval_data[selected_idx]['y']) # Create data dictionary for the inference. The `Transform()` function in Pipeline and Network applies all the operations on the given data: infer_data = {"x":eval_data[selected_idx]['x'], "y":eval_data[selected_idx]['y']} data = pipeline.transform(infer_data, mode="infer") data = network.transform(data, mode="infer") # Finally, print the inferencing results. print("Prediction for the input sequence: ", np.array(data["y_pred"])[0][0])
apphub/NLP/imdb/imdb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="kPDd_M416jtg" import multiprocessing as mp from string import ascii_uppercase import random from sys import stdout # + [markdown] id="mg3ZIdy234VM" # **OBJETIVO:** # # El objetivo de esta tarea consiste en poner en práctica los conceptos de cómputo concurrente vistos en clase. Se utilizarán procesos por medio del módulo `multiprocessing` del lenguaje de programación Python. # + [markdown] id="Bl-5iBIj7vqO" # ## Programa 1 # + [markdown] id="9WdTV3R32cRb" # 1. Realiza el **Programa1** que instancie, como clase o con el método `Process`, 10 procesos. Cada uno de los procesos hijos recibirán un valor entero, y un caracter $(i,c)$ enviados por el proceso padre, los procesos escribirán en la salida estándar $i$ veces el caracter $c$. # + id="ApuSZo876lqd" def imprimir_mensaje(i, c): for _ in range(i): stdout.write(c) # + colab={"base_uri": "https://localhost:8080/"} id="xmbH7_fw7Ik8" outputId="eabfc7d9-abe5-4ee2-deca-9cf0e5e0c130" num_procesos = 10 for i in range(num_procesos): c = ascii_uppercase[i] it = random.randint(3, 6) proceso = mp.Process(target=imprimir_mensaje, args=(it, c)) proceso.start() # + [markdown] id="R4Kw4LBE7zb4" # ## Programa 2 # + [markdown] id="3UYjogKy2rot" # 2. Refactoriza (reescribe) el programa anterior y elabora el **Programa2** que incluya un mecanismo de sincronización el cual permita escribir en orden todos los caracteres de cada proceso. Es decir, que se obtenga la secuencia $c_{1,1},\ldots,c_{i,1},c_{2,1},\ldots,c_{2,i},\ldots,c_{10,1},\ldots,c_{10,i}$, donde cada subsecuencia $c_{k,i}$ para cada $k=1,2,\ldots,10$ es la secuencia de caracteres del proceso hijo $k$ con longitud $i$. # + id="0RywVeBI95-s" mutex = mp.Lock() # + id="W92sZPrZ7yxK" def imprimir_mensaje(i, c): mutex.acquire() for _ in range(i): stdout.write(c) mutex.release() # + colab={"base_uri": "https://localhost:8080/"} id="tlR5CNJN7T2g" outputId="c8be21bb-0326-4884-e493-b1b186e2604f" num_procesos = 10 for i in range(num_procesos): c = ascii_uppercase[i] it = random.randint(3, 6) proceso = mp.Process(target=imprimir_mensaje, args=(it, c)) proceso.start() # + [markdown] id="y-IvLmbp_DgI" # ## Programa 3 # + [markdown] id="bl8NPOIT3Teg" # Refactoriza (reescribe) el Programa2 y elabora el **Programa3** donde construyas un mecanismo de sincronización el cual permita escribir en orden todos los caracteres de cada proceso siguiendo una política de orden $p$ que será una lista de números enteros aleatorios con los índices $k$ de cada proceso hijo. La esccritura de los caracteres seguirá la secuencia de $c_{k,i}(p)$ donde cada secuencia $c_{k,i}$ estará definida por la política $p$. # + id="wJYEUA7ayL3s" def imprimir_mensaje(i, c, turno): turn = q.get() while True: mutex.acquire() if (turno.value == turn): # Si es el turno adecuado stdout.write("El turno de " + str(c) + " es " + str(turn) + ". Se imprimirá " + str(i) + " veces\n") for _ in range(i): stdout.write(c) stdout.write("\n") turno.value = turno.value + 1 mutex.release() break mutex.release() # + colab={"base_uri": "https://localhost:8080/"} id="LLaGJmwT3vu1" outputId="e8cab3a4-4971-477a-c256-029738923863" mutex = mp.Lock() # Politica de orden p = list(range(10)) random.shuffle(p) print('La politica de orden es: ', p) q = mp.Queue() for num in p: q.put(num) num_procesos = 10 turno = mp.Value('i', 0) # Definimos un valor entero para llevar los turnos procesos = [None]*num_procesos for i in range(num_procesos): c = ascii_uppercase[i] it = random.randint(3, 6) proceso = mp.Process(target=imprimir_mensaje, args=(it, c, turno)) procesos[i] = proceso for p in procesos: p.start() # + id="hriWpLWe4HMf"
Tareas/Tarea2/Procesos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="PcJDApDcK0O9" outputId="47ac115a-2f76-4523-95ee-91e75fc77089" # !git clone https://github.com/parhamzm/Beijing-Pollution-DataSet # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="0-KcI9tFLGXP" outputId="1f050307-2b6f-4122-b424-8fc11542d7c5" # !ls Beijing-Pollution-DataSet # + colab={} colab_type="code" id="9NuzKJEnLNyx" import torch import torchvision import torch.nn as nn from torchvision import transforms import pandas as pd import matplotlib.pyplot as plt import numpy as np from torch.utils.data import random_split from math import sqrt from numpy import concatenate from matplotlib import pyplot from pandas import read_csv from pandas import DataFrame from pandas import concat from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import LabelEncoder from sklearn.metrics import mean_squared_error from numpy import array from numpy import hstack # + [markdown] colab_type="text" id="Q_0pmTGAHWmf" # # **Data Pre Processing** # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="GjcwsJBc9eIG" outputId="0183cdb5-6976-4868-e9ae-ca6c622fc977" DATA_DIR = "Beijing-Pollution-DataSet/" from pandas import read_csv from datetime import datetime from random import randint def select_week(sequences, n_samples=250): X, y = list(), list() rand_hour = 360 #randint(0, 24) for i in range(0, n_samples): start_ix = rand_hour #+ 168 * i # 168 : Week hours! idxs = [] for j in range(0, 7): if j <=5: idx = start_ix + (j * 24) # Add different days in week # print("Id x Week:=> ", idx) idxs.append(idx) if j == 6: # Target idy = start_ix + (j * 24) seq_x = sequences[idxs, :] seq_y = sequences[idy, 0] y.append(seq_y) X.append(seq_x) # print("Id y Week:=> ", idy) rand_hour += 1 return X, y def select_month(sequences, n_samples=250): X, y = list(), list() rand_hour = 0 #randint(0, 24) rand_day = 0 #randint(0, 7) for i in range(0, n_samples): start_ix = rand_hour #+ rand_day*24 + 672 * i # 168 : Week hours! idxs = [] for j in range(0, 4): if j <=2: idx = start_ix + (j * 168) # Add different weeks idxs.append(idx) # print("Id x Month:=> ", idx) # print("Hello") if j == 3: # Target idy = start_ix + (j * 168) seq_x = sequences[idxs, :] seq_y = sequences[idy, 0] # print("Id y Month:=> ", idy) y.append(seq_y) X.append(seq_x) rand_hour += 1 return X, y # split a multivariate sequence into samples def split_sequences(sequences, n_steps=11, n_samples=12000, start_from=493): X, y = list(), list() for i in range(start_from, (start_from + n_samples)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the dataset # gather input and output parts of the pattern seq_x = sequences[i:end_ix, :] seq_y = sequences[end_ix, 0] y.append(seq_y) X.append(seq_x) return array(X), array(y) # load dataset DATA_DIR = "Beijing-Pollution-DataSet/" data = np.load(DATA_DIR + 'polution_dataSet.npy') scaled_data = data # specify the number of lag hours n_hours = 11 n_features = 8 x_week, y_week = select_week(data, n_samples=33000) print("X-Week shape => ", np.array(x_week).shape) print("y-Week shape => ", np.array(y_week).shape) x_week = np.array(x_week) y_week = np.array(y_week) x_month, y_month = select_month(data, n_samples=33000) print("X-Month shape => ", np.array(x_month).shape) print("y-Month shape => ", np.array(y_month).shape) x_month = np.array(x_month) y_month = np.array(y_month) n_timesteps = 11 dataset = data print(data.shape) day_X, day_y = split_sequences(sequences=dataset, n_steps=n_timesteps, n_samples=33000) print("X-Day shape => ", np.array(day_X).shape) print("y-Day shape => ", np.array(day_y).shape) x_day_test, y_day_test = day_X[27000:30000], day_y[27000:30000] x_week_test, y_week_test = x_week[27000:30000], y_week[27000:30000] x_month_test, y_month_test = x_month[27000:30000], y_month[27000:30000] x_day_train, y_day_train = day_X[:27000], day_y[:27000] x_week_train, y_week_train = x_week[:27000], y_week[:27000] x_month_train, y_month_train = x_month[:27000], y_month[:27000] # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="sIihkChW80JC" outputId="94f07045-3177-4e9d-d8d8-d39a651ce0ff" y_batch_test1 = torch.tensor(day_y, dtype=torch.float32) y_batch_test2 = torch.tensor(y_week, dtype=torch.float32) y_batch_test3 = torch.tensor(y_month, dtype=torch.float32) criterion = nn.MSELoss() loss_test = criterion(y_batch_test1, y_batch_test2) # train_X = torch.tensor(train_X, dtype=torch.float32) # train_y = torch.tensor(train_y, dtype=torch.float32) loss_test.item() # + colab={} colab_type="code" id="pDpTe3gIYsW4" class LSTM(torch.nn.Module): def __init__(self, n_features=8, n_output=1, seq_length=11, n_hidden_layers=233, n_layers=1, dropout=0): super(LSTM, self).__init__() self.n_features = n_features self.seq_len = seq_length self.n_hidden = n_hidden_layers # number of hidden states self.n_layers = n_layers # number of LSTM layers (stacked) self.n_output = n_output self.l_lstm = torch.nn.LSTM(input_size = n_features, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout=dropout, batch_first = True) self.lstm2 = torch.nn.LSTM(input_size = n_features, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout=dropout, batch_first = True) self.lstm3 = torch.nn.LSTM(input_size = n_features, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout=dropout, batch_first = True) self.l_linear = torch.nn.Linear(self.n_hidden * self.seq_len, self.n_output) self.linear2 = torch.nn.Linear(self.n_hidden * 6, self.n_output) self.linear3 = torch.nn.Linear(self.n_hidden * 3, self.n_output) self.avgpool = nn.AvgPool1d(kernel_size=3, stride=3, padding=0) def forward(self, x1, x2, x3): hidden_state = torch.zeros(self.n_layers, x1.size(0), self.n_hidden).requires_grad_() cell_state = torch.zeros(self.n_layers, x1.size(0), self.n_hidden).requires_grad_() self.hidden = (hidden_state.detach(), cell_state.detach()) batch_size, seq_len, _ = x1.size() lstm_out, self.hidden = self.l_lstm(x1, self.hidden) x = lstm_out.contiguous().view(batch_size, -1) out1 = self.l_linear(x) hidden_state = torch.zeros(self.n_layers, x2.size(0), self.n_hidden).requires_grad_() cell_state = torch.zeros(self.n_layers, x2.size(0), self.n_hidden).requires_grad_() self.hidden = (hidden_state.detach(), cell_state.detach()) batch_size, seq_len, _ = x2.size() lstm_out, self.hidden = self.lstm2(x2, self.hidden) x = lstm_out.contiguous().view(batch_size, -1) out2 = self.linear2(x) hidden_state = torch.zeros(self.n_layers, x3.size(0), self.n_hidden).requires_grad_() cell_state = torch.zeros(self.n_layers, x3.size(0), self.n_hidden).requires_grad_() self.hidden = (hidden_state.detach(), cell_state.detach()) batch_size, seq_len, _ = x3.size() lstm_out, self.hidden = self.lstm3(x3, self.hidden) x = lstm_out.contiguous().view(batch_size, -1) out3 = self.linear3(x) x3 = torch.cat((out1, out2, out3), dim=1).view(1, 1, batch_size*3) #.unsqueeze_(-1) #.view(-1) # a.unsqueeze_(-1) # print("X3 shape :=> ", x3.shape) out = self.avgpool(x3) # print("out size = => ", out.shape) return out # + colab={} colab_type="code" id="PCxQTgWnavOJ" torch.manual_seed(13) model = LSTM(n_features=8, n_output=1, seq_length=11, n_hidden_layers=233, n_layers=1) criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=0.0003) # + colab={"base_uri": "https://localhost:8080/", "height": 323} colab_type="code" id="oWbmF4vLbJ5N" outputId="1da38584-819d-4861-aca0-2d88058a1121" model = model #.to(device) criterion = criterion #.to(device) for p in model.parameters(): print(p.numel()) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="5R__1vOLzB-i" outputId="5b6730e9-b2ad-4be3-df93-60da276b9e50" import time start_time = time.time() # train_X, train_y epochs = 100 model.train() batch_size = 200 running_loss_history = [] val_running_loss_history = [] for epoch in range(epochs): running_loss = 0.0 val_running_loss = 0.0 model.train() for b in range(0, len(x_day_train), batch_size): inpt1 = x_day_train[b:b+batch_size, :, :] target1 = y_day_train[b:b+batch_size] inpt2 = x_week_train[b:b+batch_size, :, :] target2 = y_week_train[b:b+batch_size] inpt3 = x_month_train[b:b+batch_size, :, :] target3 = y_month_train[b:b+batch_size] # print("Input Shape :=> ", inpt.shape) x_batch1 = torch.tensor(inpt1, dtype=torch.float32) y_batch1 = torch.tensor(target1, dtype=torch.float32) x_batch2 = torch.tensor(inpt2, dtype=torch.float32) y_batch2 = torch.tensor(target2, dtype=torch.float32) x_batch3 = torch.tensor(inpt3, dtype=torch.float32) y_batch3 = torch.tensor(target3, dtype=torch.float32) output = model(x_batch1, x_batch2, x_batch3) loss = criterion(output.view(-1), y_batch1) running_loss += loss.item() loss.backward() optimizer.step() optimizer.zero_grad() else: with torch.no_grad(): # it will temprerorerly set all the required grad flags to be false model.eval() for b in range(0, len(x_day_test), batch_size): inpt1 = x_day_test[b:b+batch_size, :, :] target1 = y_day_test[b:b+batch_size] inpt2 = x_week_test[b:b+batch_size, :, :] target2 = y_week_test[b:b+batch_size] inpt3 = x_month_test[b:b+batch_size, :, :] target3 = y_month_test[b:b+batch_size] x_batch1 = torch.tensor(inpt1, dtype=torch.float32) y_batch1 = torch.tensor(target1, dtype=torch.float32) x_batch2 = torch.tensor(inpt2, dtype=torch.float32) y_batch2 = torch.tensor(target2, dtype=torch.float32) x_batch3 = torch.tensor(inpt3, dtype=torch.float32) y_batch3 = torch.tensor(target3, dtype=torch.float32) output_test = model(x_batch1, x_batch2, x_batch3) loss_test = criterion(output_test.view(-1), y_batch1) val_running_loss += loss_test.item() val_epoch_loss = val_running_loss / len(x_day_test) val_running_loss_history.append(val_epoch_loss) epoch_loss = running_loss / len(x_day_train) running_loss_history.append(epoch_loss) print('step : ' , epoch , ' Train loss : ' , epoch_loss, ', Valid Loss : => ', val_epoch_loss) print("***->>>-----------------------------------------------<<<-***") total_time = time.time() - start_time print("===========================================================") print("*********************************************************") print("The total Training Time is Equal with ==> : {0} Sec.".format(total_time)) print("*********************************************************") print("===========================================================") # + colab={"base_uri": "https://localhost:8080/", "height": 479} colab_type="code" id="H4jiSUbWu1fQ" outputId="9fba76f9-87fb-4006-e18b-d4811dafbc0f" f, ax = plt.subplots(1, 1, figsize=(10, 7)) plt.title("Valid & Test Loss - LSTM Fusion", fontsize=18) plt.xlabel("Epoch") plt.ylabel("Loss") plt.plot(running_loss_history, label='train') plt.plot(val_running_loss_history, label='test') # pyplot.plot(history.history['val_loss'], label='test') plt.legend() plt.show() x = torch.tensor([[[1.,2,3,4,5,6,7]]]) x.shape # + colab={} colab_type="code" id="uweASFbTzUrP" x_day_test, y_day_test = day_X[29900:30000], day_y[29900:30000] x_week_test, y_week_test = x_week[29900:30000], y_week[29900:30000] x_month_test, y_month_test = x_month[29900:30000], y_month[29900:30000] future = 100 window_size = 11 model.eval() x_day_test = torch.tensor(x_day_test, dtype=torch.float32) x_week_test = torch.tensor(x_week_test, dtype=torch.float32) x_month_test = torch.tensor(x_month_test, dtype=torch.float32) res = model(x_day_test, x_week_test, x_month_test) # print(preds[11:]) # + colab={"base_uri": "https://localhost:8080/", "height": 447} colab_type="code" id="8RWHd-TLJkVp" outputId="683fe92d-fa3b-426d-9eb3-f29f9087582e" fig = plt.figure(figsize=(20, 7)) plt.title("Beijing Polution Prediction - LSTM", fontsize=18) plt.ylabel('Polution') plt.xlabel('Num data') plt.grid(True) plt.autoscale(axis='x', tight=True) fig.autofmt_xdate() # plt.plot(data[15000:15100, 0]) plt.plot(y_day_test, label="Real") # plt.plot(preds[12:]) print(res.shape) plt.plot(res.flatten().detach().numpy(), label="Prediction") plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 207} colab_type="code" id="gWZk1LYA7dS_" outputId="c5b7921b-c00d-4060-b93c-0c2171794ed3" x_day_test, y_day_test = day_X[30000:33000], day_y[30000:33000] x_week_test, y_week_test = x_week[30000:33000], y_week[30000:33000] x_month_test, y_month_test = x_month[30000:33000], y_month[30000:33000] model.eval() x_day_test = torch.tensor(x_day_test, dtype=torch.float32) x_week_test = torch.tensor(x_week_test, dtype=torch.float32) x_month_test = torch.tensor(x_month_test, dtype=torch.float32) test_running_loss = 0 with torch.no_grad(): # it will temprerorerly set all the required grad flags to be false model.eval() for b in range(0, len(x_day_test), batch_size): inpt1 = x_day_test[b:b+batch_size, :, :] target1 = y_day_test[b:b+batch_size] inpt2 = x_week_test[b:b+batch_size, :, :] target2 = y_week_test[b:b+batch_size] inpt3 = x_month_test[b:b+batch_size, :, :] target3 = y_month_test[b:b+batch_size] x_batch1 = torch.tensor(inpt1, dtype=torch.float32) y_batch1 = torch.tensor(target1, dtype=torch.float32) x_batch2 = torch.tensor(inpt2, dtype=torch.float32) y_batch2 = torch.tensor(target2, dtype=torch.float32) x_batch3 = torch.tensor(inpt3, dtype=torch.float32) y_batch3 = torch.tensor(target3, dtype=torch.float32) output_test = model(x_batch1, x_batch2, x_batch3) loss_test = criterion(output_test.view(-1), y_batch1) test_running_loss += loss_test.item() test_epoch_loss = test_running_loss / len(x_day_test) print("##########################################################") print(">>>>---------------------------------------------------<<<<") print(">>>>----------***************************--------------<<<<") print("**** Test Loss :==>>> ", test_epoch_loss) print(">>>>----------***************************--------------<<<<") print(">>>>---------------------------------------------------<<<<") print("##########################################################") # + [markdown] colab_type="text" id="2wUlb4EkirD2" # # **Predict Only 12 & 24 Times!** # + colab={} colab_type="code" id="EdVj6XlI5Guj" # split a multivariate sequence into samples def split_sequences12(sequences, n_steps, n_samples=12000, start_from=0): X, y = list(), list() j = 0 for i in range(start_from, (start_from + n_samples)): # find the end of this pattern end_ix = j*12 + n_steps + start_from # check if we are beyond the dataset # gather input and output parts of the pattern j = j + 1 seq_x = sequences[end_ix-11:end_ix, :] seq_y = sequences[end_ix, 0] y.append(seq_y) X.append(seq_x) print("End :=> ", end_ix) return array(X), array(y) # + colab={} colab_type="code" id="mkMiRJt2Nrs8" x, y = split_sequences12(sequences=dataset, n_steps=11, n_samples=100, start_from=20500) x = torch.tensor(x, dtype=torch.float32) x.shape # + colab={} colab_type="code" id="iB6VElOqNt0m" model.eval() res = model(x) # + colab={} colab_type="code" id="dcy2paMLYJLL" fig = plt.figure(figsize=(20, 7)) plt.title("Beijing Polution Prediction - 12Hour", fontsize=18) plt.ylabel('Polution') plt.xlabel('Num data') plt.grid(True) plt.autoscale(axis='x', tight=True) fig.autofmt_xdate() # plt.plot(data[15000:15100, 0]) plt.plot(y, label="Real") # plt.plot(preds[12:]) print(res.shape) plt.plot(res.detach().numpy(), label="Prediction") plt.legend() plt.show() # + colab={} colab_type="code" id="CCHg1BGtYjlZ" df_y = DataFrame(y) df_y.columns = ['Real Values'] df_y['Predicted Values'] = res.detach().numpy() # dataset.index.name = 'date' # + colab={} colab_type="code" id="NDvBHJZBiCJr" pd.set_option("max_rows", None) df_y.to_csv('Predict_every12Hour_LSTM_ADAM_MSE.csv') df_y # + colab={} colab_type="code" id="1wFFK2YbiFSh"
Q1 - PartF/MiniProj_LSTM_Adam_MSE_Q1_PartF_Pytorch_AVGPool.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![Callysto.ca Banner](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-top.jpg?raw=true) # # <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcallysto-sample-notebooks&branch=master&subPath=notebooks/General_Interest/D3JS Test.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a> from IPython.core.display import display, HTML from string import Template # ## Output HTML from Python # # Instead of raw text output, we can output rich HTML... display(HTML('<h1>Hello, world!</h1>')) # We can also output the other things we might use in developing a webpage, like CSS: display(HTML(''' <style> h1:hover { background-color: yellow; } </style> <h1>Hello, world!</h1> ''')) # Note that CSS is global, so even though we may have run this after outputting our first `h1` tag, it affects all `h1` tags in the document, even the ones we do in Markdown... # # # Oh, this is affected too? # We can use the properties of CSS that web developers use to avoid conflicts like these to target our changes to only the areas we intend. For example, the following will only affect `h1` headings with the `output` class on them: display(HTML(''' <style> h1.output:hover { background-color: red; } </style> <h1 class='output'>Hello, world!</h1> ''')) # And if we want to get even more specific, we can add a containing div and use its classname or ID to fence in our changes: display(HTML(''' <style> #block-1234 h1.output:hover { background-color: blue; } </style> <div id="block-1234"> <h1 class='output'>Hello, world!</h1> </div> ''')) # ## We can insert Javascript too # # The following is based on https://stackoverflow.com/questions/44349183/cant-run-d3js-to-a-website-jupyter-notebook but removes the direct use of `%%javascript` magic. Why? Well, whatever we can do directly in Python is something we can abstract into a function. This will be important for making it easy for others to insert these things into their notebooks without having to switch between multiple libraries. Note, there are more advanced integrations of D3 as well (https://github.com/ResidentMario/py_d3), which are great *if* you're comfortable switching back and forth between multiple languages. We want to *hide* that as much as possible. This way, we can use the power of D3 without the end user really knowing that they're using D3 or Javascript or HTML. # + from IPython.display import Javascript def notebook_init(): display(Javascript(''' require.config({ paths: { d3: "https://d3js.org/d3.v4.min", d3_selection: "https://d3js.org/d3-selection-multi.v0.4.min" } }); require(["d3"], function(d3) { window.d3 = d3; }); require(["d3_selection"]); ''')) # If notebook_init() is in a library, people can simply call it, without knowing that they're actually doing a # fancy D3JS import notebook_init() # - HTML(''' <style> .bar { fill: steelblue; } .bar:hover { fill: brown; } .axis--x path { display: none; } </style> <svg width="960" height="500"></svg> <script> var svg = d3.select("svg"), margin = {top: 20, right: 20, bottom: 30, left: 40}, width = +svg.attr("width") - margin.left - margin.right, height = +svg.attr("height") - margin.top - margin.bottom; var x = d3.scaleBand().rangeRound([0, width]).padding(0.1), y = d3.scaleLinear().rangeRound([height, 0]); var g = svg.append("g") .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); d3.tsv("data/data.tsv", function(d) { d.frequency = +d.frequency; return d; }, function(error, data) { if (error) throw error; x.domain(data.map(function(d) { return d.letter; })); y.domain([0, d3.max(data, function(d) { return d.frequency; })]); g.append("g") .attr("class", "axis axis--x") .attr("transform", "translate(0," + height + ")") .call(d3.axisBottom(x)); g.append("g") .attr("class", "axis axis--y") .call(d3.axisLeft(y).ticks(10, "%")) .append("text") .attr("transform", "rotate(-90)") .attr("y", 6) .attr("dy", "0.71em") .attr("text-anchor", "end") .text("Frequency"); g.selectAll(".bar") .data(data) .enter().append("rect") .attr("class", "bar") .attr("x", function(d) { return x(d.letter); }) .attr("y", function(d) { return y(d.frequency); }) .attr("width", x.bandwidth()) .attr("height", function(d) { return height - y(d.frequency); }); }); </script> ''') def bouncing_balls(down_speed, up_speed): display(HTML( '<script>var down_speed = ' + str(down_speed) + '; var up_speed = ' + str(up_speed) + ';</script>' + ''' <style> .ball { position: absolute; background-color: red; border-radius: 50px; } </style> <div class="bounce-container" style="width: 100%; height: 300px"></div> <script> var width = 300; var height = 200; var radius = 50; var bounceTop = 0 + 'px'; var bounceBottom = (height - radius * 2) + 'px'; var ball = d3.select('.bounce-container') .selectAll('div') .data([0]) .enter() .append('div').attr('class', 'ball') .style("top", "bounceTop") .style("left", width / 2 - radius + 'px') .style("width", radius * 2 + 'px') .style("height", radius * 2 + 'px') function intervalFunc(){ ball.transition().duration(down_speed) .style('top',height + 'px') .transition().duration( up_speed ) .style('top',0+'px')}; if(document.bounceInterval) { clearInterval(document.bounceInterval); } document.bounceInterval = setInterval(intervalFunc, down_speed+up_speed); </script> ''')) bouncing_balls(500, 500) # [![Callysto.ca License](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-bottom.jpg?raw=true)](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
notebooks/General_Interest/D3JS Test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # + # -*- coding: utf-8 -*- from nltk.corpus import stopwords from string import punctuation from nltk import word_tokenize from nltk.tokenize import TweetTokenizer from unidecode import unidecode import pandas as pd # stopword list to use characters = ['¿', '¡', 'rt', '…'] spanish_stopwords = stopwords.words('spanish') + list(punctuation) + characters + list(map(str,range(10))) non_words = ' '.join([unidecode(word.lower()) for word in spanish_stopwords]) tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True) # - all_mentions = pd.read_csv('event_mentions_text_es.csv', encoding='latin-1').sort_values('EventTimeDate', ascending=1) def tokenize(text): return [unidecode(word.lower()) for word in tokenizer.tokenize(text) if word.lower() not in non_words] sentences = [] for text in list(all_mentions.Text.dropna()): if text != '[]': sentences.append(tokenize(text)) # + # import modules & set up logging import gensim, logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) # train word2vec on the two sentences model = gensim.models.Word2Vec(sentences, min_count=50, size=100) # default value is 5 # + #model.most_similar('timochenko', topn=50) relations = ["santos colombia uribe", "santos colombia farc", "santos colombia hollande"] for relation in relations: a, b, x = relation.split() predicted = model.most_similar([x, b], [a])[0][0] print ("'%s' is to '%s' as '%s' is to '%s'" % (a, b, x, predicted)) # -
Notebooks/GDELT Word Embeddings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from LineSplit import LineSplit from WordSplit import WordSplit from nltk.translate.bleu_score import sentence_bleu # + canFile = input("\nEnter Location of Candidate File: ") refFile = input("\nEnter Location of Reference File: ") canFile = './summary_worldwar2.txt' refFile = './summary_worldwar2.txt' canList = WordSplit(LineSplit(canFile)) refList = WordSplit(LineSplit(refFile)) # + canFinal = [] refFinal = [] for item in canList: if item in refList: canFinal.append(item) refFinal.append(item) canList.remove(item) refList.remove(item) canFinal += canList refFinal += refList # - canFinal == refFinal # + reference = [['this', 'is', 'a', 'test'], ['this', 'is' 'test'], ['more', 'test']] candidate = ['this', 'is', 'a', 'test', 'test', 'more', 'world', 'hello'] ref = [['this', 'is', 'a', 'test', 'more', 'test', 'hello', 'world']] score = sentence_bleu([refFinal], canFinal) print(score) # - data = LineSplit('./summary_worldwar2.txt') word = data[0].split(' ') word def wordSplit(data): splitList = [] for item in data: temp = item.split(' ') splitList += temp return splitList wordSplit(LineSplit('./summary_worldwar2.txt')) lis = [1, 2, 3, 4, 5, 5, 5] lis.remove(5) lis # + string = 'hello world' lis += string.split(' ') lis # - # two references for one document from nltk.translate.bleu_score import corpus_bleu references = [[['this', 'is', 'a', 'test'], ['this', 'is' 'test']]] candidates = [['this', 'is', 'a', 'test'], ['this', 'is', 'test']] score = corpus_bleu(references, candidates) print(score)
Evaluation/BLEU/BLEU Score.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import nltk #every sentence contains a lot of entities like a persons name, name of a place, etc #we are going to extract different entities out of the sentences paragraph = "The Taj Mahal was built by Emperor <NAME>" #tokenizing this paragraph into words words = nltk.word_tokenize(paragraph) words #now we will do POS tagging which is necessory for named entity recognition tagged_words = nltk.pos_tag(words) tagged_words #now this is what we are going to pass in the function which is going to create that named entity namedEnt = nltk.ne_chunk(tagged_words) namedEnt print(namedEnt) #note above you can see this is not a very good way to visualize it its in form tree,tree so we are going to call a #function in this named entity object namedEnt.draw() #so this will bring a popup where you can visualize the tree structure
named_entity_recognition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # -------- This programming code is a raw version, an improved version will be uploaded soon --------- # + import pandas as pd import matplotlib.pyplot as plt import numpy as np # - # Check which ISCO-codes are not in official document but are in the document of the Netherlands def compare_df(df1,df2,compare_name): df_in = df1 np1 = df1[compare_name].unique() np2 = df2[compare_name].unique() df_output = df_in.iloc[0:0] xyz = False number_missing = 0 for i in np1: xyz = False for j in np2: if i == j: xyz = True if xyz == False: x = df_in[df_in[compare_name] == i] df_output = pd.concat([df_output,x]) df_output = df_output.reset_index() df_output = df_output.drop(['index'],axis=1) return df_output df_avg = pd.read_csv('avg_data.csv') df_min = pd.read_csv('min_data.csv') df_max = pd.read_csv('max_data.csv') df_avg = df_avg.drop('Unnamed: 0', axis = 1) df_min = df_min.drop('Unnamed: 0', axis = 1) df_max = df_max.drop('Unnamed: 0', axis = 1) df_avg = df_avg.rename(columns={"brc_beroep":'brc_code'}) df_min = df_min.rename(columns={"brc_beroep":'brc_code'}) df_max = df_max.rename(columns={"brc_beroep":'brc_code'}) df_avg = df_avg.drop('jobs_lost_low', axis = 1) df_avg = df_avg.drop('jobs_lost_high', axis = 1) df_avg = df_avg.rename(columns={"jobs_lost_avg":'jobs_lost_ET'}) df_min = df_min.drop('jobs_lost_avg', axis = 1) df_min = df_min.drop('jobs_lost_high', axis = 1) df_min = df_min.rename(columns={"jobs_lost_low":'jobs_lost_ET'}) df_max = df_max.drop('jobs_lost_low', axis = 1) df_max = df_max.drop('jobs_lost_avg', axis = 1) df_max = df_max.rename(columns={"jobs_lost_high":'jobs_lost_ET'}) df = df_avg df.head() df[df['brc_code'] == 711] ham = df[df['jobs_lost_ET'] == 0] ham.num_working.sum() len(df) len(df['brc_code'].unique()) # + df_occ = pd.read_excel('Keys_Occupation_data.xlsx') df_occ['brc_code'] = df_occ['Title'] df_occ['brc_name'] = df_occ['Title'] #x = df_occ.iloc[0]['Title'] #x.split(' ', maxsplit = 1) for i in range(0, len(df_occ)): x = df_occ.iloc[i]['Title'] y = df_occ.iloc[i]['brc_code'] a,b = x.split(" ", maxsplit = 1) df_occ['brc_code'].replace(y, value=a, inplace=True) for i in range(0, len(df_occ)): x = df_occ.iloc[i]['brc_name'] a,b = x.split(" ", maxsplit = 1) df_occ['brc_name'].replace(x, value =b, inplace=True) df_occ = df_occ[['occ_code','brc_code', 'brc_name']] df_occ["brc_code"] = pd.to_numeric(df_occ["brc_code"]) df_occ.head() # - compare_df(df_occ,df,'brc_code') df_ed = pd.read_excel('Keys_Education_data.xlsx') df_ed df_pol = pd.read_csv('Policies_data.csv', sep = ';') df_pol.head() len(df_pol['occ_code'].unique()) df_pol.head() df_pol[df_pol['occ_code'] == "A000163"].head() df_pol[df_pol['occ_code'] == "A000238"].tail() df_pol[df_pol['occ_code'] == "A000328"].tail() df_occ[df_occ['brc_code'] == 111] # + #df_pol[df_pol['occ_code'] == "A000230"] # + expl1 = df_occ[['occ_code','brc_code']] df1 = pd.merge(df_pol,expl1, on=['occ_code'],how ='inner') # - df1.head() df1[df1['brc_code'] == 1041].head() print(df1['num_working'].sum()) len(df1['occ_code'].unique()) # + #Transition to one BRC code of formed database # - df_ET = df_occ[['brc_code', 'brc_name']] df_ET['num_working'] = 0 df_ET['jobs_lost_ET'] = 0 df_ET.head() for i in range(0,len(df_ET)): #i = i - 1 brc_code = df_occ.iloc[i]['brc_code'] new_df = df[df['brc_code'] == brc_code] a = new_df['num_working'].sum() b = new_df['jobs_lost_ET'].sum() #x = df_ET.iloc[i]['num_working'] #y = df_ET.iloc[i]['jobs_lost_ET'] #df_ET['num_working'].replace(x, value=a, inplace=True) #df_ET['jobs_lost_ET'].replace(y, value=b, inplace=True) df_ET.loc[i,'num_working'] = a df_ET.loc[i,'jobs_lost_ET'] = b #i = i + 1 df_2 = df_ET #df_2 = df_2[0:114] df_2["brc_code"] = pd.to_numeric(df_2["brc_code"], downcast='integer') pd.options.display.max_colwidth = 100 df_2 poi = df_2[df_2['brc_code'] == 111] poi kaas = df_2[df_2['jobs_lost_ET']== 0] kaas.head() kaas.num_working.sum() expl7 = df_2[df_2['jobs_lost_ET']== 0] expl7 = expl7[['brc_code']] expl7 = expl7.reset_index(drop=True) expl7 # + # In the origianl df_pol the overall categories are still incorporated therefor we lose this data. But we see # that there are still 114 occupations in there # + expl2 = df_ed[['ed_code','education_level']] df2 = pd.merge(df1,expl2,on=['ed_code'],how ='inner') # - df2.head() df3 = df2[['ID','brc_code','periods','education_level','num_working']] df3.head() # + # Percentage growht per brc_code # + expl2003 = df3[df3['periods'] == ('2003JJ00')] expl2018 = df3[df3['periods'] == ('2018JJ00')] # - expl8 = expl7.copy() expl8['jc_03'] = 0 expl8['jc_18'] = 0 expl8['job_decrease'] = 0 expl8['job_growth'] = 0 # + #expl8 = expl8.reset_index(drop=True) # - expl8 # + for i in range(0,len(expl8)): brc_code = expl8.iloc[i]['brc_code'] df1 = expl2003[expl2003['brc_code'] == brc_code] df2 = expl2018[expl2018['brc_code'] == brc_code] x = df1['num_working'].sum() y = df2['num_working'].sum() expl8.loc[i,'jc_03'] = x expl8.loc[i,'jc_18'] = y z = ((y-x)/15) v = z/x if (z>0): expl8.loc[i,'job_growth'] = (x*v) * 12 # growth per year times 12 for the duration until 2030 else: expl8.loc[i,'job_decrease'] = (-x*v) * 12 # - expl8.sum() expl9 = expl8[['brc_code','job_decrease','job_growth']] df12 = pd.merge(df_2,expl9,on=['brc_code'],how ='left') df12 = df12.fillna(value = 0) df12 df12['jobs_lost'] = df12['jobs_lost_ET'] + df12['job_decrease'] # Incorporate sectors df_beroep = pd.read_excel("brc_sector.xlsx") df_beroep = df_beroep.drop_duplicates() df_beroep = df_beroep.reset_index() df_beroep = df_beroep.drop(labels="index", axis = 1) df_beroep = df_beroep.rename(columns={"brc_beroep":'brc_code'}) df_beroep.head() df13 = pd.merge(df12,df_beroep, on=['brc_code'], how='inner') df13 = df13[['brc_code','brc_sector','brc_name','num_working','job_growth','jobs_lost']] df13.head() df13.sum() # + ## Education level # - expl22 = df3[df3['education_level']==0] expl22.head() # + expl22 = df3[df3['education_level']==0] expl22['num_working'].sum() # + expl22 = df3[df3['education_level']==1] expl22['num_working'].sum() # + expl22 = df3[df3['education_level']==2] expl22['num_working'].sum() # + expl22 = df3[df3['education_level']==3] expl22['num_working'].sum() # - # + # Check if there are brc code missing compare_df(df, df3,'brc_code') # - # + # reset all the education levels that are 0 to 1 since we assume education levels of 1 are reseached at least # - df4 = df3 for i in range(0, len(df4)): x = df4.iloc[i]['education_level'] if (x == 0): df4["education_level"].replace(x, value=1, inplace=True) df4[df4['education_level']==0] df4[df4['education_level']==2].head() # + # Check per occupation what the education level is # + expl3 = df4[df4['brc_code'] == 111] X1 = expl3[expl3['education_level']==1] X2 = expl3[expl3['education_level']==2] X3 = expl3[expl3['education_level']==3] print (X3) Y1 = X1['num_working'].sum() Y2 = X2['num_working'].sum() Y3 = X3['num_working'].sum() print(Y1,Y2,Y3) if (Y2 > Y3 ) & ( Y2 > Y1): print ('hi') else: print('no') # + # df with brc_code and education level # + expl4 = df_occ['brc_code'] expl4 = pd.DataFrame(expl4) expl5 = expl4[0:0] #expl5['education_level_1'] = 1 #expl5['education_level_2'] = 1 #expl5['education_level_3'] = 1 expl5['percentage_ed_1'] = 1 expl5['percentage_ed_2'] = 1 expl5['percentage_ed_3'] = 1 # + for i in range(0, len(expl4)): x = expl4.iloc[i]['brc_code'] df = df4[df4['brc_code'] == x] X1 = df[df['education_level']==1] X2 = df[df['education_level']==2] X3 = df[df['education_level']==3] Y1 = X1['num_working'].sum() Y2 = X2['num_working'].sum() Y3 = X3['num_working'].sum() print(Y1,Y2,Y3) # + for i in range(0, len(expl4)): x = expl4.iloc[i]['brc_code'] df = df4[df4['brc_code'] == x] X1 = df[df['education_level']==1] X2 = df[df['education_level']==2] X3 = df[df['education_level']==3] Y1 = X1['num_working'].sum() Y2 = X2['num_working'].sum() Y3 = X3['num_working'].sum() YY = Y1 + Y2 + Y3 Z1 = Y1/YY Z2 = Y2/YY Z3 = Y3/YY expl5 = expl5.append({'brc_code':x,'percentage_ed_1': Z1, 'percentage_ed_2': Z2, 'percentage_ed_3': Z3}, ignore_index=True) expl5["brc_code"] = pd.to_numeric(expl5["brc_code"], downcast='integer') #expl5 = expl5.to_numeric('brc_code',downcast='integer') #pd.to_numeric(s, downcast='integer') #df.append({'Rank': new[0],'Probability': new[1],'soc_code': new[2],'Occupation': new[3]}, ignore_index=True) #if (Y3 > Y2 ) & ( Y3 > Y1): #expl4[''] df4["education_level"].replace(x, value=1, inplace=True) # - expl5.head() expl5.sum()/114 df4 = pd.merge(df13,expl5,on=['brc_code'], how = 'inner') df4 df4['job_growth1'] = df4['job_growth'] * df4['percentage_ed_1'] df4['job_growth2'] = df4['job_growth'] * df4['percentage_ed_2'] df4['job_growth3'] = df4['job_growth'] * df4['percentage_ed_3'] df4['jobs_lost1'] = df4['jobs_lost'] * df4['percentage_ed_1'] df4['jobs_lost2'] = df4['jobs_lost'] * df4['percentage_ed_2'] df4['jobs_lost3'] = df4['jobs_lost'] * df4['percentage_ed_3'] df5 = df4[['brc_code','brc_sector','brc_name','num_working','job_growth1','job_growth2','job_growth3','jobs_lost1','jobs_lost2','jobs_lost3']] df5.tail() df_class = pd.read_excel("class_label.xlsx") df_class df6 = df_class df6['num_working'] = 0 df6['job_growth1'] = 0 df6['job_growth2'] = 0 df6['job_growth3'] = 0 df6['jobs_lost1'] = 0 df6['jobs_lost2'] = 0 df6['jobs_lost3'] = 0 df6 for i in range(0,len(df_class)): brc_sector = df_class.iloc[i]['brc_sector'] df1 = df5[df5['brc_sector'] == brc_sector] x1 = df1['num_working'].sum() x2 = df1['job_growth1'].sum() x3 = df1['job_growth2'].sum() x4 = df1['job_growth3'].sum() x5 = df1['jobs_lost1'].sum() x6 = df1['jobs_lost2'].sum() x7 = df1['jobs_lost3'].sum() df6.loc[i,'num_working'] = x1 df6.loc[i,'job_growth1'] = x2 df6.loc[i,'job_growth2'] = x3 df6.loc[i,'job_growth3'] = x4 df6.loc[i,'jobs_lost1'] = x5 df6.loc[i,'jobs_lost2'] = x6 df6.loc[i,'jobs_lost3'] = x7 df6.sum() df7 = df6 df7 expl10 = df6 expl10['job_growth'] = expl10['job_growth1'] + expl10['job_growth2'] + expl10['job_growth3'] expl10['jobs_lost'] = expl10['jobs_lost1'] + expl10['jobs_lost2'] + expl10['jobs_lost3'] expl10 = expl10[[ 'brc_sector','jobs_lost','job_growth']] expl10 # + expl10.plot.bar(x ='brc_sector') plt.show() #incorporated are job growth and extra job loss through historical information # + # Replace workers with workers from the same sector # - df7 = df7.round(decimals = 3) df7 = df7[['brc_sector','job_growth1','job_growth2','job_growth3','jobs_lost1','jobs_lost2','jobs_lost3']] df7 df7.sum() # + for i in range(0,len(df7)): x1 = df7.iloc[i]['jobs_lost3'] x2 = df7.iloc[i]['job_growth3'] if (x1 < x2): df7.loc[i,'jobs_lost3'] = 0 df7.loc[i,'job_growth3'] = (x2 - x1) else: df7.loc[i,'jobs_lost3'] = (x1 - x2) df7.loc[i,'job_growth3'] = 0 x3 = df7.iloc[i]['jobs_lost2'] x4 = df7.iloc[i]['job_growth2'] if (x3 < x4): df7.loc[i,'jobs_lost2'] = 0 df7.loc[i,'job_growth2'] = (x4 - x3) else: df7.loc[i,'jobs_lost2'] = (x3 - x4) df7.loc[i,'job_growth2'] = 0 x5 = df7.iloc[i]['jobs_lost1'] x6 = df7.iloc[i]['job_growth1'] if (x5 < x6): df7.loc[i,'jobs_lost1'] = 0 df7.loc[i,'job_growth1'] = (x6 - x5) else: df7.loc[i,'jobs_lost1'] = (x5 - x6) df7.loc[i,'job_growth1'] = 0 # - df7 # + for i in range(0,len(df7)): x1 = df7.iloc[i]['jobs_lost3'] x2 = df7.iloc[i]['job_growth3'] x3 = df7.iloc[i]['jobs_lost2'] x4 = df7.iloc[i]['job_growth2'] x5 = df7.iloc[i]['jobs_lost1'] x6 = df7.iloc[i]['job_growth1'] if (x3 < x2): df7.loc[i,'jobs_lost2'] = 0 df7.loc[i,'job_growth3'] = (x2 - x3) else: df7.loc[i,'jobs_lost2'] = (x3 - x2) df7.loc[i,'job_growth3'] = 0 if (x5 < x4): df7.loc[i,'jobs_lost1'] = 0 df7.loc[i,'job_growth2'] = (x4 - x5) else: df7.loc[i,'jobs_lost1'] = (x5 - x4) df7.loc[i,'job_growth2'] = 0 # - df7 df7.sum() df8 = df7[['brc_sector','jobs_lost1','jobs_lost2', 'jobs_lost3']] df8 # What is the distribution of workers that is available df8.plot.bar(x ='brc_sector') plt.show() # + z1 = int(df8['jobs_lost1'].sum()*1000) z2 = int(df8['jobs_lost2'].sum()*1000) z3 = int(df8['jobs_lost3'].sum()*1000) print('Number of people lost current job educational level 1') print(z1) print('Number of people lost current job educational level 2') print(z2) print('Number of people lost current job educational level 3') print(z3) print('Total number of current jobs lost') print(z1+z2+z3) # -
Master Thesis - Paul Schot - Python code - Data integration 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # * Se puden mejorar las mascaras haciendolas de un bit # Image processing import cv2 import numpy as np import os import glob from ipynb.fs.full.Utils import * # # Variables de configuración segments_amount = 20 #Cantidad de segmentos window_ratio = 0.05 # 5% con 1024 son ventanas de 51 píxeles (ojo porque quedan pixeles a la derecha si procesar) top_crop_ratio = 0.1 width_crop_ratio = 0.1 kernel_ones_3 = np.ones((3,3),np.uint8) kernel_e1 = np.array(([0,0,0,0],[0,1,1,0],[0,1,1,0],[0,0,0,0]), np.uint8) kernel_e2 = np.array(([0,1,0],[0,1,0],[0,1,0]),np.uint8) kernel_e3 = np.array(([0,0,0],[1,1,1],[0,0,0]),np.uint8) kernel_e4 = np.array(([0,0,0,0,0],[0,1,1,1,0],[0,1,1,1,0],[0,1,1,1,0],[0,0,0,0,0]),np.uint8) kernel_cruz = np.array(([0,1,0],[1,1,1],[0,1,0]),np.uint8) kernel_x = np.array(([1,0,1],[0,1,0],[1,0,1]),np.uint8) # ### Detección de lineas de cultivo por Hough # Detecta una única linea con el contenido de cultivo superior, es donde más se juntan las plantas y hay mayor densidad de puntos original = take_random_picture('img/row_test_pcam/random/', 'test') to_detect = cv2.cvtColor(original, cv2.COLOR_RGB2HSV) by_color_index = img_to_color_index(original, 'cive') print(by_color_index) g_to_detect = segment_by_color(to_detect, binary=False) g_to_detect = cv2.erode(g_to_detect, kernel_e1,iterations=1) # mostrar_imgs(['original', 'seg'], [original_td, g_to_detect]) # + # Bordes por Canny # edges = cv2.Canny(seg_and,50,150) # mostrar_img('edges canny', edges) # Bordes horizontales por convolución (sabiendo que vemos las columnas cultivo de frente) sz_v = 0.4 kernel_edge = np.array([[-1,2,-1], #Segmentos verticales [-1,2,-1], [-1,2,-1]]) edges = cv2.filter2D(g_to_detect,-1,kernel_edge) # edges = cv2.resize(edges, ((int)(edges.shape[1]*sz_v),(int)(edges.shape[0]*sz_v))) mostrar_img('edges conv', edges) # - #Realizando erociones iterativas con kernel vertical g_to_detect_2 = cv2.erode(g_to_detect, kernel_e2,iterations=4) # g_to_detect_3 = cv2.erode(g_to_detect_2, kernel_e3,iterations=1) # mostrar_imgs(['det2', 'det3'], [g_to_detect_2, g_to_detect_3]) mostrar_img('edges conv', g_to_detect_2) #Esqueletizar para operar solo con la esqueletización #Se pueden borrar puntos esqueletizados más fácil img_ezq = g_to_detect_2.copy() size = np.size(img_ezq) skel = np.zeros(img_ezq.shape,np.uint8) element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3)) done = False while( not done): eroded = cv2.erode(img_ezq,element) temp = cv2.dilate(eroded,element) temp = cv2.subtract(img_ezq,temp) skel = cv2.bitwise_or(skel,temp) img_ezq = eroded.copy() zeros = size - cv2.countNonZero(img_ezq) if zeros==size: done = True mostrar_img('skeleton', skel) with_lines = original_td.copy() lines = cv2.HoughLines(g_to_detect_2,10,np.pi/90,800) third_1 = (int)(g_to_detect.shape[1]/3) third_2 = (int)(g_to_detect.shape[1]*(2/3)) epsilon = (int)(0.01*g_to_detect.shape[1]) if type(lines) is np.ndarray: for line in lines: for rho,theta in line: a = np.cos(theta) b = np.sin(theta) x0 = a*rho y0 = b*rho x1 = int(x0 + 2000*(-b)) y1 = int(y0 + 2000*(a)) x2 = int(x0 - 2000*(-b)) y2 = int(y0 - 2000*(a)) if (x1 != x2 and y2 != y1): m = (y2-y1)/(x2-x1) b = y2 - m*x2 x = (g_to_detect.shape[0]-b)/m if (x > 0 and x < g_to_detect.shape[1]): if (x < third_1 and theta < 0.49 and theta > 0.2): cv2.line(with_lines,(x1,y1),(x2,y2),(255),1) if (x > third_2 and theta > 2.65 and theta < 2.92): cv2.line(with_lines,(x1,y1),(x2,y2),(255),1) if (x > third_1 and x < third_2 and (theta > 2.75 or theta < 0.39)): cv2.line(with_lines,(x1,y1),(x2,y2),(255),1) mostrar_img('lines', with_lines) diff = cv2.subtract(with_lines, g_to_detect) mostrar_img('diferencia', diff) # ### Imagen de prueba para clasificación # Se segmenta y se calculan bordes para tener referencia visual (No lo requiere el algoritmo final) file_name = 'artificial_1' original_td = cv2.imread(img_path + file_name + '.png', cv2.IMREAD_COLOR) # original_td = h_crop(original_td, 0.1) #Para que queden de igual ancho to_detect = cv2.cvtColor(original_td, cv2.COLOR_RGB2HSV) to_detect = v_crop_top(segment_by_color(to_detect), top_crop_ratio) kernel = np.ones((3,3), np.uint8) to_detect = cv2.erode(to_detect, kernel, iterations=1) # + # mostrar_img('to_detect', to_detect) # - to_detect_edge = cv2.Canny(to_detect,50,150) zeros = np.zeros(to_detect.shape, np.uint8) three_channel_edge = cv2.merge((to_detect, zeros, zeros)) mostrar_img('edges canny', three_channel_edge) # ### Clasificación (FIP + RCRD) # Trabajo en segmentos, divide la imagen en 20 segmentos (se puede optar por análizar solo la parte inferior, los primeros 15 segmentos) # + td_h, td_w = to_detect.shape seg_height = (int)(td_h/segments_amount) steps = (int)(td_h/seg_height) #Si td_h no es múltiplo de la cantidad de seg => steps != segments_amount window_width = (int)(td_w*window_ratio) #RCRD descripto como vectores para que sea más rápido verificar offset_seg_height = 0 rcrd_vectors = np.zeros((steps, td_w), np.bool) for i in range(0,steps): segment = seg_and[offset_seg_height:offset_seg_height + seg_height, 0:td_w] rcrd_vectors[i,:] = calculate_row(segment, seg_height, td_w) offset_seg_height = offset_seg_height + seg_height # + def is_crop(window): return True sum = 0 for elem in window: if elem: sum = sum + 1 sz = np.size(window) return ((sum/sz) > 0.80) def verificate_with_rcrd(seg_index, window_pos, window_width): # return True return np.any(rcrd_vectors[seg_index, window_pos:window_pos + window_width]) # + #FIP offset_seg_height = 0 # from matplotlib import pyplot as plt # x = np.arange(0,td_w) three_ch_copy = three_channel_edge.copy() #La imagen la voy recorriendo de abajo hacia arriba for seg_index in range(0, steps): #Calculo los vectores de cada segmento segment = to_detect[offset_seg_height:offset_seg_height + seg_height,0:td_w] vector = calculate_row(segment, seg_height, td_w) #Todo lo que no sea crop, es weed window_pos = 0; while (window_pos < td_w - window_width ): #Hay algo mal en el algoritmo if (is_crop(vector[window_pos:window_pos + window_width])): if (verificate_with_rcrd(seg_index, window_pos, window_width)): #Luego de verificar que sea crop, pinto ese sector sector = three_ch_copy[offset_seg_height:offset_seg_height + seg_height, window_pos: window_pos + window_width] sec_b, sec_g, sec_r = cv2.split(sector) sector = cv2.merge((sec_r, sec_g, sec_b)) #Lo pinto dando vuelta los canales three_ch_copy[offset_seg_height:offset_seg_height + seg_height, window_pos: window_pos + window_width] = sector window_pos = window_pos + window_width else: window_pos = window_pos + 1 else: window_pos = window_pos + 1 offset_seg_height = offset_seg_height + seg_height # - mostrar_img('th', three_ch_copy) # ### Contagio de color # + #De manera recursiva repinta lo que encuentre a su alrededor def paint(pixel): if (pixel[0]): aux = pixel[0] pixel [0] = pixel[2] pixel[2] = aux def contagio(img, y_init, x_init): img_h, img_w, ch = img.shape x1= x_init-1 x2= x_init+1 y1= y_init-1 y2= y_init+1 if (x1 >= 0): paint(img[y_init,x1]) if (x2 < img_w): paint(img[y_init,x2]) if (y1 >= 0): paint(img[y1,x_init]) if (y2 < img_h): paint(img[y2,x_init]) # - three_ch_copy2 = three_ch_copy.copy() for y in range(0,td_h-1): for x in range(0,td_w-1): if (three_ch_copy2[y,x,2]): contagio(three_ch_copy2, y, x) #BGR, el contagio es con R mostrar_imgs(['th contagio','th'], [three_ch_copy2, three_ch_copy]) #Colorearlo en imagen de bordes (Poner valor 255 en segundo (crop) o tercer canal (weed)) #Sumar bordes clasificados con imagen result = cv2.add(original_td, three_ch_copy2) mostrar_img('resultado', result) # ### Guardado de imágenes #Guardar resultado file_name='total' cv2.imwrite('img/row_test/' + file_name + '_resultado2.png', result) # cv2.imwrite('img/row_test/' + file_name + '_with_lines.png', with_lines)
tesis_project_env/algorithms/row_detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # High order positivity preserving methods # 1. Chapter 6: stability and positivity preserving high order methods # 1. [Section 1.1: Modified Patankar schemes](#mP) # 1. [Section 1.2: Strong Stability Preserving RK](#SSPRK) # + # If you do not have numpy, matplotlib, scipy or nodepy, run this cell # !pip install numpy # This is the basic package in python with all the numerical functions # !pip install scipy # This package has some functions to deal with polynomials # !pip install matplotlib # This package allows to plot # !pip install nodepy # This package has some interesting features for RK methods # + # We need a couple of packages in this chapter import numpy as np # This is the basic package in python with all the numerical functions import matplotlib.pyplot as plt # This package allows to plot from nodepy import rk #This package already implemented some functions for Runge Kutta and multistep methods # + import numpy as np ## Linear scalar Dahlquist's equation def linear_scalar_flux(u,t=0,k_coef=10): ff=np.zeros(np.shape(u)) ff[0]= -k_coef*u[0] return ff def linear_scalar_exact_solution(u0,t,k_coef=10): return np.array([np.exp(-k_coef*u0[0]*t)]) def linear_scalar_jacobian(u,t=0,k_coef=10): Jf=np.zeros((len(u),len(u))) Jf[0,0]=-k_coef return Jf #nonlinear problem y'=-ky|y| +1 def nonlinear_scalar_flux(u,t=0,k_coef=10): ff=np.zeros(np.shape(u)) ff[0]=-k_coef*abs(u[0])*u[0] +1 return ff def nonlinear_scalar_exact_solution(u0,t,k_coef = 10): sqrtk = np.sqrt(k_coef) ustar = 1 / sqrtk if u0[0] >= ustar: uex=np.array([1./np.tanh(sqrtk * t + np.arctanh(1/sqrtk /u0[0])) / sqrtk]) elif u0[0] < 0 and t < - np.atan(sqrtk * u0[0]) / sqrtk: uex=np.array([np.tan(sqrtk * t + np.arctan(sqrtk * u0[0])) / sqrtk]) else: uex=np.array([np.tanh(sqrtk * t + np.arctanh(sqrtk * u0[0])) / sqrtk]) return uex def nonlinear_scalar_jacobian(u,t=0,k_coef=10): Jf=np.zeros((len(u),len(u))) Jf[0,0]=-k_coef*abs(u[0]) return Jf # SYSTEMS # linear systems def linear_system2_flux(u,t=0): d=np.zeros(len(u)) d[0]= -5*u[0] + u[1] d[1]= 5*u[0] -u[1] return d def linear_system2_exact_solution(u0,t): A=np.array([[-5,1],[5,-1]]) u_e=u0+(1-np.exp(-6*t))/6*np.dot(A,u0) return u_e def linear_system2_jacobian(u,t=0): Jf=np.array([[-5,1],[5,-1]]) return Jf linear_system2_matrix = np.array([[-5,1],[5,-1]]) def linear_system2_production_destruction(u,t=0): p=np.zeros((len(u),len(u))) d=np.zeros((len(u),len(u))) p[0,1]=u[1] d[1,0]=u[1] p[1,0]=5*u[0] d[0,1]=5*u[0] return p,d #lin system 3 x3 def linear_system3_flux(u,t=0): d=np.zeros(len(u)) d[0]= -u[0] + 3*u[1] d[1]= -3*u[1] + 5*u[2] d[2]= -5*u[2] return d def linear_system3_exact_solution(u0,t=0): u_e = np.zeros(len(u0)) u_e[0] = 15.0/8.0*u0[2]*(np.exp(-5*t) - 2*np.exp(-3*t)+np.exp(-t)) u_e[1] = 5.0/2.0*u0[2]*(-np.exp(-5*t) + np.exp(-3*t)) u_e[2] = u0[2]*np.exp(-5*t) return u_e def linear_system3_jacobian(u,t=0): Jf=np.zeros((len(u),len(u))) Jf[0,0]=-1. Jf[0,1]=3 Jf[1,1] = -3 Jf[1,2] = 5 Jf[2,2] = -5 return Jf ## Nonlinear 3x3 system production destruction def nonlinear_system3_flux(u,t=0): ff=np.zeros(len(u)) ff[0]= -u[0]*u[1]/(u[0]+1) ff[1]= u[0]*u[1]/(u[0]+1) -0.3*u[1] ff[2]= 0.3*u[1] return ff def nonlinear_system3_production_destruction(u,t=0): p=np.zeros((len(u),len(u))) d=np.zeros((len(u),len(u))) p[1,0]=u[0]*u[1]/(u[0]+1) d[0,1]=p[1,0] p[2,1]=0.3*u[1] d[1,2]=p[2,1] return p,d # SIR Model def SIR_flux(u,t=0,beta=3,gamma=1): ff=np.zeros(len(u)) N=np.sum(u) ff[0]=-beta*u[0]*u[1]/N ff[1]=+beta*u[0]*u[1]/N - gamma*u[1] ff[2]= gamma*u[1] return ff def SIR_jacobian(u,t=0,beta=3,gamma=1): Jf=np.zeros((len(u),len(u))) N=np.sum(u) Jf[0,0]=-beta*u[1]/N Jf[0,1]=-beta*u[0]/N Jf[1,0]= beta*u[1]/N Jf[1,1]= beta*u[0]/N - gamma Jf[2,1] = gamma return Jf def SIR_production_destruction(u,t=0,beta=3,gamma=1): p=np.zeros((len(u),len(u))) d=np.zeros((len(u),len(u))) N=np.sum(u) p[1,0]=beta*u[0]*u[1]/N d[0,1]=p[1,0] p[2,1]=gamma*u[1] d[1,2]=p[2,1] return p,d # Nonlinear_oscillator def nonLinearOscillator_flux(u,t=0,alpha=0.): ff=np.zeros(np.shape(u)) n=np.sqrt(np.dot(u,u)) ff[0]=-u[1]/n-alpha*u[0]/n ff[1]=u[0]/n - alpha*u[1]/n return ff def nonLinearOscillator_exact_solution(u0,t): u_ex=np.zeros(np.shape(u0)) n=np.sqrt(np.dot(u0,u0)) u_ex[0]=np.cos(t/n)*u0[0]-np.sin(t/n)*u0[1] u_ex[1]=np.sin(t/n)*u0[0]+np.cos(t/n)*u0[1] return u_ex # Non linear oscillator damped def nonLinearOscillatorDamped_flux(u,t,alpha=0.01): ff=np.zeros(np.shape(u)) n=np.sqrt(np.dot(u,u)) ff[0]=-u[1]/n-alpha*u[0]/n ff[1]=u[0]/n - alpha*u[1]/n return ff def nonLinearOscillatorDamped_exact_solution(u0,t,alpha=0.01): u_ex=np.zeros(np.shape(u0)) n0=np.sqrt(np.dot(u0,u0)) n=n0*np.exp(-alpha*t) u_ex[0]=n/n0*(np.cos(t/n)*u0[0]-np.sin(t/n)*u0[1]) u_ex[1]=n/n0*(np.sin(t/n)*u0[0]+np.cos(t/n)*u0[1]) return u_ex # pendulum def pendulum_flux(u,t=0): ff=np.zeros(np.shape(u)) ff[0]=u[1] ff[1]=-np.sin(u[0]) return ff def pendulum_jacobian(u,t=0): Jf=np.zeros((2,2)) Jf[0,1]=1. Jf[1,0]=np.cos(u[0]) return Jf def pendulum_entropy(u,t=0): return np.array(0.5*u[1]**2.-np.cos(u[0]), dtype=np.float) def pendulum_entropy_variables(u,t=0): v=np.zeros(np.shape(u)) v[0]=np.sin(u[0]) v[1]=u[1] return v # Robertson def Robertson_flux(u,t=0,alpha=10**4,beta=0.04, gamma=3*10**7): ff=np.zeros(np.shape(u)) ff[0] = alpha*u[1]*u[2]-beta*u[0] ff[1] = beta*u[0]-alpha*u[1]*u[2] - gamma*u[1]**2 ff[2] = gamma*u[1]**2 return ff def Robertson_jacobian(u,t=0,alpha=10**4,beta=0.04, gamma=3*10**7): Jf=np.zeros((3,3)) Jf[0,0]= -beta Jf[0,1]= alpha*u[2] Jf[0,2]= alpha*u[1] Jf[1,0]= beta Jf[1,1]= -alpha*u[2]-2*gamma*u[1] Jf[1,2]= -alpha*u[1] Jf[2,1] = 2*gamma*u[1] return Jf def Robertson_production_destruction(u,t=0,alpha=10**4,beta=0.04, gamma=3*10**7): p=np.zeros((len(u),len(u))) d=np.zeros((len(u),len(u))) p[0,1]=alpha*u[1]*u[2] d[1,0]=p[0,1] p[1,0]=beta*u[0] d[0,1]=p[1,0] p[2,1]=gamma*u[1]**2 d[1,2]=p[2,1] return p,d # Lotka: def lotka_flux(u,t=0,alpha=1,beta=0.2,delta=0.5,gamma=0.2): ff=np.zeros(np.shape(u)) ff[0]=alpha*u[0]-beta*u[0]*u[1] ff[1]=delta*beta*u[0]*u[1]-gamma*u[1] return ff def lotka_jacobian(u,t=0,alpha=1,beta=0.2,delta=0.5,gamma=0.2): Jf=np.zeros((2,2)) Jf[0,0] = alpha -beta*u[1] Jf[0,1] = -beta*u[0] Jf[1,0] = delta*beta*u[1] Jf[1,1] = delta*beta*u[0] -gamma return Jf #3 bodies problem in 2D: U=(x_1,x_2,v_1,v_2,y_1,y_2,w_1,w_2,z_1,z_2,s_1,s_2) # where x is the 2D position of body1 and v is speed body1 sun # y, w are position and velocity body2 earth # z, s are position and velocity body3 mars def threeBodies_flux(u,t=0): m1=1.98892*10**30 m2=5.9722*10**24 m3=6.4185*10**23 G=6.67*10**(-11) f=np.zeros(np.shape(u)) x=u[0:2] v=u[2:4] y=u[4:6] w=u[6:8] z=u[8:10] s=u[10:12] dxy3=np.linalg.norm(x-y)**3 dxz3=np.linalg.norm(x-z)**3 dyz3=np.linalg.norm(y-z)**3 f[0:2]=v f[2:4]=-m2*G/dxy3*(x-y)-m3*G/dxz3*(x-z) f[4:6]=w f[6:8]=-m1*G/dxy3*(y-x)-m3*G/dyz3*(y-z) f[8:10]=s f[10:12]=-m1*G/dxz3*(z-x)-m2*G/dyz3*(z-y) return f class ODEproblem: def __init__(self,name): self.name=name if self.name=="linear_scalar": self.u0 = np.array([1.]) self.T_fin= 2. self.k_coef=10 self.matrix=np.array([-self.k_coef]) elif self.name=="nonlinear_scalar": self.k_coef=10 self.u0 = np.array([1.1/np.sqrt(self.k_coef)]) self.T_fin= 1. elif self.name=="linear_system2": self.u0 = np.array([0.9,0.1]) self.T_fin= 1. self.matrix = np.array([[-5,1],[5,-1]]) elif self.name=="linear_system3": self.u0 = np.array([0,0.,10.]) self.T_fin= 10. elif self.name=="nonlinear_system3": self.u0 = np.array([9.98,0.01,0.01]) self.T_fin= 30. elif self.name=="SIR": self.u0 = np.array([1000.,1,10**-20]) self.T_fin= 10. elif self.name=="nonLinearOscillator": self.u0 = np.array([1.,0.]) self.T_fin= 50 elif self.name=="nonLinearOscillatorDamped": self.u0 = np.array([1.,0.]) self.T_fin= 50 elif self.name=="pendulum": self.u0 = np.array([2.,0.]) self.T_fin= 50 elif self.name=="Robertson": self.u0 = np.array([1.,10**-20,10**-20]) self.T_fin= 10.**10. elif self.name=="lotka": self.u0 = np.array([1.,2.]) self.T_fin= 100. elif self.name=="threeBodies": self.u0 = np.array([0,0,0,0,149*10**9,0,0,30*10**3,-226*10**9,0,0,-24.0*10**3]) self.T_fin= 10.**8. else: raise ValueError("Problem not defined") def flux(self,u,t=0): if self.name=="linear_scalar": return linear_scalar_flux(u,t,self.k_coef) elif self.name=="nonlinear_scalar": return nonlinear_scalar_flux(u,t,self.k_coef) elif self.name=="linear_system2": return linear_system2_flux(u,t) elif self.name=="linear_system3": return linear_system3_flux(u,t) elif self.name=="nonlinear_system3": return nonlinear_system3_flux(u,t) elif self.name=="SIR": return SIR_flux(u,t) elif self.name=="nonLinearOscillator": return nonLinearOscillator_flux(u,t) elif self.name=="nonLinearOscillatorDamped": return nonLinearOscillatorDamped_flux(u,t) elif self.name=="pendulum": return pendulum_flux(u,t) elif self.name=="Robertson": return Robertson_flux(u,t) elif self.name=="lotka": return lotka_flux(u,t) elif self.name=="threeBodies": return threeBodies_flux(u,t) else: raise ValueError("Flux not defined for this problem") def jacobian(self,u,t=0): if self.name=="linear_scalar": return linear_scalar_jacobian(u,t,self.k_coef) elif self.name=="nonlinear_scalar": return nonlinear_scalar_jacobian(u,t,self.k_coef) elif self.name=="linear_system2": return linear_system2_jacobian(u,t) elif self.name=="linear_system3": return linear_system3_jacobian(u,t) elif self.name=="pendulum": return pendulum_jacobian(u,t) elif self.name=="SIR": return SIR_jacobian(u,t) elif self.name=="Robertson": return Robertson_jacobian(u,t) elif self.name=="lotka": return lotka_jacobian(u,t) else: raise ValueError("Jacobian not defined for this problem") def exact(self,u,t): if self.name=="linear_scalar": return linear_scalar_exact_solution(u,t,self.k_coef) elif self.name=="nonlinear_scalar": return nonlinear_scalar_exact_solution(u,t,self.k_coef) elif self.name=="linear_system2": return linear_system2_exact_solution(u,t) elif self.name=="linear_system3": return linear_system3_exact_solution(u,t) elif self.name=="nonLinearOscillator": return nonLinearOscillator_exact_solution(u,t) elif self.name=="nonLinearOscillatorDamped": return nonLinearOscillatorDamped_exact_solution(u,t) else: raise ValueError("Exact solution not defined for this problem") def exact_solution_times(self,u0,tt): exact_solution=np.zeros((len(u0),len(tt))) for it, t in enumerate(tt): exact_solution[:,it]=self.exact(u0,t) return exact_solution def prod_dest(self,u,t=0): if self.name=="linear_system2": return linear_system2_production_destruction(u,t) if self.name=="nonlinear_system3": return nonlinear_system3_production_destruction(u,t) elif self.name=="Robertson": return Robertson_production_destruction(u,t) elif self.name=="SIR": return SIR_production_destruction(u,t) else: raise ValueError("Prod Dest not defined for this problem") # - # ## Modified Patankar scheme for production-destruction systems <a id='mP'></a> # Consider production-destruction systems (PDS) # # \begin{equation} # \begin{cases} # d_t c_i = P_i(c ) - D_i(c ) , \quad i=1,\dots,I,\quad & P_i(c) = \sum_{j=1}^I p_{i,j}(c),\\ # c(t=0)=c_0,& D_i(c) = \sum_{j=1}^I d_{i,j}(c), # \end{cases} # \end{equation} # # where # # $$p_{i,j}(c) , d_{i,j} (c) \geq 0, \qquad \forall i,j \in I, \quad \forall c \in \mathbb R^{+,I}.$$ # # Applications: Chemical reactions, biological systems, population evolutions and PDEs. # # Problems: linear_system2, nonlinear_system3, Robertson, SIR. # # Example: SIR # # \begin{equation*} # \begin{cases} # d_tS = - \beta \frac{SI}{N}\\ # d_t I = \beta \frac{SI}{N} -\gamma I\\ # d_t R = \gamma I # \end{cases} # \end{equation*} # # Property 1: Conservation # \begin{align*} # &\sum_{i=1}^I c_i(0) = \sum_{i=1}^I c_i(t), \quad \forall t\geq 0 \\ # \Longleftrightarrow \quad &p_{i,j}(c)=d_{j,i} (c) , \qquad \forall i,j \in I, \quad \forall c \in \mathbb R^{+,I}. \end{align*} # # # Property 2: Positivity # # \begin{align*} # &\text{If }P_i,D_i\text{ Lipschitz, and if when } c_i\to 0 \Rightarrow D_i(c)\to 0 \Longrightarrow \\ # &c_i(0) > 0 \, \forall i \in I \Longrightarrow c_i(t)>0 \,\forall i \in I\; \forall t >0. # \end{align*} # # Goal: # * One step method # * Unconditionally positive # * Unconditionally conservative # * High order accurate # # **Explicit Euler** # * $c^{n+1}= c^n +\Delta t \left( P(c^n) - D(c^n) \right)$ # * **Conservative** # * First order # * Not unconditionally positive, if $\Delta t$ is too big... # # Consider a conservative and positive PDS where we assume that # the right hand side is not identical zero. Then, there exists # a $c^n\geq0$ such that ${P}(c^n)-{D}(c^n)\neq0$. Since the PDS is conservative, # we can at least # find one constituent $i\in \lbrace 1,\dots, I \rbrace$, where $D_i(c^n)>P_i(c^n)\geq0$. Choosing # \begin{equation} # \Delta t >\frac{c_i^n}{D_i(c^n)-P_i(c^n)} > 0, # \end{equation} # we obtain # \begin{equation} # c_i^{n+1}=c_i^{n} +\Delta t\left(P_i(c^n)-D_i(c^n)\right)<c_i^{n} +\frac{c_i^n}{D_i(c^n)-P_i(c^n)} \left(P_i(c^n)-D_i(c^n)\right) # =c_i^{n}-c_i^{n}=0. # \end{equation} # This demonstrates the violation of the positivity for the explicit Euler method for unbounded timesteps $\Delta t$. # **Patankar's scheme** [Patankar's book 1980](https://books.google.it/books/about/Numerical_Heat_Transfer_and_Fluid_Flow.html?id=N2MVAQAAIAAJ&redir_esc=y) # * Unconditionally positive # * Weighting the destruction term in the original explicit Euler method # # $$ # c_i^{n+1}=c_i^n+\Delta t \left( \sum_{j=1}^I p_{i,j}(c^n) - # \sum_{j=1}^I d_{i,j}(c^n) \frac{c^{n+1}_i}{c_i^n} \right), \quad i=1,\dots, I,\\ # \Longleftrightarrow \left(1 +\Delta t \sum_{j=1}^I \frac{d_{i,j}(c^n)}{c_i^n} \right) c_i^{n+1}=c_i^n+\Delta t \left( \sum_{j=1}^I p_{i,j}(c^n) \right), \quad i=1,\dots, I, # $$ # # * conservation relation is violated. # + ## Modified Patankar 1st order scheme def patankar(prod_dest, tspan, u0): ''' Input: prod_dest is the function that returns the matrices p_{i,j}(c) and d_{i,j}(c) tspan is the time vector u0 is the initial condition ''' dim=len(u0) # Dimension of the problem Nt=len(tspan) # Length of time span U=np.zeros((dim,Nt)) # Solution vector p=np.zeros((dim,dim)) # Temporary production matrix d=np.zeros((dim,dim)) # Temporary destruction matrix U[:,0]=u0 for it in range(1,Nt): # Loop over timesteps dt=tspan[it]-tspan[it-1] p,d =prod_dest(U[:,it-1]) # Computing the production and destruction at the previous timestep for i in range(dim): # Adding all the terms lhs = 1. # Initializing the lhs coefficients rhs = U[i,it-1] # Initializing the rhs for j in range(dim): lhs = lhs + dt*d[i,j]/U[i,it-1] rhs = rhs + dt*p[i,j] U[i,it] = rhs/lhs # Solve the final system return tspan, U # - pr=ODEproblem("SIR") tt=np.linspace(0,pr.T_fin, 100) tt,UU=patankar(pr.prod_dest,tt,pr.u0) plt.plot(tt,UU[0,:], label="S") plt.plot(tt,UU[1,:], label="I") plt.plot(tt,UU[2,:], label="R") plt.plot(tt,np.sum(UU,0), label="Total") plt.legend() # **Modified Patankar Scheme** [<NAME>, <NAME>, <NAME>ister 2003](https://www.researchgate.net/profile/Andreas-Meister-2/publication/225796292_Application_of_modified_Patankar_schemes_to_stiff_biogeochemical_models_for_the_water_column/links/5538e85a0cf247b8587d7efb/Application-of-modified-Patankar-schemes-to-stiff-biogeochemical-models-for-the-water-column.pdf) # * Modification of Patankar scheme # * Unconditionally positive # * Conservative # * Linearly implicit (with a mass inversion) # # \begin{equation} # c_i^{n+1}:=c_i^n+\Delta t \left( \sum_{j=1}^I p_{i,j}(c^n) \frac{c^{n+1}_j}{c_j^n} - \sum_{j=1}^I d_{i,j}(c^n) \frac{c^{n+1}_i}{c_i^n} \right), \quad i=1,\dots, I. # \end{equation} # # The scheme is implicit and can be solved inverting # the mass matrix $M$ in the system $Mc^{n+1}=c^n$ where $M$ is # # \begin{equation} # M_{i,j}(c^n) = # \begin{cases} # 1+\Delta t \sum_{l=1}^I \frac{d_{i,l}(c^n)}{c_i^n} , \quad & \text{if } i=j,\\ # - \Delta t \frac{p_{i,j}(c^n)}{c_j^n} , \quad & \text{if } i\neq j. # \end{cases} # \end{equation} # # The mass matrix # * has positive values on the diagonal # * has negative values on the off-diagonal # * is strictly diagonally dominant by columns # * the inverse is positive # # #### Jacobi's iterative method # Let $M$ be a strictly diagonally dominant (by columns) matrix, with $M=D-L$ with $D>0$ diagonal matrix and $L>0$ matrix with 0 entries on the diagonal. Then, $M$ is invertible and $M^{-1}$ is positive. # ##### Proof # Suppose we want to solve # # $$ # Mx=b # $$ # # for $b\geq 0$ a nonnegative vector. Proving that $M^{-1}$ is positive is equivalent to prove that $x$ is nonnegative for any $b\geq 0$. # # To get to Jacobi's iteration let us rewrite the system as # # $$ # Dx=Lx+b \Longleftrightarrow x=D^{-1}(Lx+b) # $$ # # If $x$ is a solution of the orginal system, it is a solution of the previous one. # # How to get to the solution $x$? Iterations # # $$ # x^{(k)}=D^{-1}(Lx^{(k-1)}+b). # $$ # # * Do the iterations converge? # # $$ # e^{(k)}:=x^{(k)}-x \\ # e^{(k)}=x^{(k)}-x= D^{-1}(Lx^{(k-1)}+b)-D^{-1}(Lx+b) = D^{-1}L e^{(k-1)}. # $$ # # Now, the $\infty$-norm of $D^{-1}L$ is smaller than 1, because # # $$ # ||D^{-1}L||_\infty = \max_i \sum_{j} \frac{|L_{ji}|}{|D_{ii}|}=\max_i \frac{\sum_{j} L_{ji}}{D_{ii}} < 1. # $$ # # Hence, # # $$ # ||e^{(k)}||_\infty \leq ||D^{-1}L||_\infty||e^{(k-1)}||_\infty < ||e^{(k-1)}||_\infty. # $$ # # * Is the solution $x$ positive? # Suppose we start from a positive guess $x^{(0)}$, then, by induction, # # $$ # x^{(k)}=\underbrace{D^{-1}L}_{\geq 0} \underbrace{x^{(k-1)}}_{\geq 0}+\underbrace{D^{-1}b}_{\geq 0}. # $$ # # So, # # $$x=\lim_{k\to \infty} x^{(k)} \geq 0.$$ # + ## Modified Patankar 1st order scheme def mPEuler(prod_dest, tspan, u0): ''' Input: prod_dest is the function that returns the matrices p_{i,j}(c) and d_{i,j}(c) tspan is the time vector u0 is the initial condition ''' dim=len(u0) # Dimension of the problem Nt=len(tspan) # Length of time span U=np.zeros((dim,Nt)) # Solution vector p=np.zeros((dim,dim)) # Temporary production matrix d=np.zeros((dim,dim)) # Temporary destruction matrix U[:,0]=u0 for it in range(1,Nt): # Loop over timesteps dt=tspan[it]-tspan[it-1] p,d =prod_dest(U[:,it-1]) # Computing the production and destruction at the previous timestep MM = np.eye(dim) # Initializing the mass matrix for i in range(dim): # Adding all the terms for j in range(dim): MM[i,j] = MM[i,j] - dt*p[i,j]/U[j,it-1] MM[i,i] = MM[i,i] + dt*d[i,j]/U[i,it-1] U[:,it] = np.linalg.solve(MM,U[:,it-1]) # Solve the final system return tspan, U # - pr=ODEproblem("SIR") tt=np.linspace(0,pr.T_fin, 100) tt,UU=mPEuler(pr.prod_dest,tt,pr.u0) plt.plot(tt,UU[0,:], label="S") plt.plot(tt,UU[1,:], label="I") plt.plot(tt,UU[2,:], label="R") plt.plot(tt,np.sum(UU,0), label="Total") plt.legend() # **High Order modified Patankar Runge--Kutta Methods** # * One mPRK22 [<NAME>, <NAME>, <NAME> 2003](https://www.researchgate.net/profile/Andreas-Meister-2/publication/225796292_Application_of_modified_Patankar_schemes_to_stiff_biogeochemical_models_for_the_water_column/links/5538e85a0cf247b8587d7efb/Application-of-modified-Patankar-schemes-to-stiff-biogeochemical-models-for-the-water-column.pdf) # * [<NAME> and <NAME> 2018](https://arxiv.org/abs/1702.04589) [2019](https://arxiv.org/abs/1703.05052) Categorization of families of mPRK22$(\alpha)$ and mPRK43$(\alpha,\beta)$ # * [<NAME>, C-W. Shu 2018](https://www.brown.edu/research/projects/scientific-computing/sites/brown.edu.research.projects.scientific-computing/files/uploads/Positivity-preserving%20time%20discretizations.pdf) mPRK22$(\alpha, \beta)$ # * [<NAME>, <NAME>, C-W. Shu 2019](https://doi.org/10.1007/s10915-018-0881-9) Third order RK43 # * [<NAME>, <NAME> 2020](https://arxiv.org/abs/1905.09237) Arbitrarily high order mPDeC # # **Remark**: It has been proven that there are not modified Patankar RK schemes of order 3 with 3 stages. # ### Modified Patankar Deferred Correction schemes # # Blackboard, slides and [article](https://arxiv.org/abs/1905.09237)! # Final formulation: # at each correction $(k)$ at each subtimestep $m$ # # \begin{equation} # \begin{split} # &\mathcal L^{1,m}_i (\mathbf{c}^{(k)})-\mathcal L^{1,m}_i (\mathbf{c}^{(k-1)})+\mathcal L^{2,m}_i (\mathbf{c}^{(k)},\mathbf{c}^{(k-1)})=0\\ # &c_i^{m,(k)}-c^0_i -\Delta t \sum_{r=0}^M \theta_r^m \sum_{j=1}^I # \left( p_{i,j}(c^{r,(k-1)}) # \frac{c^{m,(k)}_{\gamma(j,i, \theta_r^m)}}{c_{\gamma(j,i, \theta_r^m)}^{m,(k-1)}} # - d_{i,j}(c^{r,(k-1)}) \frac{c^{m,(k)}_{\gamma(i,j, \theta_r^m)}}{c_{\gamma(i,j, \theta_r^m)}^{m,(k-1)}} \right)=0. # \end{split} # \end{equation} # # with # $$ # \gamma(i,j,\theta^m_r):=\begin{cases} # i& \text{if } \theta_r^m\geq 0\\ # j& \text{if } \theta_r^m<0 # \end{cases} # $$ # **CODE!** # + ## Reminder: DeC code from scipy.interpolate import lagrange from numpy.polynomial.legendre import leggauss def equispaced(order): ''' Takes input d and returns the vector of d equispaced points in [-1,1] And the integral of the basis functions interpolated in those points ''' nodes= np.linspace(-1,1,order) w= np.zeros(order) for k in range(order): yy= np.zeros(order) yy[k]=1. zz=lagrange(nodes,yy) pp=zz.integ() w[k]=pp(1)-pp(-1) return nodes, w def lglnodes(n,eps=10**-15): ''' Python translation of lglnodes.m Computes the Legendre-Gauss-Lobatto nodes, weights and the LGL Vandermonde matrix. The LGL nodes are the zeros of (1-x^2)*P'_N(x). Useful for numerical integration and spectral methods. Parameters ---------- n : integer, requesting an nth-order Gauss-quadrature rule on [-1, 1] Returns ------- (nodes, weights) : tuple, representing the quadrature nodes and weights. Note: (n+1) nodes and weights are returned. Example ------- >>> from lglnodes import * >>> (nodes, weights) = lglnodes(3) >>> print(str(nodes) + " " + str(weights)) [-1. -0.4472136 0.4472136 1. ] [0.16666667 0.83333333 0.83333333 0.16666667] Notes ----- Reference on LGL nodes and weights: <NAME>, <NAME>, <NAME>, <NAME>, "Spectral Methods in Fluid Dynamics," Section 2.3. Springer-Verlag 1987 Written by <NAME> - 04/17/2004 Contact: <EMAIL> Translated and modified into Python by <NAME> - 9/15/2018 ''' w = np.zeros((n+1,)) x = np.zeros((n+1,)) xold = np.zeros((n+1,)) # The Legendre Vandermonde Matrix P = np.zeros((n+1,n+1)) epss = eps # Use the Chebyshev-Gauss-Lobatto nodes as the first guess for i in range(n+1): x[i] = -np.cos(np.pi*i / n) # Compute P using the recursion relation # Compute its first and second derivatives and # update x using the Newton-Raphson method. xold = 2.0 for i in range(100): xold = x P[:,0] = 1.0 P[:,1] = x for k in range(2,n+1): P[:,k] = ( (2*k-1)*x*P[:,k-1] - (k-1)*P[:,k-2] ) / k x = xold - ( x*P[:,n] - P[:,n-1] )/( (n+1)*P[:,n]) if (max(abs(x - xold).flatten()) < epss ): break w = 2.0 / ( (n*(n+1))*(P[:,n]**2)) return x, w def lagrange_basis(nodes,x,k): y=np.zeros(x.size) for ix, xi in enumerate(x): tmp=[(xi-nodes[j])/(nodes[k]-nodes[j]) for j in range(len(nodes)) if j!=k] y[ix]=np.prod(tmp) return y def get_nodes(order,nodes_type): if nodes_type=="equispaced": nodes,w = equispaced(order) elif nodes_type == "gaussLegendre": nodes,w = leggauss(order) elif nodes_type == "gaussLobatto": nodes, w = lglnodes(order-1,10**-15) nodes=nodes*0.5+0.5 w = w*0.5 return nodes, w def compute_theta_DeC(order, nodes_type): nodes, w = get_nodes(order,nodes_type) int_nodes, int_w = get_nodes(order,"gaussLobatto") # generate theta coefficients theta = np.zeros((order,order)) beta = np.zeros(order) for m in range(order): beta[m] = nodes[m] nodes_m = int_nodes*(nodes[m]) w_m = int_w*(nodes[m]) for r in range(order): theta[r,m] = sum(lagrange_basis(nodes,nodes_m,r)*w_m) return theta, beta def dec(func, tspan, y_0, M_sub, K_corr, distribution): N_time=len(tspan) dim=len(y_0) U=np.zeros((dim, N_time)) u_p=np.zeros((dim, M_sub+1)) u_a=np.zeros((dim, M_sub+1)) rhs= np.zeros((dim,M_sub+1)) Theta, beta = compute_theta_DeC(M_sub+1,distribution) U[:,0]=y_0 for it in range(1, N_time): delta_t=(tspan[it]-tspan[it-1]) for m in range(M_sub+1): u_a[:,m]=U[:,it-1] u_p[:,m]=U[:,it-1] for k in range(1,K_corr+1): u_p=np.copy(u_a) for r in range(M_sub+1): rhs[:,r]=func(u_p[:,r]) for m in range(1,M_sub+1): u_a[:,m]= U[:,it-1]+delta_t*sum([Theta[r,m]*rhs[:,r] for r in range(M_sub+1)]) U[:,it]=u_a[:,M_sub] return tspan, U # + # Modified Patankar code! # One function decMPatankar for the main algorithm # One function patankar_type_dec to form the Mass matrix and solve the system def decMPatankar(prod_dest, tspan, y_0, M_sub, K_corr, distribution): ''' Input: prod_dest is the function that returns the matrices p_{i,j}(c) and d_{i,j}(c) tspan is the time vector y_0 is the initial condition M_sub is the number of subtimesteps K_corr is the number of iterations distribution is the subtimestep distribution equispaced, gaussLobatto ''' N_time=len(tspan) # number of timestep dim=len(y_0) # dimension of the problem U=np.zeros((dim, N_time)) # solution vector u_p=np.zeros((dim, M_sub+1)) # solution at the correction $(k-1)$ u_a=np.zeros((dim, M_sub+1)) # solution at the correction $(k)$ prod_p = np.zeros((dim,dim,M_sub+1)) # production matrix at corr $(k-1)$ dest_p = np.zeros((dim,dim,M_sub+1)) # destruction matrix at corr $(k-1)$ Theta, beta = compute_theta_DeC(M_sub+1,distribution) #Theta and beta coefficients of the DeC algorithm U[:,0]=y_0 # Initial solution for it in range(1, N_time): # timestep loop delta_t=(tspan[it]-tspan[it-1]) for m in range(M_sub+1): # Initialization of ua, up u_a[:,m]=U[:,it-1] u_p[:,m]=U[:,it-1] for k in range(1,K_corr+1): # Loop on the corrections u_p=np.copy(u_a) for r in range(M_sub+1): # Computation of production and destruction prod_p[:,:,r], dest_p[:,:,r]=prod_dest(u_p[:,r]) for m in range(1,M_sub+1): # Loop on the subtimesteps #Solution of the system u_a[:,m]= patankar_type_dec(prod_p,dest_p,delta_t,m,Theta,u_p) U[:,it]=u_a[:,M_sub] return tspan, U def patankar_type_dec(prod_p,dest_p,delta_t,m,Theta,u_p): ''' Solution of the Modified Patankar DeC system First computing the mass matrix and then solving the system Input: prod_p, dest_p production and destruction matrices of up at all subtimesteps (tensors dim x dim x M_sub) delta_t time step m is the current subtimestep Theta: are the theta coefficients of DeC algorithm u_p is the solution at the previous correction ''' dim=prod_p.shape[0] M_sub=prod_p.shape[2]-1 mass= np.eye(dim) # Initialization of the mass matrix as the identity RHS= u_p[:,0] # RHS is simply the solution at time t^n for i in range(dim): # Loop on i of mass matrix for r in range(M_sub+1): # Sum on subtimesteps r if Theta[r,m]>0: # Check on the sign of theta for j in range(dim): # Distribution on diagonal and off-diagonal of prod and dest terms mass[i,j]=mass[i,j]-delta_t*Theta[r,m]*(prod_p[i,j,r]/u_p[j,m]) mass[i,i]=mass[i,i]+ delta_t*Theta[r,m]*(dest_p[i,j,r]/u_p[i,m]) elif Theta[r,m]<0: for j in range(dim): mass[i,i]=mass[i,i]- delta_t*Theta[r,m]*(prod_p[i,j,r]/u_p[i,m]) mass[i,j]=mass[i,j]+ delta_t*Theta[r,m]*(dest_p[i,j,r]/u_p[j,m]) return np.linalg.solve(mass,RHS) # Solution of the system # + pr=ODEproblem("SIR") tt=np.linspace(0,pr.T_fin, 100) order = 5 tt,UU=decMPatankar(pr.prod_dest,tt,pr.u0, order-1 , order , "gaussLobatto") plt.plot(tt,UU[0,:], label="S") plt.plot(tt,UU[1,:], label="I") plt.plot(tt,UU[2,:], label="R") plt.plot(tt,np.sum(UU,0), label="Total") plt.legend() print("The minimum value reached is %e"%(np.amin(UU))) print("The conservation error is %e"%(np.max(np.sum(UU,0)-np.sum(UU[:,0])) )) # + pr=ODEproblem("nonlinear_system3") tt=np.linspace(0,pr.T_fin, 100) order = 5 tt,UU=decMPatankar(pr.prod_dest,tt,pr.u0, order-1 , order , "equispaced") plt.plot(tt,UU[0,:], label="c0") plt.plot(tt,UU[1,:], label="c1") plt.plot(tt,UU[2,:], label="c2") plt.plot(tt,np.sum(UU,0), label="Total") plt.legend() print("The minimum value reached is %e"%(np.amin(UU))) print("The conservation error is %e"%(np.max(np.sum(UU,0)-np.sum(UU[:,0])) )) # + #Test convergence accuracy def compute_integral_error(c,c_exact): # c is dim x times times=np.shape(c)[1] error=0. for t in range(times): error = error + np.linalg.norm(c[:,t]-c_exact[:,t],2)**2. error = np.sqrt(error/times) return error pr = ODEproblem("linear_system2") NN=4 dts=[pr.T_fin/2.0**k for k in range(4,NN+4)] errorsmPDeC=np.zeros(len(dts)) for order in range(2,10): for k in range(NN): dt0=dts[k] tt=np.arange(0,pr.T_fin,dt0) u_exact=pr.exact_solution_times(pr.u0,tt) t2,U2=decMPatankar(pr.prod_dest, tt, pr.u0, order-1, order, "gaussLobatto") errorsmPDeC[k]=compute_integral_error(U2,u_exact) plt.loglog(dts,errorsmPDeC,"--",label="mPDeC%d"%(order)) plt.loglog(dts,[dt**order*errorsmPDeC[2]/dts[2]**order for dt in dts],":",label="ref %d"%(order)) plt.title("mPDeC") plt.legend() #plt.savefig("convergence_RDeC.pdf") plt.show() # + # Stiff problem: Robertson pr = ODEproblem("Robertson") Nt=40 order = 5 #2 tt = np.array([np.exp(k) for k in np.linspace(-14,np.log(pr.T_fin),Nt)]) tt,yy=decMPatankar(pr.prod_dest, tt, pr.u0, order-1,order,"gaussLobatto") plt.semilogx(tt,yy[0,:]) plt.semilogx(tt,yy[1,:]*10**4) plt.semilogx(tt,yy[2,:]) plt.semilogx(tt,np.sum(yy,0),label="Total") plt.legend() print("The minimum value reached is %e"%(np.amin(yy))) print("The conservation error is %e"%(np.max(np.sum(yy,0)-np.sum(yy[:,0])) )) # - # ## Strong Stability Preserving Runge Kutta schemes <a id='SSPRK'></a> # # References: # 1. <NAME>, <NAME>. Total Variation Diminishing Runge--Kutta 1996 [ResearchGate link](https://www.researchgate.net/publication/24288196_Total_Variation_Diminishing_Runge-Kutta_Schemes) # <NAME>, <NAME>, <NAME>. 2000 [Brown page](https://www.brown.edu/research/projects/scientific-computing/sites/brown.edu.research.projects.scientific-computing/files/uploads/Strong%20Stability-Preserving%20High-Order%20Time%20Discretization%20Methods.pdf) # # We have seen that under certain conditions the **explicit Euler method** can preserve **stability** properties of some PDEs. # For example, in hyperbolic scalar problems # # $$u_t+f(u)_x=0,$$ # # where $f\in \mathcal C^1$, we can prove that # # $$ TV(u(t))\leq TV(u(t^0)),\qquad t>t^0, \qquad \text{with } TV(u) = \sum_j |u_{j+1}-u_j|. $$ # # This property can be preserved in the **explicit Euler** time discretization with the incremental form spatial discretization # # $$ # U^{n+1}_j=U^{n}_j +\Delta t \left [ C_{j+1/2}(U_{j+1}^{n}-U_{j}^{n})-D_{j-1/2}(U_{j}^{n}-U_{j-1}^{n}) \right] # $$ # # if $C_{j+1/2},D_{j+1/2}\geq 0$ for all $j$ and # # $$ # \Delta t |C_{j+1/2}+D_{j+1/2}|\leq 1,\qquad \text{for all }j. # $$ # # We can say that under this restriction the explicit Euler method is **total variation diminishing** (TVD). # # ![Example TVD vs nonTVD](images/chapter6/exampleSSPvsNo.png) # # These restrictions are the so-called CFL conditions, which are a restriction on the time step $\Delta t\leq \Delta t_{eE}$, which are the equivalent of the restrictions we observe on the time step for ODEs, where # # $$ # \Delta t \leq \frac{C}{L}, # $$ # # with $C$ a constant and $L$ the Lipschitz continuity constant of $F$. # ### Strong Stability Preserving (SSP) Runge Kutta methods # #### Explicit # Suppose that explicit Euler method is SSP (for example TVD, but not only) under the restriction # # $$ # \Delta t \leq \Delta t_{eE}, # $$ # # how can we be sure that a RK method is as well SSP. # # A general RK method is written as # # $$ # \begin{cases} # y^{(1)}=y^n # y^{(k)}=y^n + \Delta t \sum_{j=1}^{k-1} a_{kj} F(y^{(j)}), \quad k=1,\dots, S,\\ # y^{n+1} = y^n+ \Delta t \sum_{j=1}^S b_{j} F(y^{(j)}) # \end{cases} # $$ # # We can rewrite them in the SSP form # # $$ # \begin{cases} # y^{(0)} = y^n # y^{(k)}=\sum_{j=0}^{k-1} \alpha_{kj} y^{(j)} +\Delta t \beta_{kj} F(y^{(j)}), \quad k=1,\dots, S,\\ # y^{n+1} = y^{(S)}, # \end{cases} # $$ # # where $\sum_{j=1}^S\alpha_{kj}=1$ for all $k$ for consistency. Suppose, moreover, that we can find nonnegative $\alpha_{kj}, \beta_{kj}\geq 0$. Then, we can find similar restrictions on this form to preserve the strong stability belonging to the explicit Euler method. # # Indeed, each step can be rewritten as a **convex combination** of explicit Euler steps weighted by some coefficients # # $$ # y^{(k)}=\sum_{j=0}^{k-1} \alpha_{kj} \left( y^{(j)} +\Delta t \frac{\beta_{kj}}{\alpha_{kj} } F(y^{(j)})\right), \quad k=1,\dots, S, # $$ # # hence, if # # $$ # \Delta t \frac{\beta_{kj}}{\alpha_{kj} } \leq \Delta t_{eE} \quad \forall j<k\leq S\\ # \text{or } \Delta t \leq c \Delta t_{eE}, \qquad c:=\min_{j<k\leq S} \frac{\alpha_{kj} }{\beta_{kj}}, # $$ # # then the scheme is **SSP**. # # **Remark 1**: It is not always possible to find nonnegative $\beta_{kj}$, in those cases, one can proceed similarly, adding a stability hypothesis on the explicit Euler method for the backward in time problem ($\tilde F$). (Cost of computing the operators $\tilde F$ and $F$ rises as well as storing memory). # If you can, avoid the negative coefficient. # # **Remark 2**: Multistep method can be rendered into SSP version with analogous arguments. # # **Remark 3**: the goal is to optimize $c$ by finding the coefficients $\alpha,\beta$ keeping the order of accuracy required. # This can be done starting from a known RK method where the coefficients are half of the SSPRK method, hence there's a lot to optimize. # # **Example** # Consider the RK22 method # # $$ # u^{(1)}=u^n\\ # u^{(2)}=u^{(1)} +\Delta t F(u^{(1)})\\ # u^{n+1}=u^n+\frac{1}{2} \left(F(u^{(1)}) + F(u^{(2)}) \right) # $$ # # and try to write it into a SSPRK method # # $$ # u^{(0)}=u^n\\ # u^{(1)} = u^{(0)} +\Delta t F(u^{(0)})\\ # u^{(2)} = \alpha_{2,0} u^{(0)} +\alpha_{2,1} u^{(1)} + \Delta t \left( \beta_{2,0} F(u^{(0)}) + \beta_{2,1} F(u^{(1)})\right) \\ # u^{n+1}=u^{(2)} # $$ # # which, expanding becomes # # $$ # u^{n+1}= u^{(0)} + \Delta t \left( \left( \beta_{2,0}+\alpha_{2,1}\right) F(u^{(0)}) + \beta_{2,1} F(u^{(1)})\right), # $$ # # which, if we want to make the two method coincide, leads to the system # # $$ # \begin{cases} # \alpha_{2,0}+\alpha_{2,1}=1\\ # \beta_{2,0}+\alpha_{2,1} =\frac{1}{2}\\ # \beta_{2,1} = \frac{1}{2}. # \end{cases} # $$ # # #### Theorem 1 # An $S$-stages, $S$-order SSPRK method has at most CFL $c\leq 1$. # # #### Theorem 2 # For **linear problems** ($F(y)=My$), there exist $S$-stages, $S$-order SSPRK methods with CFL $c=1$. (For nonlinear problems they are not order $S$). # # #### Theorem 3 # Optimal 2nd order 2 stages SSPRK with CFL $c=1$ is # # $$ # u^{(1)} = u^n +\Delta t F(u^n)\\ # u^{n+1} = \frac{1}{2} u^n + \frac{1}{2} u^{(1)} +\frac{1}{2} \Delta t F(u^{(1)}). # $$ # # Optimal 3nd order 3 stages SSPRK with CFL $c=1$ is # # $$ # u^{(1)} = u^n +\Delta t F(u^n)\\ # u^{(2)} = \frac{3}{4} u^n + \frac{1}{4} u^{(1)} +\frac{1}{4} \Delta t F(u^{(1)})\\ # u^{n+1} = \frac{1}{3} u^n + \frac{2}{3} u^{(2)} +\frac{2}{3} \Delta t F(u^{(2)}). # $$ # # There is no 4th order 4 stages with positive coefficients SSPRK. # + A=np.array([[0,0],[1,0]]) b=np.array([0.5,0.5]) rk2 = rk.ExplicitRungeKuttaMethod(A,b) A=np.array([[0]]) b=np.array([1]) rk1 = rk.ExplicitRungeKuttaMethod(A,b) print(rk2.optimal_shu_osher_form()) A=np.array([[0,0],[0.5,0]]) b=np.array([0,1]) rk2 = rk.ExplicitRungeKuttaMethod(A,b) print(rk2.optimal_shu_osher_form()) # - # #### Implicit RK # It is not so trivial to prove properties for implicit schemes, as a nonlinear solver is involved. When restricting to specific cases, it is possible to recast some results. # # ##### Theorem 1 (Harten) # Implicit Euler for incremental finite difference form is **unconditionally** TVD (SSP) (for any $\Delta t$). # # *Proof in Chapter 2* # # ##### Theorem 2 # It does not exists an implicit RK scheme of the form # # $$ # u^{(0)}=u^n\\ # u^{(k)}=\sum_{j=0}^{k-1}\alpha_{kj}u^{(j)}+\Delta t \beta_k F(u^{(k)}),\qquad \alpha_{kj}\geq 0, \qquad k=1,\dots, S,\\ # u^{n+1}=u^{(S)}, # $$ # # with order $\geq 2$ that is also unconditionally stable. # ### SSP multistep methods # For multistep methods we have analogous results, there are a series of optimal values for the coefficients of explicit SSP multistep methods, while there exists no high order unconditionally SSP implicit multistep method. # A collection of optimal values of SSPRK can be found at [Gottlieb's page at Brown University](http://www.cfm.brown.edu/people/sg/ssp.html) # # More optimal values in [<NAME> 2006](https://epubs.siam.org/doi/abs/10.1137/S0036142901389025?journalCode=sjnaam) #Few examples of order 3 ssprk33=rk.loadRKM("SSP33") ssprk33.print_shu_osher() print(ssprk33) ssprk33.plot_stability_region() plt.show() for method in ["SSP33","SSP43","SSP53","SSP63"]: ssprk=rk.loadRKM(method) ssprk.print_shu_osher() print(ssprk) ssprk.plot_stability_region() plt.show() #Few examples ssprk104=rk.loadRKM("SSP104") ssprk104.print_shu_osher() print(ssprk104) ssprk104.plot_stability_region() plt.show() # #### Exercise # Test few methods with your favorite problems.
solutions/Chapter 6 Positivity preserving schemes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mlfinlab_FYgnSzyi # language: python # name: mlfinlab_fygnszyi # --- # # Financial Data Structures: Imbalance Bars # # 上一节的Volume Bars和Dollar Bars统计特性还是不错,看看这节的Imbalance Bars情况如何 # # 这个bar和传统的bar相差很远,它的核心在于某种能量往某一个方向累积偏离到一定的程度,就行成了一个bar,然后再重新累积这个能量。这个能量和上节一样有volume, dollar, ticks。 # # 能量$b_t$的公式如下,t为时间,$p_t$为t时刻的价格,$\Delta p_t$为t时间相对于t-1时刻的价格变化 # # # $$b_t = \begin{cases} b_{t-1}, & \mbox{if } \Delta p_t\mbox{=0} \\ |\Delta p_t| / \Delta p_{t}, & \mbox{if } \Delta p_t \neq\mbox{0} \end{cases}$$ # # $$\theta_{T} = \sum_{t=1}^{T}b_t$$ # # $\theta$就是累积的能量,上面是Tick Imbalance Bars的累积能量公式,当$\theta$超过一定阈值时就产生一个新的bar,然而这个阈值是动态生成的,用了指数移动平均线,具体方法可以看书中的解释和mlfinlab中的代码实现。 # # VIB的能量为volume,$\theta_T$的公式如下: # # $$\theta_{T} = \sum_{t=1}^{T}b_t v_t$$ # # 其实就是在单个能量值$b_t$的基础上乘了volume。如果往上形成了新的Volume Imbalance Bar,说明量价齐升进行了突破,当然这是主观分析中的思路,貌似这么分析也合理。 # # Dollar Imbalance Bars 的计算方式也类似,乘的是交易额。 # # Run Bars 和 Imbalance Bars一样是另一种Information-Driven Bar,只是他的 $\theta$ 取的多空方向中能量值绝对值较大的那一个。本篇文章重点分析一下Imbalance Bars,Run Bars不做分析。 # # 合成Imbalance Bars需要用到逐笔的tick,但是国内期货的行情好像只有快照tick数据,那么用Imbalace Bars的方式产出Bar应该是不准确的。 # # 因为快照数据中间缺失了很多订单数据,中间发生了什么我们都不太清楚。比如一个tick的信息为,20手,买入,价格上涨0.1元。这20手的组成是难以估算的,我以前尝试分析过,成交单的类型及数量还能做估算,但是两个tick间的价格变化就全然不知。 # # 我这边就用快照tick数据测试一下,但是结果可能会不准确,纯属为了了解Imbalance Bars的特性。 # # --- # # Volume Bars and Dollar Bars statistics in the previous section are still good, see how is the Imbalance Bars in this section # # This bar is a far cry from the traditional bar, and its core is that some energy accumulates in a certain direction to a certain extent, becomes a bar, and then reaccumulates that energy. This energy is the same as the upper section is volume, dollar, ticks. # # The formula for energy $b_t$is as follows, t is time, $p_t$is the price of t-time, $\Delta p_t$is the price change of t-time relative to t-1 time # # $$b_t = \begin{cases} b_{t-1}, & \mbox{if } \Delta p_t\mbox{=0} \\ |\Delta p_t| / \Delta p_{t}, & \mbox{if } \Delta p_t \neq\mbox{0} \end{cases}$$ # # $$\theta_{T} = \sum_{t=1}^{T}b_t$$ # # The $\theta$is the accumulated energy, above is the cumulative energy formula of Tick Imbalance Bars, which produces a new bar when the $\theta$ exceeds a certain threshold, however this threshold is dynamically generated, using exponential moving averages, which can be implemented in the interpretation of the book and the code in mlfinlab. # # Dollar Imbalance Bars is calculated in a similar way, multiplied by the amount of tick. # # Run Bars, like Imbalance Bars, is another Kind of Information-Driven Bar, just the one whose energy value is the absolute value of the larger in the long short direction of his $\theta$. This article focuses on Imbalance Bars, run Bars does not do analysis. # # Synthetic Imbalance Bars requires a real tick, but the Chinese futures market data seems to have only snapshot tick, so it should be inaccurate to produce Bar in the way of Imbalace Bars. # # Because there is a lot of order data missing from the snapshot data, we don't know what's going on in the middle. For example, a tick information is, 20 lots, buy, the price increases 0.1 yuan. The composition of these 20 lots is difficult to estimate, I have tried to analyze before, the type and number of deals can be estimated, but the price change between the two ticks is completely unknown. # # I'll test it with snapshot tick data on my side, but the results may not be accurate, purely to understand the characteristics of The Imbalance Bars. # # + import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from statsmodels.graphics.tsaplots import plot_acf from mlfinlab.data_structures_snapshot_tick.imbalance_data_structures import get_ema_tick_imbalance_bars, \ get_ema_volume_imbalance_bars from mlfinlab.data_structures_snapshot_tick.run_data_structures import get_ema_tick_run_bars, \ get_ema_volume_run_bars # - # --- # ## 准备数据(Preparing data) # # 中国上海期货交易所的品种:螺纹钢(RB)。2019年整年的tick数据。 # # Instrument of Shanghai Futures Exchange: Rebar (RB). 2019 full-year tick data. # + TICK_FILE_PATH = '../data/rb_1year_tick.csv' def get_data_df(): df = pd.read_csv(TICK_FILE_PATH) df['date_time'] = pd.to_datetime(df['date_time']) df.set_index('date_time', inplace=True) return df def generate_time_bars(): df = get_data_df() return df, df['price'].resample('5min').ohlc().dropna() def analysis_thresholds(imb_bars:pd.DataFrame, thresholds_df): thresholds_df['threshold'] = np.abs(thresholds_df['expected_imbalance']*thresholds_df['exp_num_ticks']) if 'timestamp' in thresholds_df.columns: thresholds_df.set_index('timestamp', inplace=True) # print(thresholds_df) thresholds_df_down_sample = thresholds_df.resample("5min").bfill() plt.figure(figsize=(10,12)) ax1 = plt.subplot(311) thresholds_df_down_sample[['cum_theta', 'threshold']].plot(ax=ax1) plt.axhline(0, c='green') ax2 = plt.subplot(312) time_bars['close'].plot(ax=ax2, label='time bar') time_bars['imb_close'] = np.NaN last_time = None for row in imb_bars[['date_time', 'close']].itertuples(): if last_time is None: time_bars['imb_close'].loc[time_bars.index<=row.date_time] = row.close else: time_bars['imb_close'].loc[(time_bars.index>last_time) & (time_bars.index<=row.date_time)] = row.close last_time = row.date_time time_bars['imb_close'].plot(ax=ax2, label='imbalance bar') plt.legend() ax3 = plt.subplot(313) thresholds_df_down_sample[['exp_num_ticks']].plot(ax=ax3) plt.show() imb_bars['log_ret'] = np.log(imb_bars['close']).diff().dropna() plt.figure(figsize=(14,10)) sns.kdeplot((imb_bars.log_ret - imb_bars.log_ret.mean()) / imb_bars.log_ret.std(), label="Imbalance bars") sns.kdeplot(np.random.normal(size=len(imb_bars)), label="Normal", color='black', linestyle="--") plt.show() plot_acf(imb_bars['log_ret'].dropna(), lags=10, zero=False) plt.title('AutoCorrelation') plt.show() # + tick_df, time_bars = generate_time_bars() time_bars['close'].plot() # + print(tick_df.shape) print(tick_df.head()) # nearly 10 million ticks # - # --- # ## Use mlfinlab: Create TIB Bars TIB, TIB_thresholds_df = get_ema_tick_imbalance_bars(tick_df.reset_index(), analyse_thresholds=True, exp_num_ticks_constraints=[800, 80000]) TIB.head() # 我在获取TIB时,碰到了一些问题,在我没有加约束条件(exp_num_ticks_constraints[min_exp_num_ticks, max_exp_num_ticks])时,出现了如下情况,程序一直在运行,产生了无数根bar。 # # 我们通过日志可以看到exp_num_ticks到后面一直是1,而expected_imbanlance 一直是个很小的数,他们相乘是个非常小的数远小于1,这样每个tick的b_value 1 都会大于这个阈值,会导致每个tick都会产生一根bar,我相信这是有问题的,而且运行很长时间也无法逃离这个困境。 # # 想想其实可以发现,Tick Imbalance Bar 的expected_imbalance的期望其实是0,所以在实际环境中应该是接近于0的,如果exp_num_ticks也在衰减的话,那么一旦进入到1 * 0.00? 的困境中就会一直套在里面出不来 # # 所以我加了最小800个tick的条件,最大80000个条件是因为有时exp_num_ticks会无限的增大,所以我觉得TIB这个算法是有些问题的,我并不觉得这种类型的bar能用在实际环境中。 # # 下面为进入到困境时的日志。 # # --- # # When I got TIB, I had some problems. when I didn't add constraints (exp_num_ticks_constraints (min_exp_num_ticks, max_exp_num_ticks), the program was running all the time, producing countless bars. # # We can see through the log that exp_num_ticks to the back has always been 1, and expected_imbanlance has always been a very small number, they multiply is a very small number far less than 1, so that each tick's b_value 1 will be greater than this threshold, resulting in each tick will produce a bar, I believe this is problematic, and run for a long time can not escape this dilemma. # # Think about it, and you can see that the expected_imbalance expectation of Tick Imbalance Bars is actually 0, so in the real world it should be close to 0, and if the exp_num_ticks is also decaying, then once you get to 1 x 0.00? in the trouble will always be stuck in the inside out can not come out # # So I added a minimum of 800 ticks, a maximum of 80,000 conditions because sometimes the exp_num_ticks will increase indefinitely, so I think the TIB algorithm is a bit of a problem, I don't think this type of bar can be used in the real environment. # # Below is the log that enters the dilemma. # # ------------------------------------------------------------------------------------------ # <pre> # new bar: cum_theta -393.0, exp_num_ticks 20000 * expected_imbalance 0.004606430289912242 # new bar: cum_theta -93.0, exp_num_ticks 20000.0 * expected_imbalance 0.004606430289912242 # new bar: cum_theta -126.0, exp_num_ticks 6950.0 * expected_imbalance -0.018082763088593235 # new bar: cum_theta -161.0, exp_num_ticks 4143.142857142857 * expected_imbalance -0.03877713597093915 # new bar: cum_theta -123.0, exp_num_ticks 2268.714285714286 * expected_imbalance -0.054159511732974945 # new bar: cum_theta 147.0, exp_num_ticks 2366.285714285714 * expected_imbalance -0.06189258080298448 # new bar: cum_theta 31.0, exp_num_ticks 1617.5714285714287 * expected_imbalance -0.018559776602432916 # new bar: cum_theta 8.0, exp_num_ticks 711.2857142857143 * expected_imbalance -0.010290755421168463 # new bar: cum_theta -2.0, exp_num_ticks 203.57142857142858 * expected_imbalance -0.008671808277382986 # new bar: cum_theta -1.0, exp_num_ticks 30.428571428571427 * expected_imbalance -0.00913084853734123 # new bar: cum_theta -1.0, exp_num_ticks 2.5714285714285716 * expected_imbalance -0.009391603348769791 # new bar: cum_theta -1.0, exp_num_ticks 1.1428571428571428 * expected_imbalance -0.00965230601445051 # new bar: cum_theta -1.0, exp_num_ticks 1.0 * expected_imbalance -0.009912956544811965 # new bar: cum_theta -1.0, exp_num_ticks 1.0 * expected_imbalance -0.010173554950279636 # new bar: cum_theta -1.0, exp_num_ticks 1.0 * expected_imbalance -0.010371500444740092 # </pre> # # ### analysis TIB threshold analysis_thresholds(TIB, TIB_thresholds_df) # analysis_thresholds 函数是我用来分析 Imbalance Bars 的。主要分析Threshold的变化,产生bar的情况。 # # 第一张折线图画出了Threshold和$\theta$值的变化。可以看出前期Threshold有一个快速变化的过程,稳定后变化非常缓慢,一年也没有几根Bar。而且在后期有一段时间在震荡,一直没产生新的Bar。说到这里是不是可以把t时刻距离上一根Bar的时长当作一个特征呢?结合阈值的大小,当作Regime Switch的一个判断因子?瞎想的。 # # 第二张折线图画出了价格趋势和Tick Imbalance Bars。和第一张结合起来一看,有点意思,很多时候行情上涨时TIB的$\theta$值下降速度很快,这里可以额外做一个相关性分析。假如从主观上分析是不是小单抛得多,大单扫得快? 如果小单代表散户,大单代表主力资金,那么就是统计出来的小单的方向就是主力未来的反方向? # # 这些猜想都需要进行分析和实证才能知道。 # # 第三张折线图画出了期望的Bar中的Tick数,很快就达到了我们设置的上限80000。现在来看TIB一个很大的问题就是调参,初始阈值,上限和下限,而且起始的t不一样,产出的Bar可能也不一样。 # # 分布图就不用看了,Bars的量太小没什么意义。 # # 我觉得TIB直接用可能有坑,需要再深入细化。 # # --- # # analysis_thresholds function is what I used to analyze Imbalance Bars. The main analysis of the changes in Threshold produces the bar situation. # # The first line chart sits out the changes in the values of Threshold and $\theta$. It can be seen that the early threshold has a rapid change process, and the change process is very slow, there are just few Bars in a year. And there was a concussion at a later stage and there was no new Bar. # # Can it be said that the t-moment distance from the duration of the last Bar as a feature? Combined with the size of the threshold, as a judge on Regime Switch? I'm just thinking about it. # # The second line chart illustrates the price trend and the Tick Imbalance Bars. # # Combined with the first one, it's a bit interesting, a lot of times when the market goes up, the value of TIB's $\theta$drops quickly, and here we can do an additional correlation analysis. # # If from a subjective analysis, is it not a small order to throw a lot, big order sweep faster? # # If small order on behalf of retail, large rorder represents the main fund, then is the direction of the small order is the main future of the opposite direction? # # These guesses need to be analyzed and empirical. # # The third line drawing the desired number of Ticks in the Bar quickly reached the upper limit of 80,000 set by us. One of the big problems with TIB now is the parameter tunning, initial threshold, upper limit and lower limit, and the starting t is different, and the output of The Bar may not be the same. # # The distribution map doesn't have to be seen, and bars' size is too small to make sense. # # I think TIB direct use may have pits, need to further research. # --- # ## Use mlfinlab: Create VIB Bars VIB, VIB_thresholds_df = get_ema_volume_imbalance_bars(tick_df.reset_index(), analyse_thresholds=True, exp_num_ticks_constraints=[1300, 10000]) VIB.head() analysis_thresholds(VIB, VIB_thresholds_df) # VIB和TIB一样也需要调整exp_num_ticks_constraints才能让产生的bar更合理。 # # VIB, like TIB, needs to adjust exp_num_ticks_constraints to make the resulting bar more reasonable. # ## 结论(Conclusion) # # 这样看下来,其实EMW动态调整阈值的形式其实不太靠谱,所以mlfinlab还准备了固定阈值的函数,想得很周到,我这里就不分析了。 # # 用Imbalance Bars可能不是一件很容易的事,更何况国内的货期tick数据还是快照数据。 # # 回顾一下上面两种 Imbalance Bars 的自相关性分析图,自相关性都很高,远远高于Standard Bars。Imbalance Bars的主要目标是每个Bar的信息量相等。 这就是为什么我们应该考虑使用信息理论来研究 Imbalance Bars 的特性。 # # --- # # In this way, the form of the EMW dynamic adjustment threshold is not quite reliable, so mlfinlab also prepared a fixed threshold function, think very thoughtful, but I will not analyze here. # # Using Imbalance Bars may not be an easy task, not to mention domestic cargo tick data or snapshot data. # # As we can see imbalance bars are more autocorrelated comparing to dollar bars, however, the key goal of imbalance/run bars is equal amount of information inside of each bar. That is why we should consider using information theory to research properties of imbalance bars in comparison with time/dollar bars.
chapter2/Imbalance Bars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![](https://github.com/callysto/callysto-sample-notebooks/blob/master/notebooks/images/Callysto_Notebook-Banner_Top_06.06.18.jpg?raw=true) # ## This is a title # ### This is a subtitle # # Date: November 2019 # # Description: Coding with Python Intro # Printing to screen print("My name is Laura") # + language="html" # <h1> Heading </h1> # - # !git clone https://github.com/lfunderburk/Interactive_Jupyter 1 + 2 # ### Mathematical operations # # * Addition: use `+` # * Subtraction: use `-` # * Multiplication: use `*` # * Division: use `/` # * Power: use `**` print(2-3) 2**2 1 + 2*3 # ### Creating Variables instructor_age = 27 instructor_first_name = "Laura" instructor_email = '<EMAIL>' print(instructor_age) print(instructor_first_name + " is " + str(instructor_age) + " old.") print(instructor_age, instructor_first_name, instructor_email) # ### Practice # Define two variables called `my_age` and `my_first_name` with your age and first name. Use `print` to print a similar message I did above using your information. # # Remember that there are two instructors available to help if you need it! my_age = 27 my_name = "Laura" print(my_name + " is " + str(my_age) + " years old.") print(my_name, my_age) # ## Variable Types type(instructor_age) type(instructor_email) type(print) # + # This is bad practice... do not do it! # print = 123 # - print("Hello") 1 + 1.2 str(1) + "a" # ## Arrays my_arr = [1,2,"pe", "c",[1,2,3]] # Slicing # Indeces start from 0 and go all the to n-1, where n is the size of the array length_of_arr = len(my_arr) list(range(5)) my_arr[0] my_arr[0] + my_arr[1] my_arr[4][-1] # 0 1 2 3 my_arr[0:3] list(range(1,5)) # ## Manipulating Strings instructor_first_name instructor_first_name[0:2] instructor_first_name[0] list(instructor_first_name) instructor_first_name[::-1] # ## Methods # Turn all letters into upper case instructor_first_name.upper() instructor_first_name.lower() my_name my_new_name = my_name.replace("ra","ren") my_name entry_one = "String + \n" entry_two = "String + \t" entry_two.replace("\t", "\n") h = 3 #h.replace(3,4) my_new_name dir(my_name) # ?my_name.replace # ## List methods measurements= [0.273, 0.275, 0.277, 0.275] len(measurements) measurements[1:3] measurements.append(0.37) measurements measurements.insert(2,1) measurements measurements.remove(0.37) measurements measurements.pop(2) measurements # ### Practice # # Create a list containing the names of at least 3 people you know. # * Using that list, add a fourth person to the end. # * Using that list, capitalize the name of the second person (you may need to look back at the methods we used on strings) # * Use the `index` method for lists (look at help) to find the position of one of the names. Use `del` to then remove that person. my_list= ["Pete","Joe","Nina"] #my_list[1] = my_list[1].upper() my_list # Complete solution my_list= ["Pete","Joe","Nina"] print(my_list) my_list[1] = my_list[1].upper() print(my_list) help(my_list.index) del my_list[1] print(my_list) help(my_list.index) del my_list[1] my_list # ##### Lunch break ... # ## For Loops ### Make objects like lists in Python friends = ["Peter", "Laura", "Louis"] friends ##We also looked at methods applied to python objects "Peter".upper() friends.upper() friends[0].upper() # + # How do I apply the upper() method to all elements of my list? # The answer is: FOR LOOPs for item in friends: print(item.upper()) #How do I make a new list with capitalized letters????? new_capital_list = [] for item in friends: new_capital_list.append( item.upper() ) # - new_capital_list # + ### Some helpful notes no For Loops: #1. We start a for loop with the keyword "for" #2. We define a temporary variable (in this case item is my temporary variable). # item will be equal to each element of my list I'm iterating over #3. We use th keyword "in" to indicate which list we wawnt to iterate over #(in this case) it's the friends list #4. We end the line with a colon, to tell Python when to start our code # + for item in friends: friend_upper_case = item.upper() print("Hello", friend_upper_case) print("Goodbye", friend_upper_case) print() #This prints a blank space print("For loop is finished") # - friend_upper_case # + #Now lets work with lists of numbers: list(range(0,5)) ###Use a for loop to calculate the sum of 1,2,3,...,100 my_sum = 0 for number in range(0,101): my_sum = my_sum + number print(my_sum) # - my_sum = 0 for number in range(0,4): my_sum = my_sum + number print(my_sum) # + #### #round(4.56, 0) my_numbers = [1.23, 4.56, 7.68] # Write a for loop that rounds reach number to 1 decimal place # (using the round function) #Note: round(my_numbers) will give error!!! my_rounded_numbers = [] for item in my_numbers: my_rounded_numbers.append(round(item, 1)) print(my_rounded_numbers) # + ### One last on For loops: # List comprehension #Does the exact same thing as the previous for loop: rounded_numbers2 = [round(item, 1) for item in my_numbers] print(rounded_numbers2) # + # Lets work with functions now my_numbers2 = [1.567, 4.222, 3.112] # Let's write a function that returns the rounded numbers of any object def rounded_list(my_list): my_rounded_numbers = [] for item in my_list: my_rounded_numbers.append(round(item, 1)) return my_rounded_numbers # - rounded_list(my_numbers) rounded_list(my_numbers2) # + ### Recall previously, we used a for loop to obtain the sum of 0,1,2,3,...100. # LEt's now try to write a function that calculate the sum of n integers. my_sum = 0 for number in range(0,101): my_sum = my_sum + number #print(my_sum) #Exercise: Write a function that calculates the sum of n integers. set n as a paramter # in your function def sum_n(n): ''' calculate sum of n integers ''' my_sum = 0 for number in range(0,n+1): my_sum = my_sum + number return my_sum sum_n(3) #Test: n = 2 should return 3 #Test: n = 3 should return 6 #Test< n = 10 should be ??? # - # ## If- (and if-else) statements # + ### If else statements allow us to choose when we apply a transformation on objects # within a list. my_numbers2 = [1.11, 3.2, 5.78, 9.0, 13.7] #Suppose I want rounded numbers ONLY for numbers greater than 10 for number in my_numbers2: if number > 0 and number < 4: print(round(number,0)) print("Condition True") else: print("Condition False") print(round(number,1)) ### Let's write a function that returns a list of numbers, where the nuumbers greater #than 5 is rounded to 1 decimal place. my_numbers3 = [1.222, 2.345, 3.444, 5.021, 14.67] def rounding_greater5(my_list): my_result = [] for number in my_list: if number > 5: my_result.append(round(number,1)) else: my_result.append(number) return my_result # - rounding_greater5(my_numbers3) # ## Review # Arrays and data structures my_dict = {"keys":["value1","value2","value3"], "keys2":[str(1),str(2),str(3)]} my_arr = [1,2,3,4,5,"a","abc",[1,2,3], my_dict] my_arr[0] # We can iterate over the elements of an array by using a for loop length = len(my_arr) for i in range(length): print(my_arr[i]) my_dict["keys"] + my_dict['keys2'] # Iterating over elements in both lists to create a list with four elements # whose elements are the concatenation of list 1 to list 2 new_list = [] for i in range(3): print( my_dict["keys"][i] + my_dict["keys2"][i]) new_list.append(my_dict["keys"][i] + my_dict["keys2"][i]) # Practice with dictionaries my_dictionary_2 = { 0: "a", 1 : "b", 2 : "c", 3 : "d" } array = [1, 2, 3, 1, 1, 3, 2, 2] # Create new array that transforms integers in array into corresponding string # values in my_dictionary_2 new_array2 = [] for i in range(len(array)): new_array2.append(my_dictionary_2[array[i]]) print(new_array2) new_array3 = [] for item in array: new_array3.append(my_dictionary_2[item]) print(new_array3) my_dictionary_2[2] my_dictionary_2.keys() my_dictionary_2.values() # ## Numpy Python Library # # Numpy facilitates manipulating arrays (like in Matlab or in R) # import numpy as np num_arr = [1,1,1,1] num_arr2 = [0,1,0,1] num_arr + num_arr2 # Create numpy arrays using array() method from the numpy ("np") python library np_num_arr = np.array(num_arr) np_num_arr2 = np.array(num_arr2) # Adding pairwise elements in two arrays np_num_arr + np_num_arr2 # Multiply pairwise elements np_num_arr * np_num_arr2 # + # Divide pairwise elements #np_num_arr / np_num_arr2 # - np_num_arr2 / np_num_arr type(num_arr) type(np_num_arr) # ## Other np methods # np.add(np_num_arr, np_num_arr2) np.subtract(np_num_arr, np_num_arr2) # Take e to the power of each value in the array np.exp(np_num_arr) # Take cos, sin or log of each value in the array print(np.cos(np_num_arr)) print(np.sin(np_num_arr)) print(np.log(np_num_arr)) # Perform dot product of two vectors np_num_arr.dot(np_num_arr2) # Perform logical operations np_num_arr == np_num_arr2 # Compare np_num_arr < np_num_arr2 np_num_arr np_num_arr2 # Aggreagate functions # Return array-wise sum np_num_arr.sum() # Return array-wise minimum np_num_arr.min() # Return array-wise maximum np_num_arr.max() # ## 2D Arrays using Numpy TwoD_num_arr = np.array( [ np_num_arr , np_num_arr2 ] ) TwoD_num_arr np.array([ [ np_num_arr, np_num_arr2] , [np_num_arr**2 , np_num_arr2**2] ]) # Initialize numpy 2D array np.zeros((3,4)) np.ones((3,4)) np.ones(3) # ## Pandas Python Library import pandas as pd # Read a CSV file! ### You can also provide full path to file name instead of uploading file_name = "./DATA/gapminder_all.csv" my_pandas_csv = pd.read_csv(file_name) # First entries (5 is default, can enter more) my_pandas_csv.head(10) my_pandas_csv.columns # Describe to get basic stats summary of the data my_pandas_csv.describe() # Get unique values from a column with unique() method my_pandas_csv["continent"].unique() condition = my_pandas_csv["continent"]=="Africa" africa_gdp = my_pandas_csv[ condition] condition2 = africa_gdp["country"]=="Algeria" africa_dgp[condition2] sub_pd = my_pandas_csv[ (my_pandas_csv["continent"] == "Africa") & (my_pandas_csv["country"] == "Algeria") ] sub_pd.iloc[:,:] variable_x = sub_pd.iloc[:,2:14] variable_y = sub_pd.iloc[:,14:26] my_pandas_csv.boxplot(column=["gdpPercap_1952","gdpPercap_1962","gdpPercap_1972"]); # # Matplotlib plotting import matplotlib.pyplot as plt #importing library to python using an alias # + time = [0, 1, 2, 3] position = [0, 50, 100, 150] plt.plot(time, position) plt.xlabel("Time (hours)") plt.ylabel("Distance (Km)") plt.title("Distance Plot") # - import pandas as pd oceania_data = pd.read_csv("./DATA/gapminder_gdp_oceania.csv", index_col ="country") oceania_data ### LEt's look at the column names of our dataset: oceania_data.columns # + ###Goal: PLot a time series plot for year vs. GDP for both Autralia and N-Z #First thing we need to do is extract a "year" dataset years = [] for item in oceania_data.columns: years.append(int( item[len(item)-4:]) ) ##FYI: Each loop is doing this: #n = len(oceania_data.columns[0]) #oceania_data.columns[0][n-4:] # - years # + #### Lets plot GDP for Australia oceania_data.loc['Australia', :] # - plt.plot(years, oceania_data.loc['Australia', :], color = "#1ED0A5", linestyle = "--") plt.xlabel("Year") plt.ylabel("GDP") plt.title("GDP per capita for Australia") # + ###That was so much fun! Let's plot Australia's and NZ's gdp vs year on the same graph now # Note that the .T method takes the transpose of the dataframe # (i.e. we switch rows <-> columns) #oceania_data.T oceania_data.T["Australia"] # - plt.plot(years, oceania_data.T["Australia"], color = "#22D3E3") plt.plot(years, oceania_data.T["New Zealand"], color = "red") plt.xlabel("Year") plt.ylabel("GDP per capita") plt.title("GDP per capita for Oceania") plt.legend(["Australia","New Zealand"]) ###Let's produce some barplots oceania_data.T.plot(kind="bar") # + ### Let's do scatterplots Australia_gdp = oceania_data.loc["Australia"] NZ_gdp = oceania_data.loc["New Zealand"] plt.scatter(Australia_gdp, NZ_gdp) # + my_data = [[1,2,3,4,5], [3,5,7,8,9], [7,6,5,7,8] ] my_data2 = pd.DataFrame(my_data, columns = ["Var1", "Var2", "Var3", "Var4", "Var5"]) my_data2.corr() import seaborn as sns #changing the colour layout sns.set_palette("pastel") #Plot a heatmap using my correlation values sns.heatmap(my_data2.corr(),alpha = 0.8, annot=True) # - # ![](https://github.com/callysto/callysto-sample-notebooks/blob/master/notebooks/images/Callysto_Notebook-Banners_Bottom_06.06.18.jpg?raw=true)
WorkshopIntroToPython.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # name: python3 # --- # + from transformers import AutoTokenizer from datasets import load_from_disk from encoder import RobertaEncoder from dense_retrieval import DenseRetrieval_with_Faiss # - tokenizer = AutoTokenizer.from_pretrained('./dense_retireval_roberta_small/tokenizer') q_encoder = RobertaEncoder.from_pretrained('./dense_retireval_roberta_small/q_encoder') p_encoder = RobertaEncoder.from_pretrained('./dense_retireval_roberta_small/p_encoder') p_encoder.cuda() q_encoder.cuda() # + import torch def embed(inputs): p_inputs = tokenizer( inputs, padding="max_length", truncation=True, return_tensors="pt", ).to('cuda') result = p_encoder(**p_inputs).squeeze().to("cpu").detach().numpy() del p_inputs torch.cuda.empty_cache() return result # - def qued(inputs): q_inputs = tokenizer( inputs, padding="max_length", truncation=True, return_tensors="pt", ).to('cuda') result = q_encoder(**q_inputs).squeeze().to("cpu").detach().numpy() del q_inputs torch.cuda.empty_cache() return result embed('이 질문은 나라목록에 대한것으로') wiki_data = load_from_disk('/opt/ml/data/wiki_preprocessed_droped') ds_with_embeddings = wiki_data.map(lambda example: {'embeddings': embed(example['text'])}) # + ds_with_embeddings.add_faiss_index(column='embeddings') ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss') wiki_data.load_faiss_index('embeddings', 'my_index.faiss') # - scores, retrieved_examples = wiki_data.get_nearest_examples('embeddings', embed('대한민국에서 대중적으로 인기를 얻은 짜장면과 짬뽕 등 한국식 중국 요리는 화교들이 많이 살던 인천'), k=10) scores retrieved_examples
deprecated/dpr/code/dense_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <center> CTA200 Computing Assignment # <center><NAME> - <EMAIL> # # <center>Supervisors: <NAME> and <NAME> from astropy.io import fits import matplotlib.pyplot as plt from matplotlib import transforms import numpy as np import astropy.units as u from mpl_toolkits.axes_grid1.inset_locator import inset_axes from photutils.aperture import aperture_photometry, CircularAperture from numpy import unravel_index import scipy.constants as con from astropy.cosmology import WMAP9 as cosmo from scipy import integrate as inte def read(filename): hdu = fits.open(filename) hdr = hdu[0].header data = hdu[0].data return hdr,data # # <center> Question 1 - Visualising the galaxy # ##### Part 1.1 # Using $\texttt{astropy.io.fits}$, open $\texttt{galaxy_hydro.fits}$ and label the different galaxy components hdr_hydro,data_hydro = read('galaxy_hydro.fits') stmass, strate, gmass, dmass = data_hydro # ##### Part 1.2 # Using $\texttt{matplotlib.pyplot.imshow}$ and $\texttt{matplotlib.pyplot.contour}$, plot the images of the galaxy components. log_stmass = np.log10(stmass) log_strate = np.log10(strate) log_gmass = np.log10(gmass) log_dmass = np.log10(dmass) # + fig,axs = plt.subplots(1,4, figsize=(12,6), sharey=True, gridspec_kw={'wspace': 0.4}) def axx(i): axi = inset_axes(axs[i],width="3%",height="50%",loc='upper right',bbox_to_anchor=(0.06, 0., 1, 1), bbox_transform=axs[i].transAxes,borderpad=0) return axi # plot the image of the stellar mass im = axs[0].imshow(stmass, cmap='pink') axs[0].set_title('Stellar Mass', fontsize=20,pad=13) cb = fig.colorbar(im, cax=axx(0), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=4) cb.ax.tick_params(labelsize=12) cb.ax.get_xaxis().labelpad = 10 cb.ax.set_xlabel('$M_\odot$', rotation=0,loc='left',fontsize=14) # plot the image of the star formation rate im = axs[1].imshow(strate, cmap='pink') axs[1].set_title('Rate of \nStar Formation', fontsize=20,pad=10) cb = fig.colorbar(im, cax=axx(1), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=4) cb.ax.tick_params(labelsize=12) cb.ax.get_xaxis().labelpad = 10 cb.ax.set_xlabel(r'$\dfrac{M_\odot}{yr}$', rotation=0,loc='left',fontsize=13) # plot the image of the gas mass im = axs[2].imshow(gmass, cmap='pink') axs[2].set_title('Gas Mass', fontsize=20,pad=13) cb = fig.colorbar(im, cax=axx(2), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=4) cb.ax.tick_params(labelsize=12) cb.ax.get_xaxis().labelpad = 10 cb.ax.set_xlabel('$M_\odot$', rotation=0,loc='left',fontsize=14) # plot the image of the dust mass im = axs[3].imshow(dmass, cmap='pink') axs[3].set_title('Dust Mass', fontsize=20,pad=13) cb = fig.colorbar(im, cax=axx(3), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=4) cb.ax.tick_params(labelsize=12) cb.ax.get_xaxis().labelpad = 10 cb.ax.set_xlabel('$M_\odot$', rotation=0,loc='left',fontsize=14) for ax in axs: ax.set_ylim(145,40) ax.set_xlim(70,122) ax.tick_params(labelsize=14) ax.set_xlabel('X [pix]',fontsize=17) axs[0].set_ylabel('Y [pix]',fontsize=17) plt.savefig('all_mass_images.pdf') plt.close() # + # plot the contours of all components on one set of axes fig,axs = plt.subplots(1,1, figsize=(4,8)) im = axs.imshow(np.log10(stmass+gmass+dmass), cmap='twilight_shifted') cb = fig.colorbar(im, ax=axs, orientation='vertical', aspect=50, shrink=0.65) cb.ax.tick_params(labelsize=12) cb.ax.get_yaxis().labelpad = 20 cb.ax.set_ylabel(r'log( $M_\odot$)',rotation=270,loc='center',fontsize=14) #axs.contour(log_strate, cmap='winter') #axs.contour(log_gmass, cmap='winter') #axs.contour(log_dmass, cmap='winter') axs.set_xlim(70,122) axs.set_ylim(145,40) axs.set_xlabel('X [pix]',fontsize=17) axs.set_ylabel('Y [pix]',fontsize=17) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.title('All Mass Components', fontsize=20,pad=10) plt.savefig('all_components_mass.pdf') plt.close() # - # plot the contours of all components on one set of axes fig,axs = plt.subplots(1,1, figsize=(4,8)) axs.contour(log_stmass, cmap='winter') axs.contour(log_strate, cmap='winter') axs.contour(log_gmass, cmap='winter') axs.contour(log_dmass, cmap='winter') axs.set_xlim(70,122) axs.set_ylim(145,40) axs.set_xlabel('X [pix]',fontsize=17) axs.set_ylabel('Y [pix]',fontsize=17) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.title('All Galaxy Components', fontsize=20,pad=10) plt.savefig('all_components_contour.pdf') plt.close() # + fig,axs = plt.subplots(1,4, figsize=(12,6), sharey=True, gridspec_kw={'wspace': 0.1}) # plot the contour of the stellar mass im = axs[0].contour(log_stmass, cmap='winter') axs[0].set_title('Stellar Mass', fontsize=20,pad=10) # plot the contour of the star formation rate im = axs[1].contour(log_strate, cmap='winter') axs[1].set_title('Rate of \nStar Formation', fontsize=20,pad=10) # plot the contour of the gas mass im = axs[2].contour(log_gmass, cmap='winter') axs[2].set_title('Gas Mass', fontsize=20,pad=10) # plot the contour of the dust mass im = axs[3].contour(log_dmass, cmap='winter') axs[3].set_title('Dust Mass', fontsize=20,pad=10) for ax in axs: ax.set_ylim(145,40) ax.set_xlim(70,122) ax.tick_params(labelsize=14) ax.set_xlabel('X [pix]',fontsize=17) axs[0].set_ylabel('Y [pix]',fontsize=17) plt.savefig('separated_components_contour.pdf') plt.close() # + fig,axs = plt.subplots(1,2, figsize=(12,6), sharey=True, gridspec_kw={'wspace': 0.07}) # plot the image of the stellar mass with the dust mass contour overlayed im1 = axs[0].imshow(log_stmass, cmap='twilight_shifted') axs[0].contour(log_dmass, cmap='Greys') axs[0].set_title('Stellar Mass \nand Dust Mass', fontsize=20) cb = fig.colorbar(im1, ax=axs[0], orientation='vertical', aspect=50, shrink=0.65) cb.ax.tick_params(labelsize=12) cb.ax.get_yaxis().labelpad = 20 cb.ax.set_ylabel(r'log( $M_\odot$)',rotation=270,loc='center',fontsize=14) # plot the image of the star formation rate with the gas mass contour overlayed im2 = axs[1].imshow(log_strate, cmap='twilight_shifted') axs[1].contour(log_gmass, cmap='ocean') axs[1].set_title('Star Formation Rate \nand Gas Mass', fontsize=20) cb = fig.colorbar(im2, ax=axs[1], orientation='vertical', aspect=55, shrink=0.65) cb.ax.tick_params(labelsize=12) cb.ax.get_yaxis().labelpad = 20 cb.ax.set_ylabel(r'log( $M_\odot yr^{-1}$ )',rotation=270,loc='center',fontsize=14) for ax in axs: ax.tick_params(labelsize=14) ax.set_xlabel('X [pix]',fontsize=17) axs[0].set_ylabel('Y [pix]',fontsize=17) plt.savefig('contour_image_compare.pdf') plt.close() # + fig,axs = plt.subplots(1,2, figsize=(8,8), sharey=True, gridspec_kw={'wspace': 0.22}) # plot the cropped image of the stellar mass with the dust mass contour overlayed im1 = axs[0].imshow(log_stmass, cmap='twilight_shifted') axs[0].contour(log_dmass, cmap='Greys') axs[0].set_title('Stellar Mass \nand Dust Mass', fontsize=20) cb = fig.colorbar(im1, ax=axs[0], orientation='vertical', aspect=50, shrink=0.65) cb.ax.tick_params(labelsize=12) cb.ax.get_yaxis().labelpad = 20 cb.ax.set_ylabel(r'log( $M_\odot$)',rotation=270,loc='center',fontsize=14) # plot the cropped image of the star formation rate with the gas mass contour overlayed im2 = axs[1].imshow(log_strate, cmap='twilight_shifted') axs[1].contour(log_gmass, cmap='ocean') axs[1].set_title('Star Formation Rate \nand Gas Mass', fontsize=20) cb = fig.colorbar(im2, ax=axs[1], orientation='vertical', aspect=55, shrink=0.65) cb.ax.tick_params(labelsize=12) cb.ax.get_yaxis().labelpad = 20 cb.ax.set_ylabel(r'log( $M_\odot yr^{-1}$ )',rotation=270,loc='center',fontsize=14) for ax in axs: ax.set_ylim(145,40) ax.set_xlim(70,122) ax.tick_params(labelsize=14) ax.set_xlabel('X [pix]',fontsize=17) axs[0].set_ylabel('Y [pix]',fontsize=17) plt.savefig('contour_image_compare_cropped.pdf') plt.close() # - # ##### Part 1.3 # Calculate the total stellar mass, dust mass, gas mass and star formation rate of this galaxy. # calculate the stellar mass and ensure it is consistent with the header Stellar_Mass = np.sum(stmass)*u.M_sun if np.round(np.log10(Stellar_Mass.value),3) == np.round(hdr_hydro['LMSTAR'],3): SM = Stellar_Mass SM # calculate the dust mass and ensure it is consistent with the header Dust_Mass = np.sum(dmass)*u.M_sun if np.round(np.log10(Dust_Mass.value),3) == np.round(hdr_hydro['LMDUST'],3): DM = Dust_Mass DM # calculate the gas mass and ensure it is consistent with the header Gas_Mass = np.sum(gmass)*u.M_sun if np.round(np.log10(Gas_Mass.value),3) == np.round(hdr_hydro['LMGAS'],3): GM = Gas_Mass GM # calculate the star formation rate and ensure it is consistent with the header Star_Formation = np.sum(strate)*u.M_sun/u.yr if np.round(Star_Formation.value,1) == np.round(hdr_hydro['SFR'],1): SF = Star_Formation SF # ##### Part 1.1 - 2 # Plot a few images of the galaxy at different wavelengths. hdr_allwav,data_allwav = read('galaxy_allwav.fits') # + # put all data and wavelengths into lists sorted from lowest to highest wavelength intex = [] all_wavelengths = np.empty(data_allwav[0].shape[0]) for i in range(data_allwav[0].shape[0]): all_wavelengths[i] = hdr_allwav['IWAV'+str(i)] sort_waves = np.sort(all_wavelengths) index = [] for i in sort_waves: index.append(np.where(all_wavelengths == i)[0][0]) waves = np.empty(data_allwav[0].shape[0]) datas = [] for i,ind in enumerate(index): waves[i]=hdr_allwav['IWAV'+str(ind)] datas.append(data_allwav[0][ind]) # - # define all the chosen wavelengths and data sets using variables UVw,BLw,RDw,IRw,FIRw = hdr_allwav['IWAV0'],hdr_allwav['IWAV3'],hdr_allwav['IWAV1'],hdr_allwav['IWAV9'],hdr_allwav['IWAV17'] UV,BL,RD,IR,FIR = data_allwav[0][0], data_allwav[0][3], data_allwav[0][1], data_allwav[0][9], data_allwav[0][17] def colorplot(wavelength): ''' PARAMETERS: wavelength <ndarray>: data set at desired wavelength RETURNS: image of the data with x and y densities ''' x = np.sum(wavelength,axis=0) y = np.sum(wavelength,axis=1) if (wavelength == UV).all(): title = 'Ultraviolet' i=0 elif (wavelength == BL).all(): title = 'Blue' i=3 elif (wavelength == RD).all(): title = 'Red' i=1 elif (wavelength == IR).all(): title = 'Infrared' i=9 elif (wavelength == FIR).all(): title = 'Far Infrared' i=17 fig,axs = plt.subplots(2,2,figsize=(7,7), gridspec_kw={'wspace': 0,'hspace': 0,'height_ratios': [1, 5], 'width_ratios': [5,1]}) axs[0,0].plot(x/max(x),c='black') axs[0,0].axis("off") axs[0,0].set_title(title, fontsize=20) axs[0,0].tick_params(labelsize=0,left=False) axs[1,0].imshow(wavelength,cmap='pink') base = plt.gca().transData rot = transforms.Affine2D().rotate_deg(270) axs[1,1].plot(y/max(y),c='black', transform= rot + base) axs[1,1].tick_params(labelsize=0,bottom=False) axs[1,1].axis("off") axs[0,1].tick_params(labelsize=0,bottom=False,left=False) axs[0,1].axis("off") axs[1,0].tick_params(labelsize=14) axs[1,0].set_xlabel('X [pix]',fontsize=17) axs[1,0].set_ylabel('Y [pix]',fontsize=17) plt.savefig('IWAV'+str(i)+'_image_dense.pdf') plt.close() return # plot the full images for all the chosen wavelengths as well as their x and y densities colorplot(UV),colorplot(BL),colorplot(RD),colorplot(IR),colorplot(FIR) # + fig,axs = plt.subplots(1,5, figsize=(17,7), sharey=True, gridspec_kw={'wspace': 0.25}) def axx(i): axi = inset_axes(axs[i],width="3%",height="50%",loc='upper right',bbox_to_anchor=(0.05, 0., 1, 1), bbox_transform=axs[i].transAxes,borderpad=0) return axi # plot the cropped Ultraviolet image im = axs[0].imshow(UV, cmap='pink') axs[0].set_title('Ultraviolet \n'+str(UVw)+'µm', fontsize=20,pad=10) cb = fig.colorbar(im, cax=axx(0), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=3) cb.ax.tick_params(labelsize=12) # plot the cropped Blue image im = axs[1].imshow(BL, cmap='pink') axs[1].set_title('Blue \n'+str(BLw)+'µm', fontsize=20,pad=10) cb = fig.colorbar(im, cax=axx(1), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=3) cb.ax.tick_params(labelsize=12) # plot the cropped Red image im = axs[2].imshow(RD, cmap='pink') axs[2].set_title('Red \n'+str(RDw)+'µm', fontsize=20,pad=10) cb = fig.colorbar(im, cax=axx(2), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=3) cb.ax.tick_params(labelsize=12) # plot the cropped Infrared image im = axs[3].imshow(IR, cmap='pink') axs[3].set_title('Infrared \n'+str(IRw)+'µm', fontsize=20,pad=10) cb = fig.colorbar(im, cax=axx(3), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=3) cb.ax.tick_params(labelsize=12) # plot the cropped Far Infrared image im = axs[4].imshow(FIR, cmap='pink') axs[4].set_title('Far Infrared \n'+str(FIRw)+'µm', fontsize=20,pad=10) cb = fig.colorbar(im, cax=axx(4), orientation='vertical', aspect=50, shrink=0.7) cb.ax.locator_params(nbins=3) cb.ax.tick_params(labelsize=12) for ax in axs: ax.set_ylim(270,70) ax.set_xlim(130,250) ax.tick_params(labelsize=14) ax.set_xlabel('X [pix]',fontsize=17) axs[0].set_ylabel('Y [pix]',fontsize=17) plt.savefig('all_IWAV_images.pdf') plt.close() # - # ##### Part 1.2 - 2 # Plot the total fluxes of the galaxy as a function of wavelength. # # - What does this plot tell you about how much light is emitted at different wavelengths? # - What do you think is determining how much light is emitted at different wavelengths? # - Younger stars emit light at shorter wavelengths (because they are hotter). # - Dust preferentially obscures light at shorter wavelengths and re-emits them in the infrared.. # # + sumwave = np.empty(data_allwav[0].shape[0]) for i,co in enumerate(datas): sumwave[i]=np.sum(co) # + fig,axs = plt.subplots(1,2, figsize=(15,5)) # plot the total flux for each wavelength #ax.set_title('Total Fluxes vs. Wavelength',fontsize=20) for ax in axs: ax.plot(waves,sumwave,'--*',ms=10, c='black') ax.set_xlabel('Wavelength [ µm ]', fontsize=17) ax.set_xscale('log') ax.grid(alpha=0.5) ax.tick_params(axis='x',labelsize=14) ax.tick_params(axis='y',labelsize=14) ax.yaxis.offsetText.set_fontsize(14) axs[0].set_ylabel('Flux [ a.u. ]', fontsize=17) axs[1].set_ylim(-1,3e4) plt.savefig('flux_v_wave.pdf') plt.close() # - # # <center> Question 2 - Galaxy Size # ##### Part 2 - 1 # Use $\texttt{aperture_photometry}$ to measure the circularized half-mass size of the galaxy. # # Place a circular aperture of radius 10 pixels at the center of the galaxy and measure the total mass inside the aperture. Change the aperture sizes to find the radius at which the total mass inside the aperture=half the total mass of the galaxy (from previous section). Hint: Automate this by iteratively placing hundred apertures of increasing sizes! Contact me for more hints. def half_size(data,mass=False,real=False): ''' PARAMETERS: data <ndarray>: data set being used mass=False or True: whether or not half mass is being found RETURNS: aper[h] <float>: sum within the aperture which contains half the total sum position <tuple>: pixel coordinates with the highest value (the centre of the galaxy) size_h <float>: half size in pixels ''' # find the sum over all the pixels tot = np.sum(data) # find the coordinates for the center of the galaxy if mass == True: position = (96,104) # this was manually selected elif real == True: position = unravel_index(data.argmax(), data.shape) # if the real galaxy image is used else: q=np.empty(data_allwav[0].shape[0]) y=np.empty(data_allwav[0].shape[0]) # find all the coordinates with max value at different wavelengths for i,d in enumerate(datas): pos = unravel_index(d.argmax(), d.shape) q[i]=pos[0] y[i]=pos[1] # take median of the coordinates with max value position = np.median(q),np.median(y) x=np.linspace(1,200,1000) aper = np.empty(x.shape) radii = np.empty(x.shape) # iterate through different radii for the aperture photometry for i,rad in enumerate(x): aperture = CircularAperture(position, r=rad) a = aperture_photometry(data,aperture)[0][3] radii[i] = rad aper[i] = a # find where the difference between the total within the aperture and the half total is minimum h = np.where(aper == min(aper, key=lambda z:abs(z-tot/2)))[0][0] # find the radius of the aperture at the half size size_h = radii[h] return aper[h],position,size_h # ##### Part 2 - 2 # # Find the half-mass size of the galaxy in kpc. # # Use $\texttt{PIXELSCALE}$ in the $\texttt{header}$. def comp_size(pixel_size,pixel_scale,redshift): ''' NOTE: this function uses astropy functions to calculate the size in kpc - it was not used for the computation but to check that the manual computation worked PARAMETERS: pixel_size <float>: size in pixels pixel_scale <float>: how pixels scale with arcseconds on the image redshift <int>: redshift of the galaxy RETURNS: size_kpc <astropy.units.quantity.Quantity>: size in kpc ''' # add units to the pixel scale pixel_scale = pixel_scale*u.arcsec #per pixel # find how kpc scales with arcmin at the given redshift kpc_arcmin = (cosmo.kpc_proper_per_arcmin(redshift)) # finds angular size of the galaxy angular_size = pixel_size*pixel_scale # find the size of the galaxy in kpc size_kpc = (angular_size*kpc_arcmin) return size_kpc.to(u.kpc) def size(pixel_size, pixel_scale, redshift, Omega_M, Omega_A): ''' PARAMETERS: pixel_size <float>: size in pixels pixel_scale <float>: how pixels scale with arcseconds on the image redshift <int>: redshift of the galaxy Omega_M, Omega_A <floats>: current density parameters of the universe Ho <astropy.units.quantity.Quantity>: current Hubble parameter of the universe RETURNS: length <astropy.units.quantity.Quantity>: size in kpc ''' # add units to speed of light c = con.c *u.m/u.s # add units to the pixel scale pixel_scale = pixel_scale*u.arcsec #per pixel # finds angular size of the galaxy angular_size = (pixel_size*pixel_scale).decompose()/u.rad # define the scale factor as a function of redshift R = lambda z: 1/(1+z) # define the derivative of scale factor as a function of density parameters and scale factor Rdot = lambda R: (Omega_M/R + Omega_A*R**2)**(1/2) # define function to integrate func = lambda R: 1/(R*Rdot(R)) integr = inte.quad(func,R(redshift),1) # find the comoving distance (Dc) and the angular size distance (Da) Dc = c*integr/Ho Da = R(redshift)*Dc # find length using angular size and Da length = Da*angular_size return length[0].decompose().to(u.kpc) # ##### Part 2.1 # Measure the half-light size of this galaxy at optical wavelength (∼500 nm). # # How does this size compare to the half-mass size of the galaxy? # define constants pixel_scale,redshift = hdr_allwav['PIXSCALE'],hdr_allwav['Z'] Omega_M, Omega_A, Ho = cosmo.Om0, 1-cosmo.Om0, cosmo.H(0) dat500nm.shape[0]/stmass.shape[0] # find the data set closest to the given optical wavelength i = np.where(waves == min(waves, key=lambda x:abs(x-0.5)))[0][0] dat500nm, wav500nm = datas[i], waves[i] '''Find the half-light size''' _,hl_pos,hl_size = half_size(dat500nm) half_light_size500nm = size(hl_size, pixel_scale, redshift, Omega_M, Omega_A) half_light_size500nm '''Find the half-mass size''' _,hm_pos,hm_size = half_size(stmass+gmass+dmass,mass=True) half_mass_size500nm = size(hm_size, pixel_scale*dat500nm.shape[0]/stmass.shape[0], redshift, Omega_M, Omega_A) half_mass_size500nm # ratio of half mass to half light half_mass_size500nm/half_light_size500nm # + y,x = hl_pos pos = (x,y) fig,axs = plt.subplots(1,1,figsize=(6,6)) # define the circle with radius of the half light size at the centre of the galaxy h_light = plt.Circle(pos,hl_size,color='r',ls='-.',lw=3,fill=False,label='Half-Light') # define the circle with radius of the half mass size at the centre of the galaxy h_mass = plt.Circle(pos,hm_size*dat500nm.shape[0]/stmass.shape[0],color='black',ls='--',lw=3,fill=False,label='Half-Mass') # plot the image of the galaxy at optical wavelength im = axs.imshow(np.log10(dat500nm),cmap='twilight') axi = inset_axes(axs,width="3%",height="100%",loc='center right',bbox_to_anchor=(0.06, 0., 1, 1), bbox_transform=axs.transAxes,borderpad=0) cb = plt.colorbar(im,cax=axi) cb.ax.tick_params(labelsize=12) cb.ax.get_yaxis().labelpad = 20 cb.ax.set_ylabel(r'log( $Flux$)',rotation=270,loc='center',fontsize=14) # plot the two circles for the half sizes axs.add_patch(h_light) axs.add_patch(h_mass) axs.set_ylim(310,50) axs.set_xlim(120,265) axs.tick_params(labelsize=14) axs.legend(fontsize=14) axs.set_xlabel('X [pix]',fontsize=17) axs.set_ylabel('Y [pix]',fontsize=17) plt.savefig('circles.pdf') plt.close() # - # ##### Part 2.2 # Repeat this technique to measure the sizes at all wavelengths in kpc. # + half_light_sizes = np.empty(data_allwav[0].shape[0]) pixel_scale,redshift = hdr_allwav['PIXSCALE'],hdr_allwav['Z'] for i,dat in enumerate(datas): _,_,hlsize = half_size(dat) half_light_sizes[i] = size(hlsize, pixel_scale, redshift, Omega_M, Omega_A).value # - # ##### Park 2.3 # Plot size vs. wavelength of this galaxy. Over-plot the half-mass size of the galaxy as dashed line. # + plt.figure(figsize=(8,6)) # plot the half light sizes at each wavelength plt.plot(waves,half_light_sizes,'-.o',c='black',ms=7,label='Half-Light') # plot the half mass size plt.axhline(y = half_mass_size500nm.value, color='indigo', linestyle='--',label='Half-Mass') plt.grid(alpha=0.5) plt.xscale('log') plt.xlabel('Wavelength [µm]',fontsize=17) plt.ylabel('Size [kpc]',fontsize=17) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylim(6,12) plt.legend(fontsize=14,loc=2) plt.savefig('half_v_wave.pdf') plt.close() # - # find the ratio of half mass to half light at all wavelenghts ratios = half_mass_size500nm/(half_light_sizes*u.kpc) ratios # ##### Part 2.4 # Here is an image on the same galaxy on the sky $\texttt{galaxy_onsky_F160W.fits}$. Can you use the methods described above to measure the size of the galaxy from this image? Explain why or why not. exponent = np.log(data_onsky) / np.log(0.01) clean = 900**(3*exponent) datbool = np.empty(clean.shape, dtype=bool) for i in range(194): for j in range(194): if i < 75 or i > 115 or j > 125 or j < 75: datbool[j,i] = False else: if clean[j,i]>100: datbool[j,i] = True elif clean[j,i]<100: datbool[j,i] = False # + x0 = np.mean(data_onsky,axis=0) y0 = np.mean(data_onsky,axis=1) x1 = np.mean(data_onsky*datbool,axis=0) y1 = np.mean(data_onsky*datbool,axis=1) fig,axs = plt.subplots(1,2, sharey=True, figsize=(14,4), gridspec_kw={'wspace': 0.1}) axs[0].plot(x0/max(x0), c='black', ls='--', label='X values') axs[0].plot(y0/max(y0), alpha=0.8, c='green', ls='-', label='Y values') axs[0].set_title('Unfiltered', fontsize=19) axs[1].plot(x1/max(x1), c='black', ls='--', label='X values') axs[1].plot(y1/max(y1), alpha=0.8, c='green', ls='-', label='Y values') axs[1].set_title('Filtered', fontsize=19) for ax in axs: ax.legend(fontsize=14) ax.tick_params(labelsize=14) ax.set_xlabel('Pixel', fontsize=16) axs[0].set_ylabel('Flux Density', fontsize=16) plt.savefig('onsky_filt.pdf') plt.close() # - data = data_onsky*datbool _,_,size_onsky = halfsize(data) pixelscale,reds = hdr_onsky['PIXSCALE'], hdr_onsky['Z'] size_onsky,size(size_onsky, pixelscale, reds, Omega_M, Omega_A) # # <center> Question 3 - Using $\texttt{statmorph}$ # ##### Part 3-1 # Install $\texttt{statmorph}$ - ensure that this is installed before running the following script on your comuter import statmorph from statmorph.utils.image_diagnostics import make_figure import scipy.ndimage as ndi from astropy.visualization import simple_norm from astropy.modeling import models from astropy.convolution import convolve import photutils import time # %matplotlib inline # ##### Part 3-2 # Follow the tutorial to make sure $\texttt{statmorph}$ can run. # + ny, nx = 240, 240 y, x = np.mgrid[0:ny, 0:nx] sersic_model = models.Sersic2D( amplitude=1, r_eff=20, n=2.5, x_0=120.5, y_0=96.5, ellip=0.5, theta=-0.5) image = sersic_model(x, y) plt.imshow(image, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) plt.show() size = 20 # on each side from the center sigma_psf = 2.0 y, x = np.mgrid[-size:size+1, -size:size+1] psf = np.exp(-(x**2 + y**2)/(2.0*sigma_psf**2)) psf /= np.sum(psf) plt.imshow(psf, origin='lower', cmap='gray') plt.show() # + image = convolve(image, psf) plt.imshow(image, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) plt.show() np.random.seed(1) snp = 100.0 image += (1.0 / snp) * np.random.standard_normal(size=(ny, nx)) plt.imshow(image, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) plt.show() # - gain = 10000.0 threshold = photutils.detect_threshold(image, 1.5) npixels = 5 # minimum number of connected pixels segm = photutils.detect_sources(image, threshold, npixels) # + # Keep only the largest segment label = np.argmax(segm.areas) + 1 segmap = segm.data == label plt.imshow(segmap, origin='lower', cmap='gray') plt.show() segmap_float = ndi.uniform_filter(np.float64(segmap), size=10) segmap = segmap_float > 0.5 plt.imshow(segmap, origin='lower', cmap='gray') plt.show() # - start = time.time() source_morphs = statmorph.source_morphology( image, segmap, gain=gain, psf=psf) print('Time: %g s.' % (time.time() - start)) morph = source_morphs[0] print('xc_centroid =', morph.xc_centroid) print('yc_centroid =', morph.yc_centroid) print('ellipticity_centroid =', morph.ellipticity_centroid) print('elongation_centroid =', morph.elongation_centroid) print('orientation_centroid =', morph.orientation_centroid) print('xc_asymmetry =', morph.xc_asymmetry) print('yc_asymmetry =', morph.yc_asymmetry) print('ellipticity_asymmetry =', morph.ellipticity_asymmetry) print('elongation_asymmetry =', morph.elongation_asymmetry) print('orientation_asymmetry =', morph.orientation_asymmetry) print('rpetro_circ =', morph.rpetro_circ) print('rpetro_ellip =', morph.rpetro_ellip) print('rhalf_circ =', morph.rhalf_circ) print('rhalf_ellip =', morph.rhalf_ellip) print('r20 =', morph.r20) print('r80 =', morph.r80) print('Gini =', morph.gini) print('M20 =', morph.m20) print('F(G, M20) =', morph.gini_m20_bulge) print('S(G, M20) =', morph.gini_m20_merger) print('sn_per_pixel =', morph.sn_per_pixel) print('C =', morph.concentration) print('A =', morph.asymmetry) print('S =', morph.smoothness) print('sersic_amplitude =', morph.sersic_amplitude) print('sersic_rhalf =', morph.sersic_rhalf) print('sersic_n =', morph.sersic_n) print('sersic_xc =', morph.sersic_xc) print('sersic_yc =', morph.sersic_yc) print('sersic_ellip =', morph.sersic_ellip) print('sersic_theta =', morph.sersic_theta) print('sky_mean =', morph.sky_mean) print('sky_median =', morph.sky_median) print('sky_sigma =', morph.sky_sigma) print('flag =', morph.flag) print('flag_sersic =', morph.flag_sersic) ny, nx = image.shape y, x = np.mgrid[0:ny, 0:nx] fitted_model = statmorph.ConvolvedSersic2D( amplitude=morph.sersic_amplitude, r_eff=morph.sersic_rhalf, n=morph.sersic_n, x_0=morph.sersic_xc, y_0=morph.sersic_yc, ellip=morph.sersic_ellip, theta=morph.sersic_theta) fitted_model.set_psf(psf) # required when using ConvolvedSersic2D image_model = fitted_model(x, y) bg_noise = (1.0 / snp) * np.random.standard_normal(size=(ny, nx)) fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(131) ax.imshow(image, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) ax.set_title('Original image') ax = fig.add_subplot(132) ax.imshow(image_model + bg_noise, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) ax.set_title('Fitted model') ax = fig.add_subplot(133) residual = image - image_model ax.imshow(residual, cmap='gray', origin='lower', norm=simple_norm(residual, stretch='linear')) ax.set_title('Residual') fig = make_figure(morph) plt.close(fig) # ##### Part 3-3 # Take a shot at measuring the morphological parameters of this example galaxy $\texttt{galaxy_onsky_F160W.fits}$. hdr_onsky,data_onsky = read('galaxy_onsky_F160W.fits') image=data_onsky plt.imshow(image, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) plt.show() size = 20 # on each side from the center sigma_psf = 2.0 y, x = np.mgrid[-size:size+1, -size:size+1] psf = np.exp(-(x**2 + y**2)/(2.0*sigma_psf**2)) psf /= np.sum(psf) #plt.imshow(psf, origin='lower', cmap='gray') image = convolve(image, psf) plt.imshow(image, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) plt.show() # + gain = 100.0 threshold = photutils.detect_threshold(image, 1.1) npixels = 5 # minimum number of connected pixels segm = photutils.detect_sources(image, threshold, npixels) # Keep only the largest segment label = np.argmax(segm.areas) + 1 segmap = segm.data == label #plt.imshow(segmap, origin='lower', cmap='gray') segmap_float = ndi.uniform_filter(np.float64(segmap), size=10) segmap = segmap_float > 0.5 plt.imshow(segmap, origin='lower', cmap='gray') plt.show() # - start = time.time() source_morphs = statmorph.source_morphology( image, segmap, gain=gain, psf=psf) print('Time: %g s.' % (time.time() - start)) morph = source_morphs[0] print('xc_centroid =', morph.xc_centroid) print('yc_centroid =', morph.yc_centroid) print('ellipticity_centroid =', morph.ellipticity_centroid) print('elongation_centroid =', morph.elongation_centroid) print('orientation_centroid =', morph.orientation_centroid) print('xc_asymmetry =', morph.xc_asymmetry) print('yc_asymmetry =', morph.yc_asymmetry) print('ellipticity_asymmetry =', morph.ellipticity_asymmetry) print('elongation_asymmetry =', morph.elongation_asymmetry) print('orientation_asymmetry =', morph.orientation_asymmetry) print('rpetro_circ =', morph.rpetro_circ) print('rpetro_ellip =', morph.rpetro_ellip) print('rhalf_circ =', morph.rhalf_circ) print('rhalf_ellip =', morph.rhalf_ellip) print('r20 =', morph.r20) print('r80 =', morph.r80) print('Gini =', morph.gini) print('M20 =', morph.m20) print('F(G, M20) =', morph.gini_m20_bulge) print('S(G, M20) =', morph.gini_m20_merger) print('sn_per_pixel =', morph.sn_per_pixel) print('C =', morph.concentration) print('A =', morph.asymmetry) print('S =', morph.smoothness) print('sersic_amplitude =', morph.sersic_amplitude) print('sersic_rhalf =', morph.sersic_rhalf) print('sersic_n =', morph.sersic_n) print('sersic_xc =', morph.sersic_xc) print('sersic_yc =', morph.sersic_yc) print('sersic_ellip =', morph.sersic_ellip) print('sersic_theta =', morph.sersic_theta) print('sky_mean =', morph.sky_mean) print('sky_median =', morph.sky_median) print('sky_sigma =', morph.sky_sigma) print('flag =', morph.flag) print('flag_sersic =', morph.flag_sersic) # + ny, nx = image.shape y, x = np.mgrid[0:ny, 0:nx] fitted_model = statmorph.ConvolvedSersic2D( amplitude=morph.sersic_amplitude, r_eff=morph.sersic_rhalf, n=morph.sersic_n, x_0=morph.sersic_xc, y_0=morph.sersic_yc, ellip=morph.sersic_ellip, theta=morph.sersic_theta) fitted_model.set_psf(psf) # required when using ConvolvedSersic2D image_model = fitted_model(x, y) bg_noise = (1.0 / snp) * np.random.standard_normal(size=(ny, nx)) fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(131) ax.imshow(image, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) ax.set_title('Original image') ax = fig.add_subplot(132) ax.imshow(image_model + bg_noise, cmap='gray', origin='lower', norm=simple_norm(image, stretch='log', log_a=10000)) ax.set_title('Fitted model') ax = fig.add_subplot(133) residual = image - image_model ax.imshow(residual, cmap='gray', origin='lower', norm=simple_norm(residual, stretch='linear')) ax.set_title('Residual') plt.show() # - fig = make_figure(morph) plt.savefig('statmorph.pdf') plt.close(fig)
Computing_Project/CTA200_Computing_Project_DaniellaMorrone.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Notebook performs qualitative tag inspection on the BR24 data set # ### /data/processed_data.pkl is used for this notebook # ### Results are saved to /data/processed_data_extra_columns.pkl import pickle as pk import pandas as pd import re from collections import Counter import matplotlib.pyplot as plt df = pd.read_pickle("../data/processed_data.pkl", compression='zip') df.shape pd.set_option('display.max_colwidth', 50) # # Tag Cloud of top 100 most common tags tags = df['tags'].apply(pd.Series).stack().str.strip() tag_counter = dict(Counter(tags)) tag_counter = dict(sorted(tag_counter.items(), key=lambda x: x[1], reverse=True)) tag_freq_100 = dict(list(tag_counter.items())[:100]) # + # #!pip install wordcloud # - import matplotlib.pyplot as plt from wordcloud import WordCloud wordcloud = WordCloud() wordcloud.generate_from_frequencies(frequencies=tag_freq_100) plt.figure(figsize=(20, 5), facecolor='white') plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # # Parts-of-Speech of top 100 most common tags # ## Using Spacy # + # #!pip install spacy # #!python -m spacy download de_core_news_sm # - import spacy nlp = spacy.load('de_core_news_sm') pos_tags_100 = [] for tag in list(tag_freq_100.keys()): t = nlp(tag) pos_tags_100.append(t[0].pos_) c = dict(Counter(pos_tags_100)) c = dict(sorted(c.items(), key=lambda x: x[1], reverse=True)) c # + jupyter={"outputs_hidden": true} a_zip = zip(list(tag_freq_100.keys()), pos_tags_100) zipped_list = list(a_zip) zipped_list # - pos_tags_all = [] for tag in list(set(tags)): t = nlp(tag) pos_tags_all.append(t[0].pos_) c = dict(Counter(pos_tags_all)) c = dict(sorted(c.items(), key=lambda x: x[1], reverse=True)) c # ## Using Textblob # + # #!pip install -U textblob-de # #!python -m textblob.download_corpora # - from textblob_de import TextBlobDE as TextBlob pos_tags_100 = [] for tag in list(tag_freq_100.keys()): t = TextBlob(tag) pos_tags_100.append(t.tags[0][1]) c = dict(Counter(pos_tags_100)) c = dict(sorted(c.items(), key=lambda x: x[1], reverse=True)) c # # Tag Separation into In-Text and Out-of-Text tags from tqdm import tqdm tqdm.pandas() df.head(1) def separate_in_text_tags(row): tags_in_text = [] for tag in row.tags: if tag in row.clean_text: tags_in_text.append(tag) return tags_in_text df['in_text'] = df.progress_apply(separate_in_text_tags, axis=1) df['out_of_text'] = df.progress_apply(lambda row: [word for word in row.tags if word not in row.in_text], axis=1) df.head(1) df['in_text_percent'] = df.apply(lambda row: int((len(row.in_text)/len(row.tags))*100), axis=1) df['out_of_text_percent'] = 100-df['in_text_percent'] df['in_text_percent'].describe() df['in_text_percent'].hist() df['out_of_text_percent'].describe() df['out_of_text_percent'].hist() print(df[df.in_text_percent == 0].shape[0]) print(df[df.in_text_percent == 100].shape[0]) # # Parts-of-Speech of in_text tags # + pos_tags_all_in_text = [] all_in_text = df['in_text'].apply(pd.Series).stack().str.strip() for tag in list(set(all_in_text)): t = nlp(tag) pos_tags_all_in_text.append(t[0].pos_) c = dict(Counter(pos_tags_all_in_text)) c = dict(sorted(c.items(), key=lambda x: x[1], reverse=True)) c # - len(list(set(all_in_text))) # + jupyter={"outputs_hidden": true} df[df['in_text_percent'] == 100] # - # # Common out_of_text tags out = df['out_of_text'].apply(pd.Series).stack().str.strip() out_counter = dict(Counter(out)) out_counter = dict(sorted(out_counter.items(), key=lambda x: x[1], reverse=True)) out_tags_freq = dict(list(out_counter.items())[:100]) wordcloud = WordCloud() wordcloud.generate_from_frequencies(frequencies=out_tags_freq) plt.figure(figsize=(20, 5), facecolor='white') plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # # Intersection of top 100 most common tags and top 100 out_of_text tags # %pprint (set(tag_freq_100.keys() ) & set(out_tags_freq.keys())) # # Intersection of in_text tags and out_of_text tags i = df['in_text'].apply(pd.Series).stack().str.strip() o = df['out_of_text'].apply(pd.Series).stack().str.strip() len(set(i) & set(o)) # # POS of Out-of-text tags # + pos_tags_all_out_of_text = [] all_out_of_text = df['out_of_text'].apply(pd.Series).stack().str.strip() for tag in list(set(all_out_of_text)): t = nlp(tag) pos_tags_all_out_of_text.append(t[0].pos_) c = dict(Counter(pos_tags_all_out_of_text)) c = dict(sorted(c.items(), key=lambda x: x[1], reverse=True)) c # - # # Overlapping tags df['primary_category'].apply(pd.Series).stack().unique() bayern_tags = df[df['primary_category'] == 'bayern'].tags.apply(pd.Series).stack().unique() len(bayern_tags) kultur_tags = df[df['primary_category'] == 'kultur'].tags.apply(pd.Series).stack().unique() len(kultur_tags) sport_tags = df[df['primary_category'] == 'sport'].tags.apply(pd.Series).stack().unique() len(sport_tags) deutschland_welt_tags = df[df['primary_category'] == 'deutschland-welt'].tags.apply(pd.Series).stack().unique() len(deutschland_welt_tags) wirtschaft_tags = df[df['primary_category'] == 'wirtschaft'].tags.apply(pd.Series).stack().unique() len(wirtschaft_tags) netzwelt_tags = df[df['primary_category'] == 'netzwelt'].tags.apply(pd.Series).stack().unique() len(netzwelt_tags) wissen_tags = df[df['primary_category'] == 'wissen'].tags.apply(pd.Series).stack().unique() len(wissen_tags) das_wichtigste_tags = df[df['primary_category'] == 'das-wichtigste'].tags.apply(pd.Series).stack().unique() len(das_wichtigste_tags) def check_common(a): print('With bayern_tags ', len(list(set(a) & set(bayern_tags)))) print('With kultur_tags ', len(list(set(a) & set(kultur_tags)))) print('With sport_tags ', len(list(set(a) & set(sport_tags)))) print('With deutschland_welt_tags ', len(list(set(a) & set(deutschland_welt_tags)))) print('With wirtschaft_tags ', len(list(set(a) & set(wirtschaft_tags)))) print('With netzwelt_tags ', len(list(set(a) & set(netzwelt_tags)))) print('With wissen_tags ', len(list(set(a) & set(wissen_tags)))) print('With das_wichtigste_tags ', len(list(set(a) & set(das_wichtigste_tags)))) check_common(das_wichtigste_tags) df.to_pickle('../data/processed_data_extra_columns.pkl', compression='zip')
notebooks/2_tag_inspection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: text-as-data # language: python # name: text-as-dada # --- # + active="" # 1. In a few paragraphs, explain when and why dictionaries successfully measure that which they attempt to measure, when they do not, and the possible risks of analyzing text with dictionaries. # # # # # Problems with Dictionaries: # # As discussed in the lecture @15:37, "dictionaries are context invariant". Thus begs the quesitons, what determines one context different from another and, a bit more interesting, to what extent can a dictionary be used outside of its origional context? How do we know when it cannot be used? # As I was thinking about these I recalled a youtube video I watched from a couple days ago, on another COVID-resultant youtube binge of mine, to kill some time. # # https://www.youtube.com/watch?v=oAbQEVmvm8Y # # In the video the speaker showcases how data scientists are using datasets from different domains, together, to solve one specific problem. That got me wondering, are there absolute bounds to which a datset can be used (perhaps a dictionary as well) in solving a problem? The starry eyed Elon Musk in me says no, no way (wave arms magically and say, "there's infinate possiblites") but the practical side of me says yes. I guess you can use 'anything' as long as validation works out? # # # # # - # 2. Using a dictionary in qdapDictionaries, conduct sentiment analysis on your corpus. Write up the results in a pdf and interpret them. # Submit the answers to both assignment questions in a single pdf. file. # # Setup # + import os as os import sys as sys import numpy as np import pandas as pd import seaborn as sns from matplotlib import rc from pylab import rcParams import matplotlib.pyplot as plt from collections import defaultdict from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, classification_report #torch goodness import torch from torch import nn, optim import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader #huggingface goodness import transformers from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup #set graph config # %matplotlib inline # %config InlineBackend.figure_format='retina' sns.set(style='whitegrid', palette='muted', font_scale=1.2) HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"] sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE)) rcParams['figure.figsize'] = 12, 8 #set seed RANDOM_SEED = 42 np.random.seed(RANDOM_SEED) torch.manual_seed(RANDOM_SEED) #check gpu device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device # - # # load training datasets # + # imdb from http://ai.stanford.edu/~amaas/data/sentiment/ #"We provide a set of 25,000 highly polar movie reviews for training, and 25,000 for testing." #this file was larger than 100mb (github's file size limit) df_imdb = pd.read_csv('imdb.csv') #https://www.kaggle.com/lava18/google-play-store-apps #google play app reviews ~16k df_google = pd.read_csv("reviews.csv") # - # Crystal Feel http://www.crystalfeel.socialanalyticsplus.net/ # Crystal Feel Manual http://172.16.58.3/crystalfeel/[CrystalFeel]_User_Manual.pdf # Super Cool App Thingy https://medium.com/@mirzamujtaba10/sentiment-analysis-642b935ab6f9
Assignments/.ipynb_checkpoints/TAD_Week_5_Broker_Carl-checkpoint.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.4.5 # language: julia # name: julia-0.4 # --- using ODE,Plots pyplot(size=(300,200),leg=false, guidefont=font(7), titlefont=font(7)); #function oscillator(t, y) # y[2] = - 3* + y[1] - y[2] / 10 #end oscillator(t, y) = [y[2], - 3* + y[1] - y[2] / 10] initial = [1.0,0.0]; t = 0:0.01:50 t,y = ode23(oscillator, initial, t) y=hcat(y...).'; y[1:5,:]; vₓ=y[:,1]; vᵥ=y[:,2]; plot(t,y,title="Mouvement", bg=RGB(.2,.2,.2), xlabel ="Temps",ylabel = "Vitesse") plot(vₓ,vᵥ,title="portrait de phase", bg=RGB(.2,.2,.2), xlabel ="Temps",ylabel = "Vitesse") using ODE,Plots pyplot(size=(700,300),leg=false, guidefont=font(7), titlefont=font(7)); #function oscillator(t, y) # y[2] = - 3* + y[1] - y[2] / 10 #end oscillator(t, y) = [y[2], - 3* + y[1] - y[2] / 10] initial = [1.0,0.0]; t = float([0:0.01:50]); t,y = ode23(oscillator, initial, t) y=hcat(y...).'; y[1:5,:]; vₓ=y[:,1]; vᵥ=y[:,2]; o=plot(t,y,title="Mouvement", bg=RGB(.2,.2,.2), xlabel ="Temps",ylabel = "Vitesse") l=plot(vₓ,vᵥ,title="portrait de phase", bg=RGB(.2,.2,.2), xlabel ="Temps",ylabel = "Vitesse") plot(o,l,layout=2)
plotsodetest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import numpy as np import pickle # #%matplotlib notebook from Linearity import Neuron import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib.colors as colors pickleList = '../data/current_clamp_files_with_GABAzine.txt' with open (pickleList, 'r') as fp: neuronList = pickle.load(fp) for i, neuron in enumerate(neuronList): print (i, neuron.date, neuron.index) neuron = neuronList[19] # + fullWindowSize = 1000 excTrialList, controlTrialList = {}, {} exc = neuron.experiment['GABAzine'] control = neuron.experiment['Control'] for numSquares in set(exc).intersection(control): if numSquares > 1: for coord in exc[numSquares].coordwise: excTrialList[coord] = np.average([trial.interestWindow for trial in exc[numSquares].coordwise[coord].trials if not trial.AP_flag],axis=0) controlTrialList[coord] = np.average([trial.interestWindow for trial in control[numSquares].coordwise[coord].trials if not trial.AP_flag],axis=0) # excTrialList+=[exc[numSquares].trial[trialNum] for trialNum in exc[numSquares].trial] #controlTrialList+=[control[numSquares].trial[trialNum] for trialNum in control[numSquares].trial] # print (excTrialList), controlTrialList fig, ax = plt.subplots() e_max, c_max, expected, excMaxDiff = [],[],[],[] for trialIndex,trial in excTrialList.items(): if type(trial) is np.ndarray: e_max.append(np.max(trial[:fullWindowSize])) c_max.append(np.max(controlTrialList[trialIndex][:fullWindowSize])) expected.append(np.max(trial)) excMaxDiff.append(np.max(np.diff(trial[:fullWindowSize]))) ax.scatter(expected, c_max,c='g') ax_copy = ax.twinx() ax_copy.scatter(expected, excMaxDiff,c='b') ax.set_ylabel("$\dot{C}_{V}^{max}$") # ax[1].scatter(i_max, inhMaxDiff, c='g',s=8) ax.set_xlim(xmin=0.) ax_copy.set_ylim(ymin=0.)#,ymax=0.09) ax.set_xlabel("$V_{Exc}^{max}$") ax_copy.set_ylabel("$\dot{V}_{Exc}^{max}$") ax.set_ylim(ymin=0.) # ax.set_ylim(ymin=0.,ymax=5) plt.show() # - zip(c_max, expected)
Untitled1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import tensorflow.compat.v1 as tf tf.disable_v2_behavior() a = tf.placeholder(tf.bool,name ="a") b = tf.placeholder(tf.bool,name ="b") y = tf.math.logical_and(a,b,name="AND") y= tf.math.logical_not(a,name="NOT") z= tf.math.logical_and(y,y,name="y") # + from tensorflow.python.summary.writer.writer import FileWriter sess1 = tf.compat.v1.Session() #writer=tf.summary.FileWriter('./my_graph',sess.graph) writer = tf.summary.FileWriter("log_dir1/logic_and", sess1.graph) # + #run in terminal below command #tensorboard --logdir=/log_dir1/logic_and # -
TensorflowbasedGraph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.3 64-bit (''base'': conda)' # language: python # name: python37364bitbaseconda210f926cb548430eaeeaaca39b8496cc # --- # # Some manipulations on (Kahraman, 1994) # [1] <NAME>, "Natural Modes of Planetary Gear Trains", Journal of Sound and Vibration, vol. 173, no. 1, pp. 125-130, 1994. https://doi.org/10.1006/jsvi.1994.1222. # + from sympy import * init_printing() def symb(x,y): return symbols('{0}_{1}'.format(x,y), type = float) # - # # Displacement vector: # + n = 3 # number of planets N = n + 3 # number of degrees of freedom crs = ['c', 'r', 's'] # carrier, ring, sun pla = ['p{}'.format(idx + 1) for idx in range(n)] # planet crs = crs + pla # put them together coeff_list = symbols(crs) c = coeff_list[0] r = coeff_list[1] s = coeff_list[2] X = Matrix([symb('u', v) for v in coeff_list]) coeff_list[3:] = symbols(['p']*n) p = coeff_list[3] X.transpose() # Eq. (1a) # - # ## Stiffness matrix: # ![alt text](stiffness_matrix.png) # # where: # * $k_1$: mesh stiffness for the ring-planet gear pair # * $k_2$: mesh stiffness for the sun-planet gear pair # * $k_c$: carrier housing stiffness # * $k_r$: ring housing stiffness # * $k_s$: sun housing stiffness # * Diagonal 1, in red # * Diagonal 2, in grey # * Off-diagonal, in blue # + k_1, k_2, k_c, k_r, k_s = symbols('k_1 k_2 k_c k_r k_s', type = float) # Diagonal 1: K_d1 = zeros(3, 3) K_d1[0, 0] = n*(k_1 + k_2) + k_c K_d1[1, 1] = n* k_1 + k_r K_d1[2, 2] = n* k_2 + k_s K_d1[0, 1] = K_d1[1, 0] = -n*k_1 K_d1[0, 2] = K_d1[2, 0] = -n*k_2 # Diagonal 2: K_d2 = eye(n)*(k_1 + k_2) # Off diagonal: K_od = zeros(n, n) K_od[:, 0] = (k_1 - k_2)*ones(n, 1) K_od[:, 1] = -k_1 *ones(n, 1) K_od[:, 2] = k_2 *ones(n, 1) K = BlockMatrix([[K_d1, K_od.transpose()], [K_od, K_d2]]) K = Matrix(K) if(not K.is_symmetric()): print('error.') K # - # ## Inertia matrix: M = diag(*[symb('m', v) for v in coeff_list]) M # ## Remove ring degree of freedom X.row_del(1) K.row_del(1) K.col_del(1) M.row_del(1) M.col_del(1) coeff_list.remove(r) N = N - 1 # ## Coordinate transformation: # # First from translational to torsional coordinates, them making the sun DOF to be the last one, making it easier to assemble a multi-stage gearbox. R_1 = diag(*[symb('r', v) for v in coeff_list]) R_1 # making the sun DOF to be the last one: # + N1 = N - 1 R_2 = zeros(N, N) R_2[0, 0] = 1 R_2[1, N1] = 1 R_2[2:N, 1:N1] = eye(n) R_2 # - R = R_1*R_2 RMR = lambda m: transpose(R)*m*R # ### Inertia matrix # + M = RMR(M) if(not M.is_symmetric()): print('error in M matrix') M # - # ### Stiffness matrix # + K = RMR(K) if(not K.is_symmetric()): print('error in K matrix') # - # The housing stiffness for both carrier and sunare null: K = K.subs([(k_c, 0), (k_s, 0)]) K # From that, one can write the matrices for a planetary system with $n$-planets using the following code: # + m_c, m_s, m_p, r_c, r_s, r_p = symbols('m_c m_s m_p r_c r_s r_p', type = float) M_p = zeros(N, N) M_p[0, 0] = m_c*r_c**2 M_p[N1, N1] = m_s*r_s**2 M_p[1:N1, 1:N1] = m_p*r_p**2 * eye(n) K_p = zeros(N, N) K_p[0, 0] = n*(k_1 + k_2)*r_c**2 K_p[N1, 0] = -n*k_2*r_s*r_c K_p[0, N1] = -n*k_2*r_s*r_c K_p[N1, N1] = n*k_2*r_s**2 K_p[0, 1:N1] = (k_1 - k_2)*r_c*r_p*ones(1, n) K_p[1:N1, 0] = (k_1 - k_2)*r_c*r_p*ones(n, 1) K_p[N1, 1:N1] = k_2*r_p*r_s*ones(1, n) K_p[1:N1, N1] = k_2*r_p*r_s*ones(n, 1) K_p[1:N1, 1:N1] = (k_1 + k_2)*r_p**2 * eye(n) m_diff = abs(matrix2numpy(simplify(M_p - M))).sum() k_diff = abs(matrix2numpy(simplify(K_p - K))).sum() if(m_diff != 0.0): print('Error in M matrix.') if(k_diff != 0.0): print('Error in K matrix.') # - # ## Combining planet DOFs: # + C = zeros(N, 3) C[ 0, 0] = 1 C[ N1, 2] = 1 C[1:N1, 1] = ones(n, 1) CMC = lambda m: transpose(C)*m*C # - # ### Inertia matrix # + M_C = CMC(M) if(not M_C.is_symmetric()): print('error in M_C matrix') M_C # - # ### Stiffness matrix # + K_C = CMC(K) if(not K_C.is_symmetric()): print('error in M_C matrix') K_C # - # ## Adapting it to a parallel gear set # # Considering only one of the sun-planets pairs, one should change the sub-indexes in the following way: # * [p]lanet => [w]heel # * [s]un => [p]inion; # It also necessary to remove the mesh stiffness of the ring-planet pair # ### Inertia matrix # + k, w, p = symbols('k w p', type = float) m_w, m_p, r_w, r_p = symbols('m_w m_p r_w r_p', type = float) N2 = N - 2 M_par = M[N2:, N2:] M_par = M_par.subs([(m_p, m_w), (m_s, m_p), (r_p, r_w), (r_s, r_p)]) # M_par # - # ### Stiffness matrix # + K_par = K[N2:, N2:] K_par = K_par.subs(k_1, 0) # ring-planet mesh stiffness K_par = K_par.subs(k_s, 0) # sun's bearing stiffness K_par = K_par.subs(n*k_2, k_2) # only one pair, not n K_par = K_par.subs(k_2, k) # mesh-stiffness of the pair K_par = K_par.subs([(r_p, r_w), (r_s, r_p)]) K_par # - # From that, one can write the matrices for a parallel system using the following code: # + M_p = diag(m_w*r_w**2, m_p*r_p**2) mat_diff = abs(matrix2numpy(simplify(M_p - M_par))).sum() if(mat_diff != 0.0): print('Error in M_p matrix.') K_p = diag(r_w**2, r_p**2) K_p[0, 1] = r_p*r_w K_p[1, 0] = r_p*r_w K_p = k*K_p mat_diff = abs(matrix2numpy(simplify(K_p - K_par))).sum() if(mat_diff != 0.0): print('Error in K_p matrix.')
notes/Kahraman_1994.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Recovering the homology of immersed manifolds: demo # # <NAME> https://raphaeltinarrage.github.io/ # <br/> # Nov 2020 # This page describes the implementation and use of the method described in our paper *Recovering the homology of immersed manifolds* https://arxiv.org/abs/1912.03033. # ## Background # # The method described in this notebook has been designed to answer the following estimation problem: given a sample of the image of an immersed manifold, estimate the homology groups of the abstract initial manifold. # # Let $\mathbb{R}^n$ be the Euclidean space, and $M(\mathbb{R}^n)$ the space of $n \times n$ matrices. # Consider a finite subset $X$ of $\mathbb{R}^n$. # # We suppose that $X$ is a sample of an immersed manifold $\mathcal{M} \subset \mathbb{R}^n$. # That is, there exists an abstract manifold $\mathcal{M}_0$ and an immersion $u\colon \mathcal{M}_0 \rightarrow \mathbb{R}^n$ whose image is $\mathcal{M}$. # We aim at estimating the homology of $\mathcal{M}$, from the mere observation of $X$. # # The method consists in computing the **lifted set** $\check X$, which is a finite subspace of $\mathbb{R}^n \times M(\mathbb{R}^n)$, and in applying DTM-based filtrations in the ambient space $\mathbb{R}^n \times M(\mathbb{R}^n)$. The DTM-filtrations are defined in https://arxiv.org/abs/1811.04757. # This method depends on three parameters: $r \in (0, +\infty)$, $\gamma \in [0, +\infty)$ and $m \in [0,1]$. # # First, consider the set $\check X$, defined as # $$\check X = \{ (x, \gamma \overline \Sigma(x)), x \in X \},$$ # where $\overline \Sigma(x)$, the **normalized covariance matrix** at $x$, is computed with respect to a radius $r$. # # Then, compute the **DTM-filtration** on $\check X$ with parameter $m$. It is denoted $W[\check X, m]$, and is defined as the collection of subsets $(W^t[X,m,p])_{t \geq 0}$ of $\mathbb{R}^n \times M(\mathbb{R}^n)$, with # $$W^t[X,m,p] = \bigcup_{x \in X} \overline{\mathcal{B}}\big(x,t - \mathrm{d}_{\mu,m}(x)\big),$$ # where $\mathrm{d}_{\mu,m}$ is the DTM of the empirical probability measure on $\check X$ with parameter $m$, and $\overline{\mathcal{B}}(x,r)$ denotes the closed ball of center $x$ and radius $r$ if $r \geq 0$, or the emptyset if $r < 0$. # # # The corresponding persistent module of $i^\text{th}$ homology is obtained by applying the $i^\text{th}$ homology functor to $W[\check X,m]$. Throughout this notebook, we will compute homology over the finite field $\mathbb{Z}/2\mathbb{Z}$. # ## Datasets # # We consider here two datasets: # - Bernouilli's lemniscate # - the Olympic rings # ## Package # # The functions are contained in the `Velour` package (https://pypi.org/project/velour/). # <br/> # It is based on the `Gudhi` library (https://gudhi.inria.fr/python/latest/). import velour # ## First dataset: lemniscate # # We start with a sample $X$ of the lemniscate $\mathcal{M} \subset \mathbb{R}^2$. # It is to be seen as an immersion of the circle $\mathbb{S}_1 \rightarrow \mathbb{R}^2$. # + N_observation = 200 #number of points sampled on the lemniscate N_anomalous = 0 #number of outliers X = velour.SampleOnLemniscate(N_observation, N_anomalous) #samples points velour.PlotPointCloud(X) #plots the point cloud # - # Consider the Rips filtration of $X$. # Its $H_0$-barcode is represented in red, and $H_1$ in green. # + filtration_max = 1 st = velour.RipsComplex(X, filtration_max = filtration_max, dimension_max = 2) #creates a Rips complex velour.PlotPersistenceBarcodes(st, tmax = filtration_max) #computes the persistence # - # On these diagrams, one can read the homology of the lemniscate. Its Betti numbers are $(\beta_0, \beta_1) = (1,2)$. # # We now illustrate our method. We aim to recover the homology of the original circle $\mathbb{S}_1$. # First, we compute the lifted set $\check X$, with given parameters $r$ and $\gamma$. r = 0.05 gamma = 3 X_check = velour.Lifting(X, r, gamma) #builds the lifted set # We then compute the persistence diagram of the DTM-filtration on $\check X$ with parameter $m = 0.03$. # + m = 0.01 p = 1 dimension_max = 2 st = velour.DTMFiltration(X_check, m, p, dimension_max) #creates a DTM-filtration velour.PlotPersistenceBarcodes(st) #displays the persistence barcode # - # On these diagrams, there is an interval where one reads the homology of the original manifold $\mathbb{S}_1$: $(\beta_0, \beta_1) = (1,1)$. # We successfully recovered the homology of the original manifold. # As illustrated by the following example, this construction is stable in Wasserstein distance: adding a few outliers in the dataset results in a small change (in bottleneck distance) in the persistence barcodes. # + ' Sampling on the lemniscate with outliers ' N_observation = 200 #number of points sampled on the lemniscate N_anomalous = 50 #number of anomalous points X = velour.SampleOnLemniscate(N_observation, N_anomalous) #samples points velour.PlotPointCloud(X) ' Computing the lifted set ' r = 0.05 gamma = 3 X_check = velour.Lifting(X, r, gamma) #builds the lifted set ' DTM-filtration on the lifted set ' m = 0.01 p = 1 dimension_max = 2 st = velour.DTMFiltration(X_check, m, p, dimension_max) #creates a DTM-filtration velour.PlotPersistenceBarcodes(st) #displays the persistence barcode # - # ## Second dataset: Olympic rings # # We consider a sample $X$ of the Olympic rings $\mathcal{M} \subset \mathbb{R}^2$. # It is to be seen as an immersion of the disjoint union of five circles $\cup_{i=1}^5 \mathbb{S}_1 \rightarrow \mathbb{R}^2$. # + N_observation = 300 #number of points sampled on each Olympic rings X = velour.SampleOnOlympics(N_observation) #samples points velour.PlotPointCloud(X) # - # As before, we start by having a look at the Delaunay filtration (alpha-complex) filtration of $X$. # We only plot the bars of larger greater than `eps = 0.01`. # + st_alpha = velour.AlphaComplex(X) #Delaunay filtration over X velour.PlotPersistenceBarcodes(st_alpha, tmax = 1, eps = 0.01) #computes the persistence # - # On these diagrams, one can read the following Betti numers: $(\beta_0, \beta_1) = (1,9)$. # # We now illustrate our method. Our goal is to recover the homology of five (disconnected) circles. # First, we compute the lifted set $\check X$, with given parameters $r$ and $\gamma$. r = 0.03 gamma = 1 X_check = velour.Lifting(X, r, gamma) #builds the lifted set # We then compute the persistence diagram of the DTM-filtration on $\check X$ with parameter $m = 0.01$. # Only bars of length larger than $0.1$ are displayed. # + m = 0.01 p = 1 dimension_max = 2 filtration_max = 1 #maximal filtration value st = velour.DTMFiltration(X_check, m, p, dimension_max, filtration_max) #creates a DTM-filtration velour.PlotPersistenceBarcodes(st, eps=0.1) #displays the persistence barcode # - # On these diagrams, there is an interval where one reads the Betti numbers of the original manifold $\cup_{i=1}^5 \mathbb{S}_1$, that is, $(\beta_0, \beta_1) = (5,5)$. # # Take-home message # # This implementation depends on 3 parameters: # - $r$: scale to compute covariance matrices, # - $\gamma$: dilatation of the space $M(\mathbb{R}^n) \subset \mathbb{R}^n \times M(\mathbb{R}^n)$, # - $m$: parameter of the DTM. # # It can be used as follows: # ``` # # X is a Nxn np.array, N = number of points, n = dimension of the ambient space # r = 0.05 # gamma = 1 # m = 0.01 # # p = 1 # dimension_max = 2 # filtration_max = 1 # # X_check = velour.Lifting(X, r, gamma) #creates the lifted set # st = velour.DTMFiltration(X_check, m, p, dimension_max, filtration_max) #builds a DTM-filtration # velour.PlotPersistenceBarcodes(st) #displays the persistence barcode # ```
Demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![terrainbento logo](../images/terrainbento_logo.png) # # # terrainbento model Basic with variable $m$ steady-state solution # This model shows example usage of the Basic model from the TerrainBento package with a variable drainage-area exponent, $m$: # # $\frac{\partial \eta}{\partial t} = - KQ^m S + D\nabla^2 \eta$ # # where $K$ and $D$ are constants, $Q$ is discharge, $S$ is local slope, $m$ is the drainage area exponent, and $\eta$ is the topography. # # Note that the units of $K$ depend on $m$, so that the value of $K$ used in Basic cannot be meaningfully compared to other values of $K$ unless the valuess of $m$ are the same. # # Refer to [Barnhart et al. (2019)](https://www.geosci-model-dev-discuss.net/gmd-2018-204/) for further explaination. For detailed information about creating a Basic model, see [the detailed documentation](https://terrainbento.readthedocs.io/en/latest/source/terrainbento.derived_models.model_basic.html). # # This notebook (a) shows the initialization and running of this model, (b) saves a NetCDF file of the topography, which we will use to make an oblique Paraview image of the landscape, and (c) creates a slope-area plot at steady state. # + # import required modules import os import numpy as np import matplotlib.pyplot as plt import matplotlib matplotlib.rcParams["font.size"] = 20 matplotlib.rcParams["pdf.fonttype"] = 42 # %matplotlib inline from landlab import imshow_grid from landlab.io.netcdf import write_netcdf from terrainbento import Basic np.random.seed(42) # + # create the parameter dictionary needed to instantiate the model params = { # create the Clock. "clock": {"start": 0, "step": 10, "stop": 1e7}, # Create the Grid. "grid": {"grid": {"RasterModelGrid":[(100, 160), {"xy_spacing": 10}]}, "fields": {"at_node": {"topographic__elevation":{"random":[{"where":"CORE_NODE"}]}}}}, # Set up Boundary Handlers "boundary_handlers":{"NotCoreNodeBaselevelHandler": {"modify_core_nodes": True, "lowering_rate": -0.001}}, # Parameters that control output. "output_interval": 1e4, "save_first_timestep": True, "output_prefix": "output_netcdfs/basicVm.", "fields":["topographic__elevation"], # Parameters that control process and rates. "water_erodibility" : 0.001, "m_sp" : 0.25, "n_sp" : 1.0, "regolith_transport_parameter" : 0.01, } # - # we can use an output writer to run until the model reaches steady state. class run_to_steady(object): def __init__(self, model): self.model = model self.last_z = self.model.z.copy() self.tolerance = 0.1 def run_one_step(self): if model.model_time > 0: diff = (self.model.z[model.grid.core_nodes] - self.last_z[model.grid.core_nodes]) if max(abs(diff)) <= self.tolerance: self.model.clock.stop = model._model_time print("Model reached steady state in " + str(model._model_time) + " time units\n") else: self.last_z = self.model.z.copy() if model._model_time <= self.model.clock.stop - self.model.output_interval: self.model.clock.stop += self.model.output_interval # + # initialize the model using the Model.from_dict() constructor. # We also pass the output writer here. model = Basic.from_dict(params, output_writers={"class": [run_to_steady]}) # to run the model as specified, we execute the following line: model.run() # + #MAKE SLOPE-AREA PLOT # plot nodes that are not on the boundary or adjacent to it core_not_boundary = np.array(model.grid.node_has_boundary_neighbor(model.grid.core_nodes)) == False plotting_nodes = model.grid.core_nodes[core_not_boundary] # assign area_array and slope_array area_array = model.grid.at_node["drainage_area"][plotting_nodes] slope_array = model.grid.at_node["topographic__steepest_slope"][plotting_nodes] # instantiate figure and plot fig = plt.figure(figsize=(6, 3.75)) slope_area = plt.subplot() slope_area.scatter(area_array, slope_array, marker="o", c="k", label = "Model Basic (m=0.25)") # make axes log and set limits slope_area.set_xscale("log") slope_area.set_yscale("log") slope_area.set_xlim(9*10**1, 3*10**5) slope_area.set_ylim(1e-2, 1e0) # set x and y labels slope_area.set_xlabel(r"Drainage area [m$^2$]") slope_area.set_ylabel("Channel slope [-]") slope_area.legend(scatterpoints=1,prop={"size":12}) slope_area.tick_params(axis="x", which="major", pad=7) # save out an output figure output_figure = os.path.join("output_figures/maintext_variable_m_slope_area.pdf") fig.savefig(output_figure, bbox_inches="tight", dpi=1000) # save figure # + # Save stack of all netcdfs for Paraview to use. model.save_to_xarray_dataset(filename="output_netcdfs/basicVm.nc", time_unit='years', reference_time='model start', space_unit='meters') # remove temporary netcdfs model.remove_output_netcdfs() # - # make a plot of the final steady state topography imshow_grid(model.grid, "topographic__elevation") # ## Next Steps # # - We recommend you review the [terrainbento manuscript](https://www.geosci-model-dev-discuss.net/gmd-2018-204/). # # **The links to other notebooks will not work on GitHub. To use them you'll need to clone this repository and open the notebooks using [`jupyter notebook`](https://jupyter.org).** # # - There are three additional introductory tutorials: # # 1) [Introduction terrainbento](http://localhost:8888/notebooks/example_usage/Introduction_to_terrainbento.ipynb) # # 2) [Introduction to boundary conditions in terrainbento](http://localhost:8888/notebooks/example_usage/introduction_to_boundary_conditions.ipynb) # # 3) [Introduction to output writers in terrainbento](http://localhost:8888/notebooks/example_usage/introduction_to_output_writers.ipynb). # # # - Five examples of steady state behavior in coupled process models can be found in the following notebooks: # # 1) [Basic](http://localhost:8888/notebooks/coupled_process_elements/model_basic_steady_solution.ipynb) the simplest landscape evolution model in the terrainbento package. # # 2) **This Notebook**: [BasicVm](http://localhost:8888/notebooks/coupled_process_elements/model_basic_var_m_steady_solution.ipynb) which permits the drainage area exponent to change # # 3) [BasicCh](http://localhost:8888/notebooks/coupled_process_elements/model_basicCh_steady_solution.ipynb) which uses a non-linear hillslope erosion and transport law # # 4) [BasicVs](http://localhost:8888/notebooks/coupled_process_elements/model_basicVs_steady_solution.ipynb) which uses variable source area hydrology # # 5) [BasisRt](http://localhost:8888/notebooks/coupled_process_elements/model_basicRt_steady_solution.ipynb) which allows for two lithologies with different K values
coupled_process_elements/model_basic_var_m_steady_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/DiracGAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Tioj76xwOWeK" # ## The DiracGAN example # # Author: <NAME> # # We show DiracGAN (https://arxiv.org/abs/1801.04406), where the true distribution is is Dirac delta distribution with mass at zero. The generator is modeling a Dirac delta distribution with parameter $\theta$: $G_{\theta}(z) = \theta$ and the discriminator is a linear function of the input with learned # parameter $\phi$: $D_{\phi}(x) = \phi x$. This results in the zero-sum game given by: # $$ # L_D = - l(\theta \phi) - l(0) \\ # L_G = + l(\theta \phi) + l(0) # $$ # # where $l$ depends on the GAN formulation used ($l(z) = - \log (1 + e^{-z})$ for instance). The unique equilibrium point is $\theta = \phi = 0$. # + id="snZfbH_TtO5j" import numpy as np import matplotlib.pylab as plt from scipy.integrate import odeint from scipy.integrate import solve_ivp from scipy.misc import derivative import seaborn as sns # + id="u1i8mZWCvugd" def set_up_fonts(): sns.reset_orig() import matplotlib matplotlib.rcParams["pdf.fonttype"] = 42 matplotlib.rcParams["ps.fonttype"] = 42 # + [markdown] id="OqrocK0iMMqM" # ### Display variables # + id="X2bi6yWfI45j" hw = 10 hl = 6 minshaft = 2 scale = 1.5 # + id="m-FMcNsmInsE" color = ["blue", "red", "green", "orange", "magenta"] # + id="UMdG1ZxrwBEs" set_up_fonts() # + [markdown] id="N1S_k02OMHaE" # ## Defining the Euler updates (gradient descent) # + id="cPUpPCOGVR1E" def euler_alternating(fn, v, t): last_t = t[0] vs = [v] num_dims = len(v) last_v = list(v) for current_t in t[1:]: delta_t = current_t - last_t for i in range(num_dims): interim_v = last_v + delta_t * np.array(fn(current_t, last_v)) last_v[i] = interim_v[i] last_t = current_t vs.append(last_v.copy()) return np.array(vs) # + id="fEi0ZekkIyXg" def euler(fn, v, t): last_t = t[0] vs = [v] last_v = v for current_t in t[1:]: current_v = last_v + (current_t - last_t) * np.array(fn(current_t, last_v)) last_t = current_t last_v = current_v vs.append(current_v) return np.array(vs) # + [markdown] id="8p73c5zYhExV" # # <NAME> # # # + id="BkcTXKS76hyV" grad_f = lambda x: 1.0 / (1 + np.exp(-x)) vect0 = [(1, 1)] # + colab={"base_uri": "https://localhost:8080/", "height": 306} id="C8yQukseIcOo" outputId="ebe24249-c6b7-42f7-da6b-61314fbd6829" # Write the problem in a way compatible with solve_ivp. # Return the gradients for each player. def system(t, vect): x, y = vect return [-grad_f(x * y) * y, grad_f(x * y) * x] t = np.arange(0, 100, 0.2) plot = plt.figure() v = vect0[0] sol = solve_ivp(system, (0, 200), v, t_eval=t, dense_output=True, method="RK45") sol = sol.sol(t).T widths = np.linspace(0, 2, sol.size) plt.quiver( sol[:-1, 0], sol[:-1, 1], sol[1:, 0] - sol[:-1, 0], sol[1:, 1] - sol[:-1, 1], scale_units="xy", angles="xy", scale=2, color=color[0], linewidths=widths, edgecolors=color[0], label="Continuous dynamics", headwidth=hw, headlength=hl, minshaft=2, ) plt.title("Dirac GAN", fontsize=16) plt.plot(v[0], v[1], "go", markersize=10) plt.plot(0, 0, "rx", markersize=12) plt.plot(0, 0, "rx", markersize=12, label="equilibruim (0, 0)") plt.legend(loc="upper right", bbox_to_anchor=(0.8, 1), fontsize=13, framealpha=0) plt.xlabel(r"$\phi$", fontsize=16) plt.ylabel(r"$\theta$", fontsize=16) plt.xticks([]) plt.yticks([]) plt.xlim((-4, 4)) plt.ylim((-3, 4.5)) # + colab={"base_uri": "https://localhost:8080/", "height": 306} id="gxMpZMt71ieS" outputId="8584e989-a254-44e9-e057-86d884f9adb7" disc_lr = 0.1 gen_lr = 0.1 vect0 = [(1, 1)] t = np.arange(0, 100, disc_lr) plot = plt.figure() v = vect0[0] sol = euler(system, v, t) widths = np.linspace(0, 2, sol.size) plt.quiver( sol[:-1, 0], sol[:-1, 1], sol[1:, 0] - sol[:-1, 0], sol[1:, 1] - sol[:-1, 1], scale_units="xy", angles="xy", scale=2, color=color[0], linewidths=widths, edgecolors=color[0], label="Simultaneous gradient descent", headwidth=hw, headlength=hl, minshaft=2, ) plt.title("Dirac GAN", fontsize=16) plt.plot(v[0], v[1], "go", markersize=10) plt.plot(0, 0, "rx", markersize=12, label="equilibruim (0, 0)") plt.legend(loc="upper right", bbox_to_anchor=(0.8, 1), fontsize=13, framealpha=0) plt.xlabel(r"$\phi$", fontsize=16) plt.ylabel(r"$\theta$", fontsize=16) plt.xticks([]) plt.yticks([]) plt.xlim((-4, 4)) plt.ylim((-3, 4.5)) # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="ZRPE1Fc_73kd" outputId="97dc6f0d-02fa-42e2-d872-02e62de03043" plt.vlines(0, 0, 10, lw=3, colors="b", label=r"$p^*$") plt.vlines(2, 0, 10, lw=3, colors="g", label=r"$q_{\theta}$", linestyles="--") plt.hlines(0, -1, 10, lw=2, colors="k") xlim = np.linspace(-0.5, 2.5, 50) plt.plot(xlim, 1.7 * xlim, color="r", label=r"$D_{\phi}(x) = \phi x$", ls="-.") plt.xlim(-0.5, 2.5) plt.yticks([]) plt.xticks([]) plt.legend(framealpha=0, loc="upper center", fontsize=14) # + colab={"base_uri": "https://localhost:8080/", "height": 306} id="OybzsaDz2Nmk" outputId="004dbdb5-f855-43c2-e1a4-471d86067b88" lr = 0.1 vect0 = [(1, 1)] t = np.arange(0, 100, lr) plot = plt.figure() v = vect0[0] sol = euler_alternating(system, v, t) widths = np.linspace(0, 2, sol.size) plt.quiver( sol[:-1, 0], sol[:-1, 1], sol[1:, 0] - sol[:-1, 0], sol[1:, 1] - sol[:-1, 1], scale_units="xy", angles="xy", scale=2, color=color[0], linewidths=widths, edgecolors=color[0], label="Alternating gradient descent", headwidth=hw, headlength=hl, minshaft=2, ) plt.title("Dirac GAN", fontsize=16) plt.plot(v[0], v[1], "go", markersize=10) plt.plot(0, 0, "rx", markersize=12, label="equilibruim (0, 0)") plt.legend(loc="upper right", bbox_to_anchor=(0.8, 1), fontsize=13, framealpha=0) plt.xlabel(r"$\phi$", fontsize=16) plt.ylabel(r"$\theta$", fontsize=16) plt.xticks([]) plt.yticks([]) plt.xlim((-4, 4)) plt.ylim((-3, 4.5))
notebooks/misc/DiracGAN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # The NRPy+ code required to generate the needed C code for the lowering operator: $g_{ij} \beta^i$, with the result set to C variables "`betaD0out`", "`betaD1out`", and "`betaD2out`" # # ## *<NAME>* # + # The NRPy_param_funcs module sets up global structures that manage free parameters within NRPy+ import NRPy_param_funcs as par # NRPy+: Parameter interface # The indexedexp module defines various functions for defining and managing indexed quantities like tensors and pseudotensors import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support # The grid module defines various parameters related to a numerical grid or the dimensionality of indexed expressions # For example, it declares the parameter DIM, which specifies the dimensionality of the indexed expression import grid as gri # NRPy+: Functions having to do with numerical grids from outputC import outputC # NRPy+: Basic C code output functionality # Set the dimension to 3 par.set_parval_from_str("DIM",3) # Declare rank-1 contravariant ("U" and "D") vectors betaU = ixp.declarerank1("betaU") betaD = ixp.zerorank1() # Declare h_{ij}=hDD[i][j] and h^{ij}=hUU[i][j] hDD = ixp.declarerank2("hDD","sym01") # Get the dimension we just set (should be set to 3). DIM = par.parval_from_str("DIM") for i in range(DIM): for j in range(DIM): betaD[j] += betaU[i]*hDD[i][j] outputC(betaD,["betaD0out","betaD1out","betaD2out"])
Tutorial-Indexed_Expressions_soln.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercises For Session 10 (Scipy) # ## Exercise 1: Doing Economics with Scipy # # Solve the exercises below to become acquainted with scipy. # ### Exercise 1.1: Pooled OLS # Consider again the function we use to draw data in Session 9. # + # Initialization import numpy as np import pandas as pd from scipy.optimize import minimize # Setup np.random.seed(208) ID = 20 Periods = 5 beta = np.array([1, 0.5, 1.4, 3, 0.2, 5]) # True values # Define function def create_data(ID, Periods, beta): data_mu = np.array([1, 0.7, -0.25, 0.6, 0.4, -0.1]) data_var = [ [ 1.0000, -0.2962, 0.3144, 0.5061, -0.0014, 0.0077], [-0.2962, 1.0000, 0.3082, 0.0301, -0.0101, 0.5034], [ 0.3144, 0.3082, 1.0000, 0.7012, 0.6674, 0.6345], [ 0.5061, 0.0301, 0.7012, 1.0000, 0.1950, 0.2173], [-0.0014, -0.0101, 0.6674, 0.1950, 1.0000, 0.1860], [ 0.0077, 0.5034, 0.6345, 0.2173, 0.1860, 1.0000] ] year = np.sum(np.kron(np.linspace(1,Periods,Periods),np.identity(ID)),0) idx = np.sum(np.kron(np.identity(Periods),np.linspace(1,ID,ID)),0) X = np.exp(np.array(np.random.multivariate_normal(data_mu, data_var, ID*Periods))) y = X @ beta + np.random.normal(0,1,ID*Periods) data = np.c_[year, idx, X, y] return data # Call function data = create_data(ID, Periods, beta) #print(pd.DataFrame(data)) # - # In the last session, we used the fact that the Pooled OLS estimator admits a closed form solution and we estimated the parameters of interests with matrix algebra. However, we can achieve the same result by minimizing the sum of squares residual # # \begin{align} # RSS = \sum_{i=1}^{N}(y_i - X_i\beta)^2 # \end{align} # # Create a new function, call it "SSR", that takes a (6 x 1) vector of parameters as input and produces the RSS formula above as output (Hint: consider using [np.square](https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html). Else, $x^2$ is coded as "x**2"). Pay attention to the fact that the Pooled OLS objective function must return a scalar. Use [minimize](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize) to find the vector $\beta$ that minimizes the RSS. If you do this correctly, you should get exactly the same point estimates as in Session 9. # ### Exercise 1.2: Bertand-Nash Equilibrium # Two firms, $i \in \{1,2\}$, compete on prices à la Bertand-Nash. Let $\mathbf{p} = [p_1 \ p_2]^T$ and let's assume that consumers' demand reads # # \begin{align} # q_i(\mathbf{p}) = \frac{\exp(\alpha p_{i})}{\sum_{l \in \{1,2\}}\exp(\alpha p_{l})}, \ \ \forall i. # \end{align} # # Firms have the same maginal costs, $c>0$. The first order conditions associated with their optimization problem read # # \begin{align} # p_i = c - \frac{1}{\alpha(1-q_i(\mathbf{p}))} # \end{align} # # Create a new function, call it "FOC", that takes a (2x1) vector of prices as input and produces the FOC above as output. Set $c=2$ and $\alpha = -0.5$. Then, use one of the routines in [scipy.optimize](https://docs.scipy.org/doc/scipy/reference/optimize.html) to find the vector of prices that constitues the unique Bertand-Nash equilibrium of this game. # # A few hints: # # - I recommend to use either [root](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html#scipy.optimize.root), [least_squares](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares) or [fixed_point](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fixed_point.html#scipy.optimize.fixed_point). In the first two cases, you should minimize the following implicit function # # \begin{align} # F_i := p_i - c + \frac{1}{\alpha(1-q_i(\mathbf{p}))} # \end{align} # # In the third case, you can directly supply the original FOC. Please see the documentation of the two solvers for an explanation. # # - Make sure you explicitly define $q_i(\mathbf{p})$ as a function of *both* $p_1$ and $p_2$ inside the objective function. Remember that the solver has to search for the vector of prices s.t. $ FOC(\mathbf{p}) = \mathbf{p}$. # # - Assume that both firms set the same initial prices. In other words, use the same numbers as starting values for the solver (e.g. [0,0]). Note: if you use optimize.least_squares or optimize.root, experiment with asymmetric starting values. The solution must always be symmetrical because firms have the same marginal costs. # # - No matter which solver you use, you should get always the same result. The correct solution is $[p_1 \ p_2]^T = [6 \ 6]^T$. # # ## Exercise 2: Searching for Global Minima # Consider the following non-linear function # \begin{align} # f(x) = 2 - 34\sin(x) + x^2. # \end{align} # # It clearly admits one global minimum and multiple local minima. # + # Initialization import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # Define function def my_fun(x): return 2 - 34*np.sin(x) + x**2 # Plot x = np.linspace(-20,20,100) y = my_fun(x) plt.plot(x, y, 'b-', linewidth=2) plt.show() # - # Suppose that you use [minimize](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize) to find the value of x that minimizes this function. The solution clearly depends on the starting values that you set. from scipy.optimize import minimize x = np.linspace(-20, 20, 10) for i in x: print("The point of minimum attained starting from x = ", round(i,2), "is", minimize(my_fun, i).x) # Eye-detection suggests that the global minimum is attained at $x=1.4834249$. Use one of the global optimization routines in scipi.optimize, e.g. [brute](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html#scipy.optimize.brute), to find the unique global minimum of the function.
Session_10/exercises_10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sparkify Project # This workspace contains a tiny subset (128MB) of the full dataset available (12GB). Feel free to use this workspace to build your project, or to explore a smaller subset with Spark before deploying your cluster on the cloud. Instructions for setting up your Spark cluster is included in the last lesson of the Extracurricular Spark Course content. # # You can follow the steps below to guide your data analysis and model building portion of this project. # + # import libraries from pyspark.sql import SparkSession from pyspark.sql.functions import udf from pyspark.sql.types import IntegerType from pyspark.ml.classification import RandomForestClassifier from pyspark.ml.evaluation import MulticlassClassificationEvaluator from pyspark.ml.feature import StandardScaler, VectorAssembler from pyspark.ml.tuning import CrossValidator, ParamGridBuilder from datetime import datetime # - # create a Spark session spark = SparkSession.builder \ .master("local") \ .appName("Creating Features") \ .getOrCreate() # # Load and Clean Dataset df = spark.read.json('mini_sparkify_event_data.json') df.columns df.createOrReplaceTempView('sparkify_data') # # Exploratory Data Analysis # When you're working with the full dataset, perform EDA by loading a small subset of the data and doing basic manipulations within Spark. In this workspace, you are already provided a small subset of data you can explore. # # ### Define Churn # # Once you've done some preliminary analysis, create a column `Churn` to use as the label for your model. I suggest using the `Cancellation Confirmation` events to define your churn, which happen for both paid and free users. As a bonus task, you can also look into the `Downgrade` events. # # ### Explore Data # Once you've defined churn, perform some exploratory data analysis to observe the behavior for users who stayed vs users who churned. You can start by exploring aggregates on these two groups of users, observing how much of a specific action they experienced per a certain time unit or number of songs played. df.toPandas().head() #number of lines df.count() #checking columns properties df.printSchema() # Let's count the number of times each user appears: spark.sql("""select userId, count(userId) as count from sparkify_data group by userId order by count desc""").show() # There seems to exist a user with no id, I'll investigate this later. #number of unique users spark.sql("""select count (distinct userId) from sparkify_data""").show() # Now let's check how frequent each page is in the dataset: spark.sql("""select page, count(page) as count from sparkify_data group by page order by count desc""").toPandas() #proportion of paid/free users df.groupBy('level').count().show() spark.sql("""select level, count(level) from sparkify_data group by level""").show() df.groupBy('gender').count().show() # It seems that users without Id also don't have gender assigned. Let's drop them. drop_df = df.filter(df.userId != "") drop_df.count() drop_df.groupBy('gender').count().show() # + #defining churn as every user who confirmed a cancellation churn = udf(lambda x: 1 if x == 'Cancellation Confirmation' else 0) df_2 = drop_df.withColumn('churn_flag', churn(df.page)) # - df_2.printSchema() df_2.createOrReplaceTempView('sparkify_data_churn') spark.sql("""select churn_flag, count(churn_flag) from sparkify_data_churn group by churn_flag""").show() spark.sql("""select churn_flag, gender, count(gender) from sparkify_data_churn group by churn_flag, gender""").show() spark.sql("""select churn_flag, level, count(level) from sparkify_data_churn group by churn_flag, level""").show() # # Feature Engineering # Once you've familiarized yourself with the data, build out the features you find promising to train your model on. To work with the full dataset, you can follow the following steps. # - Write a script to extract the necessary features from the smaller subset of data # - Ensure that your script is scalable, using the best practices discussed in Lesson 3 # - Try your script on the full data set, debugging your script if necessary # # If you are working in the classroom workspace, you can just extract features based on the small subset of data contained here. Be sure to transfer over this work to the larger dataset when you work on your Spark cluster. # From the observed pages it's possible to make the following features: # # - listening time per user # - days since registration # - number of sessions # - number of songs per user # - number of thumbs up # - number of thumbs down # - number of added friends # ### Calculating listening time # + listening_time = spark.sql("""select userId, sum(length) as listening_time from sparkify_data_churn where page = 'NextSong' group by userId""") listening_time.show() # - # ### Calculating number of thumbs up # + thumbs_up = spark.sql("""select userId, count(page) as thumbs_up from sparkify_data_churn where page = 'Thumbs Up' group by userId""") thumbs_up.show() # - # ### Calculating number of thumbs down # + thumbs_down = spark.sql("""select userId, count(page) as thumbs_down from sparkify_data_churn where page = 'Thumbs Down' group by userId""") thumbs_down.show() # - # ### Calculating number of friends friends = spark.sql("""select userId, count(page) as friends from sparkify_data_churn where page = 'Add Friend' group by userId""") friends.show() # ### Calculating number of sessions per user sessions = spark.sql("""select userId, count(distinct sessionId) as sessions from sparkify_data_churn group by userId""") sessions.show() # ### Calculating number of listened songs per user # + songs = spark.sql("""select userId, count(Song) as total_songs from sparkify_data_churn group by userId""") songs.show() # - # ### Calculating number of active days for each user active = spark.sql("""select userId, min(registration) as created, max(ts) as last_session from sparkify_data_churn group by userId""") # + def compute_active_days(created, last_session): """Calculates the difference between an account date of registration and its most recent login date. Parameters: ----------- created date of creation timestamp : str last_session last login date : str Returns: -------- active_days : int number of active days """ created = int(created)/1000 last_session = int(last_session)/1000 active_days = (datetime.fromtimestamp(last_session) - datetime.fromtimestamp(created)).days return active_days active_days = udf(compute_active_days, IntegerType()) active = active.withColumn("active_days", active_days(active.created, active.last_session)) # - active.show() # ### Churn flag churn = spark.sql("""select userId, max(churn_flag) as churn from sparkify_data_churn group by UserId""") churn.show() user_data = listening_time.join(thumbs_up, on='userId', how='outer')\ .join(thumbs_down, on='userId', how='outer')\ .join(songs, on='userId', how='outer')\ .join(sessions, on='userId', how='outer')\ .join(friends, on='userId', how='outer')\ .join(active, on='userId', how='outer')\ .join(churn, on='userId', how='outer') user_data.toPandas().head(20) # As we can see above, there is still some work to be done: # - I'll input NaNs with 0, as it makes sense that some users don't have friends or never used the thumbs up/down feature of the app. # - Columns `created` and `last_session` can be dropped. full_df = user_data.drop('created').drop('last_session').fillna(0) full_df.toPandas().head() full_df.printSchema() # + #transforming features to vectors feature_cols = ['listening_time', 'thumbs_up', 'thumbs_down', 'total_songs', 'sessions', 'friends', 'active_days'] assembler = VectorAssembler( inputCols = feature_cols, outputCol = 'features') model_df = assembler.transform(full_df) #renaming churn column to 'label' model_df = model_df.withColumn("label", model_df['churn'].cast(IntegerType())) # - #scaling scaler = StandardScaler(inputCol = 'features', outputCol='scaled_features') model_df = scaler.fit(model_df).transform(model_df) #checking if everything is correct model_df.printSchema() # # Modeling # Split the full dataset into train and test sets. Test out several of the machine learning methods you learned. Evaluate the accuracy of the various models, tuning parameters as necessary. Determine your winning model based on test accuracy and report results on the validation set. Since the churned users are a fairly small subset, I suggest using F1 score as the metric to optimize. # + train, test = model_df.randomSplit([0.8, 0.2], seed = 42) rf = RandomForestClassifier(labelCol='label', featuresCol='scaled_features', numTrees=10) rf_model = rf.fit(train) preds = rf_model.transform(test) evaluator = MulticlassClassificationEvaluator( labelCol="label", predictionCol="prediction", metricName="f1") score = evaluator.evaluate(preds) print('Random Forest - F1:', score) # + from pyspark.ml.classification import LogisticRegression, GBTClassifier lr = LogisticRegression(featuresCol='scaled_features', maxIter = 10, regParam = 0.3, elasticNetParam = 0.8) lr_model = lr.fit(train) preds = lr_model.transform(test) score = evaluator.evaluate(preds) print('Logistic Regression - F1:', score) # + gbt = GBTClassifier(featuresCol='scaled_features', labelCol='label', maxIter=10) gbt_model = gbt.fit(train) preds = gbt_model.transform(test) score = evaluator.evaluate(preds) print('GBT - F1:', score) # - # ## Hyperparameter Tuning # + from pyspark.ml.tuning import ParamGridBuilder, CrossValidator param_grid = ParamGridBuilder() \ .addGrid(rf_model.numTrees, [50, 100, 200]) \ .addGrid(rf_model.impurity, ['gini', 'entropy']) \ .build() cross_val = CrossValidator(estimator=rf, estimatorParamMaps=param_grid, evaluator=MulticlassClassificationEvaluator(), numFolds=3) cv_model = cross_val.fit(train) # + cv_preds = cv_model.transform(test) score = evaluator.evaluate(cv_preds) print('Hyperparameter model - F1:', score) # - cv_model.avgMetrics cv_model.bestModel.getNumTrees cv_model.bestModel.featureImportances # ## Feature importances importances = dict(zip(feature_cols, cv_model.bestModel.featureImportances)) ordered_importances = dict(sorted(importances.items(), key=lambda item: item[1])) # + import seaborn as sns import matplotlib.pyplot as plt sns.barplot(y=list(ordered_importances.keys()), x=list(ordered_importances.values())) plt.title('Top 10 features') plt.ylabel('feature') plt.xlabel('importance');
churn-prediction-with-spark/Sparkify.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Security Refresher # > Introduction to Security # # - toc: true # - master- badges: true # - comments: true # - author: <NAME> # - categories: [Security] # ### Introduction # Attacker in the News # 205 days median between Date of Evidence & Date of Discovery # What We Tell Others # When we educate people those also help attacker how to trick people # url text & hyperlink can be different # different browser can have different parsing way # Trusted vs Trustworthy # We trust browser # browser trust Certificate authority # CA can also be compromise # Security Features # One of the common misconceptions developers have is that engaging security features # means you have a secure system. # Encryption # Authentication # Authorization # Network & Controls # Why you might be a target # Quality of the software # Error Handling # Logging Strategy # Principle of Least Privilege # Avoid Fraud # Avoid Abuse # Avoid Privilege Escalation Attack # Attacking Infrastructure # Convincing Developers # Developer Support # Requirements # Budget # Training # Executive Backing # Beyond Perimeter Defense # Behind the Firewall # Do you rely on Physical security # Do you understand the risk in your system # Do you allow BYOD # Do you allow VPN # Do you allow employees to install software # Are their browser up to date # Are they trained to detect phishing/social engineering attack # Do you focus on code quality # ### Social Engineering # # Word Stew # Secrecy # Privacy # Confidentiality # Integrity # Authenticity # # Economics of Security # Are Security Products,Tools,Efforts Worth it? # The value of the alarm system # How do you test it # Attacker doesn't participate in the test # Motivation of the attacker(profits) # Motivation of the victim(costs) # # Motivation # System Fails when # People protect the wrong things # Protecting the right things in wrong way # # Security Protocols
_notebooks/2020-06-06-Security course from Safari.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Students Do: Understanding customers # # ## Instructions # # You are given a dataset that contains historical data from purchases of an online store made by 200 customers. In this activity you will put in action your data preprocessing superpowers, also you'll add some new skills needed to start finding customers clusters. # Initial imports import pandas as pd from pathlib import Path file_path = Path("../Resources/shopping_data.csv")
01-Lesson-Plans/20-Unsupervised-Machine-Learning/1/Activities/03-Stu_Preparing_Data/Unsolved/preparing_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from functools import reduce from datetime import datetime from bs4 import BeautifulSoup import urllib.request import pandas as pd import numpy as np # - # # Manual exploration of html tags # + base_url = 'https://www.infocasas.com.uy' # source_url = 'https://www.infocasas.com.uy/venta/casas-apartamento/montevideo/' source_url = 'https://www.infocasas.com.uy/venta/inmuebles/montevideo/' url_page = source_url + 'pagina{}'.format(953) print(url_page) page = urllib.request.urlopen(url_page) soup = BeautifulSoup(page, 'html.parser') table = soup.find_all('div', attrs={'class': 'propiedades-slider'}) # - a1 = 'https://www.infocasas.com.uy/venta/inmuebles/montevideo/pagina953' a2 = 'https://www.infocasas.com.uy/venta/inmuebles/montevideo/pagina954' a2>a1 soup.find('div', attrs={'id': 'paginado'}).find('a', attrs={'class': 'next', }).attrs['href'] # + neighborhood = [ [k.text for k in p.find_all('p')] for t in table \ for p in t.find_all('div')\ if 'singleLineDots' in p['class'] ] price = [p.text.split()[-1] for t in table \ for p in t.find_all('div') if 'precio' in p['class']] desc = [[k.text for k in p.find_all('p')] for t in table \ for p in t.find_all('div') if 'inDescription' in p['class']] desc = [k[0] for k in desc] details = [[d.find_all('span')[0].text for d in p.find_all('div')]\ for t in table for p in t.find_all('div')\ if 'contentIcons' in p['class']] details = pd.DataFrame(details, columns=['rooms', 'bathrooms', 'area_m2']) data_id = [k.get('data-id', '') for k in table] data_idproject = [k.get('data-idproyecto', '') for k in table] link = [base_url + k.find('a')['href'] for k in table] title = [k.find('a')['title'] for k in table] proyecto_label = [k.find(class_='proyectoLabel').get_text() if k.find(class_='proyectoLabel') else None for k in table] df = pd.DataFrame(neighborhood, columns=['neighborhood', 'type']) df['price'] = price df['desc'] = desc df['uris'] = link df['id'] = data_id df['idproject'] = data_idproject df['title'] = title df['project_label'] = proyecto_label df = pd.concat([details, df], axis=1) # - df
notebooks/extras/PRE_scrapping_infocasas.ipynb